From 6ec9c9eb77a04b9ac26ab20093305b355f322517 Mon Sep 17 00:00:00 2001 From: Sabbir Date: Wed, 14 Aug 2024 20:18:02 +0600 Subject: [PATCH] Add db-client-go for Cassandra Signed-off-by: Sabbir --- cassandra/client.go | 9 + cassandra/kubedb_client_builder.go | 107 + go.mod | 4 +- go.sum | 15 +- vendor/github.com/gocql/gocql/.gitignore | 5 + vendor/github.com/gocql/gocql/AUTHORS | 143 + vendor/github.com/gocql/gocql/CHANGELOG.md | 163 + vendor/github.com/gocql/gocql/CONTRIBUTING.md | 78 + vendor/github.com/gocql/gocql/LICENSE | 27 + vendor/github.com/gocql/gocql/README.md | 166 + .../gocql/gocql/address_translators.go | 26 + vendor/github.com/gocql/gocql/cluster.go | 315 + vendor/github.com/gocql/gocql/compressor.go | 28 + vendor/github.com/gocql/gocql/conn.go | 1745 +++++ .../github.com/gocql/gocql/connectionpool.go | 622 ++ vendor/github.com/gocql/gocql/control.go | 522 ++ vendor/github.com/gocql/gocql/cqltypes.go | 11 + vendor/github.com/gocql/gocql/debug_off.go | 6 + vendor/github.com/gocql/gocql/debug_on.go | 6 + vendor/github.com/gocql/gocql/dial.go | 91 + vendor/github.com/gocql/gocql/doc.go | 360 + vendor/github.com/gocql/gocql/errors.go | 198 + vendor/github.com/gocql/gocql/events.go | 246 + vendor/github.com/gocql/gocql/filters.go | 57 + vendor/github.com/gocql/gocql/frame.go | 2052 ++++++ vendor/github.com/gocql/gocql/fuzz.go | 34 + vendor/github.com/gocql/gocql/helpers.go | 448 ++ vendor/github.com/gocql/gocql/host_source.go | 899 +++ .../github.com/gocql/gocql/host_source_gen.go | 46 + .../gocql/gocql/install_test_deps.sh | 12 + vendor/github.com/gocql/gocql/integration.sh | 96 + .../gocql/gocql/internal/lru/lru.go | 127 + .../gocql/gocql/internal/murmur/murmur.go | 135 + .../gocql/internal/murmur/murmur_appengine.go | 11 + .../gocql/internal/murmur/murmur_unsafe.go | 16 + .../gocql/gocql/internal/streams/streams.go | 140 + vendor/github.com/gocql/gocql/logger.go | 40 + vendor/github.com/gocql/gocql/marshal.go | 2727 +++++++ vendor/github.com/gocql/gocql/metadata.go | 1465 ++++ vendor/github.com/gocql/gocql/policies.go | 1090 +++ .../github.com/gocql/gocql/prepared_cache.go | 77 + .../github.com/gocql/gocql/query_executor.go | 180 + vendor/github.com/gocql/gocql/ring.go | 143 + vendor/github.com/gocql/gocql/session.go | 2282 ++++++ vendor/github.com/gocql/gocql/token.go | 222 + vendor/github.com/gocql/gocql/topology.go | 294 + vendor/github.com/gocql/gocql/uuid.go | 324 + vendor/github.com/gocql/gocql/version.go | 28 + .../hailocab/go-hostpool/.gitignore | 22 + .../hailocab/go-hostpool/.travis.yml | 0 .../github.com/hailocab/go-hostpool/LICENSE | 21 + .../github.com/hailocab/go-hostpool/README.md | 17 + .../hailocab/go-hostpool/epsilon_greedy.go | 220 + .../go-hostpool/epsilon_value_calculators.go | 40 + .../hailocab/go-hostpool/host_entry.go | 62 + .../hailocab/go-hostpool/hostpool.go | 243 + .../v1alpha1/cassandra_version_helpers.go | 63 + .../v1alpha1/cassandra_version_types.go | 97 + .../v1alpha1/clickhouse_version_types.go | 2 +- .../catalog/v1alpha1/druid_version_types.go | 2 +- .../v1alpha1/elasticsearch_version_types.go | 2 +- .../catalog/v1alpha1/etcd_version_types.go | 2 +- .../v1alpha1/ferretdb_version_types.go | 2 +- .../v1alpha1/kafka_connector_version_types.go | 2 +- .../catalog/v1alpha1/kafka_version_types.go | 2 +- .../catalog/v1alpha1/mariadb_version_types.go | 2 +- .../v1alpha1/memcached_version_types.go | 2 +- .../catalog/v1alpha1/mongodb_version_types.go | 2 +- .../catalog/v1alpha1/mssql_version_types.go | 2 +- .../catalog/v1alpha1/mysql_version_types.go | 2 
+- .../catalog/v1alpha1/openapi_generated.go | 212 + .../v1alpha1/perconaxtradb_version_types.go | 2 +- .../v1alpha1/pgbouncer_version_types.go | 2 +- .../catalog/v1alpha1/pgpool_version_types.go | 2 +- .../v1alpha1/postgres_version_types.go | 2 +- .../v1alpha1/proxysql_version_types.go | 2 +- .../catalog/v1alpha1/rabbitmqversion_types.go | 2 +- .../catalog/v1alpha1/redis_version_types.go | 2 +- .../apis/catalog/v1alpha1/register.go | 2 + .../v1alpha1/schemaregistry_version_types.go | 2 +- .../v1alpha1/singlestore_version_types.go | 2 +- .../catalog/v1alpha1/solr_version_types.go | 2 +- .../v1alpha1/zookeeper_version_types.go | 2 +- .../catalog/v1alpha1/zz_generated.deepcopy.go | 135 + .../v1alpha1/elasticsearchdashboard_types.go | 2 +- .../kafka/v1alpha1/connect_cluster_types.go | 2 +- .../apis/kafka/v1alpha1/connector_types.go | 2 +- .../apis/kafka/v1alpha1/restproxy_types.go | 2 +- .../kafka/v1alpha1/schemaregistry_types.go | 2 +- .../apimachinery/apis/kubedb/constants.go | 53 +- .../apis/kubedb/v1/kafka_helpers.go | 8 + .../apis/kubedb/v1/kafka_types.go | 6 +- .../apis/kubedb/v1/kafka_webhooks.go | 11 + .../apis/kubedb/v1/mongodb_helpers.go | 6 +- .../apis/kubedb/v1/openapi_generated.go | 7 + .../apis/kubedb/v1/pgbouncer_types.go | 2 +- .../apis/kubedb/v1alpha2/cassandra_helpers.go | 322 + .../apis/kubedb/v1alpha2/cassandra_types.go | 147 + .../apis/kubedb/v1alpha2/clickhouse_types.go | 2 +- .../apis/kubedb/v1alpha2/druid_types.go | 2 +- .../apis/kubedb/v1alpha2/ferretdb_types.go | 2 +- .../apis/kubedb/v1alpha2/kafka_helpers.go | 8 + .../apis/kubedb/v1alpha2/kafka_types.go | 6 +- .../apis/kubedb/v1alpha2/kafka_webhook.go | 11 + .../apis/kubedb/v1alpha2/openapi_generated.go | 365 + .../apis/kubedb/v1alpha2/pgbouncer_types.go | 2 +- .../apis/kubedb/v1alpha2/rabbitmq_helpers.go | 9 +- .../apis/kubedb/v1alpha2/rabbitmq_types.go | 18 +- .../apis/kubedb/v1alpha2/register.go | 2 + .../apis/kubedb/v1alpha2/singlestore_types.go | 2 +- .../apis/kubedb/v1alpha2/solr_helpers.go | 10 +- .../apis/kubedb/v1alpha2/solr_types.go | 2 +- .../apis/kubedb/v1alpha2/types.go | 9 +- .../apis/kubedb/v1alpha2/zookeeper_types.go | 2 +- .../v1alpha2/zz_generated.conversion.go | 2 + .../kubedb/v1alpha2/zz_generated.deepcopy.go | 218 + ...ling.kubedb.com_clickhouseautoscalers.yaml | 2 +- ...toscaling.kubedb.com_druidautoscalers.yaml | 2 +- ...g.kubedb.com_elasticsearchautoscalers.yaml | 2 +- ...utoscaling.kubedb.com_etcdautoscalers.yaml | 2 +- ...caling.kubedb.com_ferretdbautoscalers.yaml | 2 +- ...toscaling.kubedb.com_kafkaautoscalers.yaml | 2 +- ...scaling.kubedb.com_mariadbautoscalers.yaml | 2 +- ...aling.kubedb.com_memcachedautoscalers.yaml | 2 +- ...scaling.kubedb.com_mongodbautoscalers.yaml | 2 +- ...ing.kubedb.com_mssqlserverautoscalers.yaml | 2 +- ...toscaling.kubedb.com_mysqlautoscalers.yaml | 2 +- ...g.kubedb.com_perconaxtradbautoscalers.yaml | 2 +- ...aling.kubedb.com_pgbouncerautoscalers.yaml | 2 +- ...oscaling.kubedb.com_pgpoolautoscalers.yaml | 2 +- ...caling.kubedb.com_postgresautoscalers.yaml | 2 +- ...caling.kubedb.com_proxysqlautoscalers.yaml | 18 +- ...caling.kubedb.com_rabbitmqautoscalers.yaml | 2 +- ...toscaling.kubedb.com_redisautoscalers.yaml | 2 +- ...g.kubedb.com_redissentinelautoscalers.yaml | 2 +- ...ing.kubedb.com_singlestoreautoscalers.yaml | 2 +- ...utoscaling.kubedb.com_solrautoscalers.yaml | 2 +- ...aling.kubedb.com_zookeeperautoscalers.yaml | 2 +- .../catalog.kubedb.com_cassandraversions.yaml | 95 + ...catalog.kubedb.com_clickhouseversions.yaml | 2 +- 
.../catalog.kubedb.com_druidversions.yaml | 2 +- ...alog.kubedb.com_elasticsearchversions.yaml | 2 +- .../crds/catalog.kubedb.com_etcdversions.yaml | 2 +- .../catalog.kubedb.com_ferretdbversions.yaml | 2 +- ...log.kubedb.com_kafkaconnectorversions.yaml | 2 +- .../catalog.kubedb.com_kafkaversions.yaml | 2 +- .../catalog.kubedb.com_mariadbversions.yaml | 2 +- .../catalog.kubedb.com_memcachedversions.yaml | 2 +- .../catalog.kubedb.com_mongodbversions.yaml | 2 +- ...atalog.kubedb.com_mssqlserverversions.yaml | 2 +- .../catalog.kubedb.com_mysqlversions.yaml | 2 +- ...alog.kubedb.com_perconaxtradbversions.yaml | 2 +- .../catalog.kubedb.com_pgbouncerversions.yaml | 2 +- .../catalog.kubedb.com_pgpoolversions.yaml | 2 +- .../catalog.kubedb.com_postgresversions.yaml | 2 +- .../catalog.kubedb.com_proxysqlversions.yaml | 2 +- .../catalog.kubedb.com_rabbitmqversions.yaml | 2 +- .../catalog.kubedb.com_redisversions.yaml | 2 +- ...log.kubedb.com_schemaregistryversions.yaml | 2 +- ...atalog.kubedb.com_singlestoreversions.yaml | 2 +- .../crds/catalog.kubedb.com_solrversions.yaml | 2 +- .../catalog.kubedb.com_zookeeperversions.yaml | 2 +- ...ch.kubedb.com_elasticsearchdashboards.yaml | 4 + .../kafka.kubedb.com_connectclusters.yaml | 4 + .../crds/kafka.kubedb.com_connectors.yaml | 4 + .../crds/kafka.kubedb.com_restproxies.yaml | 4 + .../kafka.kubedb.com_schemaregistries.yaml | 4 + .../crds/kubedb.com_cassandras.yaml | 6546 +++++++++++++++++ .../crds/kubedb.com_clickhouses.yaml | 6 + .../apimachinery/crds/kubedb.com_druids.yaml | 6 + .../crds/kubedb.com_elasticsearches.yaml | 1 + .../apimachinery/crds/kubedb.com_etcds.yaml | 1 + .../crds/kubedb.com_ferretdbs.yaml | 6 + .../apimachinery/crds/kubedb.com_kafkas.yaml | 10 + .../crds/kubedb.com_mariadbs.yaml | 1 + .../crds/kubedb.com_memcacheds.yaml | 1 + .../crds/kubedb.com_mongodbs.yaml | 1 + .../crds/kubedb.com_mssqlservers.yaml | 1 + .../apimachinery/crds/kubedb.com_mysqls.yaml | 1 + .../crds/kubedb.com_perconaxtradbs.yaml | 1 + .../crds/kubedb.com_pgbouncers.yaml | 3 +- .../apimachinery/crds/kubedb.com_pgpools.yaml | 1 + .../crds/kubedb.com_postgreses.yaml | 1 + .../crds/kubedb.com_proxysqls.yaml | 1 + .../crds/kubedb.com_rabbitmqs.yaml | 17 + .../apimachinery/crds/kubedb.com_redises.yaml | 1 + .../crds/kubedb.com_redissentinels.yaml | 1 + .../crds/kubedb.com_singlestores.yaml | 6 + .../apimachinery/crds/kubedb.com_solrs.yaml | 6 + .../crds/kubedb.com_zookeepers.yaml | 6 + .../ops.kubedb.com_clickhouseopsrequests.yaml | 131 + .../crds/ops.kubedb.com_druidopsrequests.yaml | 2 +- ...s.kubedb.com_elasticsearchopsrequests.yaml | 2 +- .../crds/ops.kubedb.com_etcdopsrequests.yaml | 2 +- .../ops.kubedb.com_ferretdbopsrequests.yaml | 2 +- .../crds/ops.kubedb.com_kafkaopsrequests.yaml | 2 +- .../ops.kubedb.com_mariadbopsrequests.yaml | 2 +- .../ops.kubedb.com_memcachedopsrequests.yaml | 2 +- .../ops.kubedb.com_mongodbopsrequests.yaml | 2 +- ...ops.kubedb.com_mssqlserveropsrequests.yaml | 131 + .../crds/ops.kubedb.com_mysqlopsrequests.yaml | 2 +- ...s.kubedb.com_perconaxtradbopsrequests.yaml | 2 +- .../ops.kubedb.com_pgbounceropsrequests.yaml | 2 +- .../ops.kubedb.com_pgpoolopsrequests.yaml | 2 +- .../ops.kubedb.com_postgresopsrequests.yaml | 2 +- .../ops.kubedb.com_proxysqlopsrequests.yaml | 2 +- .../ops.kubedb.com_rabbitmqopsrequests.yaml | 2 +- .../crds/ops.kubedb.com_redisopsrequests.yaml | 2 +- ...s.kubedb.com_redissentinelopsrequests.yaml | 2 +- ...ops.kubedb.com_singlestoreopsrequests.yaml | 2 +- .../crds/ops.kubedb.com_solropsrequests.yaml | 2 +- 
.../ops.kubedb.com_zookeeperopsrequests.yaml | 131 + .../crds/postgres.kubedb.com_publishers.yaml | 3 +- .../crds/postgres.kubedb.com_subscribers.yaml | 3 +- .../schema.kubedb.com_mariadbdatabases.yaml | 3 +- .../schema.kubedb.com_mongodbdatabases.yaml | 3 +- .../schema.kubedb.com_mysqldatabases.yaml | 3 +- .../schema.kubedb.com_postgresdatabases.yaml | 3 +- vendor/modules.txt | 11 +- 219 files changed, 27422 insertions(+), 146 deletions(-) create mode 100644 cassandra/client.go create mode 100644 cassandra/kubedb_client_builder.go create mode 100644 vendor/github.com/gocql/gocql/.gitignore create mode 100644 vendor/github.com/gocql/gocql/AUTHORS create mode 100644 vendor/github.com/gocql/gocql/CHANGELOG.md create mode 100644 vendor/github.com/gocql/gocql/CONTRIBUTING.md create mode 100644 vendor/github.com/gocql/gocql/LICENSE create mode 100644 vendor/github.com/gocql/gocql/README.md create mode 100644 vendor/github.com/gocql/gocql/address_translators.go create mode 100644 vendor/github.com/gocql/gocql/cluster.go create mode 100644 vendor/github.com/gocql/gocql/compressor.go create mode 100644 vendor/github.com/gocql/gocql/conn.go create mode 100644 vendor/github.com/gocql/gocql/connectionpool.go create mode 100644 vendor/github.com/gocql/gocql/control.go create mode 100644 vendor/github.com/gocql/gocql/cqltypes.go create mode 100644 vendor/github.com/gocql/gocql/debug_off.go create mode 100644 vendor/github.com/gocql/gocql/debug_on.go create mode 100644 vendor/github.com/gocql/gocql/dial.go create mode 100644 vendor/github.com/gocql/gocql/doc.go create mode 100644 vendor/github.com/gocql/gocql/errors.go create mode 100644 vendor/github.com/gocql/gocql/events.go create mode 100644 vendor/github.com/gocql/gocql/filters.go create mode 100644 vendor/github.com/gocql/gocql/frame.go create mode 100644 vendor/github.com/gocql/gocql/fuzz.go create mode 100644 vendor/github.com/gocql/gocql/helpers.go create mode 100644 vendor/github.com/gocql/gocql/host_source.go create mode 100644 vendor/github.com/gocql/gocql/host_source_gen.go create mode 100644 vendor/github.com/gocql/gocql/install_test_deps.sh create mode 100644 vendor/github.com/gocql/gocql/integration.sh create mode 100644 vendor/github.com/gocql/gocql/internal/lru/lru.go create mode 100644 vendor/github.com/gocql/gocql/internal/murmur/murmur.go create mode 100644 vendor/github.com/gocql/gocql/internal/murmur/murmur_appengine.go create mode 100644 vendor/github.com/gocql/gocql/internal/murmur/murmur_unsafe.go create mode 100644 vendor/github.com/gocql/gocql/internal/streams/streams.go create mode 100644 vendor/github.com/gocql/gocql/logger.go create mode 100644 vendor/github.com/gocql/gocql/marshal.go create mode 100644 vendor/github.com/gocql/gocql/metadata.go create mode 100644 vendor/github.com/gocql/gocql/policies.go create mode 100644 vendor/github.com/gocql/gocql/prepared_cache.go create mode 100644 vendor/github.com/gocql/gocql/query_executor.go create mode 100644 vendor/github.com/gocql/gocql/ring.go create mode 100644 vendor/github.com/gocql/gocql/session.go create mode 100644 vendor/github.com/gocql/gocql/token.go create mode 100644 vendor/github.com/gocql/gocql/topology.go create mode 100644 vendor/github.com/gocql/gocql/uuid.go create mode 100644 vendor/github.com/gocql/gocql/version.go create mode 100644 vendor/github.com/hailocab/go-hostpool/.gitignore create mode 100644 vendor/github.com/hailocab/go-hostpool/.travis.yml create mode 100644 vendor/github.com/hailocab/go-hostpool/LICENSE create mode 100644 
vendor/github.com/hailocab/go-hostpool/README.md
create mode 100644 vendor/github.com/hailocab/go-hostpool/epsilon_greedy.go
create mode 100644 vendor/github.com/hailocab/go-hostpool/epsilon_value_calculators.go
create mode 100644 vendor/github.com/hailocab/go-hostpool/host_entry.go
create mode 100644 vendor/github.com/hailocab/go-hostpool/hostpool.go
create mode 100644 vendor/kubedb.dev/apimachinery/apis/catalog/v1alpha1/cassandra_version_helpers.go
create mode 100644 vendor/kubedb.dev/apimachinery/apis/catalog/v1alpha1/cassandra_version_types.go
create mode 100644 vendor/kubedb.dev/apimachinery/apis/kubedb/v1alpha2/cassandra_helpers.go
create mode 100644 vendor/kubedb.dev/apimachinery/apis/kubedb/v1alpha2/cassandra_types.go
create mode 100644 vendor/kubedb.dev/apimachinery/crds/catalog.kubedb.com_cassandraversions.yaml
create mode 100644 vendor/kubedb.dev/apimachinery/crds/kubedb.com_cassandras.yaml
create mode 100644 vendor/kubedb.dev/apimachinery/crds/ops.kubedb.com_clickhouseopsrequests.yaml
create mode 100644 vendor/kubedb.dev/apimachinery/crds/ops.kubedb.com_mssqlserveropsrequests.yaml
create mode 100644 vendor/kubedb.dev/apimachinery/crds/ops.kubedb.com_zookeeperopsrequests.yaml

diff --git a/cassandra/client.go b/cassandra/client.go
new file mode 100644
index 000000000..cc55d5ad4
--- /dev/null
+++ b/cassandra/client.go
@@ -0,0 +1,9 @@
+package cassandra
+
+import (
+	"github.com/gocql/gocql"
+)
+
+type Client struct {
+	*gocql.Session
+}
diff --git a/cassandra/kubedb_client_builder.go b/cassandra/kubedb_client_builder.go
new file mode 100644
index 000000000..8614c2db3
--- /dev/null
+++ b/cassandra/kubedb_client_builder.go
@@ -0,0 +1,121 @@
+package cassandra
+
+import (
+	"context"
+	"fmt"
+
+	"github.com/gocql/gocql"
+	api "kubedb.dev/apimachinery/apis/kubedb/v1alpha2"
+	"sigs.k8s.io/controller-runtime/pkg/client"
+)
+
+type KubeDBClientBuilder struct {
+	kc      client.Client
+	db      *api.Cassandra
+	url     string
+	podName string
+	port    *int
+	ctx     context.Context
+}
+
+func NewKubeDBClientBuilder(kc client.Client, db *api.Cassandra) *KubeDBClientBuilder {
+	return &KubeDBClientBuilder{
+		kc: kc,
+		db: db,
+	}
+}
+
+// WithURL sets the host the client connects to.
+func (o *KubeDBClientBuilder) WithURL(url string) *KubeDBClientBuilder {
+	o.url = url
+	return o
+}
+
+// WithPort sets the CQL native transport port (default: 9042).
+func (o *KubeDBClientBuilder) WithPort(port *int) *KubeDBClientBuilder {
+	o.port = port
+	return o
+}
+
+// WithContext sets the context used by the builder.
+func (o *KubeDBClientBuilder) WithContext(ctx context.Context) *KubeDBClientBuilder {
+	o.ctx = ctx
+	return o
+}
+
+// GetCassandraClient opens a CQL session against the configured host and port
+// and wraps it in a Client.
+func (o *KubeDBClientBuilder) GetCassandraClient() (*Client, error) {
+	host := o.url
+	if host == "" {
+		host = "127.0.0.1"
+	}
+	port := 9042
+	if o.port != nil {
+		port = *o.port
+	}
+	// TODO: hardcoded development credentials; these should be read from the
+	// database's auth secret before this is used outside local testing.
+	username := "cluster1-superuser"
+	password := "X-XLZ0jxouJbAbg0KAsPW0a1E0GofwK12cWOA9WyHYCjdede2UBImQ"
+
+	cluster := gocql.NewCluster(host)
+	cluster.Port = port
+	cluster.Keyspace = "system"
+	// cluster.Consistency = gocql.Any // ANY ConsistencyLevel is only supported for writes
+	cluster.Consistency = gocql.Quorum
+	cluster.Authenticator = gocql.PasswordAuthenticator{
+		Username: username,
+		Password: password,
+	}
+	session, err := cluster.CreateSession()
+	if err != nil {
+		return nil, fmt.Errorf("unable to connect to Cassandra cluster: %v", err)
+	}
+
+	return &Client{session}, nil
+}
+
+// CreateKeyspace creates a keyspace
+func (c *Client) CreateKeyspace() error {
+	return c.Query(`CREATE KEYSPACE IF NOT EXISTS mykeyspace WITH replication = {'class': 'SimpleStrategy', 'replication_factor': '2'}`).Exec()
+}
+
+// CreateTable creates a table
+func (c *Client) CreateTable() error {
+	return c.Query(`CREATE TABLE IF NOT EXISTS mykeyspace.users (
+		id UUID PRIMARY KEY,
+		name TEXT,
+		age INT,
+		email TEXT
+	)`).Exec()
+}
+
+// InsertUser inserts a user into the table
+func (c *Client) InsertUser(id gocql.UUID, name string, age int, email string) error {
+	return c.Query(`INSERT INTO mykeyspace.users (id, name, age, email) VALUES (?, ?, ?, ?)`,
+		id, name, age, email).Exec()
+}
+
+// DeleteUser deletes a user by ID
+func (c *Client) DeleteUser(id gocql.UUID) error {
+	return c.Query(`DELETE FROM mykeyspace.users WHERE id = ?`, id).Exec()
+}
+
+// QueryUser queries a user by ID
+func (c *Client) QueryUser(id gocql.UUID) (string, int, string, error) {
+	var name string
+	var age int
+	var email string
+
+	iter := c.Query(`SELECT name, age, email FROM mykeyspace.users WHERE id = ?`, id).Iter()
+	found := iter.Scan(&name, &age, &email)
+	// Close the iterator on every path so its buffers are recycled.
+	if err := iter.Close(); err != nil {
+		return "", 0, "", fmt.Errorf("unable to query data: %v", err)
+	}
+	if !found {
+		return "", 0, "", fmt.Errorf("no data found")
+	}
+	return name, age, email, nil
+}
diff --git a/go.mod b/go.mod
index 0207a07bd..8f28c17cd 100644
--- a/go.mod
+++ b/go.mod
@@ -16,6 +16,7 @@ require (
 	github.com/go-logr/logr v1.4.2
 	github.com/go-resty/resty/v2 v2.11.0
 	github.com/go-sql-driver/mysql v1.8.1
+	github.com/gocql/gocql v1.6.0
 	github.com/grafadruid/go-druid v0.0.6
 	github.com/hashicorp/go-retryablehttp v0.7.7
 	github.com/lib/pq v1.10.7
@@ -32,7 +33,7 @@ require (
 	k8s.io/klog/v2 v2.130.1
 	kmodules.xyz/client-go v0.30.9
 	kmodules.xyz/custom-resources v0.30.0
-	kubedb.dev/apimachinery v0.47.0-rc.2
+	kubedb.dev/apimachinery v0.47.0-rc.2.0.20240814122107-b1472e3500d3
 	sigs.k8s.io/controller-runtime v0.18.4
 	xorm.io/xorm v1.3.6
 )
@@ -73,6 +74,7 @@ require (
 	github.com/google/go-querystring v1.1.0 // indirect
 	github.com/google/gofuzz v1.2.0 // indirect
 	github.com/google/uuid v1.6.0 // indirect
+	github.com/hailocab/go-hostpool v0.0.0-20160125115350-e80d13ce29ed // indirect
 	github.com/hashicorp/errwrap v1.1.0 // indirect
 	github.com/hashicorp/go-cleanhttp v0.5.2 // indirect
 	github.com/hashicorp/go-multierror v1.1.1 // indirect
diff --git a/go.sum b/go.sum
index 6a4597a95..b197238f0 100644
--- a/go.sum
+++ b/go.sum
@@ -49,8 +49,12 @@ github.com/aws/aws-sdk-go-v2/service/sts v1.19.0/go.mod h1:BgQOMsg8av8jset59jely
 github.com/aws/smithy-go v1.13.5/go.mod h1:Tg+OJXh4MB2R/uN61Ko2f6hTZwB/ZYGOtib8J3gBHzA=
 github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
 github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw=
+github.com/bitly/go-hostpool v0.0.0-20171023180738-a3a6125de932 h1:mXoPYz/Ul5HYEDvkta6I8/rnYM5gSdSV2tJ6XbZuEtY=
+github.com/bitly/go-hostpool v0.0.0-20171023180738-a3a6125de932/go.mod h1:NOuUCSz6Q9T7+igc/hlvDOUdtWKryOrtFyIVABv/p7k=
 github.com/blang/semver/v4 v4.0.0 h1:1PFHFE6yCCTv8C1TeyNNarDzntLi7wMI5i/pzqYIsAM=
 github.com/blang/semver/v4 v4.0.0/go.mod h1:IbckMUScFkM3pff0VJDNKRiT6TG/YpiHIM2yvyW5YoQ=
+github.com/bmizerany/assert v0.0.0-20160611221934-b7ed37b82869 h1:DDGfHa7BWjL4YnC6+E63dPcxHo2sUxDIu8g3QgEJdRY=
+github.com/bmizerany/assert v0.0.0-20160611221934-b7ed37b82869/go.mod h1:Ekp36dRnpXw/yCqJaO+ZrUyxD+3VXMFFr56k5XYrpB4=
 github.com/bsm/ginkgo/v2 v2.12.0 h1:Ny8MWAHyOepLGlLKYmXG4IEkioBysk6GpaRTLC8zwWs=
 github.com/bsm/ginkgo/v2 v2.12.0/go.mod h1:SwYbGRRDovPVboqFv0tPTcG1sN61LM1Z4ARdbAV9g4c=
 github.com/bsm/gomega v1.27.10 h1:yeMWxP2pV2fG3FgAODIY8EiRE3dy0aeFYt4l7wh6yKA=
 github.com/bsm/gomega v1.27.10/go.mod h1:JifAceMQ4crZIWYUKrlGcmbN3bqHogVTADMD2ATsbwk=
@@ -145,6 +149,8 @@ github.com/go-task/slim-sprig/v3 v3.0.0/go.mod h1:W848ghGpv3Qj3dhTPRyJypKRiqCdHZ
github.com/goccy/go-json v0.8.1/go.mod h1:6MelG93GURQebXPDq3khkgXZkazVtN9CRI+MGFi0w8I= github.com/goccy/go-json v0.9.11 h1:/pAaQDLHEoCq/5FFmSKBswWmK6H0e8g4159Kc/X/nqk= github.com/goccy/go-json v0.9.11/go.mod h1:6MelG93GURQebXPDq3khkgXZkazVtN9CRI+MGFi0w8I= +github.com/gocql/gocql v1.6.0 h1:IdFdOTbnpbd0pDhl4REKQDM+Q0SzKXQ1Yh+YZZ8T/qU= +github.com/gocql/gocql v1.6.0/go.mod h1:3gM2c4D3AnkISwBxGnMMsS8Oy4y2lhbPRsH4xnJrHG8= github.com/gofrs/uuid v4.0.0+incompatible/go.mod h1:b2aQJv3Z4Fp6yNu3cdSllBxTCLRxnplIgP/c0N/04lM= github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= @@ -171,6 +177,7 @@ github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps= github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= github.com/golang/snappy v0.0.1/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= +github.com/golang/snappy v0.0.3/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= github.com/golang/snappy v0.0.4 h1:yAGX7huGHXlcLOEtBnF4w7FQwA26wojNCwOYAEhLjQM= github.com/golang/snappy v0.0.4/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= github.com/google/gnostic-models v0.6.8 h1:yo/ABAfM5IMRsS1VnXjTBvUb61tFIHozhlYvRgGre9I= @@ -203,6 +210,8 @@ github.com/gorilla/securecookie v1.1.1/go.mod h1:ra0sb63/xPlUeL+yeDciTfxMRAA+MP+ github.com/gorilla/sessions v1.2.1/go.mod h1:dk2InVEVJ0sfLlnXv9EAgkf6ecYs/i80K/zI+bUmuGM= github.com/grafadruid/go-druid v0.0.6 h1:Nt9jQrhrtHi1BJICN9aDJgYDmBmc10pJYpQiuwAsxa4= github.com/grafadruid/go-druid v0.0.6/go.mod h1:KY3a6MrVMKkXgMTwBS9Nrhm1E8OWyR4gd0WzUi8d/zM= +github.com/hailocab/go-hostpool v0.0.0-20160125115350-e80d13ce29ed h1:5upAirOpQc1Q53c0bnx2ufif5kANL7bfZWcc6VJWJd8= +github.com/hailocab/go-hostpool v0.0.0-20160125115350-e80d13ce29ed/go.mod h1:tMWxXQ9wFIaZeTI9F+hmhFiGpFmhOHzyShyFUhRm0H4= github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= github.com/hashicorp/errwrap v1.1.0 h1:OxrOeh75EUXMY8TBjag2fzXGZ40LB6IKw45YeGUDY2I= github.com/hashicorp/errwrap v1.1.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= @@ -793,9 +802,9 @@ kmodules.xyz/monitoring-agent-api v0.29.0 h1:gpFl6OZrlMLb/ySMHdREI9EwGtnJ91oZBn9 kmodules.xyz/monitoring-agent-api v0.29.0/go.mod h1:iNbvaMTgVFOI5q2LJtGK91j4Dmjv4ZRiRdasGmWLKQI= kmodules.xyz/offshoot-api v0.30.0 h1:dq9F93pu4Q8rL9oTcCk+vGGy8vpS7RNt0GSwx7Bvhec= kmodules.xyz/offshoot-api v0.30.0/go.mod h1:o9VoA3ImZMDBp3lpLb8+kc2d/KBxioRwCpaKDfLIyDw= -kubedb.dev/apimachinery v0.47.0-rc.2 h1:pdtbBiLM0VQw8HoJ3Z2tp4PZX5y66CIYBRCIn7inADI= -kubedb.dev/apimachinery v0.47.0-rc.2/go.mod h1:XkzWrijuH4skCU2ru+Ye9O0KSWQ7Nn2mRjP3+F3StLk= -kubeops.dev/petset v0.0.6 h1:0IbvxD9fadZfH+3iMZWzN6ZHsO0vX458JlioamwyPKQ= +kubedb.dev/apimachinery v0.47.0-rc.2.0.20240814122107-b1472e3500d3 h1:x+ep22DMGmkSg2UdraSnt9pVVUjHqW9TuPwPw35QI+w= +kubedb.dev/apimachinery v0.47.0-rc.2.0.20240814122107-b1472e3500d3/go.mod h1:XkzWrijuH4skCU2ru+Ye9O0KSWQ7Nn2mRjP3+F3StLk= +kubeops.dev/petset v0.0.6 h1:pmDrb29zrG1TLMV3mEi18VTP7hKcgDbiP1+MPXc5M+M= kubeops.dev/petset v0.0.6/go.mod h1:A15vh0r979NsvL65DTIZKWsa/NoX9VapHBAEw1ZsdYI= lukechampine.com/uint128 v1.1.1/go.mod h1:c4eWIwlEGaxC/+H1VguhU4PHXNWDCDMUlWdIWl2j1gk= lukechampine.com/uint128 v1.2.0 h1:mBi/5l91vocEN8otkC5bDLhi2KdCticRiwbdB0O+rjI= diff --git a/vendor/github.com/gocql/gocql/.gitignore 
b/vendor/github.com/gocql/gocql/.gitignore new file mode 100644 index 000000000..bce6cf584 --- /dev/null +++ b/vendor/github.com/gocql/gocql/.gitignore @@ -0,0 +1,5 @@ +gocql-fuzz +fuzz-corpus +fuzz-work +gocql.test +.idea diff --git a/vendor/github.com/gocql/gocql/AUTHORS b/vendor/github.com/gocql/gocql/AUTHORS new file mode 100644 index 000000000..327570751 --- /dev/null +++ b/vendor/github.com/gocql/gocql/AUTHORS @@ -0,0 +1,143 @@ +# This source file refers to The gocql Authors for copyright purposes. + +Christoph Hack +Jonathan Rudenberg +Thorsten von Eicken +Matt Robenolt +Phillip Couto +Niklas Korz +Nimi Wariboko Jr +Ghais Issa +Sasha Klizhentas +Konstantin Cherkasov +Ben Hood <0x6e6562@gmail.com> +Pete Hopkins +Chris Bannister +Maxim Bublis +Alex Zorin +Kasper Middelboe Petersen +Harpreet Sawhney +Charlie Andrews +Stanislavs Koikovs +Dan Forest +Miguel Serrano +Stefan Radomski +Josh Wright +Jacob Rhoden +Ben Frye +Fred McCann +Dan Simmons +Muir Manders +Sankar P +Julien Da Silva +Dan Kennedy +Nick Dhupia +Yasuharu Goto +Jeremy Schlatter +Matthias Kadenbach +Dean Elbaz +Mike Berman +Dmitriy Fedorenko +Zach Marcantel +James Maloney +Ashwin Purohit +Dan Kinder +Oliver Beattie +Justin Corpron +Miles Delahunty +Zach Badgett +Maciek Sakrejda +Jeff Mitchell +Baptiste Fontaine +Matt Heath +Jamie Cuthill +Adrian Casajus +John Weldon +Adrien Bustany +Andrey Smirnov +Adam Weiner +Daniel Cannon +Johnny Bergström +Adriano Orioli +Claudiu Raveica +Artem Chernyshev +Ference Fu +LOVOO +nikandfor +Anthony Woods +Alexander Inozemtsev +Rob McColl ; +Viktor Tönköl +Ian Lozinski +Michael Highstead +Sarah Brown +Caleb Doxsey +Frederic Hemery +Pekka Enberg +Mark M +Bartosz Burclaf +Marcus King +Andrew de Andrade +Robert Nix +Nathan Youngman +Charles Law ; +Nathan Davies +Bo Blanton +Vincent Rischmann +Jesse Claven +Derrick Wippler +Leigh McCulloch +Ron Kuris +Raphael Gavache +Yasser Abdolmaleki +Krishnanand Thommandra +Blake Atkinson +Dharmendra Parsaila +Nayef Ghattas +Michał Matczuk +Ben Krebsbach +Vivian Mathews +Sascha Steinbiss +Seth Rosenblum +Javier Zunzunegui +Luke Hines +Zhixin Wen +Chang Liu +Ingo Oeser +Luke Hines +Jacob Greenleaf +Alex Lourie ; +Marco Cadetg +Karl Matthias +Thomas Meson +Martin Sucha ; +Pavel Buchinchik +Rintaro Okamura +Yura Sokolov ; +Jorge Bay +Dmitriy Kozlov +Alexey Romanovsky +Jaume Marhuenda Beltran +Piotr Dulikowski +Árni Dagur +Tushar Das +Maxim Vladimirskiy +Bogdan-Ciprian Rusu +Yuto Doi +Krishna Vadali +Jens-W. Schicke-Uffmann +Ondrej Polakovič +Sergei Karetnikov +Stefan Miklosovic +Adam Burk +Valerii Ponomarov +Neal Turett +Doug Schaapveld +Steven Seidman +Wojciech Przytuła +João Reis +Lauro Ramos Venancio +Dmitry Kropachev +Oliver Boyle +Jackson Fleming +Sylwia Szunejko diff --git a/vendor/github.com/gocql/gocql/CHANGELOG.md b/vendor/github.com/gocql/gocql/CHANGELOG.md new file mode 100644 index 000000000..8dfb22828 --- /dev/null +++ b/vendor/github.com/gocql/gocql/CHANGELOG.md @@ -0,0 +1,163 @@ +# Changelog +All notable changes to this project will be documented in this file. + +The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), +and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html). + +## [Unreleased] + +### Added + +### Changed + +### Fixed + +## [1.6.0] - 2023-08-28 + +### Added +- Added the InstaclustrPasswordAuthenticator to the list of default approved authenticators. 
(#1711) +- Added the `com.scylladb.auth.SaslauthdAuthenticator` and `com.scylladb.auth.TransitionalAuthenticator` + to the list of default approved authenticators. (#1712) +- Added transferring Keyspace and Table names to the Query from the prepared response and updating + information about that every time this information is received. (#1714) + +### Changed +- Tracer created with NewTraceWriter now includes the thread information from trace events in the output. (#1716) +- Increased default timeouts so that they are higher than Cassandra default timeouts. + This should help prevent issues where a default configuration overloads a server using default timeouts + during retries. (#1701, #1719) + +## [1.5.2] - 2023-06-12 + +Same as 1.5.0. GitHub does not like gpg signed text in the tag message (even with prefixed armor), +so pushing a new tag. + +## [1.5.1] - 2023-06-12 + +Same as 1.5.0. GitHub does not like gpg signed text in the tag message, +so pushing a new tag. + +## [1.5.0] - 2023-06-12 + +### Added + +- gocql now advertises the driver name and version in the STARTUP message to the server. + The values are taken from the Go module's path and version + (or from the replacement module, if used). (#1702) + That allows the server to track which fork of the driver is being used. +- Query.Values() to retrieve the values bound to the Query. + This makes writing wrappers around Query easier. (#1700) + +### Fixed +- Potential panic on deserialization (#1695) +- Unmarshalling of dates outside of `[1677-09-22, 2262-04-11]` range. (#1692) + +## [1.4.0] - 2023-04-26 + +### Added + +### Changed + +- gocql now refreshes the entire ring when it receives a topology change event and + when control connection is re-connected. + This simplifies code managing ring state. (#1680) +- Supported versions of Cassandra that we test against are now 4.0.x and 4.1.x. (#1685) +- Default HostDialer now uses already-resolved connect address instead of hostname when establishing TCP connections (#1683). + +### Fixed + +- Deadlock in Session.Close(). (#1688) +- Race between Query.Release() and speculative executions (#1684) +- Missed ring update during control connection reconnection (#1680) + +## [1.3.2] - 2023-03-27 + +### Changed + +- Supported versions of Go that we test against are now Go 1.19 and Go 1.20. + +### Fixed + +- Node event handling now processes topology events before status events. + This fixes some cases where new nodes were missed. (#1682) +- Learning a new IP address for an existing node (identified by host ID) now triggers replacement of that host. + This fixes some Kubernetes reconnection failures. (#1682) +- Refresh ring when processing a node UP event for an unknown host. + This fixes some cases where new nodes were missed. (#1669) + +## [1.3.1] - 2022-12-13 + +### Fixed + +- Panic in RackAwareRoundRobinPolicy caused by wrong alignment on 32-bit platforms. (#1666) + +## [1.3.0] - 2022-11-29 + +### Added + +- Added a RackAwareRoundRobinPolicy that attempts to keep client->server traffic in the same rack when possible. + +### Changed + +- Supported versions of Go that we test against are now Go 1.18 and Go 1.19. + +## [1.2.1] - 2022-09-02 + +### Changed + +- GetCustomPayload now returns nil instead of panicking in case of query error. (#1385) + +### Fixed + +- Nil pointer dereference in events.go when handling node removal. (#1652) +- Reading peers from DataStax Enterprise clusters. This was a regression in 1.2.0. (#1646) +- Unmarshaling maps did not pre-allocate the map. 
(#1642) + +## [1.2.0] - 2022-07-07 + +This release improves support for connecting through proxies and some improvements when using Cassandra 4.0 or later. + +### Added +- HostDialer interface now allows customizing connection including TLS setup per host. (#1629) + +### Changed +- The driver now uses `host_id` instead of connect address to identify nodes. (#1632) +- gocql reads `system.peers_v2` instead of `system.peers` when connected to Cassandra 4.0 or later and + populates `HostInfo.Port` using the native port. (#1635) + +### Fixed +- Data race in `HostInfo.HostnameAndPort()`. (#1631) +- Handling of nils when marshaling/unmarshaling lists and maps. (#1630) +- Silent data corruption in case a map was serialized into UDT and some fields in the UDT were not present in the map. + The driver now correctly writes nulls instead of shifting fields. (#1626, #1639) + +## [1.1.0] - 2022-04-29 + +### Added +- Changelog. +- StreamObserver and StreamObserverContext interfaces to allow observing CQL streams. +- ClusterConfig.WriteTimeout option now allows to specify a write-timeout different from read-timeout. +- TypeInfo.NewWithError method. + +### Changed +- Supported versions of Go that we test against are now Go 1.17 and Go 1.18. +- The driver now returns an error if SetWriteDeadline fails. If you need to run gocql on + a platform that does not support SetWriteDeadline, set WriteTimeout to zero to disable the timeout. +- Creating streams on a connection that is closing now fails early. +- HostFilter now also applies to control connections. +- TokenAwareHostPolicy now panics immediately during initialization instead of at random point later + if you reuse the TokenAwareHostPolicy between multiple sessions. Reusing TokenAwareHostPolicy between + sessions was never supported. + +### Fixed +- The driver no longer resets the network connection if a write fails with non-network-related error. +- Blocked network write to a network could block other goroutines, this is now fixed. +- Fixed panic in unmarshalUDT when trying to unmarshal a user-defined-type to a non-pointer Go type. +- Fixed panic when trying to unmarshal unknown/custom CQL type. + +## Deprecated +- TypeInfo.New, please use TypeInfo.NewWithError instead. + +## [1.0.0] - 2022-03-04 +### Changed +- Started tagging versions with semantic version tags diff --git a/vendor/github.com/gocql/gocql/CONTRIBUTING.md b/vendor/github.com/gocql/gocql/CONTRIBUTING.md new file mode 100644 index 000000000..8c2df74b7 --- /dev/null +++ b/vendor/github.com/gocql/gocql/CONTRIBUTING.md @@ -0,0 +1,78 @@ +# Contributing to gocql + +**TL;DR** - this manifesto sets out the bare minimum requirements for submitting a patch to gocql. + +This guide outlines the process of landing patches in gocql and the general approach to maintaining the code base. + +## Background + +The goal of the gocql project is to provide a stable and robust CQL driver for Go. gocql is a community driven project that is coordinated by a small team of core developers. 
+ +## Minimum Requirement Checklist + +The following is a check list of requirements that need to be satisfied in order for us to merge your patch: + +* You should raise a pull request to gocql/gocql on Github +* The pull request has a title that clearly summarizes the purpose of the patch +* The motivation behind the patch is clearly defined in the pull request summary +* Your name and email have been added to the `AUTHORS` file (for copyright purposes) +* The patch will merge cleanly +* The test coverage does not fall below the critical threshold (currently 64%) +* The merge commit passes the regression test suite on Travis +* `go fmt` has been applied to the submitted code +* Notable changes (i.e. new features or changed behavior, bugfixes) are appropriately documented in CHANGELOG.md, functional changes also in godoc + +If there are any requirements that can't be reasonably satisfied, please state this either on the pull request or as part of discussion on the mailing list. Where appropriate, the core team may apply discretion and make an exception to these requirements. + +## Beyond The Checklist + +In addition to stating the hard requirements, there are a bunch of things that we consider when assessing changes to the library. These soft requirements are helpful pointers of how to get a patch landed quicker and with less fuss. + +### General QA Approach + +The gocql team needs to consider the ongoing maintainability of the library at all times. Patches that look like they will introduce maintenance issues for the team will not be accepted. + +Your patch will get merged quicker if you have decent test cases that provide test coverage for the new behavior you wish to introduce. + +Unit tests are good, integration tests are even better. An example of a unit test is `marshal_test.go` - this tests the serialization code in isolation. `cassandra_test.go` is an integration test suite that is executed against every version of Cassandra that gocql supports as part of the CI process on Travis. + +That said, the point of writing tests is to provide a safety net to catch regressions, so there is no need to go overboard with tests. Remember that the more tests you write, the more code we will have to maintain. So there's a balance to strike there. + +### When It's Too Difficult To Automate Testing + +There are legitimate examples of where it is infeasible to write a regression test for a change. Never fear, we will still consider the patch and quite possibly accept the change without a test. The gocql team takes a pragmatic approach to testing. At the end of the day, you could be addressing an issue that is too difficult to reproduce in a test suite, but still occurs in a real production app. In this case, your production app is the test case, and we will have to trust that your change is good. + +Examples of pull requests that have been accepted without tests include: + +* https://github.com/gocql/gocql/pull/181 - this patch would otherwise require a multi-node cluster to be booted as part of the CI build +* https://github.com/gocql/gocql/pull/179 - this bug can only be reproduced under heavy load in certain circumstances + +### Sign Off Procedure + +Generally speaking, a pull request can get merged by any one of the core gocql team. If your change is minor, chances are that one team member will just go ahead and merge it there and then. As stated earlier, suitable test coverage will increase the likelihood that a single reviewer will assess and merge your change. 
If your change has no test coverage, or looks like it may have wider implications for the health and stability of the library, the reviewer may elect to refer the change to another team member to achieve consensus before proceeding. Therefore, the tighter and cleaner your patch is, the quicker it will go through the review process. + +### Supported Features + +gocql is a low level wire driver for Cassandra CQL. By and large, we would like to keep the functional scope of the library as narrow as possible. We think that gocql should be tight and focused, and we will be naturally skeptical of things that could just as easily be implemented in a higher layer. Inevitably you will come across something that could be implemented in a higher layer, save for a minor change to the core API. In this instance, please strike up a conversation with the gocql team. Chances are we will understand what you are trying to achieve and will try to accommodate this in a maintainable way. + +### Longer Term Evolution + +There are some long term plans for gocql that have to be taken into account when assessing changes. That said, gocql is ultimately a community driven project and we don't have a massive development budget, so sometimes the long term view might need to be de-prioritized ahead of short term changes. + +## Officially Supported Server Versions + +Currently, the officially supported versions of the Cassandra server include: + +* 1.2.18 +* 2.0.9 + +Chances are that gocql will work with many other versions. If you would like us to support a particular version of Cassandra, please start a conversation about what version you'd like us to consider. We are more likely to accept a new version if you help out by extending the regression suite to cover the new version to be supported. + +## The Core Dev Team + +The core development team includes: + +* tux21b +* phillipCouto +* Zariel +* 0x6e6562 diff --git a/vendor/github.com/gocql/gocql/LICENSE b/vendor/github.com/gocql/gocql/LICENSE new file mode 100644 index 000000000..3836494a9 --- /dev/null +++ b/vendor/github.com/gocql/gocql/LICENSE @@ -0,0 +1,27 @@ +Copyright (c) 2016, The Gocql authors +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + +* Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + +* Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + +* Neither the name of the copyright holder nor the names of its + contributors may be used to endorse or promote products derived from + this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/gocql/gocql/README.md b/vendor/github.com/gocql/gocql/README.md new file mode 100644 index 000000000..525731ce1 --- /dev/null +++ b/vendor/github.com/gocql/gocql/README.md @@ -0,0 +1,166 @@ +gocql +===== + +[![Join the chat at https://gitter.im/gocql/gocql](https://badges.gitter.im/Join%20Chat.svg)](https://gitter.im/gocql/gocql?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge) +![go build](https://github.com/gocql/gocql/actions/workflows/main.yml/badge.svg) +[![GoDoc](https://godoc.org/github.com/gocql/gocql?status.svg)](https://godoc.org/github.com/gocql/gocql) + +Package gocql implements a fast and robust Cassandra client for the +Go programming language. + +Project Website: https://gocql.github.io/
+API documentation: https://godoc.org/github.com/gocql/gocql
+Discussions: https://groups.google.com/forum/#!forum/gocql
+
+Supported Versions
+------------------
+
+The following matrix shows the versions of Go and Cassandra that are tested with the integration test suite as part of the CI build:
+
+| Go/Cassandra | 4.0.x | 4.1.x |
+|--------------|-------|-------|
+| 1.19         | yes   | yes   |
+| 1.20         | yes   | yes   |
+
+Gocql has been tested in production against many versions of Cassandra. Due to limits in our CI setup we only
+test against the latest 2 GA releases.
+
+Sunsetting Model
+----------------
+
+In general, the gocql team will focus on supporting the current and previous versions of Go. gocql may still work with older versions of Go, but official support for these versions will have been sunset.
+
+Installation
+------------
+
+    go get github.com/gocql/gocql
+
+
+Features
+--------
+
+* Modern Cassandra client using the native transport
+* Automatic type conversions between Cassandra and Go
+  * Support for all common types including sets, lists and maps
+  * Custom types can implement a `Marshaler` and `Unmarshaler` interface
+  * Strict type conversions without any loss of precision
+  * Built-In support for UUIDs (version 1 and 4)
+* Support for logged, unlogged and counter batches
+* Cluster management
+  * Automatic reconnect on connection failures with exponential falloff
+  * Round robin distribution of queries to different hosts
+  * Round robin distribution of queries to different connections on a host
+  * Each connection can execute up to n concurrent queries (whereby n is the limit set by the protocol version the client chooses to use)
+  * Optional automatic discovery of nodes
+  * Policy based connection pool with token aware and round-robin policy implementations
+* Support for password authentication
+* Iteration over paged results with configurable page size
+* Support for TLS/SSL
+* Optional frame compression (using snappy)
+* Automatic query preparation
+* Support for query tracing
+* Support for Cassandra 2.1+ [binary protocol version 3](https://github.com/apache/cassandra/blob/trunk/doc/native_protocol_v3.spec)
+  * Support for up to 32768 streams
+  * Support for tuple types
+  * Support for client side timestamps by default
+  * Support for UDTs via a custom marshaller or struct tags
+* Support for Cassandra 3.0+ [binary protocol version 4](https://github.com/apache/cassandra/blob/trunk/doc/native_protocol_v4.spec)
+* An API to access the schema metadata of a given keyspace
+
+Performance
+-----------
+While the driver strives to be highly performant, there are cases where it is difficult to test and verify. The driver is built
+with maintainability and code readability in mind first, and then performance and features; as such, every now and then performance
+may degrade. If this occurs, please report an issue and it will be looked at and remedied. The only time the driver copies data from
+its read buffer is when it unmarshals data into supplied types.
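+
+A minimal sketch of the paging and iterator tips that follow; the `session`, keyspace, and table names here are assumed placeholders, not part of the driver:
+
+```go
+// Page through a large result set: tune the page size to the workload, and
+// always close the iterator so its byte buffers can be recycled.
+iter := session.Query(`SELECT id, name FROM example.users`).PageSize(500).Iter()
+
+var (
+	id   gocql.UUID
+	name string
+)
+for iter.Scan(&id, &name) {
+	fmt.Println(id, name)
+}
+if err := iter.Close(); err != nil {
+	log.Fatal(err)
+}
+```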
+
+Some tips for getting more performance from the driver:
+* Use the TokenAware policy
+* Use many goroutines when doing inserts; the driver is asynchronous but provides a synchronous API, so it can execute many queries concurrently
+* Tune query page size
+* Reading data from the network to unmarshal will incur a large number of allocations; this can adversely affect the garbage collector, so tune `GOGC`
+* Close iterators after use to recycle byte buffers
+
+Important Default Keyspace Changes
+----------------------------------
+gocql no longer supports executing "use <keyspace>" statements to simplify the library. The user still has the
+ability to define the default keyspace for connections but now the keyspace can only be defined before a
+session is created. Queries can still access keyspaces by indicating the keyspace in the query:
+```sql
+SELECT * FROM example2.table;
+```
+
+Example of correct usage:
+```go
+	cluster := gocql.NewCluster("192.168.1.1", "192.168.1.2", "192.168.1.3")
+	cluster.Keyspace = "example"
+	...
+	session, err := cluster.CreateSession()
+
+```
+Example of incorrect usage:
+```go
+	cluster := gocql.NewCluster("192.168.1.1", "192.168.1.2", "192.168.1.3")
+	cluster.Keyspace = "example"
+	...
+	session, err := cluster.CreateSession()
+
+	if err = session.Query("use example2").Exec(); err != nil {
+		log.Fatal(err)
+	}
+```
+This will result in an err being returned from the session.Query line as the user is trying to execute a "use"
+statement.
+
+Example
+-------
+
+See [package documentation](https://pkg.go.dev/github.com/gocql/gocql#pkg-examples).
+
+Data Binding
+------------
+
+There are various ways to bind application level data structures to CQL statements:
+
+* You can write the data binding by hand, as outlined in the Tweet example. This provides you with the greatest flexibility, but it does mean that you need to keep your application code in sync with your Cassandra schema.
+* You can dynamically marshal an entire query result into an `[]map[string]interface{}` using the `SliceMap()` API. This returns a slice of row maps keyed by CQL column names. This method requires no special interaction with the gocql API, but it does require your application to be able to deal with a key value view of your data.
+* As a refinement on the `SliceMap()` API you can also call `MapScan()` which returns `map[string]interface{}` instances in a row by row fashion.
+* The `Bind()` API provides a client app with a low level mechanism to introspect query meta data and extract appropriate field values from application level data structures.
+* The [gocqlx](https://github.com/scylladb/gocqlx) package is an idiomatic extension to gocql that provides usability features. With gocqlx you can bind the query parameters from maps and structs, use named query parameters (:identifier) and scan the query results into structs and slices. It comes with a fluent and flexible CQL query builder that supports the full CQL spec, including BATCH statements and custom functions.
+* Building on top of the gocql driver, [cqlr](https://github.com/relops/cqlr) adds the ability to auto-bind a CQL iterator to a struct or to bind a struct to an INSERT statement.
+* Another external project that layers on top of gocql is [cqlc](http://relops.com/cqlc) which generates gocql compliant code from your Cassandra schema so that you can write type safe CQL statements in Go with a natural query syntax.
+* [gocassa](https://github.com/hailocab/gocassa) is an external project that layers on top of gocql to provide convenient query building and data binding.
+* [gocqltable](https://github.com/kristoiv/gocqltable) provides an ORM-style convenience layer to make CRUD operations with gocql easier.
+
+Ecosystem
+---------
+
+The following community maintained tools are known to integrate with gocql:
+
+* [gocqlx](https://github.com/scylladb/gocqlx) is a gocql extension that automates data binding, adds named queries support, provides flexible query builders and plays well with gocql.
+* [journey](https://github.com/db-journey/journey) is a migration tool with Cassandra support.
+* [negronicql](https://github.com/mikebthun/negronicql) is gocql middleware for Negroni.
+* [cqlr](https://github.com/relops/cqlr) adds the ability to auto-bind a CQL iterator to a struct or to bind a struct to an INSERT statement.
+* [cqlc](http://relops.com/cqlc) generates gocql compliant code from your Cassandra schema so that you can write type safe CQL statements in Go with a natural query syntax.
+* [gocassa](https://github.com/hailocab/gocassa) provides query building, adds data binding, and provides easy-to-use "recipe" tables for common query use-cases.
+* [gocqltable](https://github.com/kristoiv/gocqltable) is a wrapper around gocql that aims to simplify common operations.
+* [gockle](https://github.com/willfaught/gockle) provides simple, mockable interfaces that wrap gocql types
+* [scylladb](https://github.com/scylladb/scylla) is a fast Apache Cassandra-compatible NoSQL database
+* [go-cql-driver](https://github.com/MichaelS11/go-cql-driver) is a CQL driver conforming to the built-in database/sql interface. It is good for simple use cases where the database/sql interface is wanted. The CQL driver is a wrapper around this project.
+
+Other Projects
+--------------
+
+* [gocqldriver](https://github.com/tux21b/gocqldriver) is the predecessor of gocql based on Go's `database/sql` package. This project isn't maintained anymore, because Cassandra wasn't a good fit for the traditional `database/sql` API. Use this package instead.
+
+SEO
+---
+
+For some reason, when you Google `golang cassandra`, this project doesn't feature very highly in the result list. But if you Google `go cassandra`, then we're a bit higher up the list. So this is a note to try to convince Google that golang is an alias for Go.
+
+License
+-------
+
+> Copyright (c) 2012-2016 The gocql Authors. All rights reserved.
+> Use of this source code is governed by a BSD-style
+> license that can be found in the LICENSE file.
diff --git a/vendor/github.com/gocql/gocql/address_translators.go b/vendor/github.com/gocql/gocql/address_translators.go
new file mode 100644
index 000000000..6638bcaa8
--- /dev/null
+++ b/vendor/github.com/gocql/gocql/address_translators.go
@@ -0,0 +1,26 @@
+package gocql
+
+import "net"
+
+// AddressTranslator provides a way to translate node addresses (and ports) that are
+// discovered or received as a node event. This can be useful in an ec2 environment,
+// for instance, to translate public IPs to private IPs.
+type AddressTranslator interface {
+	// Translate will translate the provided address and/or port to another
+	// address and/or port. If no translation is possible, Translate will return the
+	// address and port provided to it.
+ Translate(addr net.IP, port int) (net.IP, int) +} + +type AddressTranslatorFunc func(addr net.IP, port int) (net.IP, int) + +func (fn AddressTranslatorFunc) Translate(addr net.IP, port int) (net.IP, int) { + return fn(addr, port) +} + +// IdentityTranslator will do nothing but return what it was provided. It is essentially a no-op. +func IdentityTranslator() AddressTranslator { + return AddressTranslatorFunc(func(addr net.IP, port int) (net.IP, int) { + return addr, port + }) +} diff --git a/vendor/github.com/gocql/gocql/cluster.go b/vendor/github.com/gocql/gocql/cluster.go new file mode 100644 index 000000000..e4b6e4673 --- /dev/null +++ b/vendor/github.com/gocql/gocql/cluster.go @@ -0,0 +1,315 @@ +// Copyright (c) 2012 The gocql Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package gocql + +import ( + "context" + "errors" + "net" + "time" +) + +// PoolConfig configures the connection pool used by the driver, it defaults to +// using a round-robin host selection policy and a round-robin connection selection +// policy for each host. +type PoolConfig struct { + // HostSelectionPolicy sets the policy for selecting which host to use for a + // given query (default: RoundRobinHostPolicy()) + // It is not supported to use a single HostSelectionPolicy in multiple sessions + // (even if you close the old session before using in a new session). + HostSelectionPolicy HostSelectionPolicy +} + +func (p PoolConfig) buildPool(session *Session) *policyConnPool { + return newPolicyConnPool(session) +} + +// ClusterConfig is a struct to configure the default cluster implementation +// of gocql. It has a variety of attributes that can be used to modify the +// behavior to fit the most common use cases. Applications that require a +// different setup must implement their own cluster. +type ClusterConfig struct { + // addresses for the initial connections. It is recommended to use the value set in + // the Cassandra config for broadcast_address or listen_address, an IP address not + // a domain name. This is because events from Cassandra will use the configured IP + // address, which is used to index connected hosts. If the domain name specified + // resolves to more than 1 IP address then the driver may connect multiple times to + // the same host, and will not mark the node being down or up from events. + Hosts []string + + // CQL version (default: 3.0.0) + CQLVersion string + + // ProtoVersion sets the version of the native protocol to use, this will + // enable features in the driver for specific protocol versions, generally this + // should be set to a known version (2,3,4) for the cluster being connected to. + // + // If it is 0 or unset (the default) then the driver will attempt to discover the + // highest supported protocol for the cluster. In clusters with nodes of different + // versions the protocol selected is not defined (ie, it can be any of the supported in the cluster) + ProtoVersion int + + // Timeout limits the time spent on the client side while executing a query. + // Specifically, query or batch execution will return an error if the client does not receive a response + // from the server within the Timeout period. + // Timeout is also used to configure the read timeout on the underlying network connection. + // Client Timeout should always be higher than the request timeouts configured on the server, + // so that retries don't overload the server. 
+ // Timeout has a default value of 11 seconds, which is higher than default server timeout for most query types. + // Timeout is not applied to requests during initial connection setup, see ConnectTimeout. + Timeout time.Duration + + // ConnectTimeout limits the time spent during connection setup. + // During initial connection setup, internal queries, AUTH requests will return an error if the client + // does not receive a response within the ConnectTimeout period. + // ConnectTimeout is applied to the connection setup queries independently. + // ConnectTimeout also limits the duration of dialing a new TCP connection + // in case there is no Dialer nor HostDialer configured. + // ConnectTimeout has a default value of 11 seconds. + ConnectTimeout time.Duration + + // WriteTimeout limits the time the driver waits to write a request to a network connection. + // WriteTimeout should be lower than or equal to Timeout. + // WriteTimeout defaults to the value of Timeout. + WriteTimeout time.Duration + + // Port used when dialing. + // Default: 9042 + Port int + + // Initial keyspace. Optional. + Keyspace string + + // Number of connections per host. + // Default: 2 + NumConns int + + // Default consistency level. + // Default: Quorum + Consistency Consistency + + // Compression algorithm. + // Default: nil + Compressor Compressor + + // Default: nil + Authenticator Authenticator + + // An Authenticator factory. Can be used to create alternative authenticators. + // Default: nil + AuthProvider func(h *HostInfo) (Authenticator, error) + + // Default retry policy to use for queries. + // Default: no retries. + RetryPolicy RetryPolicy + + // ConvictionPolicy decides whether to mark host as down based on the error and host info. + // Default: SimpleConvictionPolicy + ConvictionPolicy ConvictionPolicy + + // Default reconnection policy to use for reconnecting before trying to mark host as down. + ReconnectionPolicy ReconnectionPolicy + + // The keepalive period to use, enabled if > 0 (default: 0) + // SocketKeepalive is used to set up the default dialer and is ignored if Dialer or HostDialer is provided. + SocketKeepalive time.Duration + + // Maximum cache size for prepared statements globally for gocql. + // Default: 1000 + MaxPreparedStmts int + + // Maximum cache size for query info about statements for each session. + // Default: 1000 + MaxRoutingKeyInfo int + + // Default page size to use for created sessions. + // Default: 5000 + PageSize int + + // Consistency for the serial part of queries, values can be either SERIAL or LOCAL_SERIAL. + // Default: unset + SerialConsistency SerialConsistency + + // SslOpts configures TLS use when HostDialer is not set. + // SslOpts is ignored if HostDialer is set. + SslOpts *SslOptions + + // Sends a client side timestamp for all requests which overrides the timestamp at which it arrives at the server. + // Default: true, only enabled for protocol 3 and above. + DefaultTimestamp bool + + // PoolConfig configures the underlying connection pool, allowing the + // configuration of host selection and connection selection policies. + PoolConfig PoolConfig + + // If not zero, gocql attempt to reconnect known DOWN nodes in every ReconnectInterval. + ReconnectInterval time.Duration + + // The maximum amount of time to wait for schema agreement in a cluster after + // receiving a schema change frame. (default: 60s) + MaxWaitSchemaAgreement time.Duration + + // HostFilter will filter all incoming events for host, any which don't pass + // the filter will be ignored. 
If set, it takes precedence over any options set
+	// via Discovery.
+	HostFilter HostFilter
+
+	// AddressTranslator will translate addresses found on peer discovery and/or
+	// node change events.
+	AddressTranslator AddressTranslator
+
+	// If IgnorePeerAddr is true and the address in system.peers does not match
+	// the host supplied (either as an initial host or discovered via events), the
+	// host will be replaced with the supplied address.
+	//
+	// For example, if an event comes in with host=10.0.0.1 but looking up that
+	// address in system.local or system.peers returns 127.0.0.1, the peer will be
+	// set to 10.0.0.1, which is what will be used to connect.
+	IgnorePeerAddr bool
+
+	// If DisableInitialHostLookup is true, the driver will not attempt to get host
+	// info from the system.peers table. The driver will connect only to the hosts
+	// supplied and will not attempt to look up host information. As a result,
+	// data_centre, rack and token information will not be available, so host
+	// filtering and token-aware query routing will not be available.
+	DisableInitialHostLookup bool
+
+	// Configure events the driver will register for
+	Events struct {
+		// disable registering for status events (node up/down)
+		DisableNodeStatusEvents bool
+		// disable registering for topology events (node added/removed/moved)
+		DisableTopologyEvents bool
+		// disable registering for schema events (keyspace/table/function removed/created/updated)
+		DisableSchemaEvents bool
+	}
+
+	// DisableSkipMetadata will override the internal result metadata cache so that the driver does not
+	// send skip_metadata for queries. This means that the result will always contain
+	// the metadata needed to parse the rows and will not reuse the metadata from the
+	// prepared statement.
+	//
+	// See https://issues.apache.org/jira/browse/CASSANDRA-10786
+	DisableSkipMetadata bool
+
+	// QueryObserver will set the provided query observer on all queries created from this session.
+	// Use it to collect metrics / stats from queries by providing an implementation of QueryObserver.
+	QueryObserver QueryObserver
+
+	// BatchObserver will set the provided batch observer on all queries created from this session.
+	// Use it to collect metrics / stats from batch queries by providing an implementation of BatchObserver.
+	BatchObserver BatchObserver
+
+	// ConnectObserver will set the provided connect observer on all queries
+	// created from this session.
+	ConnectObserver ConnectObserver
+
+	// FrameHeaderObserver will set the provided frame header observer on all frames' headers created from this session.
+	// Use it to collect metrics / stats from frames by providing an implementation of FrameHeaderObserver.
+	FrameHeaderObserver FrameHeaderObserver
+
+	// StreamObserver will be notified of stream state changes.
+	// This can be used to track in-flight protocol requests and responses.
+	StreamObserver StreamObserver
+
+	// Default idempotence for queries
+	DefaultIdempotence bool
+
+	// The time to wait for frames before flushing the frames connection to Cassandra.
+	// Can help reduce syscall overhead by making fewer calls to write. Set to 0 to
+	// disable.
+	//
+	// (default: 200 microseconds)
+	WriteCoalesceWaitTime time.Duration
+
+	// Dialer will be used to establish all connections created for this Cluster.
+	// If not provided, a default dialer configured with ConnectTimeout will be used.
+	// Dialer is ignored if HostDialer is provided.
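+	//
+	// A minimal sketch (the timeout values are illustrative, not defaults);
+	// *net.Dialer already satisfies the Dialer interface via its DialContext
+	// method:
+	//
+	//	cluster := gocql.NewCluster("10.0.0.1")
+	//	cluster.Dialer = &net.Dialer{
+	//		Timeout:   5 * time.Second,
+	//		KeepAlive: 30 * time.Second,
+	//	}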
+	Dialer Dialer
+
+	// HostDialer will be used to establish all connections for this Cluster.
+	// If not provided, Dialer will be used instead.
+	HostDialer HostDialer
+
+	// Logger for this ClusterConfig.
+	// If not specified, defaults to the global gocql.Logger.
+	Logger StdLogger
+
+	// internal config for testing
+	disableControlConn bool
+}
+
+type Dialer interface {
+	DialContext(ctx context.Context, network, addr string) (net.Conn, error)
+}
+
+// NewCluster generates a new config for the default cluster implementation.
+//
+// The supplied hosts are used to initially connect to the cluster, then the rest of
+// the ring will be automatically discovered. It is recommended to use the value set in
+// the Cassandra config for broadcast_address or listen_address, an IP address, not
+// a domain name. This is because events from Cassandra will use the configured IP
+// address, which is used to index connected hosts. If the domain name specified
+// resolves to more than one IP address, the driver may connect multiple times to
+// the same host and will not be able to mark the node as down or up from events.
+func NewCluster(hosts ...string) *ClusterConfig {
+	cfg := &ClusterConfig{
+		Hosts:                  hosts,
+		CQLVersion:             "3.0.0",
+		Timeout:                11 * time.Second,
+		ConnectTimeout:         11 * time.Second,
+		Port:                   9042,
+		NumConns:               2,
+		Consistency:            Quorum,
+		MaxPreparedStmts:       defaultMaxPreparedStmts,
+		MaxRoutingKeyInfo:      1000,
+		PageSize:               5000,
+		DefaultTimestamp:       true,
+		MaxWaitSchemaAgreement: 60 * time.Second,
+		ReconnectInterval:      60 * time.Second,
+		ConvictionPolicy:       &SimpleConvictionPolicy{},
+		ReconnectionPolicy:     &ConstantReconnectionPolicy{MaxRetries: 3, Interval: 1 * time.Second},
+		WriteCoalesceWaitTime:  200 * time.Microsecond,
+	}
+	return cfg
+}
+
+func (cfg *ClusterConfig) logger() StdLogger {
+	if cfg.Logger == nil {
+		return Logger
+	}
+	return cfg.Logger
+}
+
+// CreateSession initializes the cluster based on this config and returns a
+// session object that can be used to interact with the database.
+func (cfg *ClusterConfig) CreateSession() (*Session, error) {
+	return NewSession(*cfg)
+}
+
+// translateAddressPort is a helper method that uses the given AddressTranslator,
+// if defined, to translate the given address and port into a possibly new address
+// and port. If no AddressTranslator is set, or if an error occurs, the given address and
+// port are returned.
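+//
+// As a minimal sketch, a translator that forces a fixed public port (the port
+// value here is an assumption for illustration) could be installed with:
+//
+//	cluster.AddressTranslator = gocql.AddressTranslatorFunc(
+//		func(addr net.IP, port int) (net.IP, int) {
+//			return addr, 9042
+//		})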
+func (cfg *ClusterConfig) translateAddressPort(addr net.IP, port int) (net.IP, int) { + if cfg.AddressTranslator == nil || len(addr) == 0 { + return addr, port + } + newAddr, newPort := cfg.AddressTranslator.Translate(addr, port) + if gocqlDebug { + cfg.logger().Printf("gocql: translating address '%v:%d' to '%v:%d'", addr, port, newAddr, newPort) + } + return newAddr, newPort +} + +func (cfg *ClusterConfig) filterHost(host *HostInfo) bool { + return !(cfg.HostFilter == nil || cfg.HostFilter.Accept(host)) +} + +var ( + ErrNoHosts = errors.New("no hosts provided") + ErrNoConnectionsStarted = errors.New("no connections were made when creating the session") + ErrHostQueryFailed = errors.New("unable to populate Hosts") +) diff --git a/vendor/github.com/gocql/gocql/compressor.go b/vendor/github.com/gocql/gocql/compressor.go new file mode 100644 index 000000000..26853ae7f --- /dev/null +++ b/vendor/github.com/gocql/gocql/compressor.go @@ -0,0 +1,28 @@ +package gocql + +import ( + "github.com/golang/snappy" +) + +type Compressor interface { + Name() string + Encode(data []byte) ([]byte, error) + Decode(data []byte) ([]byte, error) +} + +// SnappyCompressor implements the Compressor interface and can be used to +// compress incoming and outgoing frames. The snappy compression algorithm +// aims for very high speeds and reasonable compression. +type SnappyCompressor struct{} + +func (s SnappyCompressor) Name() string { + return "snappy" +} + +func (s SnappyCompressor) Encode(data []byte) ([]byte, error) { + return snappy.Encode(nil, data), nil +} + +func (s SnappyCompressor) Decode(data []byte) ([]byte, error) { + return snappy.Decode(nil, data) +} diff --git a/vendor/github.com/gocql/gocql/conn.go b/vendor/github.com/gocql/gocql/conn.go new file mode 100644 index 000000000..d780bcd97 --- /dev/null +++ b/vendor/github.com/gocql/gocql/conn.go @@ -0,0 +1,1745 @@ +// Copyright (c) 2012 The gocql Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package gocql + +import ( + "bufio" + "context" + "crypto/tls" + "errors" + "fmt" + "io" + "io/ioutil" + "net" + "strconv" + "strings" + "sync" + "sync/atomic" + "time" + + "github.com/gocql/gocql/internal/lru" + "github.com/gocql/gocql/internal/streams" +) + +var ( + defaultApprovedAuthenticators = []string{ + "org.apache.cassandra.auth.PasswordAuthenticator", + "com.instaclustr.cassandra.auth.SharedSecretAuthenticator", + "com.datastax.bdp.cassandra.auth.DseAuthenticator", + "io.aiven.cassandra.auth.AivenAuthenticator", + "com.ericsson.bss.cassandra.ecaudit.auth.AuditPasswordAuthenticator", + "com.amazon.helenus.auth.HelenusAuthenticator", + "com.ericsson.bss.cassandra.ecaudit.auth.AuditAuthenticator", + "com.scylladb.auth.SaslauthdAuthenticator", + "com.scylladb.auth.TransitionalAuthenticator", + "com.instaclustr.cassandra.auth.InstaclustrPasswordAuthenticator", + } +) + +// approve the authenticator with the list of allowed authenticators or default list if approvedAuthenticators is empty. +func approve(authenticator string, approvedAuthenticators []string) bool { + if len(approvedAuthenticators) == 0 { + approvedAuthenticators = defaultApprovedAuthenticators + } + for _, s := range approvedAuthenticators { + if authenticator == s { + return true + } + } + return false +} + +// JoinHostPort is a utility to return an address string that can be used +// by `gocql.Conn` to form a connection with a host. 
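+//
+// For example:
+//
+//	JoinHostPort("10.0.0.1", 9042)      // "10.0.0.1:9042"
+//	JoinHostPort("10.0.0.1:9999", 9042) // "10.0.0.1:9999" (an existing port is kept)
+//	JoinHostPort("fe80::1", 9042)       // "[fe80::1]:9042"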
+func JoinHostPort(addr string, port int) string { + addr = strings.TrimSpace(addr) + if _, _, err := net.SplitHostPort(addr); err != nil { + addr = net.JoinHostPort(addr, strconv.Itoa(port)) + } + return addr +} + +type Authenticator interface { + Challenge(req []byte) (resp []byte, auth Authenticator, err error) + Success(data []byte) error +} + +type PasswordAuthenticator struct { + Username string + Password string + AllowedAuthenticators []string +} + +func (p PasswordAuthenticator) Challenge(req []byte) ([]byte, Authenticator, error) { + if !approve(string(req), p.AllowedAuthenticators) { + return nil, nil, fmt.Errorf("unexpected authenticator %q", req) + } + resp := make([]byte, 2+len(p.Username)+len(p.Password)) + resp[0] = 0 + copy(resp[1:], p.Username) + resp[len(p.Username)+1] = 0 + copy(resp[2+len(p.Username):], p.Password) + return resp, nil, nil +} + +func (p PasswordAuthenticator) Success(data []byte) error { + return nil +} + +// SslOptions configures TLS use. +// +// Warning: Due to historical reasons, the SslOptions is insecure by default, so you need to set EnableHostVerification +// to true if no Config is set. Most users should set SslOptions.Config to a *tls.Config. +// SslOptions and Config.InsecureSkipVerify interact as follows: +// +// Config.InsecureSkipVerify | EnableHostVerification | Result +// Config is nil | false | do not verify host +// Config is nil | true | verify host +// false | false | verify host +// true | false | do not verify host +// false | true | verify host +// true | true | verify host +type SslOptions struct { + *tls.Config + + // CertPath and KeyPath are optional depending on server + // config, but both fields must be omitted to avoid using a + // client certificate + CertPath string + KeyPath string + CaPath string //optional depending on server config + // If you want to verify the hostname and server cert (like a wildcard for cass cluster) then you should turn this + // on. + // This option is basically the inverse of tls.Config.InsecureSkipVerify. + // See InsecureSkipVerify in http://golang.org/pkg/crypto/tls/ for more info. + // + // See SslOptions documentation to see how EnableHostVerification interacts with the provided tls.Config. + EnableHostVerification bool +} + +type ConnConfig struct { + ProtoVersion int + CQLVersion string + Timeout time.Duration + WriteTimeout time.Duration + ConnectTimeout time.Duration + Dialer Dialer + HostDialer HostDialer + Compressor Compressor + Authenticator Authenticator + AuthProvider func(h *HostInfo) (Authenticator, error) + Keepalive time.Duration + Logger StdLogger + + tlsConfig *tls.Config + disableCoalesce bool +} + +func (c *ConnConfig) logger() StdLogger { + if c.Logger == nil { + return Logger + } + return c.Logger +} + +type ConnErrorHandler interface { + HandleError(conn *Conn, err error, closed bool) +} + +type connErrorHandlerFn func(conn *Conn, err error, closed bool) + +func (fn connErrorHandlerFn) HandleError(conn *Conn, err error, closed bool) { + fn(conn, err, closed) +} + +// If not zero, how many timeouts we will allow to occur before the connection is closed +// and restarted. This is to prevent a single query timeout from killing a connection +// which may be serving more queries just fine. +// Default is 0, should not be changed concurrently with queries. +// +// Deprecated. +var TimeoutLimit int64 = 0 + +// Conn is a single connection to a Cassandra node. It can be used to execute +// queries, but users are usually advised to use a more reliable, higher +// level API. 
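+//
+// A minimal sketch of that higher level API (host, keyspace and credentials
+// are placeholders):
+//
+//	cluster := gocql.NewCluster("10.0.0.1")
+//	cluster.Keyspace = "app"
+//	cluster.Authenticator = gocql.PasswordAuthenticator{Username: "user", Password: "secret"}
+//	session, err := cluster.CreateSession()
+//	if err != nil {
+//		// handle the error
+//	}
+//	defer session.Close()
+//
+//	var release string
+//	err = session.Query(`SELECT release_version FROM system.local`).Scan(&release)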
+type Conn struct { + conn net.Conn + r *bufio.Reader + w contextWriter + + timeout time.Duration + writeTimeout time.Duration + cfg *ConnConfig + frameObserver FrameHeaderObserver + streamObserver StreamObserver + + headerBuf [maxFrameHeaderSize]byte + + streams *streams.IDGenerator + mu sync.Mutex + // calls stores a map from stream ID to callReq. + // This map is protected by mu. + // calls should not be used when closed is true, calls is set to nil when closed=true. + calls map[int]*callReq + + errorHandler ConnErrorHandler + compressor Compressor + auth Authenticator + addr string + + version uint8 + currentKeyspace string + host *HostInfo + isSchemaV2 bool + + session *Session + + // true if connection close process for the connection started. + // closed is protected by mu. + closed bool + ctx context.Context + cancel context.CancelFunc + + timeouts int64 + + logger StdLogger +} + +// connect establishes a connection to a Cassandra node using session's connection config. +func (s *Session) connect(ctx context.Context, host *HostInfo, errorHandler ConnErrorHandler) (*Conn, error) { + return s.dial(ctx, host, s.connCfg, errorHandler) +} + +// dial establishes a connection to a Cassandra node and notifies the session's connectObserver. +func (s *Session) dial(ctx context.Context, host *HostInfo, connConfig *ConnConfig, errorHandler ConnErrorHandler) (*Conn, error) { + var obs ObservedConnect + if s.connectObserver != nil { + obs.Host = host + obs.Start = time.Now() + } + + conn, err := s.dialWithoutObserver(ctx, host, connConfig, errorHandler) + + if s.connectObserver != nil { + obs.End = time.Now() + obs.Err = err + s.connectObserver.ObserveConnect(obs) + } + + return conn, err +} + +// dialWithoutObserver establishes connection to a Cassandra node. +// +// dialWithoutObserver does not notify the connection observer, so you most probably want to call dial() instead. 
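+//
+// A minimal ConnectObserver sketch (the logObserver type is illustrative, not
+// part of the driver):
+//
+//	type logObserver struct{ log gocql.StdLogger }
+//
+//	func (o logObserver) ObserveConnect(oc gocql.ObservedConnect) {
+//		if oc.Err != nil {
+//			o.log.Printf("connect to %v failed after %v: %v", oc.Host, oc.End.Sub(oc.Start), oc.Err)
+//		}
+//	}
+//
+//	cluster.ConnectObserver = logObserver{log: gocql.Logger}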
+func (s *Session) dialWithoutObserver(ctx context.Context, host *HostInfo, cfg *ConnConfig, errorHandler ConnErrorHandler) (*Conn, error) { + dialedHost, err := cfg.HostDialer.DialHost(ctx, host) + if err != nil { + return nil, err + } + + writeTimeout := cfg.Timeout + if cfg.WriteTimeout > 0 { + writeTimeout = cfg.WriteTimeout + } + + ctx, cancel := context.WithCancel(ctx) + c := &Conn{ + conn: dialedHost.Conn, + r: bufio.NewReader(dialedHost.Conn), + cfg: cfg, + calls: make(map[int]*callReq), + version: uint8(cfg.ProtoVersion), + addr: dialedHost.Conn.RemoteAddr().String(), + errorHandler: errorHandler, + compressor: cfg.Compressor, + session: s, + streams: streams.New(cfg.ProtoVersion), + host: host, + isSchemaV2: true, // Try using "system.peers_v2" until proven otherwise + frameObserver: s.frameObserver, + w: &deadlineContextWriter{ + w: dialedHost.Conn, + timeout: writeTimeout, + semaphore: make(chan struct{}, 1), + quit: make(chan struct{}), + }, + ctx: ctx, + cancel: cancel, + logger: cfg.logger(), + streamObserver: s.streamObserver, + writeTimeout: writeTimeout, + } + + if err := c.init(ctx, dialedHost); err != nil { + cancel() + c.Close() + return nil, err + } + + return c, nil +} + +func (c *Conn) init(ctx context.Context, dialedHost *DialedHost) error { + if c.session.cfg.AuthProvider != nil { + var err error + c.auth, err = c.cfg.AuthProvider(c.host) + if err != nil { + return err + } + } else { + c.auth = c.cfg.Authenticator + } + + startup := &startupCoordinator{ + frameTicker: make(chan struct{}), + conn: c, + } + + c.timeout = c.cfg.ConnectTimeout + if err := startup.setupConn(ctx); err != nil { + return err + } + + c.timeout = c.cfg.Timeout + + // dont coalesce startup frames + if c.session.cfg.WriteCoalesceWaitTime > 0 && !c.cfg.disableCoalesce && !dialedHost.DisableCoalesce { + c.w = newWriteCoalescer(c.conn, c.writeTimeout, c.session.cfg.WriteCoalesceWaitTime, ctx.Done()) + } + + go c.serve(ctx) + go c.heartBeat(ctx) + + return nil +} + +func (c *Conn) Write(p []byte) (n int, err error) { + return c.w.writeContext(context.Background(), p) +} + +func (c *Conn) Read(p []byte) (n int, err error) { + const maxAttempts = 5 + + for i := 0; i < maxAttempts; i++ { + var nn int + if c.timeout > 0 { + c.conn.SetReadDeadline(time.Now().Add(c.timeout)) + } + + nn, err = io.ReadFull(c.r, p[n:]) + n += nn + if err == nil { + break + } + + if verr, ok := err.(net.Error); !ok || !verr.Temporary() { + break + } + } + + return +} + +type startupCoordinator struct { + conn *Conn + frameTicker chan struct{} +} + +func (s *startupCoordinator) setupConn(ctx context.Context) error { + var cancel context.CancelFunc + if s.conn.timeout > 0 { + ctx, cancel = context.WithTimeout(ctx, s.conn.timeout) + } else { + ctx, cancel = context.WithCancel(ctx) + } + defer cancel() + + startupErr := make(chan error) + go func() { + for range s.frameTicker { + err := s.conn.recv(ctx) + if err != nil { + select { + case startupErr <- err: + case <-ctx.Done(): + } + + return + } + } + }() + + go func() { + defer close(s.frameTicker) + err := s.options(ctx) + select { + case startupErr <- err: + case <-ctx.Done(): + } + }() + + select { + case err := <-startupErr: + if err != nil { + return err + } + case <-ctx.Done(): + return errors.New("gocql: no response to connection startup within timeout") + } + + return nil +} + +func (s *startupCoordinator) write(ctx context.Context, frame frameBuilder) (frame, error) { + select { + case s.frameTicker <- struct{}{}: + case <-ctx.Done(): + return nil, ctx.Err() + } + + 
framer, err := s.conn.exec(ctx, frame, nil) + if err != nil { + return nil, err + } + + return framer.parseFrame() +} + +func (s *startupCoordinator) options(ctx context.Context) error { + frame, err := s.write(ctx, &writeOptionsFrame{}) + if err != nil { + return err + } + + supported, ok := frame.(*supportedFrame) + if !ok { + return NewErrProtocol("Unknown type of response to startup frame: %T", frame) + } + + return s.startup(ctx, supported.supported) +} + +func (s *startupCoordinator) startup(ctx context.Context, supported map[string][]string) error { + m := map[string]string{ + "CQL_VERSION": s.conn.cfg.CQLVersion, + "DRIVER_NAME": driverName, + "DRIVER_VERSION": driverVersion, + } + + if s.conn.compressor != nil { + comp := supported["COMPRESSION"] + name := s.conn.compressor.Name() + for _, compressor := range comp { + if compressor == name { + m["COMPRESSION"] = compressor + break + } + } + + if _, ok := m["COMPRESSION"]; !ok { + s.conn.compressor = nil + } + } + + frame, err := s.write(ctx, &writeStartupFrame{opts: m}) + if err != nil { + return err + } + + switch v := frame.(type) { + case error: + return v + case *readyFrame: + return nil + case *authenticateFrame: + return s.authenticateHandshake(ctx, v) + default: + return NewErrProtocol("Unknown type of response to startup frame: %s", v) + } +} + +func (s *startupCoordinator) authenticateHandshake(ctx context.Context, authFrame *authenticateFrame) error { + if s.conn.auth == nil { + return fmt.Errorf("authentication required (using %q)", authFrame.class) + } + + resp, challenger, err := s.conn.auth.Challenge([]byte(authFrame.class)) + if err != nil { + return err + } + + req := &writeAuthResponseFrame{data: resp} + for { + frame, err := s.write(ctx, req) + if err != nil { + return err + } + + switch v := frame.(type) { + case error: + return v + case *authSuccessFrame: + if challenger != nil { + return challenger.Success(v.data) + } + return nil + case *authChallengeFrame: + resp, challenger, err = challenger.Challenge(v.data) + if err != nil { + return err + } + + req = &writeAuthResponseFrame{ + data: resp, + } + default: + return fmt.Errorf("unknown frame response during authentication: %v", v) + } + } +} + +func (c *Conn) closeWithError(err error) { + if c == nil { + return + } + + c.mu.Lock() + if c.closed { + c.mu.Unlock() + return + } + c.closed = true + + var callsToClose map[int]*callReq + + // We should attempt to deliver the error back to the caller if it + // exists. However, don't block c.mu while we are delivering the + // error to outstanding calls. + if err != nil { + callsToClose = c.calls + // It is safe to change c.calls to nil. Nobody should use it after c.closed is set to true. + c.calls = nil + } + c.mu.Unlock() + + for _, req := range callsToClose { + // we need to send the error to all waiting queries. + select { + case req.resp <- callResp{err: err}: + case <-req.timeout: + } + if req.streamObserverContext != nil { + req.streamObserverEndOnce.Do(func() { + req.streamObserverContext.StreamAbandoned(ObservedStream{ + Host: c.host, + }) + }) + } + } + + // if error was nil then unblock the quit channel + c.cancel() + cerr := c.close() + + if err != nil { + c.errorHandler.HandleError(c, err, true) + } else if cerr != nil { + // TODO(zariel): is it a good idea to do this? 
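+		// cerr is the error returned from closing the underlying net.Conn.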
+ c.errorHandler.HandleError(c, cerr, true) + } +} + +func (c *Conn) close() error { + return c.conn.Close() +} + +func (c *Conn) Close() { + c.closeWithError(nil) +} + +// Serve starts the stream multiplexer for this connection, which is required +// to execute any queries. This method runs as long as the connection is +// open and is therefore usually called in a separate goroutine. +func (c *Conn) serve(ctx context.Context) { + var err error + for err == nil { + err = c.recv(ctx) + } + + c.closeWithError(err) +} + +func (c *Conn) discardFrame(head frameHeader) error { + _, err := io.CopyN(ioutil.Discard, c, int64(head.length)) + if err != nil { + return err + } + return nil +} + +type protocolError struct { + frame frame +} + +func (p *protocolError) Error() string { + if err, ok := p.frame.(error); ok { + return err.Error() + } + return fmt.Sprintf("gocql: received unexpected frame on stream %d: %v", p.frame.Header().stream, p.frame) +} + +func (c *Conn) heartBeat(ctx context.Context) { + sleepTime := 1 * time.Second + timer := time.NewTimer(sleepTime) + defer timer.Stop() + + var failures int + + for { + if failures > 5 { + c.closeWithError(fmt.Errorf("gocql: heartbeat failed")) + return + } + + timer.Reset(sleepTime) + + select { + case <-ctx.Done(): + return + case <-timer.C: + } + + framer, err := c.exec(context.Background(), &writeOptionsFrame{}, nil) + if err != nil { + failures++ + continue + } + + resp, err := framer.parseFrame() + if err != nil { + // invalid frame + failures++ + continue + } + + switch resp.(type) { + case *supportedFrame: + // Everything ok + sleepTime = 5 * time.Second + failures = 0 + case error: + // TODO: should we do something here? + default: + panic(fmt.Sprintf("gocql: unknown frame in response to options: %T", resp)) + } + } +} + +func (c *Conn) recv(ctx context.Context) error { + // not safe for concurrent reads + + // read a full header, ignore timeouts, as this is being ran in a loop + // TODO: TCP level deadlines? or just query level deadlines? + if c.timeout > 0 { + c.conn.SetReadDeadline(time.Time{}) + } + + headStartTime := time.Now() + // were just reading headers over and over and copy bodies + head, err := readHeader(c.r, c.headerBuf[:]) + headEndTime := time.Now() + if err != nil { + return err + } + + if c.frameObserver != nil { + c.frameObserver.ObserveFrameHeader(context.Background(), ObservedFrameHeader{ + Version: protoVersion(head.version), + Flags: head.flags, + Stream: int16(head.stream), + Opcode: frameOp(head.op), + Length: int32(head.length), + Start: headStartTime, + End: headEndTime, + Host: c.host, + }) + } + + if head.stream > c.streams.NumStreams { + return fmt.Errorf("gocql: frame header stream is beyond call expected bounds: %d", head.stream) + } else if head.stream == -1 { + // TODO: handle cassandra event frames, we shouldnt get any currently + framer := newFramer(c.compressor, c.version) + if err := framer.readFrame(c, &head); err != nil { + return err + } + go c.session.handleEvent(framer) + return nil + } else if head.stream <= 0 { + // reserved stream that we dont use, probably due to a protocol error + // or a bug in Cassandra, this should be an error, parse it and return. 
+ framer := newFramer(c.compressor, c.version) + if err := framer.readFrame(c, &head); err != nil { + return err + } + + frame, err := framer.parseFrame() + if err != nil { + return err + } + + return &protocolError{ + frame: frame, + } + } + + c.mu.Lock() + if c.closed { + c.mu.Unlock() + return ErrConnectionClosed + } + call, ok := c.calls[head.stream] + delete(c.calls, head.stream) + c.mu.Unlock() + if call == nil || !ok { + c.logger.Printf("gocql: received response for stream which has no handler: header=%v\n", head) + return c.discardFrame(head) + } else if head.stream != call.streamID { + panic(fmt.Sprintf("call has incorrect streamID: got %d expected %d", call.streamID, head.stream)) + } + + framer := newFramer(c.compressor, c.version) + + err = framer.readFrame(c, &head) + if err != nil { + // only net errors should cause the connection to be closed. Though + // cassandra returning corrupt frames will be returned here as well. + if _, ok := err.(net.Error); ok { + return err + } + } + + // we either, return a response to the caller, the caller timedout, or the + // connection has closed. Either way we should never block indefinatly here + select { + case call.resp <- callResp{framer: framer, err: err}: + case <-call.timeout: + c.releaseStream(call) + case <-ctx.Done(): + } + + return nil +} + +func (c *Conn) releaseStream(call *callReq) { + if call.timer != nil { + call.timer.Stop() + } + + c.streams.Clear(call.streamID) + + if call.streamObserverContext != nil { + call.streamObserverEndOnce.Do(func() { + call.streamObserverContext.StreamFinished(ObservedStream{ + Host: c.host, + }) + }) + } +} + +func (c *Conn) handleTimeout() { + if TimeoutLimit > 0 && atomic.AddInt64(&c.timeouts, 1) > TimeoutLimit { + c.closeWithError(ErrTooManyTimeouts) + } +} + +type callReq struct { + // resp will receive the frame that was sent as a response to this stream. + resp chan callResp + timeout chan struct{} // indicates to recv() that a call has timed out + streamID int // current stream in use + + timer *time.Timer + + // streamObserverContext is notified about events regarding this stream + streamObserverContext StreamObserverContext + + // streamObserverEndOnce ensures that either StreamAbandoned or StreamFinished is called, + // but not both. + streamObserverEndOnce sync.Once +} + +type callResp struct { + // framer is the response frame. + // May be nil if err is not nil. + framer *framer + // err is error encountered, if any. + err error +} + +// contextWriter is like io.Writer, but takes context as well. +type contextWriter interface { + // writeContext writes p to the connection. + // + // If ctx is canceled before we start writing p (e.g. during waiting while another write is currently in progress), + // p is not written and ctx.Err() is returned. Context is ignored after we start writing p (i.e. we don't interrupt + // blocked writes that are in progress) so that we always either write the full frame or not write it at all. + // + // It returns the number of bytes written from p (0 <= n <= len(p)) and any error that caused the write to stop + // early. writeContext must return a non-nil error if it returns n < len(p). writeContext must not modify the + // data in p, even temporarily. + writeContext(ctx context.Context, p []byte) (n int, err error) +} + +type deadlineWriter interface { + SetWriteDeadline(time.Time) error + io.Writer +} + +type deadlineContextWriter struct { + w deadlineWriter + timeout time.Duration + // semaphore protects critical section for SetWriteDeadline/Write. 
+ // It is a channel with capacity 1. + semaphore chan struct{} + + // quit closed once the connection is closed. + quit chan struct{} +} + +// writeContext implements contextWriter. +func (c *deadlineContextWriter) writeContext(ctx context.Context, p []byte) (int, error) { + select { + case <-ctx.Done(): + return 0, ctx.Err() + case <-c.quit: + return 0, ErrConnectionClosed + case c.semaphore <- struct{}{}: + // acquired + } + + defer func() { + // release + <-c.semaphore + }() + + if c.timeout > 0 { + err := c.w.SetWriteDeadline(time.Now().Add(c.timeout)) + if err != nil { + return 0, err + } + } + return c.w.Write(p) +} + +func newWriteCoalescer(conn deadlineWriter, writeTimeout, coalesceDuration time.Duration, + quit <-chan struct{}) *writeCoalescer { + wc := &writeCoalescer{ + writeCh: make(chan writeRequest), + c: conn, + quit: quit, + timeout: writeTimeout, + } + go wc.writeFlusher(coalesceDuration) + return wc +} + +type writeCoalescer struct { + c deadlineWriter + + mu sync.Mutex + + quit <-chan struct{} + writeCh chan writeRequest + + timeout time.Duration + + testEnqueuedHook func() + testFlushedHook func() +} + +type writeRequest struct { + // resultChan is a channel (with buffer size 1) where to send results of the write. + resultChan chan<- writeResult + // data to write. + data []byte +} + +type writeResult struct { + n int + err error +} + +// writeContext implements contextWriter. +func (w *writeCoalescer) writeContext(ctx context.Context, p []byte) (int, error) { + resultChan := make(chan writeResult, 1) + wr := writeRequest{ + resultChan: resultChan, + data: p, + } + + select { + case <-ctx.Done(): + return 0, ctx.Err() + case <-w.quit: + return 0, io.EOF // TODO: better error here? + case w.writeCh <- wr: + // enqueued for writing + } + + if w.testEnqueuedHook != nil { + w.testEnqueuedHook() + } + + result := <-resultChan + return result.n, result.err +} + +func (w *writeCoalescer) writeFlusher(interval time.Duration) { + timer := time.NewTimer(interval) + defer timer.Stop() + + if !timer.Stop() { + <-timer.C + } + + w.writeFlusherImpl(timer.C, func() { timer.Reset(interval) }) +} + +func (w *writeCoalescer) writeFlusherImpl(timerC <-chan time.Time, resetTimer func()) { + running := false + + var buffers net.Buffers + var resultChans []chan<- writeResult + + for { + select { + case req := <-w.writeCh: + buffers = append(buffers, req.data) + resultChans = append(resultChans, req.resultChan) + if !running { + // Start timer on first write. + resetTimer() + running = true + } + case <-w.quit: + result := writeResult{ + n: 0, + err: io.EOF, // TODO: better error here? + } + // Unblock whoever was waiting. + for _, resultChan := range resultChans { + // resultChan has capacity 1, so it does not block. + resultChan <- result + } + return + case <-timerC: + running = false + w.flush(resultChans, buffers) + buffers = nil + resultChans = nil + if w.testFlushedHook != nil { + w.testFlushedHook() + } + } + } +} + +func (w *writeCoalescer) flush(resultChans []chan<- writeResult, buffers net.Buffers) { + // Flush everything we have so far. + if w.timeout > 0 { + err := w.c.SetWriteDeadline(time.Now().Add(w.timeout)) + if err != nil { + for i := range resultChans { + resultChans[i] <- writeResult{ + n: 0, + err: err, + } + } + return + } + } + // Copy buffers because WriteTo modifies buffers in-place. 
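+	// buffers2 is drained by WriteTo below; the original buffers slice is kept
+	// so the loop that follows can attribute written byte counts to each request.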
+ buffers2 := make(net.Buffers, len(buffers)) + copy(buffers2, buffers) + n, err := buffers2.WriteTo(w.c) + // Writes of bytes before n succeeded, writes of bytes starting from n failed with err. + // Use n as remaining byte counter. + for i := range buffers { + if int64(len(buffers[i])) <= n { + // this buffer was fully written. + resultChans[i] <- writeResult{ + n: len(buffers[i]), + err: nil, + } + n -= int64(len(buffers[i])) + } else { + // this buffer was not (fully) written. + resultChans[i] <- writeResult{ + n: int(n), + err: err, + } + n = 0 + } + } +} + +// addCall attempts to add a call to c.calls. +// It fails with error if the connection already started closing or if a call for the given stream +// already exists. +func (c *Conn) addCall(call *callReq) error { + c.mu.Lock() + defer c.mu.Unlock() + if c.closed { + return ErrConnectionClosed + } + existingCall := c.calls[call.streamID] + if existingCall != nil { + return fmt.Errorf("attempting to use stream already in use: %d -> %d", call.streamID, + existingCall.streamID) + } + c.calls[call.streamID] = call + return nil +} + +func (c *Conn) exec(ctx context.Context, req frameBuilder, tracer Tracer) (*framer, error) { + if ctxErr := ctx.Err(); ctxErr != nil { + return nil, ctxErr + } + + // TODO: move tracer onto conn + stream, ok := c.streams.GetStream() + if !ok { + return nil, ErrNoStreams + } + + // resp is basically a waiting semaphore protecting the framer + framer := newFramer(c.compressor, c.version) + + call := &callReq{ + timeout: make(chan struct{}), + streamID: stream, + resp: make(chan callResp), + } + + if c.streamObserver != nil { + call.streamObserverContext = c.streamObserver.StreamContext(ctx) + } + + if err := c.addCall(call); err != nil { + return nil, err + } + + // After this point, we need to either read from call.resp or close(call.timeout) + // since closeWithError can try to write a connection close error to call.resp. + // If we don't close(call.timeout) or read from call.resp, closeWithError can deadlock. + + if tracer != nil { + framer.trace() + } + + if call.streamObserverContext != nil { + call.streamObserverContext.StreamStarted(ObservedStream{ + Host: c.host, + }) + } + + err := req.buildFrame(framer, stream) + if err != nil { + // closeWithError will block waiting for this stream to either receive a response + // or for us to timeout. + close(call.timeout) + // We failed to serialize the frame into a buffer. + // This should not affect the connection as we didn't write anything. We just free the current call. + c.mu.Lock() + if !c.closed { + delete(c.calls, call.streamID) + } + c.mu.Unlock() + // We need to release the stream after we remove the call from c.calls, otherwise the existingCall != nil + // check above could fail. + c.releaseStream(call) + return nil, err + } + + n, err := c.w.writeContext(ctx, framer.buf) + if err != nil { + // closeWithError will block waiting for this stream to either receive a response + // or for us to timeout, close the timeout chan here. Im not entirely sure + // but we should not get a response after an error on the write side. + close(call.timeout) + if (errors.Is(err, context.Canceled) || errors.Is(err, context.DeadlineExceeded)) && n == 0 { + // We have not started to write this frame. + // Release the stream as no response can come from the server on the stream. 
+ c.mu.Lock() + if !c.closed { + delete(c.calls, call.streamID) + } + c.mu.Unlock() + // We need to release the stream after we remove the call from c.calls, otherwise the existingCall != nil + // check above could fail. + c.releaseStream(call) + } else { + // I think this is the correct thing to do, im not entirely sure. It is not + // ideal as readers might still get some data, but they probably wont. + // Here we need to be careful as the stream is not available and if all + // writes just timeout or fail then the pool might use this connection to + // send a frame on, with all the streams used up and not returned. + c.closeWithError(err) + } + return nil, err + } + + var timeoutCh <-chan time.Time + if c.timeout > 0 { + if call.timer == nil { + call.timer = time.NewTimer(0) + <-call.timer.C + } else { + if !call.timer.Stop() { + select { + case <-call.timer.C: + default: + } + } + } + + call.timer.Reset(c.timeout) + timeoutCh = call.timer.C + } + + var ctxDone <-chan struct{} + if ctx != nil { + ctxDone = ctx.Done() + } + + select { + case resp := <-call.resp: + close(call.timeout) + if resp.err != nil { + if !c.Closed() { + // if the connection is closed then we cant release the stream, + // this is because the request is still outstanding and we have + // been handed another error from another stream which caused the + // connection to close. + c.releaseStream(call) + } + return nil, resp.err + } + // dont release the stream if detect a timeout as another request can reuse + // that stream and get a response for the old request, which we have no + // easy way of detecting. + // + // Ensure that the stream is not released if there are potentially outstanding + // requests on the stream to prevent nil pointer dereferences in recv(). + defer c.releaseStream(call) + + if v := resp.framer.header.version.version(); v != c.version { + return nil, NewErrProtocol("unexpected protocol version in response: got %d expected %d", v, c.version) + } + + return resp.framer, nil + case <-timeoutCh: + close(call.timeout) + c.handleTimeout() + return nil, ErrTimeoutNoResponse + case <-ctxDone: + close(call.timeout) + return nil, ctx.Err() + case <-c.ctx.Done(): + close(call.timeout) + return nil, ErrConnectionClosed + } +} + +// ObservedStream observes a single request/response stream. +type ObservedStream struct { + // Host of the connection used to send the stream. + Host *HostInfo +} + +// StreamObserver is notified about request/response pairs. +// Streams are created for executing queries/batches or +// internal requests to the database and might live longer than +// execution of the query - the stream is still tracked until +// response arrives so that stream IDs are not reused. +type StreamObserver interface { + // StreamContext is called before creating a new stream. + // ctx is context passed to Session.Query / Session.Batch, + // but might also be an internal context (for example + // for internal requests that use control connection). + // StreamContext might return nil if it is not interested + // in the details of this stream. + // StreamContext is called before the stream is created + // and the returned StreamObserverContext might be discarded + // without any methods called on the StreamObserverContext if + // creation of the stream fails. + // Note that if you don't need to track per-stream data, + // you can always return the same StreamObserverContext. + StreamContext(ctx context.Context) StreamObserverContext +} + +// StreamObserverContext is notified about state of a stream. 
+// A stream is started every time a request is written to the server +// and is finished when a response is received. +// It is abandoned when the underlying network connection is closed +// before receiving a response. +type StreamObserverContext interface { + // StreamStarted is called when the stream is started. + // This happens just before a request is written to the wire. + StreamStarted(observedStream ObservedStream) + + // StreamAbandoned is called when we stop waiting for response. + // This happens when the underlying network connection is closed. + // StreamFinished won't be called if StreamAbandoned is. + StreamAbandoned(observedStream ObservedStream) + + // StreamFinished is called when we receive a response for the stream. + StreamFinished(observedStream ObservedStream) +} + +type preparedStatment struct { + id []byte + request preparedMetadata + response resultMetadata +} + +type inflightPrepare struct { + done chan struct{} + err error + + preparedStatment *preparedStatment +} + +func (c *Conn) prepareStatement(ctx context.Context, stmt string, tracer Tracer) (*preparedStatment, error) { + stmtCacheKey := c.session.stmtsLRU.keyFor(c.host.HostID(), c.currentKeyspace, stmt) + flight, ok := c.session.stmtsLRU.execIfMissing(stmtCacheKey, func(lru *lru.Cache) *inflightPrepare { + flight := &inflightPrepare{ + done: make(chan struct{}), + } + lru.Add(stmtCacheKey, flight) + return flight + }) + + if !ok { + go func() { + defer close(flight.done) + + prep := &writePrepareFrame{ + statement: stmt, + } + if c.version > protoVersion4 { + prep.keyspace = c.currentKeyspace + } + + // we won the race to do the load, if our context is canceled we shouldnt + // stop the load as other callers are waiting for it but this caller should get + // their context cancelled error. + framer, err := c.exec(c.ctx, prep, tracer) + if err != nil { + flight.err = err + c.session.stmtsLRU.remove(stmtCacheKey) + return + } + + frame, err := framer.parseFrame() + if err != nil { + flight.err = err + c.session.stmtsLRU.remove(stmtCacheKey) + return + } + + // TODO(zariel): tidy this up, simplify handling of frame parsing so its not duplicated + // everytime we need to parse a frame. + if len(framer.traceID) > 0 && tracer != nil { + tracer.Trace(framer.traceID) + } + + switch x := frame.(type) { + case *resultPreparedFrame: + flight.preparedStatment = &preparedStatment{ + // defensively copy as we will recycle the underlying buffer after we + // return. + id: copyBytes(x.preparedID), + // the type info's should _not_ have a reference to the framers read buffer, + // therefore we can just copy them directly. 
+ request: x.reqMeta, + response: x.respMeta, + } + case error: + flight.err = x + default: + flight.err = NewErrProtocol("Unknown type in response to prepare frame: %s", x) + } + + if flight.err != nil { + c.session.stmtsLRU.remove(stmtCacheKey) + } + }() + } + + select { + case <-ctx.Done(): + return nil, ctx.Err() + case <-flight.done: + return flight.preparedStatment, flight.err + } +} + +func marshalQueryValue(typ TypeInfo, value interface{}, dst *queryValues) error { + if named, ok := value.(*namedValue); ok { + dst.name = named.name + value = named.value + } + + if _, ok := value.(unsetColumn); !ok { + val, err := Marshal(typ, value) + if err != nil { + return err + } + + dst.value = val + } else { + dst.isUnset = true + } + + return nil +} + +func (c *Conn) executeQuery(ctx context.Context, qry *Query) *Iter { + params := queryParams{ + consistency: qry.cons, + } + + // frame checks that it is not 0 + params.serialConsistency = qry.serialCons + params.defaultTimestamp = qry.defaultTimestamp + params.defaultTimestampValue = qry.defaultTimestampValue + + if len(qry.pageState) > 0 { + params.pagingState = qry.pageState + } + if qry.pageSize > 0 { + params.pageSize = qry.pageSize + } + if c.version > protoVersion4 { + params.keyspace = c.currentKeyspace + } + + var ( + frame frameBuilder + info *preparedStatment + ) + + if !qry.skipPrepare && qry.shouldPrepare() { + // Prepare all DML queries. Other queries can not be prepared. + var err error + info, err = c.prepareStatement(ctx, qry.stmt, qry.trace) + if err != nil { + return &Iter{err: err} + } + + values := qry.values + if qry.binding != nil { + values, err = qry.binding(&QueryInfo{ + Id: info.id, + Args: info.request.columns, + Rval: info.response.columns, + PKeyColumns: info.request.pkeyColumns, + }) + + if err != nil { + return &Iter{err: err} + } + } + + if len(values) != info.request.actualColCount { + return &Iter{err: fmt.Errorf("gocql: expected %d values send got %d", info.request.actualColCount, len(values))} + } + + params.values = make([]queryValues, len(values)) + for i := 0; i < len(values); i++ { + v := ¶ms.values[i] + value := values[i] + typ := info.request.columns[i].TypeInfo + if err := marshalQueryValue(typ, value, v); err != nil { + return &Iter{err: err} + } + } + + params.skipMeta = !(c.session.cfg.DisableSkipMetadata || qry.disableSkipMetadata) + + frame = &writeExecuteFrame{ + preparedID: info.id, + params: params, + customPayload: qry.customPayload, + } + + // Set "keyspace" and "table" property in the query if it is present in preparedMetadata + qry.routingInfo.mu.Lock() + qry.routingInfo.keyspace = info.request.keyspace + qry.routingInfo.table = info.request.table + qry.routingInfo.mu.Unlock() + } else { + frame = &writeQueryFrame{ + statement: qry.stmt, + params: params, + customPayload: qry.customPayload, + } + } + + framer, err := c.exec(ctx, frame, qry.trace) + if err != nil { + return &Iter{err: err} + } + + resp, err := framer.parseFrame() + if err != nil { + return &Iter{err: err} + } + + if len(framer.traceID) > 0 && qry.trace != nil { + qry.trace.Trace(framer.traceID) + } + + switch x := resp.(type) { + case *resultVoidFrame: + return &Iter{framer: framer} + case *resultRowsFrame: + iter := &Iter{ + meta: x.meta, + framer: framer, + numRows: x.numRows, + } + + if params.skipMeta { + if info != nil { + iter.meta = info.response + iter.meta.pagingState = copyBytes(x.meta.pagingState) + } else { + return &Iter{framer: framer, err: errors.New("gocql: did not receive metadata but prepared info is 
nil")} + } + } else { + iter.meta = x.meta + } + + if x.meta.morePages() && !qry.disableAutoPage { + newQry := new(Query) + *newQry = *qry + newQry.pageState = copyBytes(x.meta.pagingState) + newQry.metrics = &queryMetrics{m: make(map[string]*hostMetrics)} + + iter.next = &nextIter{ + qry: newQry, + pos: int((1 - qry.prefetch) * float64(x.numRows)), + } + + if iter.next.pos < 1 { + iter.next.pos = 1 + } + } + + return iter + case *resultKeyspaceFrame: + return &Iter{framer: framer} + case *schemaChangeKeyspace, *schemaChangeTable, *schemaChangeFunction, *schemaChangeAggregate, *schemaChangeType: + iter := &Iter{framer: framer} + if err := c.awaitSchemaAgreement(ctx); err != nil { + // TODO: should have this behind a flag + c.logger.Println(err) + } + // dont return an error from this, might be a good idea to give a warning + // though. The impact of this returning an error would be that the cluster + // is not consistent with regards to its schema. + return iter + case *RequestErrUnprepared: + stmtCacheKey := c.session.stmtsLRU.keyFor(c.host.HostID(), c.currentKeyspace, qry.stmt) + c.session.stmtsLRU.evictPreparedID(stmtCacheKey, x.StatementId) + return c.executeQuery(ctx, qry) + case error: + return &Iter{err: x, framer: framer} + default: + return &Iter{ + err: NewErrProtocol("Unknown type in response to execute query (%T): %s", x, x), + framer: framer, + } + } +} + +func (c *Conn) Pick(qry *Query) *Conn { + if c.Closed() { + return nil + } + return c +} + +func (c *Conn) Closed() bool { + c.mu.Lock() + defer c.mu.Unlock() + return c.closed +} + +func (c *Conn) Address() string { + return c.addr +} + +func (c *Conn) AvailableStreams() int { + return c.streams.Available() +} + +func (c *Conn) UseKeyspace(keyspace string) error { + q := &writeQueryFrame{statement: `USE "` + keyspace + `"`} + q.params.consistency = c.session.cons + + framer, err := c.exec(c.ctx, q, nil) + if err != nil { + return err + } + + resp, err := framer.parseFrame() + if err != nil { + return err + } + + switch x := resp.(type) { + case *resultKeyspaceFrame: + case error: + return x + default: + return NewErrProtocol("unknown frame in response to USE: %v", x) + } + + c.currentKeyspace = keyspace + + return nil +} + +func (c *Conn) executeBatch(ctx context.Context, batch *Batch) *Iter { + if c.version == protoVersion1 { + return &Iter{err: ErrUnsupported} + } + + n := len(batch.Entries) + req := &writeBatchFrame{ + typ: batch.Type, + statements: make([]batchStatment, n), + consistency: batch.Cons, + serialConsistency: batch.serialCons, + defaultTimestamp: batch.defaultTimestamp, + defaultTimestampValue: batch.defaultTimestampValue, + customPayload: batch.CustomPayload, + } + + stmts := make(map[string]string, len(batch.Entries)) + + for i := 0; i < n; i++ { + entry := &batch.Entries[i] + b := &req.statements[i] + + if len(entry.Args) > 0 || entry.binding != nil { + info, err := c.prepareStatement(batch.Context(), entry.Stmt, batch.trace) + if err != nil { + return &Iter{err: err} + } + + var values []interface{} + if entry.binding == nil { + values = entry.Args + } else { + values, err = entry.binding(&QueryInfo{ + Id: info.id, + Args: info.request.columns, + Rval: info.response.columns, + PKeyColumns: info.request.pkeyColumns, + }) + if err != nil { + return &Iter{err: err} + } + } + + if len(values) != info.request.actualColCount { + return &Iter{err: fmt.Errorf("gocql: batch statement %d expected %d values send got %d", i, info.request.actualColCount, len(values))} + } + + b.preparedID = info.id + 
stmts[string(info.id)] = entry.Stmt + + b.values = make([]queryValues, info.request.actualColCount) + + for j := 0; j < info.request.actualColCount; j++ { + v := &b.values[j] + value := values[j] + typ := info.request.columns[j].TypeInfo + if err := marshalQueryValue(typ, value, v); err != nil { + return &Iter{err: err} + } + } + } else { + b.statement = entry.Stmt + } + } + + framer, err := c.exec(batch.Context(), req, batch.trace) + if err != nil { + return &Iter{err: err} + } + + resp, err := framer.parseFrame() + if err != nil { + return &Iter{err: err, framer: framer} + } + + if len(framer.traceID) > 0 && batch.trace != nil { + batch.trace.Trace(framer.traceID) + } + + switch x := resp.(type) { + case *resultVoidFrame: + return &Iter{} + case *RequestErrUnprepared: + stmt, found := stmts[string(x.StatementId)] + if found { + key := c.session.stmtsLRU.keyFor(c.host.HostID(), c.currentKeyspace, stmt) + c.session.stmtsLRU.evictPreparedID(key, x.StatementId) + } + return c.executeBatch(ctx, batch) + case *resultRowsFrame: + iter := &Iter{ + meta: x.meta, + framer: framer, + numRows: x.numRows, + } + + return iter + case error: + return &Iter{err: x, framer: framer} + default: + return &Iter{err: NewErrProtocol("Unknown type in response to batch statement: %s", x), framer: framer} + } +} + +func (c *Conn) query(ctx context.Context, statement string, values ...interface{}) (iter *Iter) { + q := c.session.Query(statement, values...).Consistency(One).Trace(nil) + q.skipPrepare = true + q.disableSkipMetadata = true + // we want to keep the query on this connection + q.conn = c + return c.executeQuery(ctx, q) +} + +func (c *Conn) querySystemPeers(ctx context.Context, version cassVersion) *Iter { + const ( + peerSchema = "SELECT * FROM system.peers" + peerV2Schemas = "SELECT * FROM system.peers_v2" + ) + + c.mu.Lock() + isSchemaV2 := c.isSchemaV2 + c.mu.Unlock() + + if version.AtLeast(4, 0, 0) && isSchemaV2 { + // Try "system.peers_v2" and fallback to "system.peers" if it's not found + iter := c.query(ctx, peerV2Schemas) + + err := iter.checkErrAndNotFound() + if err != nil { + if errFrame, ok := err.(errorFrame); ok && errFrame.code == ErrCodeInvalid { // system.peers_v2 not found, try system.peers + c.mu.Lock() + c.isSchemaV2 = false + c.mu.Unlock() + return c.query(ctx, peerSchema) + } else { + return iter + } + } + return iter + } else { + return c.query(ctx, peerSchema) + } +} + +func (c *Conn) querySystemLocal(ctx context.Context) *Iter { + return c.query(ctx, "SELECT * FROM system.local WHERE key='local'") +} + +func (c *Conn) awaitSchemaAgreement(ctx context.Context) (err error) { + const localSchemas = "SELECT schema_version FROM system.local WHERE key='local'" + + var versions map[string]struct{} + var schemaVersion string + + endDeadline := time.Now().Add(c.session.cfg.MaxWaitSchemaAgreement) + + for time.Now().Before(endDeadline) { + iter := c.querySystemPeers(ctx, c.host.version) + + versions = make(map[string]struct{}) + + rows, err := iter.SliceMap() + if err != nil { + goto cont + } + + for _, row := range rows { + host, err := c.session.hostInfoFromMap(row, &HostInfo{connectAddress: c.host.ConnectAddress(), port: c.session.cfg.Port}) + if err != nil { + goto cont + } + if !isValidPeer(host) || host.schemaVersion == "" { + c.logger.Printf("invalid peer or peer with empty schema_version: peer=%q", host) + continue + } + + versions[host.schemaVersion] = struct{}{} + } + + if err = iter.Close(); err != nil { + goto cont + } + + iter = c.query(ctx, localSchemas) + for 
iter.Scan(&schemaVersion) { + versions[schemaVersion] = struct{}{} + schemaVersion = "" + } + + if err = iter.Close(); err != nil { + goto cont + } + + if len(versions) <= 1 { + return nil + } + + cont: + select { + case <-ctx.Done(): + return ctx.Err() + case <-time.After(200 * time.Millisecond): + } + } + + if err != nil { + return err + } + + schemas := make([]string, 0, len(versions)) + for schema := range versions { + schemas = append(schemas, schema) + } + + // not exported + return fmt.Errorf("gocql: cluster schema versions not consistent: %+v", schemas) +} + +var ( + ErrQueryArgLength = errors.New("gocql: query argument length mismatch") + ErrTimeoutNoResponse = errors.New("gocql: no response received from cassandra within timeout period") + ErrTooManyTimeouts = errors.New("gocql: too many query timeouts on the connection") + ErrConnectionClosed = errors.New("gocql: connection closed waiting for response") + ErrNoStreams = errors.New("gocql: no streams available on connection") +) diff --git a/vendor/github.com/gocql/gocql/connectionpool.go b/vendor/github.com/gocql/gocql/connectionpool.go new file mode 100644 index 000000000..c81d9d185 --- /dev/null +++ b/vendor/github.com/gocql/gocql/connectionpool.go @@ -0,0 +1,622 @@ +// Copyright (c) 2012 The gocql Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package gocql + +import ( + "crypto/tls" + "crypto/x509" + "errors" + "fmt" + "io/ioutil" + "math/rand" + "net" + "sync" + "sync/atomic" + "time" +) + +// interface to implement to receive the host information +type SetHosts interface { + SetHosts(hosts []*HostInfo) +} + +// interface to implement to receive the partitioner value +type SetPartitioner interface { + SetPartitioner(partitioner string) +} + +func setupTLSConfig(sslOpts *SslOptions) (*tls.Config, error) { + // Config.InsecureSkipVerify | EnableHostVerification | Result + // Config is nil | true | verify host + // Config is nil | false | do not verify host + // false | false | verify host + // true | false | do not verify host + // false | true | verify host + // true | true | verify host + var tlsConfig *tls.Config + if sslOpts.Config == nil { + tlsConfig = &tls.Config{ + InsecureSkipVerify: !sslOpts.EnableHostVerification, + } + } else { + // use clone to avoid race. 
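+		// Clone copies the struct so the adjustments below do not race with or
+		// alter the caller's tls.Config.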
+ tlsConfig = sslOpts.Config.Clone() + } + + if tlsConfig.InsecureSkipVerify && sslOpts.EnableHostVerification { + tlsConfig.InsecureSkipVerify = false + } + + // ca cert is optional + if sslOpts.CaPath != "" { + if tlsConfig.RootCAs == nil { + tlsConfig.RootCAs = x509.NewCertPool() + } + + pem, err := ioutil.ReadFile(sslOpts.CaPath) + if err != nil { + return nil, fmt.Errorf("connectionpool: unable to open CA certs: %v", err) + } + + if !tlsConfig.RootCAs.AppendCertsFromPEM(pem) { + return nil, errors.New("connectionpool: failed parsing or CA certs") + } + } + + if sslOpts.CertPath != "" || sslOpts.KeyPath != "" { + mycert, err := tls.LoadX509KeyPair(sslOpts.CertPath, sslOpts.KeyPath) + if err != nil { + return nil, fmt.Errorf("connectionpool: unable to load X509 key pair: %v", err) + } + tlsConfig.Certificates = append(tlsConfig.Certificates, mycert) + } + + return tlsConfig, nil +} + +type policyConnPool struct { + session *Session + + port int + numConns int + keyspace string + + mu sync.RWMutex + hostConnPools map[string]*hostConnPool +} + +func connConfig(cfg *ClusterConfig) (*ConnConfig, error) { + var ( + err error + hostDialer HostDialer + ) + + hostDialer = cfg.HostDialer + if hostDialer == nil { + var tlsConfig *tls.Config + + // TODO(zariel): move tls config setup into session init. + if cfg.SslOpts != nil { + tlsConfig, err = setupTLSConfig(cfg.SslOpts) + if err != nil { + return nil, err + } + } + + dialer := cfg.Dialer + if dialer == nil { + d := &net.Dialer{ + Timeout: cfg.ConnectTimeout, + } + if cfg.SocketKeepalive > 0 { + d.KeepAlive = cfg.SocketKeepalive + } + dialer = d + } + + hostDialer = &defaultHostDialer{ + dialer: dialer, + tlsConfig: tlsConfig, + } + } + + return &ConnConfig{ + ProtoVersion: cfg.ProtoVersion, + CQLVersion: cfg.CQLVersion, + Timeout: cfg.Timeout, + WriteTimeout: cfg.WriteTimeout, + ConnectTimeout: cfg.ConnectTimeout, + Dialer: cfg.Dialer, + HostDialer: hostDialer, + Compressor: cfg.Compressor, + Authenticator: cfg.Authenticator, + AuthProvider: cfg.AuthProvider, + Keepalive: cfg.SocketKeepalive, + Logger: cfg.logger(), + }, nil +} + +func newPolicyConnPool(session *Session) *policyConnPool { + // create the pool + pool := &policyConnPool{ + session: session, + port: session.cfg.Port, + numConns: session.cfg.NumConns, + keyspace: session.cfg.Keyspace, + hostConnPools: map[string]*hostConnPool{}, + } + + return pool +} + +func (p *policyConnPool) SetHosts(hosts []*HostInfo) { + p.mu.Lock() + defer p.mu.Unlock() + + toRemove := make(map[string]struct{}) + for hostID := range p.hostConnPools { + toRemove[hostID] = struct{}{} + } + + pools := make(chan *hostConnPool) + createCount := 0 + for _, host := range hosts { + if !host.IsUp() { + // don't create a connection pool for a down host + continue + } + hostID := host.HostID() + if _, exists := p.hostConnPools[hostID]; exists { + // still have this host, so don't remove it + delete(toRemove, hostID) + continue + } + + createCount++ + go func(host *HostInfo) { + // create a connection pool for the host + pools <- newHostConnPool( + p.session, + host, + p.port, + p.numConns, + p.keyspace, + ) + }(host) + } + + // add created pools + for createCount > 0 { + pool := <-pools + createCount-- + if pool.Size() > 0 { + // add pool only if there a connections available + p.hostConnPools[pool.host.HostID()] = pool + } + } + + for addr := range toRemove { + pool := p.hostConnPools[addr] + delete(p.hostConnPools, addr) + go pool.Close() + } +} + +func (p *policyConnPool) Size() int { + p.mu.RLock() + count := 0 + 
for _, pool := range p.hostConnPools { + count += pool.Size() + } + p.mu.RUnlock() + + return count +} + +func (p *policyConnPool) getPool(host *HostInfo) (pool *hostConnPool, ok bool) { + hostID := host.HostID() + p.mu.RLock() + pool, ok = p.hostConnPools[hostID] + p.mu.RUnlock() + return +} + +func (p *policyConnPool) Close() { + p.mu.Lock() + defer p.mu.Unlock() + + // close the pools + for addr, pool := range p.hostConnPools { + delete(p.hostConnPools, addr) + pool.Close() + } +} + +func (p *policyConnPool) addHost(host *HostInfo) { + hostID := host.HostID() + p.mu.Lock() + pool, ok := p.hostConnPools[hostID] + if !ok { + pool = newHostConnPool( + p.session, + host, + host.Port(), // TODO: if port == 0 use pool.port? + p.numConns, + p.keyspace, + ) + + p.hostConnPools[hostID] = pool + } + p.mu.Unlock() + + pool.fill() +} + +func (p *policyConnPool) removeHost(hostID string) { + p.mu.Lock() + pool, ok := p.hostConnPools[hostID] + if !ok { + p.mu.Unlock() + return + } + + delete(p.hostConnPools, hostID) + p.mu.Unlock() + + go pool.Close() +} + +// hostConnPool is a connection pool for a single host. +// Connection selection is based on a provided ConnSelectionPolicy +type hostConnPool struct { + session *Session + host *HostInfo + port int + size int + keyspace string + // protection for conns, closed, filling + mu sync.RWMutex + conns []*Conn + closed bool + filling bool + + pos uint32 + logger StdLogger +} + +func (h *hostConnPool) String() string { + h.mu.RLock() + defer h.mu.RUnlock() + return fmt.Sprintf("[filling=%v closed=%v conns=%v size=%v host=%v]", + h.filling, h.closed, len(h.conns), h.size, h.host) +} + +func newHostConnPool(session *Session, host *HostInfo, port, size int, + keyspace string) *hostConnPool { + + pool := &hostConnPool{ + session: session, + host: host, + port: port, + size: size, + keyspace: keyspace, + conns: make([]*Conn, 0, size), + filling: false, + closed: false, + logger: session.logger, + } + + // the pool is not filled or connected + return pool +} + +// Pick a connection from this connection pool for the given query. 
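+// Pick returns nil when the pool is closed or when no connections have been
+// established yet; if the pool is below its target size, it also triggers an
+// asynchronous fill. Among the open connections it prefers the one with the
+// most available streams.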
+func (pool *hostConnPool) Pick() *Conn {
+	pool.mu.RLock()
+	defer pool.mu.RUnlock()
+
+	if pool.closed {
+		return nil
+	}
+
+	size := len(pool.conns)
+	if size < pool.size {
+		// try to fill the pool
+		go pool.fill()
+
+		if size == 0 {
+			return nil
+		}
+	}
+
+	pos := int(atomic.AddUint32(&pool.pos, 1) - 1)
+
+	var (
+		leastBusyConn    *Conn
+		streamsAvailable int
+	)
+
+	// find the conn which has the most available streams, this is racy
+	for i := 0; i < size; i++ {
+		conn := pool.conns[(pos+i)%size]
+		if streams := conn.AvailableStreams(); streams > streamsAvailable {
+			leastBusyConn = conn
+			streamsAvailable = streams
+		}
+	}
+
+	return leastBusyConn
+}
+
+// Size returns the number of connections currently active in the pool
+func (pool *hostConnPool) Size() int {
+	pool.mu.RLock()
+	defer pool.mu.RUnlock()
+
+	return len(pool.conns)
+}
+
+// Close the connection pool
+func (pool *hostConnPool) Close() {
+	pool.mu.Lock()
+
+	if pool.closed {
+		pool.mu.Unlock()
+		return
+	}
+	pool.closed = true
+
+	// ensure we don't try to reacquire the lock in handleError
+	// TODO: improve this as the following can happen
+	// 1) we have locked pool.mu write lock
+	// 2) conn.Close calls conn.closeWithError(nil)
+	// 3) conn.closeWithError calls conn.Close() which returns an error
+	// 4) conn.closeWithError calls pool.HandleError with the error from conn.Close
+	// 5) pool.HandleError tries to lock pool.mu
+	// deadlock
+
+	// empty the pool
+	conns := pool.conns
+	pool.conns = nil
+
+	pool.mu.Unlock()
+
+	// close the connections
+	for _, conn := range conns {
+		conn.Close()
+	}
+}
+
+// Fill the connection pool
+func (pool *hostConnPool) fill() {
+	pool.mu.RLock()
+	// avoid filling a closed pool, or concurrent filling
+	if pool.closed || pool.filling {
+		pool.mu.RUnlock()
+		return
+	}
+
+	// determine the filling work to be done
+	startCount := len(pool.conns)
+	fillCount := pool.size - startCount
+
+	// avoid filling a full (or overfull) pool
+	if fillCount <= 0 {
+		pool.mu.RUnlock()
+		return
+	}
+
+	// switch from read to write lock
+	pool.mu.RUnlock()
+	pool.mu.Lock()
+
+	// double check everything since the lock was released
+	startCount = len(pool.conns)
+	fillCount = pool.size - startCount
+	if pool.closed || pool.filling || fillCount <= 0 {
+		// looks like another goroutine already beat this
+		// goroutine to the filling
+		pool.mu.Unlock()
+		return
+	}
+
+	// ok fill the pool
+	pool.filling = true
+
+	// allow others to access the pool while filling
+	pool.mu.Unlock()
+	// only this goroutine should make calls to fill/empty the pool at this
+	// point until after this routine or its subordinates call
+	// fillingStopped
+
+	// fill only the first connection synchronously
+	if startCount == 0 {
+		err := pool.connect()
+		pool.logConnectErr(err)
+
+		if err != nil {
+			// probably unreachable host
+			pool.fillingStopped(err)
+			return
+		}
+		// notify the session that this node is connected
+		go pool.session.handleNodeConnected(pool.host)
+
+		// filled one
+		fillCount--
+	}
+
+	// fill the rest of the pool asynchronously
+	go func() {
+		err := pool.connectMany(fillCount)
+
+		// mark the end of filling
+		pool.fillingStopped(err)
+
+		if err == nil && startCount > 0 {
+			// notify the session that this node is connected again
+			go pool.session.handleNodeConnected(pool.host)
+		}
+	}()
+}
+
+func (pool *hostConnPool) logConnectErr(err error) {
+	if opErr, ok := err.(*net.OpError); ok && (opErr.Op == "dial" || opErr.Op == "read") {
+		// connection refused
+		// these are typical during a node outage so avoid log spam.
+		if gocqlDebug {
+			pool.logger.Printf("gocql: unable to dial %q: %v\n", pool.host, err)
+		}
+	} else if err != nil {
+		// unexpected error
+		pool.logger.Printf("error: failed to connect to %q due to error: %v", pool.host, err)
+	}
+}
+
+// transition back to a not-filling state.
+func (pool *hostConnPool) fillingStopped(err error) {
+	if err != nil {
+		if gocqlDebug {
+			pool.logger.Printf("gocql: filling stopped %q: %v\n", pool.host.ConnectAddress(), err)
+		}
+		// wait for some time to avoid back-to-back filling
+		// this provides some time between failed attempts
+		// to fill the pool for the host to recover
+		time.Sleep(time.Duration(rand.Int31n(100)+31) * time.Millisecond)
+	}
+
+	pool.mu.Lock()
+	pool.filling = false
+	count := len(pool.conns)
+	host := pool.host
+	port := pool.port
+	pool.mu.Unlock()
+
+	// if we errored and the size is now zero, make sure the host is marked as down
+	// see https://github.com/gocql/gocql/issues/1614
+	if gocqlDebug {
+		pool.logger.Printf("gocql: conns of pool after stopped %q: %v\n", host.ConnectAddress(), count)
+	}
+	if err != nil && count == 0 {
+		if pool.session.cfg.ConvictionPolicy.AddFailure(err, host) {
+			pool.session.handleNodeDown(host.ConnectAddress(), port)
+		}
+	}
+}
+
+// connectMany creates new connections concurrently.
+func (pool *hostConnPool) connectMany(count int) error {
+	if count == 0 {
+		return nil
+	}
+	var (
+		wg         sync.WaitGroup
+		mu         sync.Mutex
+		connectErr error
+	)
+	wg.Add(count)
+	for i := 0; i < count; i++ {
+		go func() {
+			defer wg.Done()
+			err := pool.connect()
+			pool.logConnectErr(err)
+			if err != nil {
+				mu.Lock()
+				connectErr = err
+				mu.Unlock()
+			}
+		}()
+	}
+	// wait until all connection attempts are done
+	wg.Wait()
+
+	return connectErr
+}
+
+// create a new connection to the host and add it to the pool
+func (pool *hostConnPool) connect() (err error) {
+	// TODO: provide a more robust connection retry mechanism, we should also
+	// be able to detect hosts that come up by trying to connect to downed ones.
+	// try to connect
+	var conn *Conn
+	reconnectionPolicy := pool.session.cfg.ReconnectionPolicy
+	for i := 0; i < reconnectionPolicy.GetMaxRetries(); i++ {
+		conn, err = pool.session.connect(pool.session.ctx, pool.host, pool)
+		if err == nil {
+			break
+		}
+		if opErr, isOpErr := err.(*net.OpError); isOpErr {
+			// if the error is not a temporary error (ex: network unreachable) don't
+			// retry
+			if !opErr.Temporary() {
+				break
+			}
+		}
+		if gocqlDebug {
+			pool.logger.Printf("gocql: connection failed %q: %v, reconnecting with %T\n",
+				pool.host.ConnectAddress(), err, reconnectionPolicy)
+		}
+		time.Sleep(reconnectionPolicy.GetInterval(i))
+	}
+
+	if err != nil {
+		return err
+	}
+
+	if pool.keyspace != "" {
+		// set the keyspace
+		if err = conn.UseKeyspace(pool.keyspace); err != nil {
+			conn.Close()
+			return err
+		}
+	}
+
+	// add the Conn to the pool
+	pool.mu.Lock()
+	defer pool.mu.Unlock()
+
+	if pool.closed {
+		conn.Close()
+		return nil
+	}
+
+	pool.conns = append(pool.conns, conn)
+
+	return nil
+}
+
+// handle any error from a Conn
+func (pool *hostConnPool) HandleError(conn *Conn, err error, closed bool) {
+	if !closed {
+		// still an open connection, so continue using it
+		return
+	}
+
+	// TODO: track the number of errors per host and detect when a host is dead,
+	// then also have something which can detect when a host comes back.
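+	// A hypothetical sketch of such tracking (the errorCount field and
+	// deadThreshold value are made up, not part of this driver): keep a
+	// per-host failure counter that HandleError increments and a successful
+	// connect resets, e.g.
+	//
+	//	if pool.errorCount.Add(1) >= deadThreshold { // errorCount atomic.Int64, reset in connect()
+	//		pool.session.handleNodeDown(pool.host.ConnectAddress(), pool.port)
+	//	}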
+ pool.mu.Lock() + defer pool.mu.Unlock() + + if pool.closed { + // pool closed + return + } + + if gocqlDebug { + pool.logger.Printf("gocql: pool connection error %q: %v\n", conn.addr, err) + } + + // find the connection index + for i, candidate := range pool.conns { + if candidate == conn { + // remove the connection, not preserving order + pool.conns[i], pool.conns = pool.conns[len(pool.conns)-1], pool.conns[:len(pool.conns)-1] + + // lost a connection, so fill the pool + go pool.fill() + break + } + } +} diff --git a/vendor/github.com/gocql/gocql/control.go b/vendor/github.com/gocql/gocql/control.go new file mode 100644 index 000000000..47ec7abaf --- /dev/null +++ b/vendor/github.com/gocql/gocql/control.go @@ -0,0 +1,522 @@ +package gocql + +import ( + "context" + crand "crypto/rand" + "errors" + "fmt" + "math/rand" + "net" + "os" + "regexp" + "strconv" + "sync" + "sync/atomic" + "time" +) + +var ( + randr *rand.Rand + mutRandr sync.Mutex +) + +func init() { + b := make([]byte, 4) + if _, err := crand.Read(b); err != nil { + panic(fmt.Sprintf("unable to seed random number generator: %v", err)) + } + + randr = rand.New(rand.NewSource(int64(readInt(b)))) +} + +const ( + controlConnStarting = 0 + controlConnStarted = 1 + controlConnClosing = -1 +) + +// Ensure that the atomic variable is aligned to a 64bit boundary +// so that atomic operations can be applied on 32bit architectures. +type controlConn struct { + state int32 + reconnecting int32 + + session *Session + conn atomic.Value + + retry RetryPolicy + + quit chan struct{} +} + +func createControlConn(session *Session) *controlConn { + control := &controlConn{ + session: session, + quit: make(chan struct{}), + retry: &SimpleRetryPolicy{NumRetries: 3}, + } + + control.conn.Store((*connHost)(nil)) + + return control +} + +func (c *controlConn) heartBeat() { + if !atomic.CompareAndSwapInt32(&c.state, controlConnStarting, controlConnStarted) { + return + } + + sleepTime := 1 * time.Second + timer := time.NewTimer(sleepTime) + defer timer.Stop() + + for { + timer.Reset(sleepTime) + + select { + case <-c.quit: + return + case <-timer.C: + } + + resp, err := c.writeFrame(&writeOptionsFrame{}) + if err != nil { + goto reconn + } + + switch resp.(type) { + case *supportedFrame: + // Everything ok + sleepTime = 5 * time.Second + continue + case error: + goto reconn + default: + panic(fmt.Sprintf("gocql: unknown frame in response to options: %T", resp)) + } + + reconn: + // try to connect a bit faster + sleepTime = 1 * time.Second + c.reconnect() + continue + } +} + +var hostLookupPreferV4 = os.Getenv("GOCQL_HOST_LOOKUP_PREFER_V4") == "true" + +func hostInfo(addr string, defaultPort int) ([]*HostInfo, error) { + var port int + host, portStr, err := net.SplitHostPort(addr) + if err != nil { + host = addr + port = defaultPort + } else { + port, err = strconv.Atoi(portStr) + if err != nil { + return nil, err + } + } + + var hosts []*HostInfo + + // Check if host is a literal IP address + if ip := net.ParseIP(host); ip != nil { + hosts = append(hosts, &HostInfo{hostname: host, connectAddress: ip, port: port}) + return hosts, nil + } + + // Look up host in DNS + ips, err := LookupIP(host) + if err != nil { + return nil, err + } else if len(ips) == 0 { + return nil, fmt.Errorf("no IP's returned from DNS lookup for %q", addr) + } + + // Filter to v4 addresses if any present + if hostLookupPreferV4 { + var preferredIPs []net.IP + for _, v := range ips { + if v4 := v.To4(); v4 != nil { + preferredIPs = append(preferredIPs, v4) + } + } + if 
len(preferredIPs) != 0 {
+			ips = preferredIPs
+		}
+	}
+
+	for _, ip := range ips {
+		hosts = append(hosts, &HostInfo{hostname: host, connectAddress: ip, port: port})
+	}
+
+	return hosts, nil
+}
+
+func shuffleHosts(hosts []*HostInfo) []*HostInfo {
+	shuffled := make([]*HostInfo, len(hosts))
+	copy(shuffled, hosts)
+
+	mutRandr.Lock()
+	randr.Shuffle(len(hosts), func(i, j int) {
+		shuffled[i], shuffled[j] = shuffled[j], shuffled[i]
+	})
+	mutRandr.Unlock()
+
+	return shuffled
+}
+
+// this is going to be version dependent and a nightmare to maintain :(
+var protocolSupportRe = regexp.MustCompile(`the lowest supported version is \d+ and the greatest is (\d+)$`)
+
+func parseProtocolFromError(err error) int {
+	// I really wish this had the actual info in the error frame...
+	matches := protocolSupportRe.FindAllStringSubmatch(err.Error(), -1)
+	if len(matches) != 1 || len(matches[0]) != 2 {
+		if verr, ok := err.(*protocolError); ok {
+			return int(verr.frame.Header().version.version())
+		}
+		return 0
+	}
+
+	max, err := strconv.Atoi(matches[0][1])
+	if err != nil {
+		return 0
+	}
+
+	return max
+}
+
+func (c *controlConn) discoverProtocol(hosts []*HostInfo) (int, error) {
+	hosts = shuffleHosts(hosts)
+
+	connCfg := *c.session.connCfg
+	connCfg.ProtoVersion = 4 // TODO: define maxProtocol
+
+	handler := connErrorHandlerFn(func(c *Conn, err error, closed bool) {
+		// we should never get here, but if we do it means we connected to a
+		// host successfully which means our attempted protocol version worked
+		if !closed {
+			c.Close()
+		}
+	})
+
+	var err error
+	for _, host := range hosts {
+		var conn *Conn
+		conn, err = c.session.dial(c.session.ctx, host, &connCfg, handler)
+		if conn != nil {
+			conn.Close()
+		}
+
+		if err == nil {
+			return connCfg.ProtoVersion, nil
+		}
+
+		if proto := parseProtocolFromError(err); proto > 0 {
+			return proto, nil
+		}
+	}
+
+	return 0, err
+}
+
+func (c *controlConn) connect(hosts []*HostInfo) error {
+	if len(hosts) == 0 {
+		return errors.New("control: no endpoints specified")
+	}
+
+	// shuffle endpoints so not all drivers will connect to the same initial
+	// node.
+	hosts = shuffleHosts(hosts)
+
+	cfg := *c.session.connCfg
+	cfg.disableCoalesce = true
+
+	var conn *Conn
+	var err error
+	for _, host := range hosts {
+		conn, err = c.session.dial(c.session.ctx, host, &cfg, c)
+		if err != nil {
+			c.session.logger.Printf("gocql: unable to dial control conn %v:%v: %v\n", host.ConnectAddress(), host.Port(), err)
+			continue
+		}
+		err = c.setupConn(conn)
+		if err == nil {
+			break
+		}
+		c.session.logger.Printf("gocql: unable to set up control conn %v:%v: %v\n", host.ConnectAddress(), host.Port(), err)
+		conn.Close()
+		conn = nil
+	}
+	if conn == nil {
+		return fmt.Errorf("unable to connect to initial hosts: %v", err)
+	}
+
+	// we could fetch the initial ring here and update initial host data. So that
+	// when we return from here we have a ring topology ready to go.
+
+	go c.heartBeat()
+
+	return nil
+}
+
+type connHost struct {
+	conn *Conn
+	host *HostInfo
+}
+
+func (c *controlConn) setupConn(conn *Conn) error {
+	// we need up-to-date host info for the filterHost call below
+	iter := conn.querySystemLocal(context.TODO())
+	host, err := c.session.hostInfoFromIter(iter, conn.host.connectAddress, conn.conn.RemoteAddr().(*net.TCPAddr).Port)
+	if err != nil {
+		return err
+	}
+
+	host = c.session.ring.addOrUpdate(host)
+
+	if c.session.cfg.filterHost(host) {
+		return fmt.Errorf("host was filtered: %v", host.ConnectAddress())
+	}
+
+	if err := c.registerEvents(conn); err != nil {
+		return fmt.Errorf("register events: %v", err)
+	}
+
+	ch := &connHost{
+		conn: conn,
+		host: host,
+	}
+
+	c.conn.Store(ch)
+	if c.session.initialized() {
+		// The control connection is established, so connect this host in the pool as well.
+		// Notify the session that we can start trying to connect to the node.
+		// We can't start the fill before the session is initialized, otherwise the fill would interfere
+		// with the fill called by Session.init. Session.init needs to wait for its fill to finish and that
+		// would return immediately if we started the fill here.
+		// TODO(martin-sucha): Trigger pool refill for all hosts, like in reconnectDownedHosts?
+		go c.session.startPoolFill(host)
+	}
+	return nil
+}
+
+func (c *controlConn) registerEvents(conn *Conn) error {
+	var events []string
+
+	if !c.session.cfg.Events.DisableTopologyEvents {
+		events = append(events, "TOPOLOGY_CHANGE")
+	}
+	if !c.session.cfg.Events.DisableNodeStatusEvents {
+		events = append(events, "STATUS_CHANGE")
+	}
+	if !c.session.cfg.Events.DisableSchemaEvents {
+		events = append(events, "SCHEMA_CHANGE")
+	}
+
+	if len(events) == 0 {
+		return nil
+	}
+
+	framer, err := conn.exec(context.Background(),
+		&writeRegisterFrame{
+			events: events,
+		}, nil)
+	if err != nil {
+		return err
+	}
+
+	frame, err := framer.parseFrame()
+	if err != nil {
+		return err
+	} else if _, ok := frame.(*readyFrame); !ok {
+		return fmt.Errorf("unexpected frame in response to register: got %T: %v\n", frame, frame)
+	}
+
+	return nil
+}
+
+func (c *controlConn) reconnect() {
+	if atomic.LoadInt32(&c.state) == controlConnClosing {
+		return
+	}
+	if !atomic.CompareAndSwapInt32(&c.reconnecting, 0, 1) {
+		return
+	}
+	defer atomic.StoreInt32(&c.reconnecting, 0)
+
+	conn, err := c.attemptReconnect()
+
+	if conn == nil {
+		c.session.logger.Printf("gocql: unable to reconnect control connection: %v\n", err)
+		return
+	}
+
+	err = c.session.refreshRing()
+	if err != nil {
+		c.session.logger.Printf("gocql: unable to refresh ring: %v\n", err)
+	}
+}
+
+func (c *controlConn) attemptReconnect() (*Conn, error) {
+	hosts := c.session.ring.allHosts()
+	hosts = shuffleHosts(hosts)
+
+	// keep the old behavior of connecting to the old host first by moving it to
+	// the front of the slice
+	ch := c.getConn()
+	if ch != nil {
+		for i := range hosts {
+			if hosts[i].Equal(ch.host) {
+				hosts[0], hosts[i] = hosts[i], hosts[0]
+				break
+			}
+		}
+		ch.conn.Close()
+	}
+
+	conn, err := c.attemptReconnectToAnyOfHosts(hosts)
+
+	if conn != nil {
+		return conn, err
+	}
+
+	c.session.logger.Printf("gocql: unable to connect to any ring node: %v\n", err)
+	c.session.logger.Printf("gocql: control falling back to initial contact points.\n")
+	// Fallback to initial contact points, as it may be the case that all known initialHosts
+	// changed their IPs while keeping the same hostname(s).
+	initialHosts, resolvErr := addrsToHosts(c.session.cfg.Hosts, c.session.cfg.Port, c.session.logger)
+	if resolvErr != nil {
+		return nil, fmt.Errorf("resolve contact points' hostnames: %v", resolvErr)
+	}
+
+	return c.attemptReconnectToAnyOfHosts(initialHosts)
+}
+
+func (c *controlConn) attemptReconnectToAnyOfHosts(hosts []*HostInfo) (*Conn, error) {
+	var conn *Conn
+	var err error
+	for _, host := range hosts {
+		conn, err = c.session.connect(c.session.ctx, host, c)
+		if err != nil {
+			c.session.logger.Printf("gocql: unable to dial control conn %v:%v: %v\n", host.ConnectAddress(), host.Port(), err)
+			continue
+		}
+		err = c.setupConn(conn)
+		if err == nil {
+			break
+		}
+		c.session.logger.Printf("gocql: unable to set up control conn %v:%v: %v\n", host.ConnectAddress(), host.Port(), err)
+		conn.Close()
+		conn = nil
+	}
+	return conn, err
+}
+
+func (c *controlConn) HandleError(conn *Conn, err error, closed bool) {
+	if !closed {
+		return
+	}
+
+	oldConn := c.getConn()
+
+	// If the connection is long gone and has not been attempted for a while,
+	// it's possible for oldConn to be nil here (#1297).
+	if oldConn != nil && oldConn.conn != conn {
+		return
+	}
+
+	c.reconnect()
+}
+
+func (c *controlConn) getConn() *connHost {
+	return c.conn.Load().(*connHost)
+}
+
+func (c *controlConn) writeFrame(w frameBuilder) (frame, error) {
+	ch := c.getConn()
+	if ch == nil {
+		return nil, errNoControl
+	}
+
+	framer, err := ch.conn.exec(context.Background(), w, nil)
+	if err != nil {
+		return nil, err
+	}
+
+	return framer.parseFrame()
+}
+
+func (c *controlConn) withConnHost(fn func(*connHost) *Iter) *Iter {
+	const maxConnectAttempts = 5
+	connectAttempts := 0
+
+	for i := 0; i < maxConnectAttempts; i++ {
+		ch := c.getConn()
+		if ch == nil {
+			if connectAttempts > maxConnectAttempts {
+				break
+			}
+
+			connectAttempts++
+
+			c.reconnect()
+			continue
+		}
+
+		return fn(ch)
+	}
+
+	return &Iter{err: errNoControl}
+}
+
+func (c *controlConn) withConn(fn func(*Conn) *Iter) *Iter {
+	return c.withConnHost(func(ch *connHost) *Iter {
+		return fn(ch.conn)
+	})
+}
+
+// query will return nil if the connection is closed or nil
+func (c *controlConn) query(statement string, values ...interface{}) (iter *Iter) {
+	q := c.session.Query(statement, values...).Consistency(One).RoutingKey([]byte{}).Trace(nil)
+
+	for {
+		iter = c.withConn(func(conn *Conn) *Iter {
+			// we want to keep the query on the control connection
+			q.conn = conn
+			return conn.executeQuery(context.TODO(), q)
+		})
+
+		if gocqlDebug && iter.err != nil {
+			c.session.logger.Printf("control: error executing %q: %v\n", statement, iter.err)
+		}
+
+		q.AddAttempts(1, c.getConn().host)
+		if iter.err == nil || !c.retry.Attempt(q) {
+			break
+		}
+	}
+
+	return
+}
+
+func (c *controlConn) awaitSchemaAgreement() error {
+	return c.withConn(func(conn *Conn) *Iter {
+		return &Iter{err: conn.awaitSchemaAgreement(context.TODO())}
+	}).err
+}
+
+func (c *controlConn) close() {
+	if atomic.CompareAndSwapInt32(&c.state, controlConnStarted, controlConnClosing) {
+		c.quit <- struct{}{}
+	}
+
+	ch := c.getConn()
+	if ch != nil {
+		ch.conn.Close()
+	}
+}
+
+var errNoControl = errors.New("gocql: no control connection available")
diff --git a/vendor/github.com/gocql/gocql/cqltypes.go b/vendor/github.com/gocql/gocql/cqltypes.go
new file mode 100644
index 000000000..e465e94ea
--- /dev/null
+++ b/vendor/github.com/gocql/gocql/cqltypes.go
@@ -0,0 +1,11 @@
+// Copyright (c) 2012 The gocql Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package gocql + +type Duration struct { + Months int32 + Days int32 + Nanoseconds int64 +} diff --git a/vendor/github.com/gocql/gocql/debug_off.go b/vendor/github.com/gocql/gocql/debug_off.go new file mode 100644 index 000000000..31e622599 --- /dev/null +++ b/vendor/github.com/gocql/gocql/debug_off.go @@ -0,0 +1,6 @@ +//go:build !gocql_debug +// +build !gocql_debug + +package gocql + +const gocqlDebug = false diff --git a/vendor/github.com/gocql/gocql/debug_on.go b/vendor/github.com/gocql/gocql/debug_on.go new file mode 100644 index 000000000..b3bdfab8d --- /dev/null +++ b/vendor/github.com/gocql/gocql/debug_on.go @@ -0,0 +1,6 @@ +//go:build gocql_debug +// +build gocql_debug + +package gocql + +const gocqlDebug = true diff --git a/vendor/github.com/gocql/gocql/dial.go b/vendor/github.com/gocql/gocql/dial.go new file mode 100644 index 000000000..0613cebe0 --- /dev/null +++ b/vendor/github.com/gocql/gocql/dial.go @@ -0,0 +1,91 @@ +package gocql + +import ( + "context" + "crypto/tls" + "fmt" + "net" + "strings" +) + +// HostDialer allows customizing connection to cluster nodes. +type HostDialer interface { + // DialHost establishes a connection to the host. + // The returned connection must be directly usable for CQL protocol, + // specifically DialHost is responsible also for setting up the TLS session if needed. + // DialHost should disable write coalescing if the returned net.Conn does not support writev. + // As of Go 1.18, only plain TCP connections support writev, TLS sessions should disable coalescing. + // You can use WrapTLS helper function if you don't need to override the TLS setup. + DialHost(ctx context.Context, host *HostInfo) (*DialedHost, error) +} + +// DialedHost contains information about established connection to a host. +type DialedHost struct { + // Conn used to communicate with the server. + Conn net.Conn + + // DisableCoalesce disables write coalescing for the Conn. + // If true, the effect is the same as if WriteCoalesceWaitTime was configured to 0. + DisableCoalesce bool +} + +// defaultHostDialer dials host in a default way. +type defaultHostDialer struct { + dialer Dialer + tlsConfig *tls.Config +} + +func (hd *defaultHostDialer) DialHost(ctx context.Context, host *HostInfo) (*DialedHost, error) { + ip := host.ConnectAddress() + port := host.Port() + + if !validIpAddr(ip) { + return nil, fmt.Errorf("host missing connect ip address: %v", ip) + } else if port == 0 { + return nil, fmt.Errorf("host missing port: %v", port) + } + + connAddr := host.ConnectAddressAndPort() + conn, err := hd.dialer.DialContext(ctx, "tcp", connAddr) + if err != nil { + return nil, err + } + addr := host.HostnameAndPort() + return WrapTLS(ctx, conn, addr, hd.tlsConfig) +} + +func tlsConfigForAddr(tlsConfig *tls.Config, addr string) *tls.Config { + // the TLS config is safe to be reused by connections but it must not + // be modified after being used. + if !tlsConfig.InsecureSkipVerify && tlsConfig.ServerName == "" { + colonPos := strings.LastIndex(addr, ":") + if colonPos == -1 { + colonPos = len(addr) + } + hostname := addr[:colonPos] + // clone config to avoid modifying the shared one. + tlsConfig = tlsConfig.Clone() + tlsConfig.ServerName = hostname + } + return tlsConfig +} + +// WrapTLS optionally wraps a net.Conn connected to addr with the given tlsConfig. +// If the tlsConfig is nil, conn is not wrapped into a TLS session, so is insecure. 
+// If the tlsConfig does not have server name set, it is updated based on the default gocql rules. +func WrapTLS(ctx context.Context, conn net.Conn, addr string, tlsConfig *tls.Config) (*DialedHost, error) { + if tlsConfig != nil { + tlsConfig := tlsConfigForAddr(tlsConfig, addr) + tconn := tls.Client(conn, tlsConfig) + if err := tconn.HandshakeContext(ctx); err != nil { + conn.Close() + return nil, err + } + conn = tconn + } + + return &DialedHost{ + Conn: conn, + DisableCoalesce: tlsConfig != nil, // write coalescing can't use writev when the connection is wrapped. + }, nil +} diff --git a/vendor/github.com/gocql/gocql/doc.go b/vendor/github.com/gocql/gocql/doc.go new file mode 100644 index 000000000..6739d98e4 --- /dev/null +++ b/vendor/github.com/gocql/gocql/doc.go @@ -0,0 +1,360 @@ +// Copyright (c) 2012-2015 The gocql Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package gocql implements a fast and robust Cassandra driver for the +// Go programming language. +// +// # Connecting to the cluster +// +// Pass a list of initial node IP addresses to NewCluster to create a new cluster configuration: +// +// cluster := gocql.NewCluster("192.168.1.1", "192.168.1.2", "192.168.1.3") +// +// Port can be specified as part of the address, the above is equivalent to: +// +// cluster := gocql.NewCluster("192.168.1.1:9042", "192.168.1.2:9042", "192.168.1.3:9042") +// +// It is recommended to use the value set in the Cassandra config for broadcast_address or listen_address, +// an IP address not a domain name. This is because events from Cassandra will use the configured IP +// address, which is used to index connected hosts. If the domain name specified resolves to more than 1 IP address +// then the driver may connect multiple times to the same host, and will not mark the node being down or up from events. +// +// Then you can customize more options (see ClusterConfig): +// +// cluster.Keyspace = "example" +// cluster.Consistency = gocql.Quorum +// cluster.ProtoVersion = 4 +// +// The driver tries to automatically detect the protocol version to use if not set, but you might want to set the +// protocol version explicitly, as it's not defined which version will be used in certain situations (for example +// during upgrade of the cluster when some of the nodes support different set of protocol versions than other nodes). +// +// The driver advertises the module name and version in the STARTUP message, so servers are able to detect the version. +// If you use replace directive in go.mod, the driver will send information about the replacement module instead. +// +// When ready, create a session from the configuration. Don't forget to Close the session once you are done with it: +// +// session, err := cluster.CreateSession() +// if err != nil { +// return err +// } +// defer session.Close() +// +// # Authentication +// +// CQL protocol uses a SASL-based authentication mechanism and so consists of an exchange of server challenges and +// client response pairs. The details of the exchanged messages depend on the authenticator used. +// +// To use authentication, set ClusterConfig.Authenticator or ClusterConfig.AuthProvider. 
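+//
+// Alternatively, ClusterConfig.AuthProvider can be set to construct an Authenticator
+// per host. A minimal illustrative sketch (not taken from the original docs):
+//
+//	cluster.AuthProvider = func(h *gocql.HostInfo) (gocql.Authenticator, error) {
+//		// static credentials as an example; per-host logic may differ
+//		return gocql.PasswordAuthenticator{Username: "user", Password: "password"}, nil
+//	}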
+//
+// PasswordAuthenticator is provided for username/password authentication:
+//
+//	cluster := gocql.NewCluster("192.168.1.1", "192.168.1.2", "192.168.1.3")
+//	cluster.Authenticator = gocql.PasswordAuthenticator{
+//		Username: "user",
+//		Password: "password",
+//	}
+//	session, err := cluster.CreateSession()
+//	if err != nil {
+//		return err
+//	}
+//	defer session.Close()
+//
+// # Transport layer security
+//
+// It is possible to secure traffic between the client and server with TLS.
+//
+// To use TLS, set the ClusterConfig.SslOpts field. SslOptions embeds *tls.Config so you can set that directly.
+// There are also helpers to load keys/certificates from files.
+//
+// Warning: Due to historical reasons, SslOptions is insecure by default, so you need to set EnableHostVerification
+// to true if no Config is set. Most users should set SslOptions.Config to a *tls.Config.
+// SslOptions and Config.InsecureSkipVerify interact as follows:
+//
+//	Config.InsecureSkipVerify | EnableHostVerification | Result
+//	Config is nil             | false                  | do not verify host
+//	Config is nil             | true                   | verify host
+//	false                     | false                  | verify host
+//	true                      | false                  | do not verify host
+//	false                     | true                   | verify host
+//	true                      | true                   | verify host
+//
+// For example:
+//
+//	cluster := gocql.NewCluster("192.168.1.1", "192.168.1.2", "192.168.1.3")
+//	cluster.SslOpts = &gocql.SslOptions{
+//		EnableHostVerification: true,
+//	}
+//	session, err := cluster.CreateSession()
+//	if err != nil {
+//		return err
+//	}
+//	defer session.Close()
+//
+// # Data-center awareness and query routing
+//
+// To route queries to the local DC first, use DCAwareRoundRobinPolicy. For example, if the datacenter you
+// want to primarily connect to is called dc1 (as configured in the database):
+//
+//	cluster := gocql.NewCluster("192.168.1.1", "192.168.1.2", "192.168.1.3")
+//	cluster.PoolConfig.HostSelectionPolicy = gocql.DCAwareRoundRobinPolicy("dc1")
+//
+// The driver can route queries to nodes that hold data replicas based on partition key (preferring local DC).
+//
+//	cluster := gocql.NewCluster("192.168.1.1", "192.168.1.2", "192.168.1.3")
+//	cluster.PoolConfig.HostSelectionPolicy = gocql.TokenAwareHostPolicy(gocql.DCAwareRoundRobinPolicy("dc1"))
+//
+// Note that TokenAwareHostPolicy can take options such as gocql.ShuffleReplicas and gocql.NonLocalReplicasFallback.
+//
+// We recommend running with a token aware host policy in production for maximum performance.
+//
+// The driver can only use token-aware routing for queries where all partition key columns are query parameters.
+// For example, instead of
+//
+//	session.Query("select value from mytable where pk1 = 'abc' AND pk2 = ?", "def")
+//
+// use
+//
+//	session.Query("select value from mytable where pk1 = ? AND pk2 = ?", "abc", "def")
+//
+// # Rack-level awareness
+//
+// The DCAwareRoundRobinPolicy can be replaced with RackAwareRoundRobinPolicy, which takes two parameters, datacenter and rack.
+//
+// Instead of dividing hosts with two tiers (local datacenter and remote datacenters) it divides hosts into three
+// (the local rack, the rest of the local datacenter, and everything else).
+//
+// RackAwareRoundRobinPolicy can be combined with TokenAwareHostPolicy in the same way as DCAwareRoundRobinPolicy.
+//
+// # Executing queries
+//
+// Create queries with Session.Query. Query values must not be reused between different executions and must not be
+// modified after starting execution of the query.
+//
+// To execute a query without reading results, use Query.Exec:
+//
+//	err := session.Query(`INSERT INTO tweet (timeline, id, text) VALUES (?, ?, ?)`,
+//		"me", gocql.TimeUUID(), "hello world").WithContext(ctx).Exec()
+//
+// A single row can be read by calling Query.Scan:
+//
+//	err := session.Query(`SELECT id, text FROM tweet WHERE timeline = ? LIMIT 1`,
+//		"me").WithContext(ctx).Consistency(gocql.One).Scan(&id, &text)
+//
+// Multiple rows can be read using Iter.Scanner:
+//
+//	scanner := session.Query(`SELECT id, text FROM tweet WHERE timeline = ?`,
+//		"me").WithContext(ctx).Iter().Scanner()
+//	for scanner.Next() {
+//		var (
+//			id   gocql.UUID
+//			text string
+//		)
+//		err = scanner.Scan(&id, &text)
+//		if err != nil {
+//			log.Fatal(err)
+//		}
+//		fmt.Println("Tweet:", id, text)
+//	}
+//	// scanner.Err() closes the iterator, so neither the scanner nor the iterator should be used afterwards.
+//	if err := scanner.Err(); err != nil {
+//		log.Fatal(err)
+//	}
+//
+// See Example for a complete example.
+//
+// # Prepared statements
+//
+// The driver automatically prepares DML queries (SELECT/INSERT/UPDATE/DELETE/BATCH statements) and maintains a cache
+// of prepared statements.
+// CQL protocol does not support preparing other query types.
+//
+// When using CQL protocol >= 4, it is possible to use gocql.UnsetValue as the bound value of a column.
+// This will cause the database to ignore writing the column.
+// The main advantage is the ability to keep the same prepared statement even when you don't
+// want to update some fields, where before you needed to make another prepared statement.
+//
+// # Executing multiple queries concurrently
+//
+// Session is safe to use from multiple goroutines, so to execute multiple concurrent queries, just execute them
+// from several worker goroutines. Gocql provides a synchronous-looking API (as recommended for Go APIs) and the queries
+// are executed asynchronously at the protocol level.
+//
+//	results := make(chan error, 2)
+//	go func() {
+//		results <- session.Query(`INSERT INTO tweet (timeline, id, text) VALUES (?, ?, ?)`,
+//			"me", gocql.TimeUUID(), "hello world 1").Exec()
+//	}()
+//	go func() {
+//		results <- session.Query(`INSERT INTO tweet (timeline, id, text) VALUES (?, ?, ?)`,
+//			"me", gocql.TimeUUID(), "hello world 2").Exec()
+//	}()
+//
+// # Nulls
+//
+// Null values are unmarshalled as the zero value of the type. If you need to distinguish, for example, between a text
+// column being null and an empty string, you can unmarshal into a *string variable instead of a string.
+//
+//	var text *string
+//	err := scanner.Scan(&text)
+//	if err != nil {
+//		// handle error
+//	}
+//	if text != nil {
+//		// not null
+//	} else {
+//		// null
+//	}
+//
+// See Example_nulls for a full example.
+//
+// # Reusing slices
+//
+// The driver reuses backing memory of slices when unmarshalling. This is an optimization so that a buffer does not
+// need to be allocated for every processed row. However, you need to be careful when storing the slices to other
+// memory structures.
+//
+//	scanner := session.Query(`SELECT myints FROM table WHERE pk = ?`, "key").WithContext(ctx).Iter().Scanner()
+//	var myInts []int
+//	for scanner.Next() {
+//		// This scan reuses backing store of myInts for each row.
+//		err = scanner.Scan(&myInts)
+//		if err != nil {
+//			log.Fatal(err)
+//		}
+//	}
+//
+// When you want to save the data for later use, pass a new slice every time.
A common pattern is to declare the +// slice variable within the scanner loop: +// +// scanner := session.Query(`SELECT myints FROM table WHERE pk = ?`, "key").WithContext(ctx).Iter().Scanner() +// for scanner.Next() { +// var myInts []int +// // This scan always gets pointer to fresh myInts slice, so does not reuse memory. +// err = scanner.Scan(&myInts) +// if err != nil { +// log.Fatal(err) +// } +// } +// +// # Paging +// +// The driver supports paging of results with automatic prefetch, see ClusterConfig.PageSize, Session.SetPrefetch, +// Query.PageSize, and Query.Prefetch. +// +// It is also possible to control the paging manually with Query.PageState (this disables automatic prefetch). +// Manual paging is useful if you want to store the page state externally, for example in a URL to allow users +// browse pages in a result. You might want to sign/encrypt the paging state when exposing it externally since +// it contains data from primary keys. +// +// Paging state is specific to the CQL protocol version and the exact query used. It is meant as opaque state that +// should not be modified. If you send paging state from different query or protocol version, then the behaviour +// is not defined (you might get unexpected results or an error from the server). For example, do not send paging state +// returned by node using protocol version 3 to a node using protocol version 4. Also, when using protocol version 4, +// paging state between Cassandra 2.2 and 3.0 is incompatible (https://issues.apache.org/jira/browse/CASSANDRA-10880). +// +// The driver does not check whether the paging state is from the same protocol version/statement. +// You might want to validate yourself as this could be a problem if you store paging state externally. +// For example, if you store paging state in a URL, the URLs might become broken when you upgrade your cluster. +// +// Call Query.PageState(nil) to fetch just the first page of the query results. Pass the page state returned by +// Iter.PageState to Query.PageState of a subsequent query to get the next page. If the length of slice returned +// by Iter.PageState is zero, there are no more pages available (or an error occurred). +// +// Using too low values of PageSize will negatively affect performance, a value below 100 is probably too low. +// While Cassandra returns exactly PageSize items (except for last page) in a page currently, the protocol authors +// explicitly reserved the right to return smaller or larger amount of items in a page for performance reasons, so don't +// rely on the page having the exact count of items. +// +// See Example_paging for an example of manual paging. +// +// # Dynamic list of columns +// +// There are certain situations when you don't know the list of columns in advance, mainly when the query is supplied +// by the user. Iter.Columns, Iter.RowData, Iter.MapScan and Iter.SliceMap can be used to handle this case. +// +// See Example_dynamicColumns. +// +// # Batches +// +// The CQL protocol supports sending batches of DML statements (INSERT/UPDATE/DELETE) and so does gocql. +// Use Session.NewBatch to create a new batch and then fill-in details of individual queries. +// Then execute the batch with Session.ExecuteBatch. +// +// Logged batches ensure atomicity, either all or none of the operations in the batch will succeed, but they have +// overhead to ensure this property. +// Unlogged batches don't have the overhead of logged batches, but don't guarantee atomicity. 
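+//
+// A minimal sketch, assuming the tweet table used in the examples above:
+//
+//	b := session.NewBatch(gocql.LoggedBatch)
+//	b.Query(`INSERT INTO tweet (timeline, id, text) VALUES (?, ?, ?)`, "me", gocql.TimeUUID(), "hello 1")
+//	b.Query(`INSERT INTO tweet (timeline, id, text) VALUES (?, ?, ?)`, "me", gocql.TimeUUID(), "hello 2")
+//	if err := session.ExecuteBatch(b); err != nil {
+//		// handle error; with a logged batch either both rows were written or neither
+//	}
+//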
+// Updates of counters are handled specially by Cassandra, so batches of counter updates have to use the CounterBatch type.
+// A counter batch can only contain statements to update counters.
+//
+// For unlogged batches it is recommended to send only single-partition batches (i.e. all statements in the batch should
+// involve only a single partition).
+// A multi-partition batch needs to be split by the coordinator node and re-sent to
+// the correct nodes.
+// With single-partition batches you can send the batch directly to the node for the partition without incurring the
+// additional network hop.
+//
+// It is also possible to pass an entire BEGIN BATCH .. APPLY BATCH statement to Query.Exec.
+// There are differences in how those are executed.
+// A BEGIN BATCH statement passed to Query.Exec is prepared as a whole in a single statement.
+// Session.ExecuteBatch prepares individual statements in the batch.
+// If you have variable-length batches using the same statement, using Session.ExecuteBatch is more efficient.
+//
+// See Example_batch for an example.
+//
+// # Lightweight transactions
+//
+// Query.ScanCAS or Query.MapScanCAS can be used to execute a single-statement lightweight transaction (an
+// INSERT/UPDATE .. IF statement) and read its result. See the example for Query.MapScanCAS.
+//
+// Multiple-statement lightweight transactions can be executed as a logged batch that contains at least one conditional
+// statement. All the conditions must return true for the batch to be applied. You can use Session.ExecuteBatchCAS and
+// Session.MapExecuteBatchCAS when executing the batch to learn about the result of the LWT. See the example for
+// Session.MapExecuteBatchCAS.
+//
+// # Retries and speculative execution
+//
+// Queries can be marked as idempotent. Marking the query as idempotent tells the driver that the query can be executed
+// multiple times without affecting its result. Non-idempotent queries are not eligible for retrying or speculative
+// execution.
+//
+// Idempotent queries are retried in case of errors based on the configured RetryPolicy.
+//
+// Queries can be retried even before they fail by setting a SpeculativeExecutionPolicy. The policy can
+// cause the driver to retry on a different node if the query is taking longer than a specified delay even before the
+// driver receives an error or timeout from the server. When a query is speculatively executed, the original execution
+// is still executing. The two parallel executions of the query race to return a result, the first received result will
+// be returned.
+//
+// # User-defined types
+//
+// UDTs can be mapped (un)marshaled from/to map[string]interface{} or a Go struct (or a type implementing
+// the UDTUnmarshaler, UDTMarshaler, Unmarshaler or Marshaler interfaces).
+//
+// For structs, the cql tag can be used to specify the CQL field name to be mapped to a struct field:
+//
+//	type MyUDT struct {
+//		FieldA int32  `cql:"a"`
+//		FieldB string `cql:"b"`
+//	}
+//
+// See Example_userDefinedTypesMap, Example_userDefinedTypesStruct, ExampleUDTMarshaler, ExampleUDTUnmarshaler.
+//
+// # Metrics and tracing
+//
+// It is possible to provide observer implementations that could be used to gather metrics:
+//
+//   - QueryObserver for monitoring individual queries.
+//   - BatchObserver for monitoring batch queries.
+//   - ConnectObserver for monitoring new connections from the driver to the database.
+//   - FrameHeaderObserver for monitoring individual protocol frames.
+//
+// CQL protocol also supports tracing of queries.
When enabled, the database will write information about +// internal events that happened during execution of the query. You can use Query.Trace to request tracing and receive +// the session ID that the database used to store the trace information in system_traces.sessions and +// system_traces.events tables. NewTraceWriter returns an implementation of Tracer that writes the events to a writer. +// Gathering trace information might be essential for debugging and optimizing queries, but writing traces has overhead, +// so this feature should not be used on production systems with very high load unless you know what you are doing. +package gocql // import "github.com/gocql/gocql" diff --git a/vendor/github.com/gocql/gocql/errors.go b/vendor/github.com/gocql/gocql/errors.go new file mode 100644 index 000000000..faa6f7c9d --- /dev/null +++ b/vendor/github.com/gocql/gocql/errors.go @@ -0,0 +1,198 @@ +package gocql + +import "fmt" + +// See CQL Binary Protocol v5, section 8 for more details. +// https://github.com/apache/cassandra/blob/7337fc0/doc/native_protocol_v5.spec +const ( + // ErrCodeServer indicates unexpected error on server-side. + // + // See https://github.com/apache/cassandra/blob/7337fc0/doc/native_protocol_v5.spec#L1246-L1247 + ErrCodeServer = 0x0000 + // ErrCodeProtocol indicates a protocol violation by some client message. + // + // See https://github.com/apache/cassandra/blob/7337fc0/doc/native_protocol_v5.spec#L1248-L1250 + ErrCodeProtocol = 0x000A + // ErrCodeCredentials indicates missing required authentication. + // + // See https://github.com/apache/cassandra/blob/7337fc0/doc/native_protocol_v5.spec#L1251-L1254 + ErrCodeCredentials = 0x0100 + // ErrCodeUnavailable indicates unavailable error. + // + // See https://github.com/apache/cassandra/blob/7337fc0/doc/native_protocol_v5.spec#L1255-L1265 + ErrCodeUnavailable = 0x1000 + // ErrCodeOverloaded returned in case of request on overloaded node coordinator. + // + // See https://github.com/apache/cassandra/blob/7337fc0/doc/native_protocol_v5.spec#L1266-L1267 + ErrCodeOverloaded = 0x1001 + // ErrCodeBootstrapping returned from the coordinator node in bootstrapping phase. + // + // See https://github.com/apache/cassandra/blob/7337fc0/doc/native_protocol_v5.spec#L1268-L1269 + ErrCodeBootstrapping = 0x1002 + // ErrCodeTruncate indicates truncation exception. + // + // See https://github.com/apache/cassandra/blob/7337fc0/doc/native_protocol_v5.spec#L1270 + ErrCodeTruncate = 0x1003 + // ErrCodeWriteTimeout returned in case of timeout during the request write. + // + // See https://github.com/apache/cassandra/blob/7337fc0/doc/native_protocol_v5.spec#L1271-L1304 + ErrCodeWriteTimeout = 0x1100 + // ErrCodeReadTimeout returned in case of timeout during the request read. + // + // See https://github.com/apache/cassandra/blob/7337fc0/doc/native_protocol_v5.spec#L1305-L1321 + ErrCodeReadTimeout = 0x1200 + // ErrCodeReadFailure indicates request read error which is not covered by ErrCodeReadTimeout. + // + // See https://github.com/apache/cassandra/blob/7337fc0/doc/native_protocol_v5.spec#L1322-L1340 + ErrCodeReadFailure = 0x1300 + // ErrCodeFunctionFailure indicates an error in user-defined function. + // + // See https://github.com/apache/cassandra/blob/7337fc0/doc/native_protocol_v5.spec#L1341-L1347 + ErrCodeFunctionFailure = 0x1400 + // ErrCodeWriteFailure indicates request write error which is not covered by ErrCodeWriteTimeout. 
+ // + // See https://github.com/apache/cassandra/blob/7337fc0/doc/native_protocol_v5.spec#L1348-L1385 + ErrCodeWriteFailure = 0x1500 + // ErrCodeCDCWriteFailure is defined, but not yet documented in CQLv5 protocol. + // + // See https://github.com/apache/cassandra/blob/7337fc0/doc/native_protocol_v5.spec#L1386 + ErrCodeCDCWriteFailure = 0x1600 + // ErrCodeCASWriteUnknown indicates only partially completed CAS operation. + // + // See https://github.com/apache/cassandra/blob/7337fc0/doc/native_protocol_v5.spec#L1387-L1397 + ErrCodeCASWriteUnknown = 0x1700 + // ErrCodeSyntax indicates the syntax error in the query. + // + // See https://github.com/apache/cassandra/blob/7337fc0/doc/native_protocol_v5.spec#L1399 + ErrCodeSyntax = 0x2000 + // ErrCodeUnauthorized indicates access rights violation by user on performed operation. + // + // See https://github.com/apache/cassandra/blob/7337fc0/doc/native_protocol_v5.spec#L1400-L1401 + ErrCodeUnauthorized = 0x2100 + // ErrCodeInvalid indicates invalid query error which is not covered by ErrCodeSyntax. + // + // See https://github.com/apache/cassandra/blob/7337fc0/doc/native_protocol_v5.spec#L1402 + ErrCodeInvalid = 0x2200 + // ErrCodeConfig indicates the configuration error. + // + // See https://github.com/apache/cassandra/blob/7337fc0/doc/native_protocol_v5.spec#L1403 + ErrCodeConfig = 0x2300 + // ErrCodeAlreadyExists is returned for the requests creating the existing keyspace/table. + // + // See https://github.com/apache/cassandra/blob/7337fc0/doc/native_protocol_v5.spec#L1404-L1413 + ErrCodeAlreadyExists = 0x2400 + // ErrCodeUnprepared returned from the host for prepared statement which is unknown. + // + // See https://github.com/apache/cassandra/blob/7337fc0/doc/native_protocol_v5.spec#L1414-L1417 + ErrCodeUnprepared = 0x2500 +) + +type RequestError interface { + Code() int + Message() string + Error() string +} + +type errorFrame struct { + frameHeader + + code int + message string +} + +func (e errorFrame) Code() int { + return e.code +} + +func (e errorFrame) Message() string { + return e.message +} + +func (e errorFrame) Error() string { + return e.Message() +} + +func (e errorFrame) String() string { + return fmt.Sprintf("[error code=%x message=%q]", e.code, e.message) +} + +type RequestErrUnavailable struct { + errorFrame + Consistency Consistency + Required int + Alive int +} + +func (e *RequestErrUnavailable) String() string { + return fmt.Sprintf("[request_error_unavailable consistency=%s required=%d alive=%d]", e.Consistency, e.Required, e.Alive) +} + +type ErrorMap map[string]uint16 + +type RequestErrWriteTimeout struct { + errorFrame + Consistency Consistency + Received int + BlockFor int + WriteType string +} + +type RequestErrWriteFailure struct { + errorFrame + Consistency Consistency + Received int + BlockFor int + NumFailures int + WriteType string + ErrorMap ErrorMap +} + +type RequestErrCDCWriteFailure struct { + errorFrame +} + +type RequestErrReadTimeout struct { + errorFrame + Consistency Consistency + Received int + BlockFor int + DataPresent byte +} + +type RequestErrAlreadyExists struct { + errorFrame + Keyspace string + Table string +} + +type RequestErrUnprepared struct { + errorFrame + StatementId []byte +} + +type RequestErrReadFailure struct { + errorFrame + Consistency Consistency + Received int + BlockFor int + NumFailures int + DataPresent bool + ErrorMap ErrorMap +} + +type RequestErrFunctionFailure struct { + errorFrame + Keyspace string + Function string + ArgTypes []string +} + +// 
RequestErrCASWriteUnknown is a distinct error for ErrCodeCASWriteUnknown.
+//
+// See https://github.com/apache/cassandra/blob/7337fc0/doc/native_protocol_v5.spec#L1387-L1397
+type RequestErrCASWriteUnknown struct {
+	errorFrame
+	Consistency Consistency
+	Received    int
+	BlockFor    int
+}
diff --git a/vendor/github.com/gocql/gocql/events.go b/vendor/github.com/gocql/gocql/events.go
new file mode 100644
index 000000000..73461f629
--- /dev/null
+++ b/vendor/github.com/gocql/gocql/events.go
@@ -0,0 +1,246 @@
+package gocql
+
+import (
+	"net"
+	"sync"
+	"time"
+)
+
+type eventDebouncer struct {
+	name   string
+	timer  *time.Timer
+	mu     sync.Mutex
+	events []frame
+
+	callback func([]frame)
+	quit     chan struct{}
+
+	logger StdLogger
+}
+
+func newEventDebouncer(name string, eventHandler func([]frame), logger StdLogger) *eventDebouncer {
+	e := &eventDebouncer{
+		name:     name,
+		quit:     make(chan struct{}),
+		timer:    time.NewTimer(eventDebounceTime),
+		callback: eventHandler,
+		logger:   logger,
+	}
+	e.timer.Stop()
+	go e.flusher()
+
+	return e
+}
+
+func (e *eventDebouncer) stop() {
+	e.quit <- struct{}{} // sync with flusher
+	close(e.quit)
+}
+
+func (e *eventDebouncer) flusher() {
+	for {
+		select {
+		case <-e.timer.C:
+			e.mu.Lock()
+			e.flush()
+			e.mu.Unlock()
+		case <-e.quit:
+			return
+		}
+	}
+}
+
+const (
+	eventBufferSize   = 1000
+	eventDebounceTime = 1 * time.Second
+)
+
+// flush must be called with mu locked
+func (e *eventDebouncer) flush() {
+	if len(e.events) == 0 {
+		return
+	}
+
+	// if the flush interval is faster than the callback then we will end up calling
+	// the callback multiple times, probably a bad idea. In this case we could drop
+	// frames?
+	go e.callback(e.events)
+	e.events = make([]frame, 0, eventBufferSize)
+}
+
+func (e *eventDebouncer) debounce(frame frame) {
+	e.mu.Lock()
+	e.timer.Reset(eventDebounceTime)
+
+	// TODO: probably need a warning to track if this threshold is too low
+	if len(e.events) < eventBufferSize {
+		e.events = append(e.events, frame)
+	} else {
+		e.logger.Printf("%s: buffer full, dropping event frame: %s", e.name, frame)
+	}
+
+	e.mu.Unlock()
+}
+
+func (s *Session) handleEvent(framer *framer) {
+	frame, err := framer.parseFrame()
+	if err != nil {
+		s.logger.Printf("gocql: unable to parse event frame: %v\n", err)
+		return
+	}
+
+	if gocqlDebug {
+		s.logger.Printf("gocql: handling frame: %v\n", frame)
+	}
+
+	switch f := frame.(type) {
+	case *schemaChangeKeyspace, *schemaChangeFunction,
+		*schemaChangeTable, *schemaChangeAggregate, *schemaChangeType:
+
+		s.schemaEvents.debounce(frame)
+	case *topologyChangeEventFrame, *statusChangeEventFrame:
+		s.nodeEvents.debounce(frame)
+	default:
+		s.logger.Printf("gocql: invalid event frame (%T): %v\n", f, f)
+	}
+}
+
+func (s *Session) handleSchemaEvent(frames []frame) {
+	// TODO: debounce events
+	for _, frame := range frames {
+		switch f := frame.(type) {
+		case *schemaChangeKeyspace:
+			s.schemaDescriber.clearSchema(f.keyspace)
+			s.handleKeyspaceChange(f.keyspace, f.change)
+		case *schemaChangeTable:
+			s.schemaDescriber.clearSchema(f.keyspace)
+		case *schemaChangeAggregate:
+			s.schemaDescriber.clearSchema(f.keyspace)
+		case *schemaChangeFunction:
+			s.schemaDescriber.clearSchema(f.keyspace)
+		case *schemaChangeType:
+			s.schemaDescriber.clearSchema(f.keyspace)
+		}
+	}
+}
+
+func (s *Session) handleKeyspaceChange(keyspace, change string) {
+	s.control.awaitSchemaAgreement()
+	s.policy.KeyspaceChanged(KeyspaceUpdateEvent{Keyspace: keyspace, Change: change})
+}
+
+// handleNodeEvent handles inbound status and
topology change events. +// +// Status events are debounced by host IP; only the latest event is processed. +// +// Topology events are debounced by performing a single full topology refresh +// whenever any topology event comes in. +// +// Processing topology change events before status change events ensures +// that a NEW_NODE event is not dropped in favor of a newer UP event (which +// would itself be dropped/ignored, as the node is not yet known). +func (s *Session) handleNodeEvent(frames []frame) { + type nodeEvent struct { + change string + host net.IP + port int + } + + topologyEventReceived := false + // status change events + sEvents := make(map[string]*nodeEvent) + + for _, frame := range frames { + switch f := frame.(type) { + case *topologyChangeEventFrame: + topologyEventReceived = true + case *statusChangeEventFrame: + event, ok := sEvents[f.host.String()] + if !ok { + event = &nodeEvent{change: f.change, host: f.host, port: f.port} + sEvents[f.host.String()] = event + } + event.change = f.change + } + } + + if topologyEventReceived && !s.cfg.Events.DisableTopologyEvents { + s.debounceRingRefresh() + } + + for _, f := range sEvents { + if gocqlDebug { + s.logger.Printf("gocql: dispatching status change event: %+v\n", f) + } + + // ignore events we received if they were disabled + // see https://github.com/gocql/gocql/issues/1591 + switch f.change { + case "UP": + if !s.cfg.Events.DisableNodeStatusEvents { + s.handleNodeUp(f.host, f.port) + } + case "DOWN": + if !s.cfg.Events.DisableNodeStatusEvents { + s.handleNodeDown(f.host, f.port) + } + } + } +} + +func (s *Session) handleNodeUp(eventIp net.IP, eventPort int) { + if gocqlDebug { + s.logger.Printf("gocql: Session.handleNodeUp: %s:%d\n", eventIp.String(), eventPort) + } + + host, ok := s.ring.getHostByIP(eventIp.String()) + if !ok { + s.debounceRingRefresh() + return + } + + if s.cfg.filterHost(host) { + return + } + + if d := host.Version().nodeUpDelay(); d > 0 { + time.Sleep(d) + } + s.startPoolFill(host) +} + +func (s *Session) startPoolFill(host *HostInfo) { + // we let the pool call handleNodeConnected to change the host state + s.pool.addHost(host) + s.policy.AddHost(host) +} + +func (s *Session) handleNodeConnected(host *HostInfo) { + if gocqlDebug { + s.logger.Printf("gocql: Session.handleNodeConnected: %s:%d\n", host.ConnectAddress(), host.Port()) + } + + host.setState(NodeUp) + + if !s.cfg.filterHost(host) { + s.policy.HostUp(host) + } +} + +func (s *Session) handleNodeDown(ip net.IP, port int) { + if gocqlDebug { + s.logger.Printf("gocql: Session.handleNodeDown: %s:%d\n", ip.String(), port) + } + + host, ok := s.ring.getHostByIP(ip.String()) + if ok { + host.setState(NodeDown) + if s.cfg.filterHost(host) { + return + } + + s.policy.HostDown(host) + hostID := host.HostID() + s.pool.removeHost(hostID) + } +} diff --git a/vendor/github.com/gocql/gocql/filters.go b/vendor/github.com/gocql/gocql/filters.go new file mode 100644 index 000000000..ecd9c7773 --- /dev/null +++ b/vendor/github.com/gocql/gocql/filters.go @@ -0,0 +1,57 @@ +package gocql + +import "fmt" + +// HostFilter interface is used when a host is discovered via server sent events. +type HostFilter interface { + // Called when a new host is discovered, returning true will cause the host + // to be added to the pools. 
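+	//
+	// A filter is typically installed via ClusterConfig.HostFilter before the
+	// session is created; for example (a sketch, "dc1" is a placeholder
+	// datacenter name):
+	//
+	//	cluster.HostFilter = gocql.DataCentreHostFilter("dc1")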
+	Accept(host *HostInfo) bool
+}
+
+// HostFilterFunc converts a func(host *HostInfo) bool into a HostFilter
+type HostFilterFunc func(host *HostInfo) bool
+
+func (fn HostFilterFunc) Accept(host *HostInfo) bool {
+	return fn(host)
+}
+
+// AcceptAllFilter will accept all hosts
+func AcceptAllFilter() HostFilter {
+	return HostFilterFunc(func(host *HostInfo) bool {
+		return true
+	})
+}
+
+func DenyAllFilter() HostFilter {
+	return HostFilterFunc(func(host *HostInfo) bool {
+		return false
+	})
+}
+
+// DataCentreHostFilter accepts only hosts that are in the same data centre
+// as the supplied data centre.
+func DataCentreHostFilter(dataCentre string) HostFilter {
+	return HostFilterFunc(func(host *HostInfo) bool {
+		return host.DataCenter() == dataCentre
+	})
+}
+
+// WhiteListHostFilter filters incoming hosts by checking that their address is
+// in the initial hosts whitelist.
+func WhiteListHostFilter(hosts ...string) HostFilter {
+	hostInfos, err := addrsToHosts(hosts, 9042, nopLogger{})
+	if err != nil {
+		// don't want to panic here, but rather not break the API
+		panic(fmt.Errorf("unable to lookup host info from address: %v", err))
+	}
+
+	m := make(map[string]bool, len(hostInfos))
+	for _, host := range hostInfos {
+		m[host.ConnectAddress().String()] = true
+	}
+
+	return HostFilterFunc(func(host *HostInfo) bool {
+		return m[host.ConnectAddress().String()]
+	})
+}
diff --git a/vendor/github.com/gocql/gocql/frame.go b/vendor/github.com/gocql/gocql/frame.go
new file mode 100644
index 000000000..44be7879d
--- /dev/null
+++ b/vendor/github.com/gocql/gocql/frame.go
@@ -0,0 +1,2052 @@
+// Copyright (c) 2012 The gocql Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package gocql
+
+import (
+	"context"
+	"errors"
+	"fmt"
+	"io"
+	"io/ioutil"
+	"net"
+	"runtime"
+	"strings"
+	"time"
+)
+
+type unsetColumn struct{}
+
+// UnsetValue represents a value used in a query binding that will be ignored by Cassandra.
+//
+// By setting a field to the unset value Cassandra will ignore the write completely.
+// The main advantage is the ability to keep the same prepared statement even when you don't
+// want to update some fields, where before you needed to make another prepared statement.
+//
+// UnsetValue is only available when using protocol version 4 or newer.
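+//
+// A minimal sketch (keyspace, table and bind values are illustrative):
+//
+//	// Column b keeps whatever value it already has instead of being
+//	// overwritten with a null.
+//	err := session.Query(`INSERT INTO ks.t (pk, a, b) VALUES (?, ?, ?)`,
+//		pk, a, gocql.UnsetValue).Exec()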
+var UnsetValue = unsetColumn{}
+
+type namedValue struct {
+	name  string
+	value interface{}
+}
+
+// NamedValue produces a value that will bind to the named parameter in a query
+func NamedValue(name string, value interface{}) interface{} {
+	return &namedValue{
+		name:  name,
+		value: value,
+	}
+}
+
+const (
+	protoDirectionMask = 0x80
+	protoVersionMask   = 0x7F
+	protoVersion1      = 0x01
+	protoVersion2      = 0x02
+	protoVersion3      = 0x03
+	protoVersion4      = 0x04
+	protoVersion5      = 0x05
+
+	maxFrameSize = 256 * 1024 * 1024
+)
+
+type protoVersion byte
+
+func (p protoVersion) request() bool {
+	return p&protoDirectionMask == 0x00
+}
+
+func (p protoVersion) response() bool {
+	return p&protoDirectionMask == 0x80
+}
+
+func (p protoVersion) version() byte {
+	return byte(p) & protoVersionMask
+}
+
+func (p protoVersion) String() string {
+	dir := "REQ"
+	if p.response() {
+		dir = "RESP"
+	}
+
+	return fmt.Sprintf("[version=%d direction=%s]", p.version(), dir)
+}
+
+type frameOp byte
+
+const (
+	// header ops
+	opError         frameOp = 0x00
+	opStartup       frameOp = 0x01
+	opReady         frameOp = 0x02
+	opAuthenticate  frameOp = 0x03
+	opOptions       frameOp = 0x05
+	opSupported     frameOp = 0x06
+	opQuery         frameOp = 0x07
+	opResult        frameOp = 0x08
+	opPrepare       frameOp = 0x09
+	opExecute       frameOp = 0x0A
+	opRegister      frameOp = 0x0B
+	opEvent         frameOp = 0x0C
+	opBatch         frameOp = 0x0D
+	opAuthChallenge frameOp = 0x0E
+	opAuthResponse  frameOp = 0x0F
+	opAuthSuccess   frameOp = 0x10
+)
+
+func (f frameOp) String() string {
+	switch f {
+	case opError:
+		return "ERROR"
+	case opStartup:
+		return "STARTUP"
+	case opReady:
+		return "READY"
+	case opAuthenticate:
+		return "AUTHENTICATE"
+	case opOptions:
+		return "OPTIONS"
+	case opSupported:
+		return "SUPPORTED"
+	case opQuery:
+		return "QUERY"
+	case opResult:
+		return "RESULT"
+	case opPrepare:
+		return "PREPARE"
+	case opExecute:
+		return "EXECUTE"
+	case opRegister:
+		return "REGISTER"
+	case opEvent:
+		return "EVENT"
+	case opBatch:
+		return "BATCH"
+	case opAuthChallenge:
+		return "AUTH_CHALLENGE"
+	case opAuthResponse:
+		return "AUTH_RESPONSE"
+	case opAuthSuccess:
+		return "AUTH_SUCCESS"
+	default:
+		return fmt.Sprintf("UNKNOWN_OP_%d", f)
+	}
+}
+
+const (
+	// result kind
+	resultKindVoid          = 1
+	resultKindRows          = 2
+	resultKindKeyspace      = 3
+	resultKindPrepared      = 4
+	resultKindSchemaChanged = 5
+
+	// rows flags
+	flagGlobalTableSpec int = 0x01
+	flagHasMorePages    int = 0x02
+	flagNoMetaData      int = 0x04
+
+	// query flags
+	flagValues                byte = 0x01
+	flagSkipMetaData          byte = 0x02
+	flagPageSize              byte = 0x04
+	flagWithPagingState       byte = 0x08
+	flagWithSerialConsistency byte = 0x10
+	flagDefaultTimestamp      byte = 0x20
+	flagWithNameValues        byte = 0x40
+	flagWithKeyspace          byte = 0x80
+
+	// prepare flags
+	flagWithPreparedKeyspace uint32 = 0x01
+
+	// header flags
+	flagCompress      byte = 0x01
+	flagTracing       byte = 0x02
+	flagCustomPayload byte = 0x04
+	flagWarning       byte = 0x08
+	flagBetaProtocol  byte = 0x10
+)
+
+type Consistency uint16
+
+const (
+	Any         Consistency = 0x00
+	One         Consistency = 0x01
+	Two         Consistency = 0x02
+	Three       Consistency = 0x03
+	Quorum      Consistency = 0x04
+	All         Consistency = 0x05
+	LocalQuorum Consistency = 0x06
+	EachQuorum  Consistency = 0x07
+	LocalOne    Consistency = 0x0A
+)
+
+func (c Consistency) String() string {
+	switch c {
+	case Any:
+		return "ANY"
+	case One:
+		return "ONE"
+	case Two:
+		return "TWO"
+	case Three:
+		return "THREE"
+	case Quorum:
+		return "QUORUM"
+	case All:
+		return "ALL"
+	case LocalQuorum:
+		return "LOCAL_QUORUM"
+	case EachQuorum:
+		return "EACH_QUORUM"
+	case LocalOne:
+		return "LOCAL_ONE"
+	default:
+		return fmt.Sprintf("UNKNOWN_CONS_0x%x", uint16(c))
+	}
+}
+
+func (c Consistency) MarshalText() (text []byte, err error) {
+	return []byte(c.String()), nil
+}
+
+func (c *Consistency) UnmarshalText(text []byte) error {
+	switch string(text) {
+	case "ANY":
+		*c = Any
+	case "ONE":
+		*c = One
+	case "TWO":
+		*c = Two
+	case "THREE":
+		*c = Three
+	case "QUORUM":
+		*c = Quorum
+	case "ALL":
+		*c = All
+	case "LOCAL_QUORUM":
+		*c = LocalQuorum
+	case "EACH_QUORUM":
+		*c = EachQuorum
+	case "LOCAL_ONE":
+		*c = LocalOne
+	default:
+		return fmt.Errorf("invalid consistency %q", string(text))
+	}
+
+	return nil
+}
+
+func ParseConsistency(s string) Consistency {
+	var c Consistency
+	if err := c.UnmarshalText([]byte(strings.ToUpper(s))); err != nil {
+		panic(err)
+	}
+	return c
+}
+
+// ParseConsistencyWrapper wraps gocql.ParseConsistency to provide an err
+// return instead of a panic
+func ParseConsistencyWrapper(s string) (consistency Consistency, err error) {
+	err = consistency.UnmarshalText([]byte(strings.ToUpper(s)))
+	return
+}
+
+// MustParseConsistency is the same as ParseConsistency, except it also returns
+// an error value (which is always nil); it is kept for backwards compatibility.
+// DEPRECATED: use ParseConsistency if you want a panic on parse error.
+func MustParseConsistency(s string) (Consistency, error) {
+	c, err := ParseConsistencyWrapper(s)
+	if err != nil {
+		panic(err)
+	}
+	return c, nil
+}
+
+type SerialConsistency uint16
+
+const (
+	Serial      SerialConsistency = 0x08
+	LocalSerial SerialConsistency = 0x09
+)
+
+func (s SerialConsistency) String() string {
+	switch s {
+	case Serial:
+		return "SERIAL"
+	case LocalSerial:
+		return "LOCAL_SERIAL"
+	default:
+		return fmt.Sprintf("UNKNOWN_SERIAL_CONS_0x%x", uint16(s))
+	}
+}
+
+func (s SerialConsistency) MarshalText() (text []byte, err error) {
+	return []byte(s.String()), nil
+}
+
+func (s *SerialConsistency) UnmarshalText(text []byte) error {
+	switch string(text) {
+	case "SERIAL":
+		*s = Serial
+	case "LOCAL_SERIAL":
+		*s = LocalSerial
+	default:
+		return fmt.Errorf("invalid consistency %q", string(text))
+	}
+
+	return nil
+}
+
+const (
+	apacheCassandraTypePrefix = "org.apache.cassandra.db.marshal."
+)
+
+var (
+	ErrFrameTooBig = errors.New("frame length is bigger than the maximum allowed")
+)
+
+const maxFrameHeaderSize = 9
+
+func readInt(p []byte) int32 {
+	return int32(p[0])<<24 | int32(p[1])<<16 | int32(p[2])<<8 | int32(p[3])
+}
+
+type frameHeader struct {
+	version  protoVersion
+	flags    byte
+	stream   int
+	op       frameOp
+	length   int
+	warnings []string
+}
+
+func (f frameHeader) String() string {
+	return fmt.Sprintf("[header version=%s flags=0x%x stream=%d op=%s length=%d]", f.version, f.flags, f.stream, f.op, f.length)
+}
+
+func (f frameHeader) Header() frameHeader {
+	return f
+}
+
+const defaultBufSize = 128
+
+type ObservedFrameHeader struct {
+	Version protoVersion
+	Flags   byte
+	Stream  int16
+	Opcode  frameOp
+	Length  int32
+
+	// StartHeader is the time we started reading the frame header off the network connection.
+	Start time.Time
+	// EndHeader is the time we finished reading the frame header off the network connection.
+	End time.Time
+
+	// Host is the Host of the connection the frame header was read from.
+	Host *HostInfo
+}
+
+func (f ObservedFrameHeader) String() string {
+	return fmt.Sprintf("[observed header version=%s flags=0x%x stream=%d op=%s length=%d]", f.Version, f.Flags, f.Stream, f.Opcode, f.Length)
+}
+
+// FrameHeaderObserver is the interface implemented by frame observers / stat collectors.
+//
+// Experimental, this interface and use may change
+type FrameHeaderObserver interface {
+	// ObserveFrameHeader gets called on every received frame header.
+	ObserveFrameHeader(context.Context, ObservedFrameHeader)
+}
+
+// a framer is responsible for reading, writing and parsing frames on a single stream
+type framer struct {
+	proto byte
+	// flags are for outgoing flags, enabling compression and tracing etc.
+	flags    byte
+	compres  Compressor
+	headSize int
+	// if this frame was read then the header will be here
+	header *frameHeader
+
+	// if tracing flag is set this is not nil
+	traceID []byte
+
+	// holds a ref to the whole byte slice backing buf so that buf can be
+	// reset to length 0 and reused after a read.
+	readBuffer []byte
+
+	buf []byte
+
+	customPayload map[string][]byte
+}
+
+func newFramer(compressor Compressor, version byte) *framer {
+	buf := make([]byte, defaultBufSize)
+	f := &framer{
+		buf:        buf[:0],
+		readBuffer: buf,
+	}
+	var flags byte
+	if compressor != nil {
+		flags |= flagCompress
+	}
+	if version == protoVersion5 {
+		flags |= flagBetaProtocol
+	}
+
+	version &= protoVersionMask
+
+	headSize := 8
+	if version > protoVersion2 {
+		headSize = 9
+	}
+
+	f.compres = compressor
+	f.proto = version
+	f.flags = flags
+	f.headSize = headSize
+
+	f.header = nil
+	f.traceID = nil
+
+	return f
+}
+
+type frame interface {
+	Header() frameHeader
+}
+
+func readHeader(r io.Reader, p []byte) (head frameHeader, err error) {
+	_, err = io.ReadFull(r, p[:1])
+	if err != nil {
+		return frameHeader{}, err
+	}
+
+	version := p[0] & protoVersionMask
+
+	if version < protoVersion1 || version > protoVersion5 {
+		return frameHeader{}, fmt.Errorf("gocql: unsupported protocol response version: %d", version)
+	}
+
+	headSize := 9
+	if version < protoVersion3 {
+		headSize = 8
+	}
+
+	_, err = io.ReadFull(r, p[1:headSize])
+	if err != nil {
+		return frameHeader{}, err
+	}
+
+	p = p[:headSize]
+
+	head.version = protoVersion(p[0])
+	head.flags = p[1]
+
+	if version > protoVersion2 {
+		if len(p) != 9 {
+			return frameHeader{}, fmt.Errorf("not enough bytes to read header require 9 got: %d", len(p))
+		}
+
+		head.stream = int(int16(p[2])<<8 | int16(p[3]))
+		head.op = frameOp(p[4])
+		head.length = int(readInt(p[5:]))
+	} else {
+		if len(p) != 8 {
+			return frameHeader{}, fmt.Errorf("not enough bytes to read header require 8 got: %d", len(p))
+		}
+
+		head.stream = int(int8(p[2]))
+		head.op = frameOp(p[3])
+		head.length = int(readInt(p[4:]))
+	}
+
+	return head, nil
+}
+
+// trace explicitly enables tracing for the framer's outgoing requests
+func (f *framer) trace() {
+	f.flags |= flagTracing
+}
+
+// payload explicitly enables the custom payload flag
+func (f *framer) payload() {
+	f.flags |= flagCustomPayload
+}
+
+// readFrame reads a frame from the wire into the framer's buffer
+func (f *framer) readFrame(r io.Reader, head *frameHeader) error {
+	if head.length < 0 {
+		return fmt.Errorf("frame body length can not be less than 0: %d", head.length)
+	} else if head.length > maxFrameSize {
+		// need to free up the connection to be used again
+		_, err := io.CopyN(ioutil.Discard, r, int64(head.length))
+		if err != nil {
+			return fmt.Errorf("error whilst trying to discard frame with invalid length: %v", err)
+		}
+		return ErrFrameTooBig
+	}
+
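+	// Reuse the connection's read buffer when it is already large enough for
+	// this frame body; otherwise grow it. Steady-state reads can then avoid
+	// re-allocating on every frame.
+	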
if cap(f.readBuffer) >= head.length { + f.buf = f.readBuffer[:head.length] + } else { + f.readBuffer = make([]byte, head.length) + f.buf = f.readBuffer + } + + // assume the underlying reader takes care of timeouts and retries + n, err := io.ReadFull(r, f.buf) + if err != nil { + return fmt.Errorf("unable to read frame body: read %d/%d bytes: %v", n, head.length, err) + } + + if head.flags&flagCompress == flagCompress { + if f.compres == nil { + return NewErrProtocol("no compressor available with compressed frame body") + } + + f.buf, err = f.compres.Decode(f.buf) + if err != nil { + return err + } + } + + f.header = head + return nil +} + +func (f *framer) parseFrame() (frame frame, err error) { + defer func() { + if r := recover(); r != nil { + if _, ok := r.(runtime.Error); ok { + panic(r) + } + err = r.(error) + } + }() + + if f.header.version.request() { + return nil, NewErrProtocol("got a request frame from server: %v", f.header.version) + } + + if f.header.flags&flagTracing == flagTracing { + f.readTrace() + } + + if f.header.flags&flagWarning == flagWarning { + f.header.warnings = f.readStringList() + } + + if f.header.flags&flagCustomPayload == flagCustomPayload { + f.customPayload = f.readBytesMap() + } + + // assumes that the frame body has been read into rbuf + switch f.header.op { + case opError: + frame = f.parseErrorFrame() + case opReady: + frame = f.parseReadyFrame() + case opResult: + frame, err = f.parseResultFrame() + case opSupported: + frame = f.parseSupportedFrame() + case opAuthenticate: + frame = f.parseAuthenticateFrame() + case opAuthChallenge: + frame = f.parseAuthChallengeFrame() + case opAuthSuccess: + frame = f.parseAuthSuccessFrame() + case opEvent: + frame = f.parseEventFrame() + default: + return nil, NewErrProtocol("unknown op in frame header: %s", f.header.op) + } + + return +} + +func (f *framer) parseErrorFrame() frame { + code := f.readInt() + msg := f.readString() + + errD := errorFrame{ + frameHeader: *f.header, + code: code, + message: msg, + } + + switch code { + case ErrCodeUnavailable: + cl := f.readConsistency() + required := f.readInt() + alive := f.readInt() + return &RequestErrUnavailable{ + errorFrame: errD, + Consistency: cl, + Required: required, + Alive: alive, + } + case ErrCodeWriteTimeout: + cl := f.readConsistency() + received := f.readInt() + blockfor := f.readInt() + writeType := f.readString() + return &RequestErrWriteTimeout{ + errorFrame: errD, + Consistency: cl, + Received: received, + BlockFor: blockfor, + WriteType: writeType, + } + case ErrCodeReadTimeout: + cl := f.readConsistency() + received := f.readInt() + blockfor := f.readInt() + dataPresent := f.readByte() + return &RequestErrReadTimeout{ + errorFrame: errD, + Consistency: cl, + Received: received, + BlockFor: blockfor, + DataPresent: dataPresent, + } + case ErrCodeAlreadyExists: + ks := f.readString() + table := f.readString() + return &RequestErrAlreadyExists{ + errorFrame: errD, + Keyspace: ks, + Table: table, + } + case ErrCodeUnprepared: + stmtId := f.readShortBytes() + return &RequestErrUnprepared{ + errorFrame: errD, + StatementId: copyBytes(stmtId), // defensively copy + } + case ErrCodeReadFailure: + res := &RequestErrReadFailure{ + errorFrame: errD, + } + res.Consistency = f.readConsistency() + res.Received = f.readInt() + res.BlockFor = f.readInt() + if f.proto > protoVersion4 { + res.ErrorMap = f.readErrorMap() + res.NumFailures = len(res.ErrorMap) + } else { + res.NumFailures = f.readInt() + } + res.DataPresent = f.readByte() != 0 + + return res + case 
ErrCodeWriteFailure: + res := &RequestErrWriteFailure{ + errorFrame: errD, + } + res.Consistency = f.readConsistency() + res.Received = f.readInt() + res.BlockFor = f.readInt() + if f.proto > protoVersion4 { + res.ErrorMap = f.readErrorMap() + res.NumFailures = len(res.ErrorMap) + } else { + res.NumFailures = f.readInt() + } + res.WriteType = f.readString() + return res + case ErrCodeFunctionFailure: + res := &RequestErrFunctionFailure{ + errorFrame: errD, + } + res.Keyspace = f.readString() + res.Function = f.readString() + res.ArgTypes = f.readStringList() + return res + + case ErrCodeCDCWriteFailure: + res := &RequestErrCDCWriteFailure{ + errorFrame: errD, + } + return res + case ErrCodeCASWriteUnknown: + res := &RequestErrCASWriteUnknown{ + errorFrame: errD, + } + res.Consistency = f.readConsistency() + res.Received = f.readInt() + res.BlockFor = f.readInt() + return res + case ErrCodeInvalid, ErrCodeBootstrapping, ErrCodeConfig, ErrCodeCredentials, ErrCodeOverloaded, + ErrCodeProtocol, ErrCodeServer, ErrCodeSyntax, ErrCodeTruncate, ErrCodeUnauthorized: + // TODO(zariel): we should have some distinct types for these errors + return errD + default: + panic(fmt.Errorf("unknown error code: 0x%x", errD.code)) + } +} + +func (f *framer) readErrorMap() (errMap ErrorMap) { + errMap = make(ErrorMap) + numErrs := f.readInt() + for i := 0; i < numErrs; i++ { + ip := f.readInetAdressOnly().String() + errMap[ip] = f.readShort() + } + return +} + +func (f *framer) writeHeader(flags byte, op frameOp, stream int) { + f.buf = f.buf[:0] + f.buf = append(f.buf, + f.proto, + flags, + ) + + if f.proto > protoVersion2 { + f.buf = append(f.buf, + byte(stream>>8), + byte(stream), + ) + } else { + f.buf = append(f.buf, + byte(stream), + ) + } + + // pad out length + f.buf = append(f.buf, + byte(op), + 0, + 0, + 0, + 0, + ) +} + +func (f *framer) setLength(length int) { + p := 4 + if f.proto > protoVersion2 { + p = 5 + } + + f.buf[p+0] = byte(length >> 24) + f.buf[p+1] = byte(length >> 16) + f.buf[p+2] = byte(length >> 8) + f.buf[p+3] = byte(length) +} + +func (f *framer) finish() error { + if len(f.buf) > maxFrameSize { + // huge app frame, lets remove it so it doesn't bloat the heap + f.buf = make([]byte, defaultBufSize) + return ErrFrameTooBig + } + + if f.buf[1]&flagCompress == flagCompress { + if f.compres == nil { + panic("compress flag set with no compressor") + } + + // TODO: only compress frames which are big enough + compressed, err := f.compres.Encode(f.buf[f.headSize:]) + if err != nil { + return err + } + + f.buf = append(f.buf[:f.headSize], compressed...) + } + length := len(f.buf) - f.headSize + f.setLength(length) + + return nil +} + +func (f *framer) writeTo(w io.Writer) error { + _, err := w.Write(f.buf) + return err +} + +func (f *framer) readTrace() { + f.traceID = f.readUUID().Bytes() +} + +type readyFrame struct { + frameHeader +} + +func (f *framer) parseReadyFrame() frame { + return &readyFrame{ + frameHeader: *f.header, + } +} + +type supportedFrame struct { + frameHeader + + supported map[string][]string +} + +// TODO: if we move the body buffer onto the frameHeader then we only need a single +// framer, and can move the methods onto the header. 
+func (f *framer) parseSupportedFrame() frame { + return &supportedFrame{ + frameHeader: *f.header, + + supported: f.readStringMultiMap(), + } +} + +type writeStartupFrame struct { + opts map[string]string +} + +func (w writeStartupFrame) String() string { + return fmt.Sprintf("[startup opts=%+v]", w.opts) +} + +func (w *writeStartupFrame) buildFrame(f *framer, streamID int) error { + f.writeHeader(f.flags&^flagCompress, opStartup, streamID) + f.writeStringMap(w.opts) + + return f.finish() +} + +type writePrepareFrame struct { + statement string + keyspace string + customPayload map[string][]byte +} + +func (w *writePrepareFrame) buildFrame(f *framer, streamID int) error { + if len(w.customPayload) > 0 { + f.payload() + } + f.writeHeader(f.flags, opPrepare, streamID) + f.writeCustomPayload(&w.customPayload) + f.writeLongString(w.statement) + + var flags uint32 = 0 + if w.keyspace != "" { + if f.proto > protoVersion4 { + flags |= flagWithPreparedKeyspace + } else { + panic(fmt.Errorf("the keyspace can only be set with protocol 5 or higher")) + } + } + if f.proto > protoVersion4 { + f.writeUint(flags) + } + if w.keyspace != "" { + f.writeString(w.keyspace) + } + + return f.finish() +} + +func (f *framer) readTypeInfo() TypeInfo { + // TODO: factor this out so the same code paths can be used to parse custom + // types and other types, as much of the logic will be duplicated. + id := f.readShort() + + simple := NativeType{ + proto: f.proto, + typ: Type(id), + } + + if simple.typ == TypeCustom { + simple.custom = f.readString() + if cassType := getApacheCassandraType(simple.custom); cassType != TypeCustom { + simple.typ = cassType + } + } + + switch simple.typ { + case TypeTuple: + n := f.readShort() + tuple := TupleTypeInfo{ + NativeType: simple, + Elems: make([]TypeInfo, n), + } + + for i := 0; i < int(n); i++ { + tuple.Elems[i] = f.readTypeInfo() + } + + return tuple + + case TypeUDT: + udt := UDTTypeInfo{ + NativeType: simple, + } + udt.KeySpace = f.readString() + udt.Name = f.readString() + + n := f.readShort() + udt.Elements = make([]UDTField, n) + for i := 0; i < int(n); i++ { + field := &udt.Elements[i] + field.Name = f.readString() + field.Type = f.readTypeInfo() + } + + return udt + case TypeMap, TypeList, TypeSet: + collection := CollectionType{ + NativeType: simple, + } + + if simple.typ == TypeMap { + collection.Key = f.readTypeInfo() + } + + collection.Elem = f.readTypeInfo() + + return collection + } + + return simple +} + +type preparedMetadata struct { + resultMetadata + + // proto v4+ + pkeyColumns []int + + keyspace string + + table string +} + +func (r preparedMetadata) String() string { + return fmt.Sprintf("[prepared flags=0x%x pkey=%v paging_state=% X columns=%v col_count=%d actual_col_count=%d]", r.flags, r.pkeyColumns, r.pagingState, r.columns, r.colCount, r.actualColCount) +} + +func (f *framer) parsePreparedMetadata() preparedMetadata { + // TODO: deduplicate this from parseMetadata + meta := preparedMetadata{} + + meta.flags = f.readInt() + meta.colCount = f.readInt() + if meta.colCount < 0 { + panic(fmt.Errorf("received negative column count: %d", meta.colCount)) + } + meta.actualColCount = meta.colCount + + if f.proto >= protoVersion4 { + pkeyCount := f.readInt() + pkeys := make([]int, pkeyCount) + for i := 0; i < pkeyCount; i++ { + pkeys[i] = int(f.readShort()) + } + meta.pkeyColumns = pkeys + } + + if meta.flags&flagHasMorePages == flagHasMorePages { + meta.pagingState = copyBytes(f.readBytes()) + } + + if meta.flags&flagNoMetaData == flagNoMetaData { + return 
meta + } + + globalSpec := meta.flags&flagGlobalTableSpec == flagGlobalTableSpec + if globalSpec { + meta.keyspace = f.readString() + meta.table = f.readString() + } + + var cols []ColumnInfo + if meta.colCount < 1000 { + // preallocate columninfo to avoid excess copying + cols = make([]ColumnInfo, meta.colCount) + for i := 0; i < meta.colCount; i++ { + f.readCol(&cols[i], &meta.resultMetadata, globalSpec, meta.keyspace, meta.table) + } + } else { + // use append, huge number of columns usually indicates a corrupt frame or + // just a huge row. + for i := 0; i < meta.colCount; i++ { + var col ColumnInfo + f.readCol(&col, &meta.resultMetadata, globalSpec, meta.keyspace, meta.table) + cols = append(cols, col) + } + } + + meta.columns = cols + + return meta +} + +type resultMetadata struct { + flags int + + // only if flagPageState + pagingState []byte + + columns []ColumnInfo + colCount int + + // this is a count of the total number of columns which can be scanned, + // it is at minimum len(columns) but may be larger, for instance when a column + // is a UDT or tuple. + actualColCount int +} + +func (r *resultMetadata) morePages() bool { + return r.flags&flagHasMorePages == flagHasMorePages +} + +func (r resultMetadata) String() string { + return fmt.Sprintf("[metadata flags=0x%x paging_state=% X columns=%v]", r.flags, r.pagingState, r.columns) +} + +func (f *framer) readCol(col *ColumnInfo, meta *resultMetadata, globalSpec bool, keyspace, table string) { + if !globalSpec { + col.Keyspace = f.readString() + col.Table = f.readString() + } else { + col.Keyspace = keyspace + col.Table = table + } + + col.Name = f.readString() + col.TypeInfo = f.readTypeInfo() + switch v := col.TypeInfo.(type) { + // maybe also UDT + case TupleTypeInfo: + // -1 because we already included the tuple column + meta.actualColCount += len(v.Elems) - 1 + } +} + +func (f *framer) parseResultMetadata() resultMetadata { + var meta resultMetadata + + meta.flags = f.readInt() + meta.colCount = f.readInt() + if meta.colCount < 0 { + panic(fmt.Errorf("received negative column count: %d", meta.colCount)) + } + meta.actualColCount = meta.colCount + + if meta.flags&flagHasMorePages == flagHasMorePages { + meta.pagingState = copyBytes(f.readBytes()) + } + + if meta.flags&flagNoMetaData == flagNoMetaData { + return meta + } + + var keyspace, table string + globalSpec := meta.flags&flagGlobalTableSpec == flagGlobalTableSpec + if globalSpec { + keyspace = f.readString() + table = f.readString() + } + + var cols []ColumnInfo + if meta.colCount < 1000 { + // preallocate columninfo to avoid excess copying + cols = make([]ColumnInfo, meta.colCount) + for i := 0; i < meta.colCount; i++ { + f.readCol(&cols[i], &meta, globalSpec, keyspace, table) + } + + } else { + // use append, huge number of columns usually indicates a corrupt frame or + // just a huge row. 
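+		// (The 1000-column cutoff above is a heuristic: below it the count is
+		// trusted enough to preallocate, above it columns are appended one at a
+		// time so a bogus count cannot force a huge upfront allocation.)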
+		for i := 0; i < meta.colCount; i++ {
+			var col ColumnInfo
+			f.readCol(&col, &meta, globalSpec, keyspace, table)
+			cols = append(cols, col)
+		}
+	}
+
+	meta.columns = cols
+
+	return meta
+}
+
+type resultVoidFrame struct {
+	frameHeader
+}
+
+func (f *resultVoidFrame) String() string {
+	return "[result_void]"
+}
+
+func (f *framer) parseResultFrame() (frame, error) {
+	kind := f.readInt()
+
+	switch kind {
+	case resultKindVoid:
+		return &resultVoidFrame{frameHeader: *f.header}, nil
+	case resultKindRows:
+		return f.parseResultRows(), nil
+	case resultKindKeyspace:
+		return f.parseResultSetKeyspace(), nil
+	case resultKindPrepared:
+		return f.parseResultPrepared(), nil
+	case resultKindSchemaChanged:
+		return f.parseResultSchemaChange(), nil
+	}
+
+	return nil, NewErrProtocol("unknown result kind: %x", kind)
+}
+
+type resultRowsFrame struct {
+	frameHeader
+
+	meta resultMetadata
+	// don't parse the rows here as we only need to do it once
+	numRows int
+}
+
+func (f *resultRowsFrame) String() string {
+	return fmt.Sprintf("[result_rows meta=%v]", f.meta)
+}
+
+func (f *framer) parseResultRows() frame {
+	result := &resultRowsFrame{}
+	result.meta = f.parseResultMetadata()
+
+	result.numRows = f.readInt()
+	if result.numRows < 0 {
+		panic(fmt.Errorf("invalid row_count in result frame: %d", result.numRows))
+	}
+
+	return result
+}
+
+type resultKeyspaceFrame struct {
+	frameHeader
+	keyspace string
+}
+
+func (r *resultKeyspaceFrame) String() string {
+	return fmt.Sprintf("[result_keyspace keyspace=%s]", r.keyspace)
+}
+
+func (f *framer) parseResultSetKeyspace() frame {
+	return &resultKeyspaceFrame{
+		frameHeader: *f.header,
+		keyspace:    f.readString(),
+	}
+}
+
+type resultPreparedFrame struct {
+	frameHeader
+
+	preparedID []byte
+	reqMeta    preparedMetadata
+	respMeta   resultMetadata
+}
+
+func (f *framer) parseResultPrepared() frame {
+	frame := &resultPreparedFrame{
+		frameHeader: *f.header,
+		preparedID:  f.readShortBytes(),
+		reqMeta:     f.parsePreparedMetadata(),
+	}
+
+	if f.proto < protoVersion2 {
+		return frame
+	}
+
+	frame.respMeta = f.parseResultMetadata()
+
+	return frame
+}
+
+type schemaChangeKeyspace struct {
+	frameHeader
+
+	change   string
+	keyspace string
+}
+
+func (f schemaChangeKeyspace) String() string {
+	return fmt.Sprintf("[event schema_change_keyspace change=%q keyspace=%q]", f.change, f.keyspace)
+}
+
+type schemaChangeTable struct {
+	frameHeader
+
+	change   string
+	keyspace string
+	object   string
+}
+
+func (f schemaChangeTable) String() string {
+	return fmt.Sprintf("[event schema_change change=%q keyspace=%q object=%q]", f.change, f.keyspace, f.object)
+}
+
+type schemaChangeType struct {
+	frameHeader
+
+	change   string
+	keyspace string
+	object   string
+}
+
+type schemaChangeFunction struct {
+	frameHeader
+
+	change   string
+	keyspace string
+	name     string
+	args     []string
+}
+
+type schemaChangeAggregate struct {
+	frameHeader
+
+	change   string
+	keyspace string
+	name     string
+	args     []string
+}
+
+func (f *framer) parseResultSchemaChange() frame {
+	if f.proto <= protoVersion2 {
+		change := f.readString()
+		keyspace := f.readString()
+		table := f.readString()
+
+		if table != "" {
+			return &schemaChangeTable{
+				frameHeader: *f.header,
+				change:      change,
+				keyspace:    keyspace,
+				object:      table,
+			}
+		} else {
+			return &schemaChangeKeyspace{
+				frameHeader: *f.header,
+				change:      change,
+				keyspace:    keyspace,
+			}
+		}
+	} else {
+		change := f.readString()
+		target := f.readString()
+
+		// TODO: could just use a separate type for each target
+		switch target {
+		case 
"KEYSPACE": + frame := &schemaChangeKeyspace{ + frameHeader: *f.header, + change: change, + } + + frame.keyspace = f.readString() + + return frame + case "TABLE": + frame := &schemaChangeTable{ + frameHeader: *f.header, + change: change, + } + + frame.keyspace = f.readString() + frame.object = f.readString() + + return frame + case "TYPE": + frame := &schemaChangeType{ + frameHeader: *f.header, + change: change, + } + + frame.keyspace = f.readString() + frame.object = f.readString() + + return frame + case "FUNCTION": + frame := &schemaChangeFunction{ + frameHeader: *f.header, + change: change, + } + + frame.keyspace = f.readString() + frame.name = f.readString() + frame.args = f.readStringList() + + return frame + case "AGGREGATE": + frame := &schemaChangeAggregate{ + frameHeader: *f.header, + change: change, + } + + frame.keyspace = f.readString() + frame.name = f.readString() + frame.args = f.readStringList() + + return frame + default: + panic(fmt.Errorf("gocql: unknown SCHEMA_CHANGE target: %q change: %q", target, change)) + } + } + +} + +type authenticateFrame struct { + frameHeader + + class string +} + +func (a *authenticateFrame) String() string { + return fmt.Sprintf("[authenticate class=%q]", a.class) +} + +func (f *framer) parseAuthenticateFrame() frame { + return &authenticateFrame{ + frameHeader: *f.header, + class: f.readString(), + } +} + +type authSuccessFrame struct { + frameHeader + + data []byte +} + +func (a *authSuccessFrame) String() string { + return fmt.Sprintf("[auth_success data=%q]", a.data) +} + +func (f *framer) parseAuthSuccessFrame() frame { + return &authSuccessFrame{ + frameHeader: *f.header, + data: f.readBytes(), + } +} + +type authChallengeFrame struct { + frameHeader + + data []byte +} + +func (a *authChallengeFrame) String() string { + return fmt.Sprintf("[auth_challenge data=%q]", a.data) +} + +func (f *framer) parseAuthChallengeFrame() frame { + return &authChallengeFrame{ + frameHeader: *f.header, + data: f.readBytes(), + } +} + +type statusChangeEventFrame struct { + frameHeader + + change string + host net.IP + port int +} + +func (t statusChangeEventFrame) String() string { + return fmt.Sprintf("[status_change change=%s host=%v port=%v]", t.change, t.host, t.port) +} + +// essentially the same as statusChange +type topologyChangeEventFrame struct { + frameHeader + + change string + host net.IP + port int +} + +func (t topologyChangeEventFrame) String() string { + return fmt.Sprintf("[topology_change change=%s host=%v port=%v]", t.change, t.host, t.port) +} + +func (f *framer) parseEventFrame() frame { + eventType := f.readString() + + switch eventType { + case "TOPOLOGY_CHANGE": + frame := &topologyChangeEventFrame{frameHeader: *f.header} + frame.change = f.readString() + frame.host, frame.port = f.readInet() + + return frame + case "STATUS_CHANGE": + frame := &statusChangeEventFrame{frameHeader: *f.header} + frame.change = f.readString() + frame.host, frame.port = f.readInet() + + return frame + case "SCHEMA_CHANGE": + // this should work for all versions + return f.parseResultSchemaChange() + default: + panic(fmt.Errorf("gocql: unknown event type: %q", eventType)) + } + +} + +type writeAuthResponseFrame struct { + data []byte +} + +func (a *writeAuthResponseFrame) String() string { + return fmt.Sprintf("[auth_response data=%q]", a.data) +} + +func (a *writeAuthResponseFrame) buildFrame(framer *framer, streamID int) error { + return framer.writeAuthResponseFrame(streamID, a.data) +} + +func (f *framer) writeAuthResponseFrame(streamID int, data 
[]byte) error { + f.writeHeader(f.flags, opAuthResponse, streamID) + f.writeBytes(data) + return f.finish() +} + +type queryValues struct { + value []byte + + // optional name, will set With names for values flag + name string + isUnset bool +} + +type queryParams struct { + consistency Consistency + // v2+ + skipMeta bool + values []queryValues + pageSize int + pagingState []byte + serialConsistency SerialConsistency + // v3+ + defaultTimestamp bool + defaultTimestampValue int64 + // v5+ + keyspace string +} + +func (q queryParams) String() string { + return fmt.Sprintf("[query_params consistency=%v skip_meta=%v page_size=%d paging_state=%q serial_consistency=%v default_timestamp=%v values=%v keyspace=%s]", + q.consistency, q.skipMeta, q.pageSize, q.pagingState, q.serialConsistency, q.defaultTimestamp, q.values, q.keyspace) +} + +func (f *framer) writeQueryParams(opts *queryParams) { + f.writeConsistency(opts.consistency) + + if f.proto == protoVersion1 { + return + } + + var flags byte + if len(opts.values) > 0 { + flags |= flagValues + } + if opts.skipMeta { + flags |= flagSkipMetaData + } + if opts.pageSize > 0 { + flags |= flagPageSize + } + if len(opts.pagingState) > 0 { + flags |= flagWithPagingState + } + if opts.serialConsistency > 0 { + flags |= flagWithSerialConsistency + } + + names := false + + // protoV3 specific things + if f.proto > protoVersion2 { + if opts.defaultTimestamp { + flags |= flagDefaultTimestamp + } + + if len(opts.values) > 0 && opts.values[0].name != "" { + flags |= flagWithNameValues + names = true + } + } + + if opts.keyspace != "" { + if f.proto > protoVersion4 { + flags |= flagWithKeyspace + } else { + panic(fmt.Errorf("the keyspace can only be set with protocol 5 or higher")) + } + } + + if f.proto > protoVersion4 { + f.writeUint(uint32(flags)) + } else { + f.writeByte(flags) + } + + if n := len(opts.values); n > 0 { + f.writeShort(uint16(n)) + + for i := 0; i < n; i++ { + if names { + f.writeString(opts.values[i].name) + } + if opts.values[i].isUnset { + f.writeUnset() + } else { + f.writeBytes(opts.values[i].value) + } + } + } + + if opts.pageSize > 0 { + f.writeInt(int32(opts.pageSize)) + } + + if len(opts.pagingState) > 0 { + f.writeBytes(opts.pagingState) + } + + if opts.serialConsistency > 0 { + f.writeConsistency(Consistency(opts.serialConsistency)) + } + + if f.proto > protoVersion2 && opts.defaultTimestamp { + // timestamp in microseconds + var ts int64 + if opts.defaultTimestampValue != 0 { + ts = opts.defaultTimestampValue + } else { + ts = time.Now().UnixNano() / 1000 + } + f.writeLong(ts) + } + + if opts.keyspace != "" { + f.writeString(opts.keyspace) + } +} + +type writeQueryFrame struct { + statement string + params queryParams + + // v4+ + customPayload map[string][]byte +} + +func (w *writeQueryFrame) String() string { + return fmt.Sprintf("[query statement=%q params=%v]", w.statement, w.params) +} + +func (w *writeQueryFrame) buildFrame(framer *framer, streamID int) error { + return framer.writeQueryFrame(streamID, w.statement, &w.params, w.customPayload) +} + +func (f *framer) writeQueryFrame(streamID int, statement string, params *queryParams, customPayload map[string][]byte) error { + if len(customPayload) > 0 { + f.payload() + } + f.writeHeader(f.flags, opQuery, streamID) + f.writeCustomPayload(&customPayload) + f.writeLongString(statement) + f.writeQueryParams(params) + + return f.finish() +} + +type frameBuilder interface { + buildFrame(framer *framer, streamID int) error +} + +type frameWriterFunc func(framer *framer, streamID 
int) error
+
+func (f frameWriterFunc) buildFrame(framer *framer, streamID int) error {
+	return f(framer, streamID)
+}
+
+type writeExecuteFrame struct {
+	preparedID []byte
+	params     queryParams
+
+	// v4+
+	customPayload map[string][]byte
+}
+
+func (e *writeExecuteFrame) String() string {
+	return fmt.Sprintf("[execute id=% X params=%v]", e.preparedID, &e.params)
+}
+
+func (e *writeExecuteFrame) buildFrame(fr *framer, streamID int) error {
+	return fr.writeExecuteFrame(streamID, e.preparedID, &e.params, &e.customPayload)
+}
+
+func (f *framer) writeExecuteFrame(streamID int, preparedID []byte, params *queryParams, customPayload *map[string][]byte) error {
+	if len(*customPayload) > 0 {
+		f.payload()
+	}
+	f.writeHeader(f.flags, opExecute, streamID)
+	f.writeCustomPayload(customPayload)
+	f.writeShortBytes(preparedID)
+	if f.proto > protoVersion1 {
+		f.writeQueryParams(params)
+	} else {
+		n := len(params.values)
+		f.writeShort(uint16(n))
+		for i := 0; i < n; i++ {
+			if params.values[i].isUnset {
+				f.writeUnset()
+			} else {
+				f.writeBytes(params.values[i].value)
+			}
+		}
+		f.writeConsistency(params.consistency)
+	}
+
+	return f.finish()
+}
+
+// TODO: can we replace BatchStatement with batchStatement? As they pretty much
+// duplicate each other
+type batchStatment struct {
+	preparedID []byte
+	statement  string
+	values     []queryValues
+}
+
+type writeBatchFrame struct {
+	typ         BatchType
+	statements  []batchStatment
+	consistency Consistency
+
+	// v3+
+	serialConsistency     SerialConsistency
+	defaultTimestamp      bool
+	defaultTimestampValue int64
+
+	// v4+
+	customPayload map[string][]byte
+}
+
+func (w *writeBatchFrame) buildFrame(framer *framer, streamID int) error {
+	return framer.writeBatchFrame(streamID, w, w.customPayload)
+}
+
+func (f *framer) writeBatchFrame(streamID int, w *writeBatchFrame, customPayload map[string][]byte) error {
+	if len(customPayload) > 0 {
+		f.payload()
+	}
+	f.writeHeader(f.flags, opBatch, streamID)
+	f.writeCustomPayload(&customPayload)
+	f.writeByte(byte(w.typ))
+
+	n := len(w.statements)
+	f.writeShort(uint16(n))
+
+	var flags byte
+
+	for i := 0; i < n; i++ {
+		b := &w.statements[i]
+		if len(b.preparedID) == 0 {
+			f.writeByte(0)
+			f.writeLongString(b.statement)
+		} else {
+			f.writeByte(1)
+			f.writeShortBytes(b.preparedID)
+		}
+
+		f.writeShort(uint16(len(b.values)))
+		for j := range b.values {
+			col := b.values[j]
+			if f.proto > protoVersion2 && col.name != "" {
+				// TODO: move this check into the caller and set a flag on writeBatchFrame
+				// to indicate using named values
+				if f.proto <= protoVersion5 {
+					return fmt.Errorf("gocql: named query values are not supported in batches, please see https://issues.apache.org/jira/browse/CASSANDRA-10246")
+				}
+				flags |= flagWithNameValues
+				f.writeString(col.name)
+			}
+			if col.isUnset {
+				f.writeUnset()
+			} else {
+				f.writeBytes(col.value)
+			}
+		}
+	}
+
+	f.writeConsistency(w.consistency)
+
+	if f.proto > protoVersion2 {
+		if w.serialConsistency > 0 {
+			flags |= flagWithSerialConsistency
+		}
+		if w.defaultTimestamp {
+			flags |= flagDefaultTimestamp
+		}
+
+		if f.proto > protoVersion4 {
+			f.writeUint(uint32(flags))
+		} else {
+			f.writeByte(flags)
+		}
+
+		if w.serialConsistency > 0 {
+			f.writeConsistency(Consistency(w.serialConsistency))
+		}
+
+		if w.defaultTimestamp {
+			var ts int64
+			if w.defaultTimestampValue != 0 {
+				ts = w.defaultTimestampValue
+			} else {
+				ts = time.Now().UnixNano() / 1000
+			}
+			f.writeLong(ts)
+		}
+	}
+
+	return f.finish()
+}
+
+type writeOptionsFrame struct{}
+
+func (w *writeOptionsFrame) 
buildFrame(framer *framer, streamID int) error {
+	return framer.writeOptionsFrame(streamID, w)
+}
+
+func (f *framer) writeOptionsFrame(stream int, _ *writeOptionsFrame) error {
+	f.writeHeader(f.flags&^flagCompress, opOptions, stream)
+	return f.finish()
+}
+
+type writeRegisterFrame struct {
+	events []string
+}
+
+func (w *writeRegisterFrame) buildFrame(framer *framer, streamID int) error {
+	return framer.writeRegisterFrame(streamID, w)
+}
+
+func (f *framer) writeRegisterFrame(streamID int, w *writeRegisterFrame) error {
+	f.writeHeader(f.flags, opRegister, streamID)
+	f.writeStringList(w.events)
+
+	return f.finish()
+}
+
+func (f *framer) readByte() byte {
+	if len(f.buf) < 1 {
+		panic(fmt.Errorf("not enough bytes in buffer to read byte require 1 got: %d", len(f.buf)))
+	}
+
+	b := f.buf[0]
+	f.buf = f.buf[1:]
+	return b
+}
+
+func (f *framer) readInt() (n int) {
+	if len(f.buf) < 4 {
+		panic(fmt.Errorf("not enough bytes in buffer to read int require 4 got: %d", len(f.buf)))
+	}
+
+	n = int(int32(f.buf[0])<<24 | int32(f.buf[1])<<16 | int32(f.buf[2])<<8 | int32(f.buf[3]))
+	f.buf = f.buf[4:]
+	return
+}
+
+func (f *framer) readShort() (n uint16) {
+	if len(f.buf) < 2 {
+		panic(fmt.Errorf("not enough bytes in buffer to read short require 2 got: %d", len(f.buf)))
+	}
+	n = uint16(f.buf[0])<<8 | uint16(f.buf[1])
+	f.buf = f.buf[2:]
+	return
+}
+
+func (f *framer) readString() (s string) {
+	size := f.readShort()
+
+	if len(f.buf) < int(size) {
+		panic(fmt.Errorf("not enough bytes in buffer to read string require %d got: %d", size, len(f.buf)))
+	}
+
+	s = string(f.buf[:size])
+	f.buf = f.buf[size:]
+	return
+}
+
+func (f *framer) readLongString() (s string) {
+	size := f.readInt()
+
+	if len(f.buf) < size {
+		panic(fmt.Errorf("not enough bytes in buffer to read long string require %d got: %d", size, len(f.buf)))
+	}
+
+	s = string(f.buf[:size])
+	f.buf = f.buf[size:]
+	return
+}
+
+func (f *framer) readUUID() *UUID {
+	if len(f.buf) < 16 {
+		panic(fmt.Errorf("not enough bytes in buffer to read uuid require %d got: %d", 16, len(f.buf)))
+	}
+
+	// TODO: how to handle this error? If the data is not a valid uuid then surely that is a problem.
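+	// Note: UUIDFromBytes only returns an error when the input is not exactly
+	// 16 bytes, and the length check above guarantees 16 bytes, so the ignored
+	// error should be unreachable here.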
+ u, _ := UUIDFromBytes(f.buf[:16]) + f.buf = f.buf[16:] + return &u +} + +func (f *framer) readStringList() []string { + size := f.readShort() + + l := make([]string, size) + for i := 0; i < int(size); i++ { + l[i] = f.readString() + } + + return l +} + +func (f *framer) readBytesInternal() ([]byte, error) { + size := f.readInt() + if size < 0 { + return nil, nil + } + + if len(f.buf) < size { + return nil, fmt.Errorf("not enough bytes in buffer to read bytes require %d got: %d", size, len(f.buf)) + } + + l := f.buf[:size] + f.buf = f.buf[size:] + + return l, nil +} + +func (f *framer) readBytes() []byte { + l, err := f.readBytesInternal() + if err != nil { + panic(err) + } + + return l +} + +func (f *framer) readShortBytes() []byte { + size := f.readShort() + if len(f.buf) < int(size) { + panic(fmt.Errorf("not enough bytes in buffer to read short bytes: require %d got %d", size, len(f.buf))) + } + + l := f.buf[:size] + f.buf = f.buf[size:] + + return l +} + +func (f *framer) readInetAdressOnly() net.IP { + if len(f.buf) < 1 { + panic(fmt.Errorf("not enough bytes in buffer to read inet size require %d got: %d", 1, len(f.buf))) + } + + size := f.buf[0] + f.buf = f.buf[1:] + + if !(size == 4 || size == 16) { + panic(fmt.Errorf("invalid IP size: %d", size)) + } + + if len(f.buf) < 1 { + panic(fmt.Errorf("not enough bytes in buffer to read inet require %d got: %d", size, len(f.buf))) + } + + ip := make([]byte, size) + copy(ip, f.buf[:size]) + f.buf = f.buf[size:] + return net.IP(ip) +} + +func (f *framer) readInet() (net.IP, int) { + return f.readInetAdressOnly(), f.readInt() +} + +func (f *framer) readConsistency() Consistency { + return Consistency(f.readShort()) +} + +func (f *framer) readBytesMap() map[string][]byte { + size := f.readShort() + m := make(map[string][]byte, size) + + for i := 0; i < int(size); i++ { + k := f.readString() + v := f.readBytes() + m[k] = v + } + + return m +} + +func (f *framer) readStringMultiMap() map[string][]string { + size := f.readShort() + m := make(map[string][]string, size) + + for i := 0; i < int(size); i++ { + k := f.readString() + v := f.readStringList() + m[k] = v + } + + return m +} + +func (f *framer) writeByte(b byte) { + f.buf = append(f.buf, b) +} + +func appendBytes(p []byte, d []byte) []byte { + if d == nil { + return appendInt(p, -1) + } + p = appendInt(p, int32(len(d))) + p = append(p, d...) 
+ return p +} + +func appendShort(p []byte, n uint16) []byte { + return append(p, + byte(n>>8), + byte(n), + ) +} + +func appendInt(p []byte, n int32) []byte { + return append(p, byte(n>>24), + byte(n>>16), + byte(n>>8), + byte(n)) +} + +func appendUint(p []byte, n uint32) []byte { + return append(p, byte(n>>24), + byte(n>>16), + byte(n>>8), + byte(n)) +} + +func appendLong(p []byte, n int64) []byte { + return append(p, + byte(n>>56), + byte(n>>48), + byte(n>>40), + byte(n>>32), + byte(n>>24), + byte(n>>16), + byte(n>>8), + byte(n), + ) +} + +func (f *framer) writeCustomPayload(customPayload *map[string][]byte) { + if len(*customPayload) > 0 { + if f.proto < protoVersion4 { + panic("Custom payload is not supported with version V3 or less") + } + f.writeBytesMap(*customPayload) + } +} + +// these are protocol level binary types +func (f *framer) writeInt(n int32) { + f.buf = appendInt(f.buf, n) +} + +func (f *framer) writeUint(n uint32) { + f.buf = appendUint(f.buf, n) +} + +func (f *framer) writeShort(n uint16) { + f.buf = appendShort(f.buf, n) +} + +func (f *framer) writeLong(n int64) { + f.buf = appendLong(f.buf, n) +} + +func (f *framer) writeString(s string) { + f.writeShort(uint16(len(s))) + f.buf = append(f.buf, s...) +} + +func (f *framer) writeLongString(s string) { + f.writeInt(int32(len(s))) + f.buf = append(f.buf, s...) +} + +func (f *framer) writeStringList(l []string) { + f.writeShort(uint16(len(l))) + for _, s := range l { + f.writeString(s) + } +} + +func (f *framer) writeUnset() { + // Protocol version 4 specifies that bind variables do not require having a + // value when executing a statement. Bind variables without a value are + // called 'unset'. The 'unset' bind variable is serialized as the int + // value '-2' without following bytes. + f.writeInt(-2) +} + +func (f *framer) writeBytes(p []byte) { + // TODO: handle null case correctly, + // [bytes] A [int] n, followed by n bytes if n >= 0. If n < 0, + // no byte should follow and the value represented is `null`. + if p == nil { + f.writeInt(-1) + } else { + f.writeInt(int32(len(p))) + f.buf = append(f.buf, p...) + } +} + +func (f *framer) writeShortBytes(p []byte) { + f.writeShort(uint16(len(p))) + f.buf = append(f.buf, p...) 
+}
+
+func (f *framer) writeConsistency(cons Consistency) {
+	f.writeShort(uint16(cons))
+}
+
+func (f *framer) writeStringMap(m map[string]string) {
+	f.writeShort(uint16(len(m)))
+	for k, v := range m {
+		f.writeString(k)
+		f.writeString(v)
+	}
+}
+
+func (f *framer) writeBytesMap(m map[string][]byte) {
+	f.writeShort(uint16(len(m)))
+	for k, v := range m {
+		f.writeString(k)
+		f.writeBytes(v)
+	}
+}
diff --git a/vendor/github.com/gocql/gocql/fuzz.go b/vendor/github.com/gocql/gocql/fuzz.go
new file mode 100644
index 000000000..0d4cff0e5
--- /dev/null
+++ b/vendor/github.com/gocql/gocql/fuzz.go
@@ -0,0 +1,32 @@
+//go:build gofuzz
+// +build gofuzz
+
+package gocql
+
+import "bytes"
+
+func Fuzz(data []byte) int {
+	r := bytes.NewReader(data)
+
+	head, err := readHeader(r, make([]byte, 9))
+	if err != nil {
+		return 0
+	}
+
+	framer := newFramer(nil, byte(head.version))
+	err = framer.readFrame(r, &head)
+	if err != nil {
+		return 0
+	}
+
+	frame, err := framer.parseFrame()
+	if err != nil {
+		return 0
+	}
+
+	if frame != nil {
+		return 1
+	}
+
+	return 2
+}
diff --git a/vendor/github.com/gocql/gocql/helpers.go b/vendor/github.com/gocql/gocql/helpers.go
new file mode 100644
index 000000000..00f339779
--- /dev/null
+++ b/vendor/github.com/gocql/gocql/helpers.go
@@ -0,0 +1,448 @@
+// Copyright (c) 2012 The gocql Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package gocql
+
+import (
+	"fmt"
+	"math/big"
+	"net"
+	"reflect"
+	"strings"
+	"time"
+
+	"gopkg.in/inf.v0"
+)
+
+type RowData struct {
+	Columns []string
+	Values  []interface{}
+}
+
+func goType(t TypeInfo) (reflect.Type, error) {
+	switch t.Type() {
+	case TypeVarchar, TypeAscii, TypeInet, TypeText:
+		return reflect.TypeOf(*new(string)), nil
+	case TypeBigInt, TypeCounter:
+		return reflect.TypeOf(*new(int64)), nil
+	case TypeTime:
+		return reflect.TypeOf(*new(time.Duration)), nil
+	case TypeTimestamp:
+		return reflect.TypeOf(*new(time.Time)), nil
+	case TypeBlob:
+		return reflect.TypeOf(*new([]byte)), nil
+	case TypeBoolean:
+		return reflect.TypeOf(*new(bool)), nil
+	case TypeFloat:
+		return reflect.TypeOf(*new(float32)), nil
+	case TypeDouble:
+		return reflect.TypeOf(*new(float64)), nil
+	case TypeInt:
+		return reflect.TypeOf(*new(int)), nil
+	case TypeSmallInt:
+		return reflect.TypeOf(*new(int16)), nil
+	case TypeTinyInt:
+		return reflect.TypeOf(*new(int8)), nil
+	case TypeDecimal:
+		return reflect.TypeOf(*new(*inf.Dec)), nil
+	case TypeUUID, TypeTimeUUID:
+		return reflect.TypeOf(*new(UUID)), nil
+	case TypeList, TypeSet:
+		elemType, err := goType(t.(CollectionType).Elem)
+		if err != nil {
+			return nil, err
+		}
+		return reflect.SliceOf(elemType), nil
+	case TypeMap:
+		keyType, err := goType(t.(CollectionType).Key)
+		if err != nil {
+			return nil, err
+		}
+		valueType, err := goType(t.(CollectionType).Elem)
+		if err != nil {
+			return nil, err
+		}
+		return reflect.MapOf(keyType, valueType), nil
+	case TypeVarint:
+		return reflect.TypeOf(*new(*big.Int)), nil
+	case TypeTuple:
+		// what can we do here? 
all there is to do is to make a list of interface{} + tuple := t.(TupleTypeInfo) + return reflect.TypeOf(make([]interface{}, len(tuple.Elems))), nil + case TypeUDT: + return reflect.TypeOf(make(map[string]interface{})), nil + case TypeDate: + return reflect.TypeOf(*new(time.Time)), nil + case TypeDuration: + return reflect.TypeOf(*new(Duration)), nil + default: + return nil, fmt.Errorf("cannot create Go type for unknown CQL type %s", t) + } +} + +func dereference(i interface{}) interface{} { + return reflect.Indirect(reflect.ValueOf(i)).Interface() +} + +func getCassandraBaseType(name string) Type { + switch name { + case "ascii": + return TypeAscii + case "bigint": + return TypeBigInt + case "blob": + return TypeBlob + case "boolean": + return TypeBoolean + case "counter": + return TypeCounter + case "date": + return TypeDate + case "decimal": + return TypeDecimal + case "double": + return TypeDouble + case "duration": + return TypeDuration + case "float": + return TypeFloat + case "int": + return TypeInt + case "smallint": + return TypeSmallInt + case "tinyint": + return TypeTinyInt + case "time": + return TypeTime + case "timestamp": + return TypeTimestamp + case "uuid": + return TypeUUID + case "varchar": + return TypeVarchar + case "text": + return TypeText + case "varint": + return TypeVarint + case "timeuuid": + return TypeTimeUUID + case "inet": + return TypeInet + case "MapType": + return TypeMap + case "ListType": + return TypeList + case "SetType": + return TypeSet + case "TupleType": + return TypeTuple + default: + return TypeCustom + } +} + +func getCassandraType(name string, logger StdLogger) TypeInfo { + if strings.HasPrefix(name, "frozen<") { + return getCassandraType(strings.TrimPrefix(name[:len(name)-1], "frozen<"), logger) + } else if strings.HasPrefix(name, "set<") { + return CollectionType{ + NativeType: NativeType{typ: TypeSet}, + Elem: getCassandraType(strings.TrimPrefix(name[:len(name)-1], "set<"), logger), + } + } else if strings.HasPrefix(name, "list<") { + return CollectionType{ + NativeType: NativeType{typ: TypeList}, + Elem: getCassandraType(strings.TrimPrefix(name[:len(name)-1], "list<"), logger), + } + } else if strings.HasPrefix(name, "map<") { + names := splitCompositeTypes(strings.TrimPrefix(name[:len(name)-1], "map<")) + if len(names) != 2 { + logger.Printf("Error parsing map type, it has %d subelements, expecting 2\n", len(names)) + return NativeType{ + typ: TypeCustom, + } + } + return CollectionType{ + NativeType: NativeType{typ: TypeMap}, + Key: getCassandraType(names[0], logger), + Elem: getCassandraType(names[1], logger), + } + } else if strings.HasPrefix(name, "tuple<") { + names := splitCompositeTypes(strings.TrimPrefix(name[:len(name)-1], "tuple<")) + types := make([]TypeInfo, len(names)) + + for i, name := range names { + types[i] = getCassandraType(name, logger) + } + + return TupleTypeInfo{ + NativeType: NativeType{typ: TypeTuple}, + Elems: types, + } + } else { + return NativeType{ + typ: getCassandraBaseType(name), + } + } +} + +func splitCompositeTypes(name string) []string { + if !strings.Contains(name, "<") { + return strings.Split(name, ", ") + } + var parts []string + lessCount := 0 + segment := "" + for _, char := range name { + if char == ',' && lessCount == 0 { + if segment != "" { + parts = append(parts, strings.TrimSpace(segment)) + } + segment = "" + continue + } + segment += string(char) + if char == '<' { + lessCount++ + } else if char == '>' { + lessCount-- + } + } + if segment != "" { + parts = append(parts, 
strings.TrimSpace(segment))
+	}
+	return parts
+}
+
+func apacheToCassandraType(t string) string {
+	t = strings.Replace(t, apacheCassandraTypePrefix, "", -1)
+	t = strings.Replace(t, "(", "<", -1)
+	t = strings.Replace(t, ")", ">", -1)
+	types := strings.FieldsFunc(t, func(r rune) bool {
+		return r == '<' || r == '>' || r == ','
+	})
+	for _, typ := range types {
+		t = strings.Replace(t, typ, getApacheCassandraType(typ).String(), -1)
+	}
+	// This is done so it exactly matches what Cassandra returns
+	return strings.Replace(t, ",", ", ", -1)
+}
+
+func getApacheCassandraType(class string) Type {
+	switch strings.TrimPrefix(class, apacheCassandraTypePrefix) {
+	case "AsciiType":
+		return TypeAscii
+	case "LongType":
+		return TypeBigInt
+	case "BytesType":
+		return TypeBlob
+	case "BooleanType":
+		return TypeBoolean
+	case "CounterColumnType":
+		return TypeCounter
+	case "DecimalType":
+		return TypeDecimal
+	case "DoubleType":
+		return TypeDouble
+	case "FloatType":
+		return TypeFloat
+	case "Int32Type":
+		return TypeInt
+	case "ShortType":
+		return TypeSmallInt
+	case "ByteType":
+		return TypeTinyInt
+	case "TimeType":
+		return TypeTime
+	case "DateType", "TimestampType":
+		return TypeTimestamp
+	case "UUIDType", "LexicalUUIDType":
+		return TypeUUID
+	case "UTF8Type":
+		return TypeVarchar
+	case "IntegerType":
+		return TypeVarint
+	case "TimeUUIDType":
+		return TypeTimeUUID
+	case "InetAddressType":
+		return TypeInet
+	case "MapType":
+		return TypeMap
+	case "ListType":
+		return TypeList
+	case "SetType":
+		return TypeSet
+	case "TupleType":
+		return TypeTuple
+	case "DurationType":
+		return TypeDuration
+	default:
+		return TypeCustom
+	}
+}
+
+func (r *RowData) rowMap(m map[string]interface{}) {
+	for i, column := range r.Columns {
+		val := dereference(r.Values[i])
+		if valVal := reflect.ValueOf(val); valVal.Kind() == reflect.Slice {
+			valCopy := reflect.MakeSlice(valVal.Type(), valVal.Len(), valVal.Cap())
+			reflect.Copy(valCopy, valVal)
+			m[column] = valCopy.Interface()
+		} else {
+			m[column] = val
+		}
+	}
+}
+
+// TupleColumnName returns the column name of a tuple value in a column named
+// c at index n. It should be used when a specific element within a tuple needs
+// to be extracted from a map returned by SliceMap or MapScan.
+func TupleColumnName(c string, n int) string {
+	return fmt.Sprintf("%s[%d]", c, n)
+}
+
+func (iter *Iter) RowData() (RowData, error) {
+	if iter.err != nil {
+		return RowData{}, iter.err
+	}
+
+	columns := make([]string, 0, len(iter.Columns()))
+	values := make([]interface{}, 0, len(iter.Columns()))
+
+	for _, column := range iter.Columns() {
+		if c, ok := column.TypeInfo.(TupleTypeInfo); !ok {
+			val, err := column.TypeInfo.NewWithError()
+			if err != nil {
+				return RowData{}, err
+			}
+			columns = append(columns, column.Name)
+			values = append(values, val)
+		} else {
+			for i, elem := range c.Elems {
+				columns = append(columns, TupleColumnName(column.Name, i))
+				val, err := elem.NewWithError()
+				if err != nil {
+					return RowData{}, err
+				}
+				values = append(values, val)
+			}
+		}
+	}
+
+	rowData := RowData{
+		Columns: columns,
+		Values:  values,
+	}
+
+	return rowData, nil
+}
+
+// TODO(zariel): is it worth exporting this?
+func (iter *Iter) rowMap() (map[string]interface{}, error) {
+	if iter.err != nil {
+		return nil, iter.err
+	}
+
+	rowData, _ := iter.RowData()
+	iter.Scan(rowData.Values...)
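+	// The boolean result of Scan is ignored here: rowMap reads at most one
+	// row, and any read error is left on iter.err for the caller to inspect.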
+ m := make(map[string]interface{}, len(rowData.Columns)) + rowData.rowMap(m) + return m, nil +} + +// SliceMap is a helper function to make the API easier to use +// returns the data from the query in the form of []map[string]interface{} +func (iter *Iter) SliceMap() ([]map[string]interface{}, error) { + if iter.err != nil { + return nil, iter.err + } + + // Not checking for the error because we just did + rowData, _ := iter.RowData() + dataToReturn := make([]map[string]interface{}, 0) + for iter.Scan(rowData.Values...) { + m := make(map[string]interface{}, len(rowData.Columns)) + rowData.rowMap(m) + dataToReturn = append(dataToReturn, m) + } + if iter.err != nil { + return nil, iter.err + } + return dataToReturn, nil +} + +// MapScan takes a map[string]interface{} and populates it with a row +// that is returned from cassandra. +// +// Each call to MapScan() must be called with a new map object. +// During the call to MapScan() any pointers in the existing map +// are replaced with non pointer types before the call returns +// +// iter := session.Query(`SELECT * FROM mytable`).Iter() +// for { +// // New map each iteration +// row := make(map[string]interface{}) +// if !iter.MapScan(row) { +// break +// } +// // Do things with row +// if fullname, ok := row["fullname"]; ok { +// fmt.Printf("Full Name: %s\n", fullname) +// } +// } +// +// You can also pass pointers in the map before each call +// +// var fullName FullName // Implements gocql.Unmarshaler and gocql.Marshaler interfaces +// var address net.IP +// var age int +// iter := session.Query(`SELECT * FROM scan_map_table`).Iter() +// for { +// // New map each iteration +// row := map[string]interface{}{ +// "fullname": &fullName, +// "age": &age, +// "address": &address, +// } +// if !iter.MapScan(row) { +// break +// } +// fmt.Printf("First: %s Age: %d Address: %q\n", fullName.FirstName, age, address) +// } +func (iter *Iter) MapScan(m map[string]interface{}) bool { + if iter.err != nil { + return false + } + + // Not checking for the error because we just did + rowData, _ := iter.RowData() + + for i, col := range rowData.Columns { + if dest, ok := m[col]; ok { + rowData.Values[i] = dest + } + } + + if iter.Scan(rowData.Values...) 
{ + rowData.rowMap(m) + return true + } + return false +} + +func copyBytes(p []byte) []byte { + b := make([]byte, len(p)) + copy(b, p) + return b +} + +var failDNS = false + +func LookupIP(host string) ([]net.IP, error) { + if failDNS { + return nil, &net.DNSError{} + } + return net.LookupIP(host) + +} diff --git a/vendor/github.com/gocql/gocql/host_source.go b/vendor/github.com/gocql/gocql/host_source.go new file mode 100644 index 000000000..a0b7058d7 --- /dev/null +++ b/vendor/github.com/gocql/gocql/host_source.go @@ -0,0 +1,899 @@ +package gocql + +import ( + "context" + "errors" + "fmt" + "net" + "strconv" + "strings" + "sync" + "time" +) + +var ErrCannotFindHost = errors.New("cannot find host") +var ErrHostAlreadyExists = errors.New("host already exists") + +type nodeState int32 + +func (n nodeState) String() string { + if n == NodeUp { + return "UP" + } else if n == NodeDown { + return "DOWN" + } + return fmt.Sprintf("UNKNOWN_%d", n) +} + +const ( + NodeUp nodeState = iota + NodeDown +) + +type cassVersion struct { + Major, Minor, Patch int +} + +func (c *cassVersion) Set(v string) error { + if v == "" { + return nil + } + + return c.UnmarshalCQL(nil, []byte(v)) +} + +func (c *cassVersion) UnmarshalCQL(info TypeInfo, data []byte) error { + return c.unmarshal(data) +} + +func (c *cassVersion) unmarshal(data []byte) error { + version := strings.TrimSuffix(string(data), "-SNAPSHOT") + version = strings.TrimPrefix(version, "v") + v := strings.Split(version, ".") + + if len(v) < 2 { + return fmt.Errorf("invalid version string: %s", data) + } + + var err error + c.Major, err = strconv.Atoi(v[0]) + if err != nil { + return fmt.Errorf("invalid major version %v: %v", v[0], err) + } + + c.Minor, err = strconv.Atoi(v[1]) + if err != nil { + return fmt.Errorf("invalid minor version %v: %v", v[1], err) + } + + if len(v) > 2 { + c.Patch, err = strconv.Atoi(v[2]) + if err != nil { + return fmt.Errorf("invalid patch version %v: %v", v[2], err) + } + } + + return nil +} + +func (c cassVersion) Before(major, minor, patch int) bool { + // We're comparing us (cassVersion) with the provided version (major, minor, patch) + // We return true if our version is lower (comes before) than the provided one. + if c.Major < major { + return true + } else if c.Major == major { + if c.Minor < minor { + return true + } else if c.Minor == minor && c.Patch < patch { + return true + } + + } + return false +} + +func (c cassVersion) AtLeast(major, minor, patch int) bool { + return !c.Before(major, minor, patch) +} + +func (c cassVersion) String() string { + return fmt.Sprintf("v%d.%d.%d", c.Major, c.Minor, c.Patch) +} + +func (c cassVersion) nodeUpDelay() time.Duration { + if c.Major >= 2 && c.Minor >= 2 { + // CASSANDRA-8236 + return 0 + } + + return 10 * time.Second +} + +type HostInfo struct { + // TODO(zariel): reduce locking maybe, not all values will change, but to ensure + // that we are thread safe use a mutex to access all fields. 
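+	// All fields below are guarded by mu; use the accessor methods rather than reading them directly.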
+ mu sync.RWMutex + hostname string + peer net.IP + broadcastAddress net.IP + listenAddress net.IP + rpcAddress net.IP + preferredIP net.IP + connectAddress net.IP + port int + dataCenter string + rack string + hostId string + workload string + graph bool + dseVersion string + partitioner string + clusterName string + version cassVersion + state nodeState + schemaVersion string + tokens []string +} + +func (h *HostInfo) Equal(host *HostInfo) bool { + if h == host { + // prevent rlock reentry + return true + } + + return h.ConnectAddress().Equal(host.ConnectAddress()) +} + +func (h *HostInfo) Peer() net.IP { + h.mu.RLock() + defer h.mu.RUnlock() + return h.peer +} + +func (h *HostInfo) invalidConnectAddr() bool { + h.mu.RLock() + defer h.mu.RUnlock() + addr, _ := h.connectAddressLocked() + return !validIpAddr(addr) +} + +func validIpAddr(addr net.IP) bool { + return addr != nil && !addr.IsUnspecified() +} + +func (h *HostInfo) connectAddressLocked() (net.IP, string) { + if validIpAddr(h.connectAddress) { + return h.connectAddress, "connect_address" + } else if validIpAddr(h.rpcAddress) { + return h.rpcAddress, "rpc_address" + } else if validIpAddr(h.preferredIP) { + // where does preferred_ip get set? + return h.preferredIP, "preferred_ip" + } else if validIpAddr(h.broadcastAddress) { + return h.broadcastAddress, "broadcast_address" + } else if validIpAddr(h.peer) { + return h.peer, "peer" + } + return net.IPv4zero, "invalid" +} + +// nodeToNodeAddress returns the address broadcast between nodes. +// It's either `broadcast_address` if host info is read from system.local or `peer` if read from system.peers. +// This IP address is also part of the CQL Events emitted on topology/status changes, +// but does not uniquely identify the node in case multiple nodes use the same IP address. +func (h *HostInfo) nodeToNodeAddress() net.IP { + h.mu.RLock() + defer h.mu.RUnlock() + + if validIpAddr(h.broadcastAddress) { + return h.broadcastAddress + } else if validIpAddr(h.peer) { + return h.peer + } + return net.IPv4zero +} + +// Returns the address that should be used to connect to the host. +// If you wish to override this, use an AddressTranslator or +// use a HostFilter to SetConnectAddress() +func (h *HostInfo) ConnectAddress() net.IP { + h.mu.RLock() + defer h.mu.RUnlock() + + if addr, _ := h.connectAddressLocked(); validIpAddr(addr) { + return addr + } + panic(fmt.Sprintf("no valid connect address for host: %v. Is your cluster configured correctly?", h)) +} + +func (h *HostInfo) SetConnectAddress(address net.IP) *HostInfo { + // TODO(zariel): should this not be exported? 
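+	// Overrides the connect address discovered from system tables (see ConnectAddress above).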
+ h.mu.Lock() + defer h.mu.Unlock() + h.connectAddress = address + return h +} + +func (h *HostInfo) BroadcastAddress() net.IP { + h.mu.RLock() + defer h.mu.RUnlock() + return h.broadcastAddress +} + +func (h *HostInfo) ListenAddress() net.IP { + h.mu.RLock() + defer h.mu.RUnlock() + return h.listenAddress +} + +func (h *HostInfo) RPCAddress() net.IP { + h.mu.RLock() + defer h.mu.RUnlock() + return h.rpcAddress +} + +func (h *HostInfo) PreferredIP() net.IP { + h.mu.RLock() + defer h.mu.RUnlock() + return h.preferredIP +} + +func (h *HostInfo) DataCenter() string { + h.mu.RLock() + dc := h.dataCenter + h.mu.RUnlock() + return dc +} + +func (h *HostInfo) Rack() string { + h.mu.RLock() + rack := h.rack + h.mu.RUnlock() + return rack +} + +func (h *HostInfo) HostID() string { + h.mu.RLock() + defer h.mu.RUnlock() + return h.hostId +} + +func (h *HostInfo) SetHostID(hostID string) { + h.mu.Lock() + defer h.mu.Unlock() + h.hostId = hostID +} + +func (h *HostInfo) WorkLoad() string { + h.mu.RLock() + defer h.mu.RUnlock() + return h.workload +} + +func (h *HostInfo) Graph() bool { + h.mu.RLock() + defer h.mu.RUnlock() + return h.graph +} + +func (h *HostInfo) DSEVersion() string { + h.mu.RLock() + defer h.mu.RUnlock() + return h.dseVersion +} + +func (h *HostInfo) Partitioner() string { + h.mu.RLock() + defer h.mu.RUnlock() + return h.partitioner +} + +func (h *HostInfo) ClusterName() string { + h.mu.RLock() + defer h.mu.RUnlock() + return h.clusterName +} + +func (h *HostInfo) Version() cassVersion { + h.mu.RLock() + defer h.mu.RUnlock() + return h.version +} + +func (h *HostInfo) State() nodeState { + h.mu.RLock() + defer h.mu.RUnlock() + return h.state +} + +func (h *HostInfo) setState(state nodeState) *HostInfo { + h.mu.Lock() + defer h.mu.Unlock() + h.state = state + return h +} + +func (h *HostInfo) Tokens() []string { + h.mu.RLock() + defer h.mu.RUnlock() + return h.tokens +} + +func (h *HostInfo) Port() int { + h.mu.RLock() + defer h.mu.RUnlock() + return h.port +} + +func (h *HostInfo) update(from *HostInfo) { + if h == from { + return + } + + h.mu.Lock() + defer h.mu.Unlock() + + from.mu.RLock() + defer from.mu.RUnlock() + + // autogenerated do not update + if h.peer == nil { + h.peer = from.peer + } + if h.broadcastAddress == nil { + h.broadcastAddress = from.broadcastAddress + } + if h.listenAddress == nil { + h.listenAddress = from.listenAddress + } + if h.rpcAddress == nil { + h.rpcAddress = from.rpcAddress + } + if h.preferredIP == nil { + h.preferredIP = from.preferredIP + } + if h.connectAddress == nil { + h.connectAddress = from.connectAddress + } + if h.port == 0 { + h.port = from.port + } + if h.dataCenter == "" { + h.dataCenter = from.dataCenter + } + if h.rack == "" { + h.rack = from.rack + } + if h.hostId == "" { + h.hostId = from.hostId + } + if h.workload == "" { + h.workload = from.workload + } + if h.dseVersion == "" { + h.dseVersion = from.dseVersion + } + if h.partitioner == "" { + h.partitioner = from.partitioner + } + if h.clusterName == "" { + h.clusterName = from.clusterName + } + if h.version == (cassVersion{}) { + h.version = from.version + } + if h.tokens == nil { + h.tokens = from.tokens + } +} + +func (h *HostInfo) IsUp() bool { + return h != nil && h.State() == NodeUp +} + +func (h *HostInfo) HostnameAndPort() string { + h.mu.Lock() + defer h.mu.Unlock() + if h.hostname == "" { + addr, _ := h.connectAddressLocked() + h.hostname = addr.String() + } + return net.JoinHostPort(h.hostname, strconv.Itoa(h.port)) +} + +func (h *HostInfo) ConnectAddressAndPort() 
string { + h.mu.Lock() + defer h.mu.Unlock() + addr, _ := h.connectAddressLocked() + return net.JoinHostPort(addr.String(), strconv.Itoa(h.port)) +} + +func (h *HostInfo) String() string { + h.mu.RLock() + defer h.mu.RUnlock() + + connectAddr, source := h.connectAddressLocked() + return fmt.Sprintf("[HostInfo hostname=%q connectAddress=%q peer=%q rpc_address=%q broadcast_address=%q "+ + "preferred_ip=%q connect_addr=%q connect_addr_source=%q "+ + "port=%d data_centre=%q rack=%q host_id=%q version=%q state=%s num_tokens=%d]", + h.hostname, h.connectAddress, h.peer, h.rpcAddress, h.broadcastAddress, h.preferredIP, + connectAddr, source, + h.port, h.dataCenter, h.rack, h.hostId, h.version, h.state, len(h.tokens)) +} + +// Polls system.peers at a specific interval to find new hosts +type ringDescriber struct { + session *Session + mu sync.Mutex + prevHosts []*HostInfo + prevPartitioner string +} + +// Returns true if we are using system_schema.keyspaces instead of system.schema_keyspaces +func checkSystemSchema(control *controlConn) (bool, error) { + iter := control.query("SELECT * FROM system_schema.keyspaces") + if err := iter.err; err != nil { + if errf, ok := err.(*errorFrame); ok { + if errf.code == ErrCodeSyntax { + return false, nil + } + } + + return false, err + } + + return true, nil +} + +// Given a map that represents a row from either system.local or system.peers +// return as much information as we can in *HostInfo +func (s *Session) hostInfoFromMap(row map[string]interface{}, host *HostInfo) (*HostInfo, error) { + const assertErrorMsg = "Assertion failed for %s" + var ok bool + + // Default to our connected port if the cluster doesn't have port information + for key, value := range row { + switch key { + case "data_center": + host.dataCenter, ok = value.(string) + if !ok { + return nil, fmt.Errorf(assertErrorMsg, "data_center") + } + case "rack": + host.rack, ok = value.(string) + if !ok { + return nil, fmt.Errorf(assertErrorMsg, "rack") + } + case "host_id": + hostId, ok := value.(UUID) + if !ok { + return nil, fmt.Errorf(assertErrorMsg, "host_id") + } + host.hostId = hostId.String() + case "release_version": + version, ok := value.(string) + if !ok { + return nil, fmt.Errorf(assertErrorMsg, "release_version") + } + host.version.Set(version) + case "peer": + ip, ok := value.(string) + if !ok { + return nil, fmt.Errorf(assertErrorMsg, "peer") + } + host.peer = net.ParseIP(ip) + case "cluster_name": + host.clusterName, ok = value.(string) + if !ok { + return nil, fmt.Errorf(assertErrorMsg, "cluster_name") + } + case "partitioner": + host.partitioner, ok = value.(string) + if !ok { + return nil, fmt.Errorf(assertErrorMsg, "partitioner") + } + case "broadcast_address": + ip, ok := value.(string) + if !ok { + return nil, fmt.Errorf(assertErrorMsg, "broadcast_address") + } + host.broadcastAddress = net.ParseIP(ip) + case "preferred_ip": + ip, ok := value.(string) + if !ok { + return nil, fmt.Errorf(assertErrorMsg, "preferred_ip") + } + host.preferredIP = net.ParseIP(ip) + case "rpc_address": + ip, ok := value.(string) + if !ok { + return nil, fmt.Errorf(assertErrorMsg, "rpc_address") + } + host.rpcAddress = net.ParseIP(ip) + case "native_address": + ip, ok := value.(string) + if !ok { + return nil, fmt.Errorf(assertErrorMsg, "native_address") + } + host.rpcAddress = net.ParseIP(ip) + case "listen_address": + ip, ok := value.(string) + if !ok { + return nil, fmt.Errorf(assertErrorMsg, "listen_address") + } + host.listenAddress = net.ParseIP(ip) + case "native_port": + native_port, ok 
:= value.(int) + if !ok { + return nil, fmt.Errorf(assertErrorMsg, "native_port") + } + host.port = native_port + case "workload": + host.workload, ok = value.(string) + if !ok { + return nil, fmt.Errorf(assertErrorMsg, "workload") + } + case "graph": + host.graph, ok = value.(bool) + if !ok { + return nil, fmt.Errorf(assertErrorMsg, "graph") + } + case "tokens": + host.tokens, ok = value.([]string) + if !ok { + return nil, fmt.Errorf(assertErrorMsg, "tokens") + } + case "dse_version": + host.dseVersion, ok = value.(string) + if !ok { + return nil, fmt.Errorf(assertErrorMsg, "dse_version") + } + case "schema_version": + schemaVersion, ok := value.(UUID) + if !ok { + return nil, fmt.Errorf(assertErrorMsg, "schema_version") + } + host.schemaVersion = schemaVersion.String() + } + // TODO(thrawn01): Add 'port'? once CASSANDRA-7544 is complete + // Not sure what the port field will be called until the JIRA issue is complete + } + + ip, port := s.cfg.translateAddressPort(host.ConnectAddress(), host.port) + host.connectAddress = ip + host.port = port + + return host, nil +} + +func (s *Session) hostInfoFromIter(iter *Iter, connectAddress net.IP, defaultPort int) (*HostInfo, error) { + rows, err := iter.SliceMap() + if err != nil { + // TODO(zariel): make typed error + return nil, err + } + + if len(rows) == 0 { + return nil, errors.New("query returned 0 rows") + } + + host, err := s.hostInfoFromMap(rows[0], &HostInfo{connectAddress: connectAddress, port: defaultPort}) + if err != nil { + return nil, err + } + return host, nil +} + +// Ask the control node for the local host info +func (r *ringDescriber) getLocalHostInfo() (*HostInfo, error) { + if r.session.control == nil { + return nil, errNoControl + } + + iter := r.session.control.withConnHost(func(ch *connHost) *Iter { + return ch.conn.querySystemLocal(context.TODO()) + }) + + if iter == nil { + return nil, errNoControl + } + + host, err := r.session.hostInfoFromIter(iter, nil, r.session.cfg.Port) + if err != nil { + return nil, fmt.Errorf("could not retrieve local host info: %w", err) + } + return host, nil +} + +// Ask the control node for host info on all its known peers +func (r *ringDescriber) getClusterPeerInfo(localHost *HostInfo) ([]*HostInfo, error) { + if r.session.control == nil { + return nil, errNoControl + } + + var peers []*HostInfo + iter := r.session.control.withConnHost(func(ch *connHost) *Iter { + return ch.conn.querySystemPeers(context.TODO(), localHost.version) + }) + + if iter == nil { + return nil, errNoControl + } + + rows, err := iter.SliceMap() + if err != nil { + // TODO(zariel): make typed error + return nil, fmt.Errorf("unable to fetch peer host info: %s", err) + } + + for _, row := range rows { + // extract all available info about the peer + host, err := r.session.hostInfoFromMap(row, &HostInfo{port: r.session.cfg.Port}) + if err != nil { + return nil, err + } else if !isValidPeer(host) { + // skip hosts that don't look like valid peers + r.session.logger.Printf("Found invalid peer '%s', "+ "likely due to a gossip or snitch issue; this host will be ignored", host) + continue + } + + peers = append(peers, host) + } + + return peers, nil +} + +// Return true if the host is a valid peer +func isValidPeer(host *HostInfo) bool { + return !(len(host.RPCAddress()) == 0 || + host.hostId == "" || + host.dataCenter == "" || + host.rack == "" || + len(host.tokens) == 0) +} + +// GetHosts returns a list of hosts found via queries to system.local and system.peers +func (r *ringDescriber) GetHosts() ([]*HostInfo, string, error) {
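+	// Refreshes are serialized with r.mu; on any error, the previously seen hosts and partitioner are returned as a fallback. +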
r.mu.Lock() + defer r.mu.Unlock() + + localHost, err := r.getLocalHostInfo() + if err != nil { + return r.prevHosts, r.prevPartitioner, err + } + + peerHosts, err := r.getClusterPeerInfo(localHost) + if err != nil { + return r.prevHosts, r.prevPartitioner, err + } + + hosts := append([]*HostInfo{localHost}, peerHosts...) + var partitioner string + if len(hosts) > 0 { + partitioner = hosts[0].Partitioner() + } + + return hosts, partitioner, nil +} + +// debounceRingRefresh submits a ring refresh request to the ring refresh debouncer. +func (s *Session) debounceRingRefresh() { + s.ringRefresher.debounce() +} + +// refreshRing executes a ring refresh immediately and cancels pending debounce ring refresh requests. +func (s *Session) refreshRing() error { + err, ok := <-s.ringRefresher.refreshNow() + if !ok { + return errors.New("could not refresh ring because stop was requested") + } + + return err +} + +func refreshRing(r *ringDescriber) error { + hosts, partitioner, err := r.GetHosts() + if err != nil { + return err + } + + prevHosts := r.session.ring.currentHosts() + + for _, h := range hosts { + if r.session.cfg.filterHost(h) { + continue + } + + if host, ok := r.session.ring.addHostIfMissing(h); !ok { + r.session.startPoolFill(h) + } else { + // host (by hostID) already exists; determine if IP has changed + newHostID := h.HostID() + existing, ok := prevHosts[newHostID] + if !ok { + return fmt.Errorf("get existing host=%s from prevHosts: %w", h, ErrCannotFindHost) + } + if h.connectAddress.Equal(existing.connectAddress) && h.nodeToNodeAddress().Equal(existing.nodeToNodeAddress()) { + // no host IP change + host.update(h) + } else { + // host IP has changed + // remove old HostInfo (w/old IP) + r.session.removeHost(existing) + if _, alreadyExists := r.session.ring.addHostIfMissing(h); alreadyExists { + return fmt.Errorf("add new host=%s after removal: %w", h, ErrHostAlreadyExists) + } + // add new HostInfo (same hostID, new IP) + r.session.startPoolFill(h) + } + } + delete(prevHosts, h.HostID()) + } + + for _, host := range prevHosts { + r.session.removeHost(host) + } + + r.session.metadata.setPartitioner(partitioner) + r.session.policy.SetPartitioner(partitioner) + return nil +} + +const ( + ringRefreshDebounceTime = 1 * time.Second +) + +// debounces requests to call a refresh function (currently used for ring refresh). It also supports triggering a refresh immediately. 
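+// A rough usage sketch (illustrative, based on the API below):
+//
+//	d := newRefreshDebouncer(ringRefreshDebounceTime, refreshFn)
+//	d.debounce()            // coalesce a burst of refresh requests into one call
+//	err := <-d.refreshNow() // or refresh immediately and wait for the result
+//	d.stop()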
+type refreshDebouncer struct { + mu sync.Mutex + stopped bool + broadcaster *errorBroadcaster + interval time.Duration + timer *time.Timer + refreshNowCh chan struct{} + quit chan struct{} + refreshFn func() error +} + +func newRefreshDebouncer(interval time.Duration, refreshFn func() error) *refreshDebouncer { + d := &refreshDebouncer{ + stopped: false, + broadcaster: nil, + refreshNowCh: make(chan struct{}, 1), + quit: make(chan struct{}), + interval: interval, + timer: time.NewTimer(interval), + refreshFn: refreshFn, + } + d.timer.Stop() + go d.flusher() + return d +} + +// debounces a request to call the refresh function +func (d *refreshDebouncer) debounce() { + d.mu.Lock() + defer d.mu.Unlock() + if d.stopped { + return + } + d.timer.Reset(d.interval) +} + +// requests an immediate refresh which will cancel pending refresh requests +func (d *refreshDebouncer) refreshNow() <-chan error { + d.mu.Lock() + defer d.mu.Unlock() + if d.broadcaster == nil { + d.broadcaster = newErrorBroadcaster() + select { + case d.refreshNowCh <- struct{}{}: + default: + // already a refresh pending + } + } + return d.broadcaster.newListener() +} + +func (d *refreshDebouncer) flusher() { + for { + select { + case <-d.refreshNowCh: + case <-d.timer.C: + case <-d.quit: + } + d.mu.Lock() + if d.stopped { + if d.broadcaster != nil { + d.broadcaster.stop() + d.broadcaster = nil + } + d.timer.Stop() + d.mu.Unlock() + return + } + + // make sure both request channels are cleared before we refresh + select { + case <-d.refreshNowCh: + default: + } + + d.timer.Stop() + select { + case <-d.timer.C: + default: + } + + curBroadcaster := d.broadcaster + d.broadcaster = nil + d.mu.Unlock() + + err := d.refreshFn() + if curBroadcaster != nil { + curBroadcaster.broadcast(err) + } + } +} + +func (d *refreshDebouncer) stop() { + d.mu.Lock() + if d.stopped { + d.mu.Unlock() + return + } + d.stopped = true + d.mu.Unlock() + d.quit <- struct{}{} // sync with flusher + close(d.quit) +} + +// broadcasts an error to multiple channels (listeners) +type errorBroadcaster struct { + listeners []chan<- error + mu sync.Mutex +} + +func newErrorBroadcaster() *errorBroadcaster { + return &errorBroadcaster{ + listeners: nil, + mu: sync.Mutex{}, + } +} + +func (b *errorBroadcaster) newListener() <-chan error { + ch := make(chan error, 1) + b.mu.Lock() + defer b.mu.Unlock() + b.listeners = append(b.listeners, ch) + return ch +} + +func (b *errorBroadcaster) broadcast(err error) { + b.mu.Lock() + defer b.mu.Unlock() + curListeners := b.listeners + if len(curListeners) > 0 { + b.listeners = nil + } else { + return + } + + for _, listener := range curListeners { + listener <- err + close(listener) + } +} + +func (b *errorBroadcaster) stop() { + b.mu.Lock() + defer b.mu.Unlock() + if len(b.listeners) == 0 { + return + } + for _, listener := range b.listeners { + close(listener) + } + b.listeners = nil +} diff --git a/vendor/github.com/gocql/gocql/host_source_gen.go b/vendor/github.com/gocql/gocql/host_source_gen.go new file mode 100644 index 000000000..8c096ffd6 --- /dev/null +++ b/vendor/github.com/gocql/gocql/host_source_gen.go @@ -0,0 +1,46 @@ +//go:build genhostinfo +// +build genhostinfo + +package main + +import ( + "fmt" + "reflect" + "sync" + + "github.com/gocql/gocql" +) + +func gen(clause, field string) { + fmt.Printf("if h.%s == %s {\n", field, clause) + fmt.Printf("\th.%s = from.%s\n", field, field) + fmt.Println("}") +} + +func main() { + t := reflect.ValueOf(&gocql.HostInfo{}).Elem().Type() + mu := reflect.TypeOf(sync.RWMutex{}) 
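+	// Walk HostInfo's fields via reflection and emit an "if zero, copy from other" guard for each one, skipping the embedded mutex.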
+ + for i := 0; i < t.NumField(); i++ { + f := t.Field(i) + if f.Type == mu { + continue + } + + switch f.Type.Kind() { + case reflect.Slice: + gen("nil", f.Name) + case reflect.String: + gen(`""`, f.Name) + case reflect.Int: + gen("0", f.Name) + case reflect.Struct: + gen("("+f.Type.Name()+"{})", f.Name) + case reflect.Bool, reflect.Int32: + continue + default: + panic(fmt.Sprintf("unknown field: %s", f)) + } + } + +} diff --git a/vendor/github.com/gocql/gocql/install_test_deps.sh b/vendor/github.com/gocql/gocql/install_test_deps.sh new file mode 100644 index 000000000..87f78c2ed --- /dev/null +++ b/vendor/github.com/gocql/gocql/install_test_deps.sh @@ -0,0 +1,12 @@ +#!/usr/bin/env bash + +set -x + +# This is not supposed to be an error-prone script; just a convenience. + +# Install CCM +pip install -i https://pypi.org/simple --user cql PyYAML six psutil +git clone https://github.com/pcmanus/ccm.git +pushd ccm +./setup.py install --user +popd diff --git a/vendor/github.com/gocql/gocql/integration.sh b/vendor/github.com/gocql/gocql/integration.sh new file mode 100644 index 000000000..d23fd7efc --- /dev/null +++ b/vendor/github.com/gocql/gocql/integration.sh @@ -0,0 +1,96 @@ +#!/bin/bash + +set -eux + +function run_tests() { + local clusterSize=3 + local version=$1 + local auth=$2 + local compressor=$3 + + if [ "$auth" = true ]; then + clusterSize=1 + fi + + local keypath="$(pwd)/testdata/pki" + + local conf=( + "client_encryption_options.enabled: true" + "client_encryption_options.keystore: $keypath/.keystore" + "client_encryption_options.keystore_password: cassandra" + "client_encryption_options.require_client_auth: true" + "client_encryption_options.truststore: $keypath/.truststore" + "client_encryption_options.truststore_password: cassandra" + "concurrent_reads: 2" + "concurrent_writes: 2" + "rpc_server_type: sync" + "rpc_min_threads: 2" + "rpc_max_threads: 2" + "write_request_timeout_in_ms: 5000" + "read_request_timeout_in_ms: 5000" + ) + + ccm remove test || true + + ccm create test -v $version -n $clusterSize -d --vnodes --jvm_arg="-Xmx256m -XX:NewSize=100m" + ccm updateconf "${conf[@]}" + + if [ "$auth" = true ] + then + ccm updateconf 'authenticator: PasswordAuthenticator' 'authorizer: CassandraAuthorizer' + rm -rf $HOME/.ccm/test/node1/data/system_auth + fi + + local proto=2 + if [[ $version == 1.2.* ]]; then + proto=1 + elif [[ $version == 2.0.* ]]; then + proto=2 + elif [[ $version == 2.1.* ]]; then + proto=3 + elif [[ $version == 2.2.* || $version == 3.0.* ]]; then + proto=4 + ccm updateconf 'enable_user_defined_functions: true' + export JVM_EXTRA_OPTS=" -Dcassandra.test.fail_writes_ks=test -Dcassandra.custom_query_handler_class=org.apache.cassandra.cql3.CustomPayloadMirroringQueryHandler" + elif [[ $version == 3.*.* ]]; then + proto=5 + ccm updateconf 'enable_user_defined_functions: true' + export JVM_EXTRA_OPTS=" -Dcassandra.test.fail_writes_ks=test -Dcassandra.custom_query_handler_class=org.apache.cassandra.cql3.CustomPayloadMirroringQueryHandler" + fi + + sleep 1s + + ccm list + ccm start --wait-for-binary-proto + ccm status + ccm node1 nodetool status + + local args="-gocql.timeout=60s -runssl -proto=$proto -rf=3 -clusterSize=$clusterSize -autowait=2000ms -compressor=$compressor -gocql.cversion=$version -cluster=$(ccm liveset) ./..." 
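+ # The unit suite below runs without a cluster; the integration runs that follow reuse $args against the ccm cluster started above.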
+ + go test -v -tags unit -race + + if [ "$auth" = true ] + then + sleep 30s + go test -run=TestAuthentication -tags "integration gocql_debug" -timeout=15s -runauth $args + else + sleep 1s + go test -tags "cassandra gocql_debug" -timeout=5m -race $args + + ccm clear + ccm start --wait-for-binary-proto + sleep 1s + + go test -tags "integration gocql_debug" -timeout=5m -race $args + + ccm clear + ccm start --wait-for-binary-proto + sleep 1s + + go test -tags "ccm gocql_debug" -timeout=5m -race $args + fi + + ccm remove +} + +run_tests $1 $2 $3 diff --git a/vendor/github.com/gocql/gocql/internal/lru/lru.go b/vendor/github.com/gocql/gocql/internal/lru/lru.go new file mode 100644 index 000000000..14ca1f433 --- /dev/null +++ b/vendor/github.com/gocql/gocql/internal/lru/lru.go @@ -0,0 +1,127 @@ +/* +Copyright 2015 To gocql authors +Copyright 2013 Google Inc. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package lru implements an LRU cache. +package lru + +import "container/list" + +// Cache is an LRU cache. It is not safe for concurrent access. +// +// This cache has been forked from github.com/golang/groupcache/lru, but +// specialized with string keys to avoid the allocations caused by wrapping them +// in interface{}. +type Cache struct { + // MaxEntries is the maximum number of cache entries before + // an item is evicted. Zero means no limit. + MaxEntries int + + // OnEvicted optionally specifies a callback function to be + // executed when an entry is purged from the cache. + OnEvicted func(key string, value interface{}) + + ll *list.List + cache map[string]*list.Element +} + +type entry struct { + key string + value interface{} +} + +// New creates a new Cache. +// If maxEntries is zero, the cache has no limit and it's assumed +// that eviction is done by the caller. +func New(maxEntries int) *Cache { + return &Cache{ + MaxEntries: maxEntries, + ll: list.New(), + cache: make(map[string]*list.Element), + } +} + +// Add adds a value to the cache. +func (c *Cache) Add(key string, value interface{}) { + if c.cache == nil { + c.cache = make(map[string]*list.Element) + c.ll = list.New() + } + if ee, ok := c.cache[key]; ok { + c.ll.MoveToFront(ee) + ee.Value.(*entry).value = value + return + } + ele := c.ll.PushFront(&entry{key, value}) + c.cache[key] = ele + if c.MaxEntries != 0 && c.ll.Len() > c.MaxEntries { + c.RemoveOldest() + } +} + +// Get looks up a key's value from the cache. +func (c *Cache) Get(key string) (value interface{}, ok bool) { + if c.cache == nil { + return + } + if ele, hit := c.cache[key]; hit { + c.ll.MoveToFront(ele) + return ele.Value.(*entry).value, true + } + return +} + +// Remove removes the provided key from the cache. +func (c *Cache) Remove(key string) bool { + if c.cache == nil { + return false + } + + if ele, hit := c.cache[key]; hit { + c.removeElement(ele) + return true + } + + return false +} + +// RemoveOldest removes the oldest item from the cache. 
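+//
+// A small eviction sketch (illustrative): with MaxEntries == 2,
+//
+//	c := lru.New(2)
+//	c.Add("a", 1)
+//	c.Add("b", 2)
+//	c.Add("c", 3) // exceeds MaxEntries, so RemoveOldest evicts "a"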
+func (c *Cache) RemoveOldest() { + if c.cache == nil { + return + } + ele := c.ll.Back() + if ele != nil { + c.removeElement(ele) + } +} + +func (c *Cache) removeElement(e *list.Element) { + c.ll.Remove(e) + kv := e.Value.(*entry) + delete(c.cache, kv.key) + if c.OnEvicted != nil { + c.OnEvicted(kv.key, kv.value) + } +} + +// Len returns the number of items in the cache. +func (c *Cache) Len() int { + if c.cache == nil { + return 0 + } + return c.ll.Len() +} diff --git a/vendor/github.com/gocql/gocql/internal/murmur/murmur.go b/vendor/github.com/gocql/gocql/internal/murmur/murmur.go new file mode 100644 index 000000000..d006cc0bf --- /dev/null +++ b/vendor/github.com/gocql/gocql/internal/murmur/murmur.go @@ -0,0 +1,135 @@ +package murmur + +const ( + c1 int64 = -8663945395140668459 // 0x87c37b91114253d5 + c2 int64 = 5545529020109919103 // 0x4cf5ad432745937f + fmix1 int64 = -49064778989728563 // 0xff51afd7ed558ccd + fmix2 int64 = -4265267296055464877 // 0xc4ceb9fe1a85ec53 +) + +func fmix(n int64) int64 { + // cast to unsigned for logical right bitshift (to match C* MM3 implementation) + n ^= int64(uint64(n) >> 33) + n *= fmix1 + n ^= int64(uint64(n) >> 33) + n *= fmix2 + n ^= int64(uint64(n) >> 33) + + return n +} + +func block(p byte) int64 { + return int64(int8(p)) +} + +func rotl(x int64, r uint8) int64 { + // cast to unsigned for logical right bitshift (to match C* MM3 implementation) + return (x << r) | (int64)((uint64(x) >> (64 - r))) +} + +func Murmur3H1(data []byte) int64 { + length := len(data) + + var h1, h2, k1, k2 int64 + + // body + nBlocks := length / 16 + for i := 0; i < nBlocks; i++ { + k1, k2 = getBlock(data, i) + + k1 *= c1 + k1 = rotl(k1, 31) + k1 *= c2 + h1 ^= k1 + + h1 = rotl(h1, 27) + h1 += h2 + h1 = h1*5 + 0x52dce729 + + k2 *= c2 + k2 = rotl(k2, 33) + k2 *= c1 + h2 ^= k2 + + h2 = rotl(h2, 31) + h2 += h1 + h2 = h2*5 + 0x38495ab5 + } + + // tail + tail := data[nBlocks*16:] + k1 = 0 + k2 = 0 + switch length & 15 { + case 15: + k2 ^= block(tail[14]) << 48 + fallthrough + case 14: + k2 ^= block(tail[13]) << 40 + fallthrough + case 13: + k2 ^= block(tail[12]) << 32 + fallthrough + case 12: + k2 ^= block(tail[11]) << 24 + fallthrough + case 11: + k2 ^= block(tail[10]) << 16 + fallthrough + case 10: + k2 ^= block(tail[9]) << 8 + fallthrough + case 9: + k2 ^= block(tail[8]) + + k2 *= c2 + k2 = rotl(k2, 33) + k2 *= c1 + h2 ^= k2 + + fallthrough + case 8: + k1 ^= block(tail[7]) << 56 + fallthrough + case 7: + k1 ^= block(tail[6]) << 48 + fallthrough + case 6: + k1 ^= block(tail[5]) << 40 + fallthrough + case 5: + k1 ^= block(tail[4]) << 32 + fallthrough + case 4: + k1 ^= block(tail[3]) << 24 + fallthrough + case 3: + k1 ^= block(tail[2]) << 16 + fallthrough + case 2: + k1 ^= block(tail[1]) << 8 + fallthrough + case 1: + k1 ^= block(tail[0]) + + k1 *= c1 + k1 = rotl(k1, 31) + k1 *= c2 + h1 ^= k1 + } + + h1 ^= int64(length) + h2 ^= int64(length) + + h1 += h2 + h2 += h1 + + h1 = fmix(h1) + h2 = fmix(h2) + + h1 += h2 + // the following is extraneous since h2 is discarded + // h2 += h1 + + return h1 +} diff --git a/vendor/github.com/gocql/gocql/internal/murmur/murmur_appengine.go b/vendor/github.com/gocql/gocql/internal/murmur/murmur_appengine.go new file mode 100644 index 000000000..63c3eb2ec --- /dev/null +++ b/vendor/github.com/gocql/gocql/internal/murmur/murmur_appengine.go @@ -0,0 +1,11 @@ +// +build appengine s390x + +package murmur + +import "encoding/binary" + +func getBlock(data []byte, n int) (int64, int64) { + k1 := int64(binary.LittleEndian.Uint64(data[n*16:])) + k2 := 
int64(binary.LittleEndian.Uint64(data[(n*16)+8:])) + return k1, k2 +} diff --git a/vendor/github.com/gocql/gocql/internal/murmur/murmur_unsafe.go b/vendor/github.com/gocql/gocql/internal/murmur/murmur_unsafe.go new file mode 100644 index 000000000..8fc950cfb --- /dev/null +++ b/vendor/github.com/gocql/gocql/internal/murmur/murmur_unsafe.go @@ -0,0 +1,16 @@ +// +build !appengine +// +build !s390x + +package murmur + +import ( + "unsafe" +) + +func getBlock(data []byte, n int) (int64, int64) { + block := (*[2]int64)(unsafe.Pointer(&data[n*16])) + + k1 := block[0] + k2 := block[1] + return k1, k2 +} diff --git a/vendor/github.com/gocql/gocql/internal/streams/streams.go b/vendor/github.com/gocql/gocql/internal/streams/streams.go new file mode 100644 index 000000000..ea43412aa --- /dev/null +++ b/vendor/github.com/gocql/gocql/internal/streams/streams.go @@ -0,0 +1,140 @@ +package streams + +import ( + "math" + "strconv" + "sync/atomic" +) + +const bucketBits = 64 + +// IDGenerator tracks and allocates streams which are in use. +type IDGenerator struct { + NumStreams int + inuseStreams int32 + numBuckets uint32 + + // streams is a bitset where each bit represents a stream, a 1 implies in use + streams []uint64 + offset uint32 +} + +func New(protocol int) *IDGenerator { + maxStreams := 128 + if protocol > 2 { + maxStreams = 32768 + } + + buckets := maxStreams / 64 + // reserve stream 0 + streams := make([]uint64, buckets) + streams[0] = 1 << 63 + + return &IDGenerator{ + NumStreams: maxStreams, + streams: streams, + numBuckets: uint32(buckets), + offset: uint32(buckets) - 1, + } +} + +func streamFromBucket(bucket, streamInBucket int) int { + return (bucket * bucketBits) + streamInBucket +} + +func (s *IDGenerator) GetStream() (int, bool) { + // based closely on the java-driver stream ID generator + // avoid false sharing subsequent requests. + offset := atomic.LoadUint32(&s.offset) + for !atomic.CompareAndSwapUint32(&s.offset, offset, (offset+1)%s.numBuckets) { + offset = atomic.LoadUint32(&s.offset) + } + offset = (offset + 1) % s.numBuckets + + for i := uint32(0); i < s.numBuckets; i++ { + pos := int((i + offset) % s.numBuckets) + + bucket := atomic.LoadUint64(&s.streams[pos]) + if bucket == math.MaxUint64 { + // all streams in use + continue + } + + for j := 0; j < bucketBits; j++ { + mask := uint64(1 << streamOffset(j)) + for bucket&mask == 0 { + if atomic.CompareAndSwapUint64(&s.streams[pos], bucket, bucket|mask) { + atomic.AddInt32(&s.inuseStreams, 1) + return streamFromBucket(int(pos), j), true + } + bucket = atomic.LoadUint64(&s.streams[pos]) + } + } + } + + return 0, false +} + +func bitfmt(b uint64) string { + return strconv.FormatUint(b, 16) +} + +// returns the bucket offset of a given stream +func bucketOffset(i int) int { + return i / bucketBits +} + +func streamOffset(stream int) uint64 { + return bucketBits - uint64(stream%bucketBits) - 1 +} + +func isSet(bits uint64, stream int) bool { + return bits>>streamOffset(stream)&1 == 1 +} + +func (s *IDGenerator) isSet(stream int) bool { + bits := atomic.LoadUint64(&s.streams[bucketOffset(stream)]) + return isSet(bits, stream) +} + +func (s *IDGenerator) String() string { + size := s.numBuckets * (bucketBits + 1) + buf := make([]byte, 0, size) + for i := 0; i < int(s.numBuckets); i++ { + bits := atomic.LoadUint64(&s.streams[i]) + buf = append(buf, bitfmt(bits)...) 
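+		// each bucket is rendered as hex by bitfmt; separate buckets with a space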
+ buf = append(buf, ' ') + } + return string(buf[: size-1 : size-1]) +} + +func (s *IDGenerator) Clear(stream int) (inuse bool) { + offset := bucketOffset(stream) + bucket := atomic.LoadUint64(&s.streams[offset]) + + mask := uint64(1) << streamOffset(stream) + if bucket&mask != mask { + // already cleared + return false + } + + for !atomic.CompareAndSwapUint64(&s.streams[offset], bucket, bucket & ^mask) { + bucket = atomic.LoadUint64(&s.streams[offset]) + if bucket&mask != mask { + // already cleared + return false + } + } + + // TODO: make this account for 0 stream being reserved + if atomic.AddInt32(&s.inuseStreams, -1) < 0 { + // TODO(zariel): remove this + panic("negative streams inuse") + } + + return true +} + +func (s *IDGenerator) Available() int { + return s.NumStreams - int(atomic.LoadInt32(&s.inuseStreams)) - 1 +} diff --git a/vendor/github.com/gocql/gocql/logger.go b/vendor/github.com/gocql/gocql/logger.go new file mode 100644 index 000000000..8ff66586e --- /dev/null +++ b/vendor/github.com/gocql/gocql/logger.go @@ -0,0 +1,40 @@ +package gocql + +import ( + "bytes" + "fmt" + "log" +) + +type StdLogger interface { + Print(v ...interface{}) + Printf(format string, v ...interface{}) + Println(v ...interface{}) +} + +type nopLogger struct{} + +func (n nopLogger) Print(_ ...interface{}) {} + +func (n nopLogger) Printf(_ string, _ ...interface{}) {} + +func (n nopLogger) Println(_ ...interface{}) {} + +type testLogger struct { + capture bytes.Buffer +} + +func (l *testLogger) Print(v ...interface{}) { fmt.Fprint(&l.capture, v...) } +func (l *testLogger) Printf(format string, v ...interface{}) { fmt.Fprintf(&l.capture, format, v...) } +func (l *testLogger) Println(v ...interface{}) { fmt.Fprintln(&l.capture, v...) } +func (l *testLogger) String() string { return l.capture.String() } + +type defaultLogger struct{} + +func (l *defaultLogger) Print(v ...interface{}) { log.Print(v...) } +func (l *defaultLogger) Printf(format string, v ...interface{}) { log.Printf(format, v...) } +func (l *defaultLogger) Println(v ...interface{}) { log.Println(v...) } + +// Logger for logging messages. +// Deprecated: Use ClusterConfig.Logger instead. +var Logger StdLogger = &defaultLogger{} diff --git a/vendor/github.com/gocql/gocql/marshal.go b/vendor/github.com/gocql/gocql/marshal.go new file mode 100644 index 000000000..b887b3f2b --- /dev/null +++ b/vendor/github.com/gocql/gocql/marshal.go @@ -0,0 +1,2727 @@ +// Copyright (c) 2012 The gocql Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package gocql + +import ( + "bytes" + "encoding/binary" + "errors" + "fmt" + "math" + "math/big" + "math/bits" + "net" + "reflect" + "strconv" + "strings" + "time" + + "gopkg.in/inf.v0" +) + +var ( + bigOne = big.NewInt(1) + emptyValue reflect.Value +) + +var ( + ErrorUDTUnavailable = errors.New("UDT are not available on protocols less than 3, please update config") +) + +// Marshaler is the interface implemented by objects that can marshal +// themselves into values understood by Cassandra. +type Marshaler interface { + MarshalCQL(info TypeInfo) ([]byte, error) +} + +// Unmarshaler is the interface implemented by objects that can unmarshal +// a Cassandra specific description of themselves. +type Unmarshaler interface { + UnmarshalCQL(info TypeInfo, data []byte) error +} + +// Marshal returns the CQL encoding of the value for the Cassandra +// internal type described by the info parameter. +// +// nil is serialized as CQL null. 
+// If value implements Marshaler, its MarshalCQL method is called to marshal the data. +// If value is a pointer, the pointed-to value is marshaled. +// +// Supported conversions are as follows, other type combinations may be added in the future: +// +// CQL type | Go type (value) | Note +// varchar, ascii, blob, text | string, []byte | +// boolean | bool | +// tinyint, smallint, int | integer types | +// tinyint, smallint, int | string | formatted as base 10 number +// bigint, counter | integer types | +// bigint, counter | big.Int | +// bigint, counter | string | formatted as base 10 number +// float | float32 | +// double | float64 | +// decimal | inf.Dec | +// time | int64 | nanoseconds since start of day +// time | time.Duration | duration since start of day +// timestamp | int64 | milliseconds since Unix epoch +// timestamp | time.Time | +// list, set | slice, array | +// list, set | map[X]struct{} | +// map | map[X]Y | +// uuid, timeuuid | gocql.UUID | +// uuid, timeuuid | [16]byte | raw UUID bytes +// uuid, timeuuid | []byte | raw UUID bytes, length must be 16 bytes +// uuid, timeuuid | string | hex representation, see ParseUUID +// varint | integer types | +// varint | big.Int | +// varint | string | value of number in decimal notation +// inet | net.IP | +// inet | string | IPv4 or IPv6 address string +// tuple | slice, array | +// tuple | struct | fields are marshaled in order of declaration +// user-defined type | gocql.UDTMarshaler | MarshalUDT is called +// user-defined type | map[string]interface{} | +// user-defined type | struct | struct fields' cql tags are used for column names +// date | int64 | milliseconds since Unix epoch to start of day (in UTC) +// date | time.Time | start of day (in UTC) +// date | string | parsed using "2006-01-02" format +// duration | int64 | duration in nanoseconds +// duration | time.Duration | +// duration | gocql.Duration | +// duration | string | parsed with time.ParseDuration +func Marshal(info TypeInfo, value interface{}) ([]byte, error) { + if info.Version() < protoVersion1 { + panic("protocol version not set") + } + + if valueRef := reflect.ValueOf(value); valueRef.Kind() == reflect.Ptr { + if valueRef.IsNil() { + return nil, nil + } else if v, ok := value.(Marshaler); ok { + return v.MarshalCQL(info) + } else { + return Marshal(info, valueRef.Elem().Interface()) + } + } + + if v, ok := value.(Marshaler); ok { + return v.MarshalCQL(info) + } + + switch info.Type() { + case TypeVarchar, TypeAscii, TypeBlob, TypeText: + return marshalVarchar(info, value) + case TypeBoolean: + return marshalBool(info, value) + case TypeTinyInt: + return marshalTinyInt(info, value) + case TypeSmallInt: + return marshalSmallInt(info, value) + case TypeInt: + return marshalInt(info, value) + case TypeBigInt, TypeCounter: + return marshalBigInt(info, value) + case TypeFloat: + return marshalFloat(info, value) + case TypeDouble: + return marshalDouble(info, value) + case TypeDecimal: + return marshalDecimal(info, value) + case TypeTime: + return marshalTime(info, value) + case TypeTimestamp: + return marshalTimestamp(info, value) + case TypeList, TypeSet: + return marshalList(info, value) + case TypeMap: + return marshalMap(info, value) + case TypeUUID, TypeTimeUUID: + return marshalUUID(info, value) + case TypeVarint: + return marshalVarint(info, value) + case TypeInet: + return marshalInet(info, value) + case TypeTuple: + return marshalTuple(info, value) + case TypeUDT: + return marshalUDT(info, value) + case TypeDate: + return marshalDate(info, value) + 
case TypeDuration: + return marshalDuration(info, value) + } + + // detect protocol 2 UDT + if strings.HasPrefix(info.Custom(), "org.apache.cassandra.db.marshal.UserType") && info.Version() < 3 { + return nil, ErrorUDTUnavailable + } + + // TODO(tux21b): add the remaining types + return nil, fmt.Errorf("can not marshal %T into %s", value, info) +} + +// Unmarshal parses the CQL encoded data based on the info parameter that +// describes the Cassandra internal data type and stores the result in the +// value pointed by value. +// +// If value implements Unmarshaler, it's UnmarshalCQL method is called to +// unmarshal the data. +// If value is a pointer to pointer, it is set to nil if the CQL value is +// null. Otherwise, nulls are unmarshalled as zero value. +// +// Supported conversions are as follows, other type combinations may be added in the future: +// +// CQL type | Go type (value) | Note +// varchar, ascii, blob, text | *string | +// varchar, ascii, blob, text | *[]byte | non-nil buffer is reused +// bool | *bool | +// tinyint, smallint, int, bigint, counter | *integer types | +// tinyint, smallint, int, bigint, counter | *big.Int | +// tinyint, smallint, int, bigint, counter | *string | formatted as base 10 number +// float | *float32 | +// double | *float64 | +// decimal | *inf.Dec | +// time | *int64 | nanoseconds since start of day +// time | *time.Duration | +// timestamp | *int64 | milliseconds since Unix epoch +// timestamp | *time.Time | +// list, set | *slice, *array | +// map | *map[X]Y | +// uuid, timeuuid | *string | see UUID.String +// uuid, timeuuid | *[]byte | raw UUID bytes +// uuid, timeuuid | *gocql.UUID | +// timeuuid | *time.Time | timestamp of the UUID +// inet | *net.IP | +// inet | *string | IPv4 or IPv6 address string +// tuple | *slice, *array | +// tuple | *struct | struct fields are set in order of declaration +// user-defined types | gocql.UDTUnmarshaler | UnmarshalUDT is called +// user-defined types | *map[string]interface{} | +// user-defined types | *struct | cql tag is used to determine field name +// date | *time.Time | time of beginning of the day (in UTC) +// date | *string | formatted with 2006-01-02 format +// duration | *gocql.Duration | +func Unmarshal(info TypeInfo, data []byte, value interface{}) error { + if v, ok := value.(Unmarshaler); ok { + return v.UnmarshalCQL(info, data) + } + + if isNullableValue(value) { + return unmarshalNullable(info, data, value) + } + + switch info.Type() { + case TypeVarchar, TypeAscii, TypeBlob, TypeText: + return unmarshalVarchar(info, data, value) + case TypeBoolean: + return unmarshalBool(info, data, value) + case TypeInt: + return unmarshalInt(info, data, value) + case TypeBigInt, TypeCounter: + return unmarshalBigInt(info, data, value) + case TypeVarint: + return unmarshalVarint(info, data, value) + case TypeSmallInt: + return unmarshalSmallInt(info, data, value) + case TypeTinyInt: + return unmarshalTinyInt(info, data, value) + case TypeFloat: + return unmarshalFloat(info, data, value) + case TypeDouble: + return unmarshalDouble(info, data, value) + case TypeDecimal: + return unmarshalDecimal(info, data, value) + case TypeTime: + return unmarshalTime(info, data, value) + case TypeTimestamp: + return unmarshalTimestamp(info, data, value) + case TypeList, TypeSet: + return unmarshalList(info, data, value) + case TypeMap: + return unmarshalMap(info, data, value) + case TypeTimeUUID: + return unmarshalTimeUUID(info, data, value) + case TypeUUID: + return unmarshalUUID(info, data, value) + case TypeInet: + 
return unmarshalInet(info, data, value) + case TypeTuple: + return unmarshalTuple(info, data, value) + case TypeUDT: + return unmarshalUDT(info, data, value) + case TypeDate: + return unmarshalDate(info, data, value) + case TypeDuration: + return unmarshalDuration(info, data, value) + } + + // detect protocol 2 UDT + if strings.HasPrefix(info.Custom(), "org.apache.cassandra.db.marshal.UserType") && info.Version() < 3 { + return ErrorUDTUnavailable + } + + // TODO(tux21b): add the remaining types + return fmt.Errorf("can not unmarshal %s into %T", info, value) +} + +func isNullableValue(value interface{}) bool { + v := reflect.ValueOf(value) + return v.Kind() == reflect.Ptr && v.Type().Elem().Kind() == reflect.Ptr +} + +func isNullData(info TypeInfo, data []byte) bool { + return data == nil +} + +func unmarshalNullable(info TypeInfo, data []byte, value interface{}) error { + valueRef := reflect.ValueOf(value) + + if isNullData(info, data) { + nilValue := reflect.Zero(valueRef.Type().Elem()) + valueRef.Elem().Set(nilValue) + return nil + } + + newValue := reflect.New(valueRef.Type().Elem().Elem()) + valueRef.Elem().Set(newValue) + return Unmarshal(info, data, newValue.Interface()) +} + +func marshalVarchar(info TypeInfo, value interface{}) ([]byte, error) { + switch v := value.(type) { + case Marshaler: + return v.MarshalCQL(info) + case unsetColumn: + return nil, nil + case string: + return []byte(v), nil + case []byte: + return v, nil + } + + if value == nil { + return nil, nil + } + + rv := reflect.ValueOf(value) + t := rv.Type() + k := t.Kind() + switch { + case k == reflect.String: + return []byte(rv.String()), nil + case k == reflect.Slice && t.Elem().Kind() == reflect.Uint8: + return rv.Bytes(), nil + } + return nil, marshalErrorf("can not marshal %T into %s", value, info) +} + +func unmarshalVarchar(info TypeInfo, data []byte, value interface{}) error { + switch v := value.(type) { + case Unmarshaler: + return v.UnmarshalCQL(info, data) + case *string: + *v = string(data) + return nil + case *[]byte: + if data != nil { + *v = append((*v)[:0], data...) 
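+			// reuse the caller's buffer when it has capacity, avoiding an allocation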
+ } else { + *v = nil + } + return nil + } + + rv := reflect.ValueOf(value) + if rv.Kind() != reflect.Ptr { + return unmarshalErrorf("can not unmarshal into non-pointer %T", value) + } + rv = rv.Elem() + t := rv.Type() + k := t.Kind() + switch { + case k == reflect.String: + rv.SetString(string(data)) + return nil + case k == reflect.Slice && t.Elem().Kind() == reflect.Uint8: + var dataCopy []byte + if data != nil { + dataCopy = make([]byte, len(data)) + copy(dataCopy, data) + } + rv.SetBytes(dataCopy) + return nil + } + return unmarshalErrorf("can not unmarshal %s into %T", info, value) +} + +func marshalSmallInt(info TypeInfo, value interface{}) ([]byte, error) { + switch v := value.(type) { + case Marshaler: + return v.MarshalCQL(info) + case unsetColumn: + return nil, nil + case int16: + return encShort(v), nil + case uint16: + return encShort(int16(v)), nil + case int8: + return encShort(int16(v)), nil + case uint8: + return encShort(int16(v)), nil + case int: + if v > math.MaxInt16 || v < math.MinInt16 { + return nil, marshalErrorf("marshal smallint: value %d out of range", v) + } + return encShort(int16(v)), nil + case int32: + if v > math.MaxInt16 || v < math.MinInt16 { + return nil, marshalErrorf("marshal smallint: value %d out of range", v) + } + return encShort(int16(v)), nil + case int64: + if v > math.MaxInt16 || v < math.MinInt16 { + return nil, marshalErrorf("marshal smallint: value %d out of range", v) + } + return encShort(int16(v)), nil + case uint: + if v > math.MaxUint16 { + return nil, marshalErrorf("marshal smallint: value %d out of range", v) + } + return encShort(int16(v)), nil + case uint32: + if v > math.MaxUint16 { + return nil, marshalErrorf("marshal smallint: value %d out of range", v) + } + return encShort(int16(v)), nil + case uint64: + if v > math.MaxUint16 { + return nil, marshalErrorf("marshal smallint: value %d out of range", v) + } + return encShort(int16(v)), nil + case string: + n, err := strconv.ParseInt(v, 10, 16) + if err != nil { + return nil, marshalErrorf("can not marshal %T into %s: %v", value, info, err) + } + return encShort(int16(n)), nil + } + + if value == nil { + return nil, nil + } + + switch rv := reflect.ValueOf(value); rv.Type().Kind() { + case reflect.Int, reflect.Int64, reflect.Int32, reflect.Int16, reflect.Int8: + v := rv.Int() + if v > math.MaxInt16 || v < math.MinInt16 { + return nil, marshalErrorf("marshal smallint: value %d out of range", v) + } + return encShort(int16(v)), nil + case reflect.Uint, reflect.Uint64, reflect.Uint32, reflect.Uint16, reflect.Uint8: + v := rv.Uint() + if v > math.MaxUint16 { + return nil, marshalErrorf("marshal smallint: value %d out of range", v) + } + return encShort(int16(v)), nil + case reflect.Ptr: + if rv.IsNil() { + return nil, nil + } + } + + return nil, marshalErrorf("can not marshal %T into %s", value, info) +} + +func marshalTinyInt(info TypeInfo, value interface{}) ([]byte, error) { + switch v := value.(type) { + case Marshaler: + return v.MarshalCQL(info) + case unsetColumn: + return nil, nil + case int8: + return []byte{byte(v)}, nil + case uint8: + return []byte{byte(v)}, nil + case int16: + if v > math.MaxInt8 || v < math.MinInt8 { + return nil, marshalErrorf("marshal tinyint: value %d out of range", v) + } + return []byte{byte(v)}, nil + case uint16: + if v > math.MaxUint8 { + return nil, marshalErrorf("marshal tinyint: value %d out of range", v) + } + return []byte{byte(v)}, nil + case int: + if v > math.MaxInt8 || v < math.MinInt8 { + return nil, marshalErrorf("marshal tinyint: 
value %d out of range", v) + } + return []byte{byte(v)}, nil + case int32: + if v > math.MaxInt8 || v < math.MinInt8 { + return nil, marshalErrorf("marshal tinyint: value %d out of range", v) + } + return []byte{byte(v)}, nil + case int64: + if v > math.MaxInt8 || v < math.MinInt8 { + return nil, marshalErrorf("marshal tinyint: value %d out of range", v) + } + return []byte{byte(v)}, nil + case uint: + if v > math.MaxUint8 { + return nil, marshalErrorf("marshal tinyint: value %d out of range", v) + } + return []byte{byte(v)}, nil + case uint32: + if v > math.MaxUint8 { + return nil, marshalErrorf("marshal tinyint: value %d out of range", v) + } + return []byte{byte(v)}, nil + case uint64: + if v > math.MaxUint8 { + return nil, marshalErrorf("marshal tinyint: value %d out of range", v) + } + return []byte{byte(v)}, nil + case string: + n, err := strconv.ParseInt(v, 10, 8) + if err != nil { + return nil, marshalErrorf("can not marshal %T into %s: %v", value, info, err) + } + return []byte{byte(n)}, nil + } + + if value == nil { + return nil, nil + } + + switch rv := reflect.ValueOf(value); rv.Type().Kind() { + case reflect.Int, reflect.Int64, reflect.Int32, reflect.Int16, reflect.Int8: + v := rv.Int() + if v > math.MaxInt8 || v < math.MinInt8 { + return nil, marshalErrorf("marshal tinyint: value %d out of range", v) + } + return []byte{byte(v)}, nil + case reflect.Uint, reflect.Uint64, reflect.Uint32, reflect.Uint16, reflect.Uint8: + v := rv.Uint() + if v > math.MaxUint8 { + return nil, marshalErrorf("marshal tinyint: value %d out of range", v) + } + return []byte{byte(v)}, nil + case reflect.Ptr: + if rv.IsNil() { + return nil, nil + } + } + + return nil, marshalErrorf("can not marshal %T into %s", value, info) +} + +func marshalInt(info TypeInfo, value interface{}) ([]byte, error) { + switch v := value.(type) { + case Marshaler: + return v.MarshalCQL(info) + case unsetColumn: + return nil, nil + case int: + if v > math.MaxInt32 || v < math.MinInt32 { + return nil, marshalErrorf("marshal int: value %d out of range", v) + } + return encInt(int32(v)), nil + case uint: + if v > math.MaxUint32 { + return nil, marshalErrorf("marshal int: value %d out of range", v) + } + return encInt(int32(v)), nil + case int64: + if v > math.MaxInt32 || v < math.MinInt32 { + return nil, marshalErrorf("marshal int: value %d out of range", v) + } + return encInt(int32(v)), nil + case uint64: + if v > math.MaxUint32 { + return nil, marshalErrorf("marshal int: value %d out of range", v) + } + return encInt(int32(v)), nil + case int32: + return encInt(v), nil + case uint32: + return encInt(int32(v)), nil + case int16: + return encInt(int32(v)), nil + case uint16: + return encInt(int32(v)), nil + case int8: + return encInt(int32(v)), nil + case uint8: + return encInt(int32(v)), nil + case string: + i, err := strconv.ParseInt(v, 10, 32) + if err != nil { + return nil, marshalErrorf("can not marshal string to int: %s", err) + } + return encInt(int32(i)), nil + } + + if value == nil { + return nil, nil + } + + switch rv := reflect.ValueOf(value); rv.Type().Kind() { + case reflect.Int, reflect.Int64, reflect.Int32, reflect.Int16, reflect.Int8: + v := rv.Int() + if v > math.MaxInt32 || v < math.MinInt32 { + return nil, marshalErrorf("marshal int: value %d out of range", v) + } + return encInt(int32(v)), nil + case reflect.Uint, reflect.Uint64, reflect.Uint32, reflect.Uint16, reflect.Uint8: + v := rv.Uint() + if v > math.MaxInt32 { + return nil, marshalErrorf("marshal int: value %d out of range", v) + } + return 
encInt(int32(v)), nil + case reflect.Ptr: + if rv.IsNil() { + return nil, nil + } + } + + return nil, marshalErrorf("can not marshal %T into %s", value, info) +} + +func encInt(x int32) []byte { + return []byte{byte(x >> 24), byte(x >> 16), byte(x >> 8), byte(x)} +} + +func decInt(x []byte) int32 { + if len(x) != 4 { + return 0 + } + return int32(x[0])<<24 | int32(x[1])<<16 | int32(x[2])<<8 | int32(x[3]) +} + +func encShort(x int16) []byte { + p := make([]byte, 2) + p[0] = byte(x >> 8) + p[1] = byte(x) + return p +} + +func decShort(p []byte) int16 { + if len(p) != 2 { + return 0 + } + return int16(p[0])<<8 | int16(p[1]) +} + +func decTiny(p []byte) int8 { + if len(p) != 1 { + return 0 + } + return int8(p[0]) +} + +func marshalBigInt(info TypeInfo, value interface{}) ([]byte, error) { + switch v := value.(type) { + case Marshaler: + return v.MarshalCQL(info) + case unsetColumn: + return nil, nil + case int: + return encBigInt(int64(v)), nil + case uint: + if uint64(v) > math.MaxInt64 { + return nil, marshalErrorf("marshal bigint: value %d out of range", v) + } + return encBigInt(int64(v)), nil + case int64: + return encBigInt(v), nil + case uint64: + return encBigInt(int64(v)), nil + case int32: + return encBigInt(int64(v)), nil + case uint32: + return encBigInt(int64(v)), nil + case int16: + return encBigInt(int64(v)), nil + case uint16: + return encBigInt(int64(v)), nil + case int8: + return encBigInt(int64(v)), nil + case uint8: + return encBigInt(int64(v)), nil + case big.Int: + return encBigInt2C(&v), nil + case string: + i, err := strconv.ParseInt(value.(string), 10, 64) + if err != nil { + return nil, marshalErrorf("can not marshal string to bigint: %s", err) + } + return encBigInt(i), nil + } + + if value == nil { + return nil, nil + } + + rv := reflect.ValueOf(value) + switch rv.Type().Kind() { + case reflect.Int, reflect.Int64, reflect.Int32, reflect.Int16, reflect.Int8: + v := rv.Int() + return encBigInt(v), nil + case reflect.Uint, reflect.Uint64, reflect.Uint32, reflect.Uint16, reflect.Uint8: + v := rv.Uint() + if v > math.MaxInt64 { + return nil, marshalErrorf("marshal bigint: value %d out of range", v) + } + return encBigInt(int64(v)), nil + } + return nil, marshalErrorf("can not marshal %T into %s", value, info) +} + +func encBigInt(x int64) []byte { + return []byte{byte(x >> 56), byte(x >> 48), byte(x >> 40), byte(x >> 32), + byte(x >> 24), byte(x >> 16), byte(x >> 8), byte(x)} +} + +func bytesToInt64(data []byte) (ret int64) { + for i := range data { + ret |= int64(data[i]) << (8 * uint(len(data)-i-1)) + } + return ret +} + +func bytesToUint64(data []byte) (ret uint64) { + for i := range data { + ret |= uint64(data[i]) << (8 * uint(len(data)-i-1)) + } + return ret +} + +func unmarshalBigInt(info TypeInfo, data []byte, value interface{}) error { + return unmarshalIntlike(info, decBigInt(data), data, value) +} + +func unmarshalInt(info TypeInfo, data []byte, value interface{}) error { + return unmarshalIntlike(info, int64(decInt(data)), data, value) +} + +func unmarshalSmallInt(info TypeInfo, data []byte, value interface{}) error { + return unmarshalIntlike(info, int64(decShort(data)), data, value) +} + +func unmarshalTinyInt(info TypeInfo, data []byte, value interface{}) error { + return unmarshalIntlike(info, int64(decTiny(data)), data, value) +} + +func unmarshalVarint(info TypeInfo, data []byte, value interface{}) error { + switch v := value.(type) { + case *big.Int: + return unmarshalIntlike(info, 0, data, value) + case *uint64: + if len(data) == 9 && data[0] == 0 { + 
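+ // a non-negative varint may be encoded as nine bytes with a leading zero + // sign byte; the remaining eight bytes then hold the full unsigned 64-bit value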
*v = bytesToUint64(data[1:]) + return nil + } + } + + if len(data) > 8 { + return unmarshalErrorf("unmarshal int: varint value %v out of range for %T (use big.Int)", data, value) + } + + int64Val := bytesToInt64(data) + if len(data) > 0 && len(data) < 8 && data[0]&0x80 > 0 { + int64Val -= (1 << uint(len(data)*8)) + } + return unmarshalIntlike(info, int64Val, data, value) +} + +func marshalVarint(info TypeInfo, value interface{}) ([]byte, error) { + var ( + retBytes []byte + err error + ) + + switch v := value.(type) { + case unsetColumn: + return nil, nil + case uint64: + if v > uint64(math.MaxInt64) { + retBytes = make([]byte, 9) + binary.BigEndian.PutUint64(retBytes[1:], v) + } else { + retBytes = make([]byte, 8) + binary.BigEndian.PutUint64(retBytes, v) + } + default: + retBytes, err = marshalBigInt(info, value) + } + + if err == nil { + // trim down to most significant byte + i := 0 + for ; i < len(retBytes)-1; i++ { + b0 := retBytes[i] + if b0 != 0 && b0 != 0xFF { + break + } + + b1 := retBytes[i+1] + if b0 == 0 && b1 != 0 { + if b1&0x80 == 0 { + i++ + } + break + } + + if b0 == 0xFF && b1 != 0xFF { + if b1&0x80 > 0 { + i++ + } + break + } + } + retBytes = retBytes[i:] + } + + return retBytes, err +} + +func unmarshalIntlike(info TypeInfo, int64Val int64, data []byte, value interface{}) error { + switch v := value.(type) { + case *int: + if ^uint(0) == math.MaxUint32 && (int64Val < math.MinInt32 || int64Val > math.MaxInt32) { + return unmarshalErrorf("unmarshal int: value %d out of range for %T", int64Val, *v) + } + *v = int(int64Val) + return nil + case *uint: + unitVal := uint64(int64Val) + switch info.Type() { + case TypeInt: + *v = uint(unitVal) & 0xFFFFFFFF + case TypeSmallInt: + *v = uint(unitVal) & 0xFFFF + case TypeTinyInt: + *v = uint(unitVal) & 0xFF + default: + if ^uint(0) == math.MaxUint32 && (int64Val < 0 || int64Val > math.MaxUint32) { + return unmarshalErrorf("unmarshal int: value %d out of range for %T", unitVal, *v) + } + *v = uint(unitVal) + } + return nil + case *int64: + *v = int64Val + return nil + case *uint64: + switch info.Type() { + case TypeInt: + *v = uint64(int64Val) & 0xFFFFFFFF + case TypeSmallInt: + *v = uint64(int64Val) & 0xFFFF + case TypeTinyInt: + *v = uint64(int64Val) & 0xFF + default: + *v = uint64(int64Val) + } + return nil + case *int32: + if int64Val < math.MinInt32 || int64Val > math.MaxInt32 { + return unmarshalErrorf("unmarshal int: value %d out of range for %T", int64Val, *v) + } + *v = int32(int64Val) + return nil + case *uint32: + switch info.Type() { + case TypeInt: + *v = uint32(int64Val) & 0xFFFFFFFF + case TypeSmallInt: + *v = uint32(int64Val) & 0xFFFF + case TypeTinyInt: + *v = uint32(int64Val) & 0xFF + default: + if int64Val < 0 || int64Val > math.MaxUint32 { + return unmarshalErrorf("unmarshal int: value %d out of range for %T", int64Val, *v) + } + *v = uint32(int64Val) & 0xFFFFFFFF + } + return nil + case *int16: + if int64Val < math.MinInt16 || int64Val > math.MaxInt16 { + return unmarshalErrorf("unmarshal int: value %d out of range for %T", int64Val, *v) + } + *v = int16(int64Val) + return nil + case *uint16: + switch info.Type() { + case TypeSmallInt: + *v = uint16(int64Val) & 0xFFFF + case TypeTinyInt: + *v = uint16(int64Val) & 0xFF + default: + if int64Val < 0 || int64Val > math.MaxUint16 { + return unmarshalErrorf("unmarshal int: value %d out of range for %T", int64Val, *v) + } + *v = uint16(int64Val) & 0xFFFF + } + return nil + case *int8: + if int64Val < math.MinInt8 || int64Val > math.MaxInt8 { + return 
unmarshalErrorf("unmarshal int: value %d out of range for %T", int64Val, *v) + } + *v = int8(int64Val) + return nil + case *uint8: + if info.Type() != TypeTinyInt && (int64Val < 0 || int64Val > math.MaxUint8) { + return unmarshalErrorf("unmarshal int: value %d out of range for %T", int64Val, *v) + } + *v = uint8(int64Val) & 0xFF + return nil + case *big.Int: + decBigInt2C(data, v) + return nil + case *string: + *v = strconv.FormatInt(int64Val, 10) + return nil + } + + rv := reflect.ValueOf(value) + if rv.Kind() != reflect.Ptr { + return unmarshalErrorf("can not unmarshal into non-pointer %T", value) + } + rv = rv.Elem() + + switch rv.Type().Kind() { + case reflect.Int: + if ^uint(0) == math.MaxUint32 && (int64Val < math.MinInt32 || int64Val > math.MaxInt32) { + return unmarshalErrorf("unmarshal int: value %d out of range", int64Val) + } + rv.SetInt(int64Val) + return nil + case reflect.Int64: + rv.SetInt(int64Val) + return nil + case reflect.Int32: + if int64Val < math.MinInt32 || int64Val > math.MaxInt32 { + return unmarshalErrorf("unmarshal int: value %d out of range", int64Val) + } + rv.SetInt(int64Val) + return nil + case reflect.Int16: + if int64Val < math.MinInt16 || int64Val > math.MaxInt16 { + return unmarshalErrorf("unmarshal int: value %d out of range", int64Val) + } + rv.SetInt(int64Val) + return nil + case reflect.Int8: + if int64Val < math.MinInt8 || int64Val > math.MaxInt8 { + return unmarshalErrorf("unmarshal int: value %d out of range", int64Val) + } + rv.SetInt(int64Val) + return nil + case reflect.Uint: + unitVal := uint64(int64Val) + switch info.Type() { + case TypeInt: + rv.SetUint(unitVal & 0xFFFFFFFF) + case TypeSmallInt: + rv.SetUint(unitVal & 0xFFFF) + case TypeTinyInt: + rv.SetUint(unitVal & 0xFF) + default: + if ^uint(0) == math.MaxUint32 && (int64Val < 0 || int64Val > math.MaxUint32) { + return unmarshalErrorf("unmarshal int: value %d out of range for %s", unitVal, rv.Type()) + } + rv.SetUint(unitVal) + } + return nil + case reflect.Uint64: + unitVal := uint64(int64Val) + switch info.Type() { + case TypeInt: + rv.SetUint(unitVal & 0xFFFFFFFF) + case TypeSmallInt: + rv.SetUint(unitVal & 0xFFFF) + case TypeTinyInt: + rv.SetUint(unitVal & 0xFF) + default: + rv.SetUint(unitVal) + } + return nil + case reflect.Uint32: + unitVal := uint64(int64Val) + switch info.Type() { + case TypeInt: + rv.SetUint(unitVal & 0xFFFFFFFF) + case TypeSmallInt: + rv.SetUint(unitVal & 0xFFFF) + case TypeTinyInt: + rv.SetUint(unitVal & 0xFF) + default: + if int64Val < 0 || int64Val > math.MaxUint32 { + return unmarshalErrorf("unmarshal int: value %d out of range for %s", int64Val, rv.Type()) + } + rv.SetUint(unitVal & 0xFFFFFFFF) + } + return nil + case reflect.Uint16: + unitVal := uint64(int64Val) + switch info.Type() { + case TypeSmallInt: + rv.SetUint(unitVal & 0xFFFF) + case TypeTinyInt: + rv.SetUint(unitVal & 0xFF) + default: + if int64Val < 0 || int64Val > math.MaxUint16 { + return unmarshalErrorf("unmarshal int: value %d out of range for %s", int64Val, rv.Type()) + } + rv.SetUint(unitVal & 0xFFFF) + } + return nil + case reflect.Uint8: + if info.Type() != TypeTinyInt && (int64Val < 0 || int64Val > math.MaxUint8) { + return unmarshalErrorf("unmarshal int: value %d out of range for %s", int64Val, rv.Type()) + } + rv.SetUint(uint64(int64Val) & 0xff) + return nil + } + return unmarshalErrorf("can not unmarshal %s into %T", info, value) +} + +func decBigInt(data []byte) int64 { + if len(data) != 8 { + return 0 + } + return int64(data[0])<<56 | int64(data[1])<<48 | + int64(data[2])<<40 | 
int64(data[3])<<32 | + int64(data[4])<<24 | int64(data[5])<<16 | + int64(data[6])<<8 | int64(data[7]) +} + +func marshalBool(info TypeInfo, value interface{}) ([]byte, error) { + switch v := value.(type) { + case Marshaler: + return v.MarshalCQL(info) + case unsetColumn: + return nil, nil + case bool: + return encBool(v), nil + } + + if value == nil { + return nil, nil + } + + rv := reflect.ValueOf(value) + switch rv.Type().Kind() { + case reflect.Bool: + return encBool(rv.Bool()), nil + } + return nil, marshalErrorf("can not marshal %T into %s", value, info) +} + +func encBool(v bool) []byte { + if v { + return []byte{1} + } + return []byte{0} +} + +func unmarshalBool(info TypeInfo, data []byte, value interface{}) error { + switch v := value.(type) { + case Unmarshaler: + return v.UnmarshalCQL(info, data) + case *bool: + *v = decBool(data) + return nil + } + rv := reflect.ValueOf(value) + if rv.Kind() != reflect.Ptr { + return unmarshalErrorf("can not unmarshal into non-pointer %T", value) + } + rv = rv.Elem() + switch rv.Type().Kind() { + case reflect.Bool: + rv.SetBool(decBool(data)) + return nil + } + return unmarshalErrorf("can not unmarshal %s into %T", info, value) +} + +func decBool(v []byte) bool { + if len(v) == 0 { + return false + } + return v[0] != 0 +} + +func marshalFloat(info TypeInfo, value interface{}) ([]byte, error) { + switch v := value.(type) { + case Marshaler: + return v.MarshalCQL(info) + case unsetColumn: + return nil, nil + case float32: + return encInt(int32(math.Float32bits(v))), nil + } + + if value == nil { + return nil, nil + } + + rv := reflect.ValueOf(value) + switch rv.Type().Kind() { + case reflect.Float32: + return encInt(int32(math.Float32bits(float32(rv.Float())))), nil + } + return nil, marshalErrorf("can not marshal %T into %s", value, info) +} + +func unmarshalFloat(info TypeInfo, data []byte, value interface{}) error { + switch v := value.(type) { + case Unmarshaler: + return v.UnmarshalCQL(info, data) + case *float32: + *v = math.Float32frombits(uint32(decInt(data))) + return nil + } + rv := reflect.ValueOf(value) + if rv.Kind() != reflect.Ptr { + return unmarshalErrorf("can not unmarshal into non-pointer %T", value) + } + rv = rv.Elem() + switch rv.Type().Kind() { + case reflect.Float32: + rv.SetFloat(float64(math.Float32frombits(uint32(decInt(data))))) + return nil + } + return unmarshalErrorf("can not unmarshal %s into %T", info, value) +} + +func marshalDouble(info TypeInfo, value interface{}) ([]byte, error) { + switch v := value.(type) { + case Marshaler: + return v.MarshalCQL(info) + case unsetColumn: + return nil, nil + case float64: + return encBigInt(int64(math.Float64bits(v))), nil + } + if value == nil { + return nil, nil + } + rv := reflect.ValueOf(value) + switch rv.Type().Kind() { + case reflect.Float64: + return encBigInt(int64(math.Float64bits(rv.Float()))), nil + } + return nil, marshalErrorf("can not marshal %T into %s", value, info) +} + +func unmarshalDouble(info TypeInfo, data []byte, value interface{}) error { + switch v := value.(type) { + case Unmarshaler: + return v.UnmarshalCQL(info, data) + case *float64: + *v = math.Float64frombits(uint64(decBigInt(data))) + return nil + } + rv := reflect.ValueOf(value) + if rv.Kind() != reflect.Ptr { + return unmarshalErrorf("can not unmarshal into non-pointer %T", value) + } + rv = rv.Elem() + switch rv.Type().Kind() { + case reflect.Float64: + rv.SetFloat(math.Float64frombits(uint64(decBigInt(data)))) + return nil + } + return unmarshalErrorf("can not unmarshal %s into %T", info, 
value) +} + +func marshalDecimal(info TypeInfo, value interface{}) ([]byte, error) { + if value == nil { + return nil, nil + } + + switch v := value.(type) { + case Marshaler: + return v.MarshalCQL(info) + case unsetColumn: + return nil, nil + case inf.Dec: + unscaled := encBigInt2C(v.UnscaledBig()) + if unscaled == nil { + return nil, marshalErrorf("can not marshal %T into %s", value, info) + } + + buf := make([]byte, 4+len(unscaled)) + copy(buf[0:4], encInt(int32(v.Scale()))) + copy(buf[4:], unscaled) + return buf, nil + } + return nil, marshalErrorf("can not marshal %T into %s", value, info) +} + +func unmarshalDecimal(info TypeInfo, data []byte, value interface{}) error { + switch v := value.(type) { + case Unmarshaler: + return v.UnmarshalCQL(info, data) + case *inf.Dec: + if len(data) < 4 { + return unmarshalErrorf("inf.Dec needs at least 4 bytes, while value has only %d", len(data)) + } + scale := decInt(data[0:4]) + unscaled := decBigInt2C(data[4:], nil) + *v = *inf.NewDecBig(unscaled, inf.Scale(scale)) + return nil + } + return unmarshalErrorf("can not unmarshal %s into %T", info, value) +} + +// decBigInt2C sets the value of n to the big-endian two's complement +// value stored in the given data. If data[0]&0x80 != 0, the number +// is negative. If data is empty, the result will be 0. +func decBigInt2C(data []byte, n *big.Int) *big.Int { + if n == nil { + n = new(big.Int) + } + n.SetBytes(data) + if len(data) > 0 && data[0]&0x80 > 0 { + n.Sub(n, new(big.Int).Lsh(bigOne, uint(len(data))*8)) + } + return n +} + +// encBigInt2C returns the big-endian two's complement +// form of n. +func encBigInt2C(n *big.Int) []byte { + switch n.Sign() { + case 0: + return []byte{0} + case 1: + b := n.Bytes() + if b[0]&0x80 > 0 { + b = append([]byte{0}, b...) + } + return b + case -1: + length := uint(n.BitLen()/8+1) * 8 + b := new(big.Int).Add(n, new(big.Int).Lsh(bigOne, length)).Bytes() + // When the most significant bit is on a byte + // boundary, we can get some extra significant + // bits, so strip them off when that happens.
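+ // e.g. n = -32768: BitLen is 16, so length is 24 and the sum's bytes are + // 0xff 0x80 0x00; the leading 0xff only duplicates the sign bit already + // carried by 0x80 and is stripped below, leaving the 16-bit form 0x80 0x00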
+ if len(b) >= 2 && b[0] == 0xff && b[1]&0x80 != 0 { + b = b[1:] + } + return b + } + return nil +} + +func marshalTime(info TypeInfo, value interface{}) ([]byte, error) { + switch v := value.(type) { + case Marshaler: + return v.MarshalCQL(info) + case unsetColumn: + return nil, nil + case int64: + return encBigInt(v), nil + case time.Duration: + return encBigInt(v.Nanoseconds()), nil + } + + if value == nil { + return nil, nil + } + + rv := reflect.ValueOf(value) + switch rv.Type().Kind() { + case reflect.Int64: + return encBigInt(rv.Int()), nil + } + return nil, marshalErrorf("can not marshal %T into %s", value, info) +} + +func marshalTimestamp(info TypeInfo, value interface{}) ([]byte, error) { + switch v := value.(type) { + case Marshaler: + return v.MarshalCQL(info) + case unsetColumn: + return nil, nil + case int64: + return encBigInt(v), nil + case time.Time: + if v.IsZero() { + return []byte{}, nil + } + x := int64(v.UTC().Unix()*1e3) + int64(v.UTC().Nanosecond()/1e6) + return encBigInt(x), nil + } + + if value == nil { + return nil, nil + } + + rv := reflect.ValueOf(value) + switch rv.Type().Kind() { + case reflect.Int64: + return encBigInt(rv.Int()), nil + } + return nil, marshalErrorf("can not marshal %T into %s", value, info) +} + +func unmarshalTime(info TypeInfo, data []byte, value interface{}) error { + switch v := value.(type) { + case Unmarshaler: + return v.UnmarshalCQL(info, data) + case *int64: + *v = decBigInt(data) + return nil + case *time.Duration: + *v = time.Duration(decBigInt(data)) + return nil + } + + rv := reflect.ValueOf(value) + if rv.Kind() != reflect.Ptr { + return unmarshalErrorf("can not unmarshal into non-pointer %T", value) + } + rv = rv.Elem() + switch rv.Type().Kind() { + case reflect.Int64: + rv.SetInt(decBigInt(data)) + return nil + } + return unmarshalErrorf("can not unmarshal %s into %T", info, value) +} + +func unmarshalTimestamp(info TypeInfo, data []byte, value interface{}) error { + switch v := value.(type) { + case Unmarshaler: + return v.UnmarshalCQL(info, data) + case *int64: + *v = decBigInt(data) + return nil + case *time.Time: + if len(data) == 0 { + *v = time.Time{} + return nil + } + x := decBigInt(data) + sec := x / 1000 + nsec := (x - sec*1000) * 1000000 + *v = time.Unix(sec, nsec).In(time.UTC) + return nil + } + + rv := reflect.ValueOf(value) + if rv.Kind() != reflect.Ptr { + return unmarshalErrorf("can not unmarshal into non-pointer %T", value) + } + rv = rv.Elem() + switch rv.Type().Kind() { + case reflect.Int64: + rv.SetInt(decBigInt(data)) + return nil + } + return unmarshalErrorf("can not unmarshal %s into %T", info, value) +} + +const millisecondsInADay int64 = 24 * 60 * 60 * 1000 + +func marshalDate(info TypeInfo, value interface{}) ([]byte, error) { + var timestamp int64 + switch v := value.(type) { + case Marshaler: + return v.MarshalCQL(info) + case unsetColumn: + return nil, nil + case int64: + timestamp = v + x := timestamp/millisecondsInADay + int64(1<<31) + return encInt(int32(x)), nil + case time.Time: + if v.IsZero() { + return []byte{}, nil + } + timestamp = int64(v.UTC().Unix()*1e3) + int64(v.UTC().Nanosecond()/1e6) + x := timestamp/millisecondsInADay + int64(1<<31) + return encInt(int32(x)), nil + case *time.Time: + if v.IsZero() { + return []byte{}, nil + } + timestamp = int64(v.UTC().Unix()*1e3) + int64(v.UTC().Nanosecond()/1e6) + x := timestamp/millisecondsInADay + int64(1<<31) + return encInt(int32(x)), nil + case string: + if v == "" { + return []byte{}, nil + } + t, err := time.Parse("2006-01-02", v) + if 
err != nil { + return nil, marshalErrorf("can not marshal %T into %s, date layout must be '2006-01-02'", value, info) + } + timestamp = int64(t.UTC().Unix()*1e3) + int64(t.UTC().Nanosecond()/1e6) + x := timestamp/millisecondsInADay + int64(1<<31) + return encInt(int32(x)), nil + } + + if value == nil { + return nil, nil + } + return nil, marshalErrorf("can not marshal %T into %s", value, info) +} + +func unmarshalDate(info TypeInfo, data []byte, value interface{}) error { + switch v := value.(type) { + case Unmarshaler: + return v.UnmarshalCQL(info, data) + case *time.Time: + if len(data) == 0 { + *v = time.Time{} + return nil + } + var origin uint32 = 1 << 31 + var current uint32 = binary.BigEndian.Uint32(data) + timestamp := (int64(current) - int64(origin)) * millisecondsInADay + *v = time.UnixMilli(timestamp).In(time.UTC) + return nil + case *string: + if len(data) == 0 { + *v = "" + return nil + } + var origin uint32 = 1 << 31 + var current uint32 = binary.BigEndian.Uint32(data) + timestamp := (int64(current) - int64(origin)) * millisecondsInADay + *v = time.UnixMilli(timestamp).In(time.UTC).Format("2006-01-02") + return nil + } + return unmarshalErrorf("can not unmarshal %s into %T", info, value) +} + +func marshalDuration(info TypeInfo, value interface{}) ([]byte, error) { + switch v := value.(type) { + case Marshaler: + return v.MarshalCQL(info) + case unsetColumn: + return nil, nil + case int64: + return encVints(0, 0, v), nil + case time.Duration: + return encVints(0, 0, v.Nanoseconds()), nil + case string: + d, err := time.ParseDuration(v) + if err != nil { + return nil, err + } + return encVints(0, 0, d.Nanoseconds()), nil + case Duration: + return encVints(v.Months, v.Days, v.Nanoseconds), nil + } + + if value == nil { + return nil, nil + } + + rv := reflect.ValueOf(value) + switch rv.Type().Kind() { + case reflect.Int64: + return encBigInt(rv.Int()), nil + } + return nil, marshalErrorf("can not marshal %T into %s", value, info) +} + +func unmarshalDuration(info TypeInfo, data []byte, value interface{}) error { + switch v := value.(type) { + case Unmarshaler: + return v.UnmarshalCQL(info, data) + case *Duration: + if len(data) == 0 { + *v = Duration{ + Months: 0, + Days: 0, + Nanoseconds: 0, + } + return nil + } + months, days, nanos, err := decVints(data) + if err != nil { + return unmarshalErrorf("failed to unmarshal %s into %T: %s", info, value, err.Error()) + } + *v = Duration{ + Months: months, + Days: days, + Nanoseconds: nanos, + } + return nil + } + return unmarshalErrorf("can not unmarshal %s into %T", info, value) +} + +func decVints(data []byte) (int32, int32, int64, error) { + month, i, err := decVint(data, 0) + if err != nil { + return 0, 0, 0, fmt.Errorf("failed to extract month: %s", err.Error()) + } + days, i, err := decVint(data, i) + if err != nil { + return 0, 0, 0, fmt.Errorf("failed to extract days: %s", err.Error()) + } + nanos, _, err := decVint(data, i) + if err != nil { + return 0, 0, 0, fmt.Errorf("failed to extract nanoseconds: %s", err.Error()) + } + return int32(month), int32(days), nanos, err +} + +func decVint(data []byte, start int) (int64, int, error) { + if len(data) <= start { + return 0, 0, errors.New("unexpected eof") + } + firstByte := data[start] + if firstByte&0x80 == 0 { + return decIntZigZag(uint64(firstByte)), start + 1, nil + } + numBytes := bits.LeadingZeros32(uint32(^firstByte)) - 24 + ret := uint64(firstByte & (0xff >> uint(numBytes))) + if len(data) < start+numBytes+1 { + return 0, 0, fmt.Errorf("data expect to have %d bytes, but 
it has only %d", start+numBytes+1, len(data)) + } + for i := start; i < start+numBytes; i++ { + ret <<= 8 + ret |= uint64(data[i+1] & 0xff) + } + return decIntZigZag(ret), start + numBytes + 1, nil +} + +func decIntZigZag(n uint64) int64 { + return int64((n >> 1) ^ -(n & 1)) +} + +func encIntZigZag(n int64) uint64 { + return uint64((n >> 63) ^ (n << 1)) +} + +func encVints(months int32, seconds int32, nanos int64) []byte { + buf := append(encVint(int64(months)), encVint(int64(seconds))...) + return append(buf, encVint(nanos)...) +} + +func encVint(v int64) []byte { + vEnc := encIntZigZag(v) + lead0 := bits.LeadingZeros64(vEnc) + numBytes := (639 - lead0*9) >> 6 + + // It can be 1 or 0 is v ==0 + if numBytes <= 1 { + return []byte{byte(vEnc)} + } + extraBytes := numBytes - 1 + var buf = make([]byte, numBytes) + for i := extraBytes; i >= 0; i-- { + buf[i] = byte(vEnc) + vEnc >>= 8 + } + buf[0] |= byte(^(0xff >> uint(extraBytes))) + return buf +} + +func writeCollectionSize(info CollectionType, n int, buf *bytes.Buffer) error { + if info.proto > protoVersion2 { + if n > math.MaxInt32 { + return marshalErrorf("marshal: collection too large") + } + + buf.WriteByte(byte(n >> 24)) + buf.WriteByte(byte(n >> 16)) + buf.WriteByte(byte(n >> 8)) + buf.WriteByte(byte(n)) + } else { + if n > math.MaxUint16 { + return marshalErrorf("marshal: collection too large") + } + + buf.WriteByte(byte(n >> 8)) + buf.WriteByte(byte(n)) + } + + return nil +} + +func marshalList(info TypeInfo, value interface{}) ([]byte, error) { + listInfo, ok := info.(CollectionType) + if !ok { + return nil, marshalErrorf("marshal: can not marshal non collection type into list") + } + + if value == nil { + return nil, nil + } else if _, ok := value.(unsetColumn); ok { + return nil, nil + } + + rv := reflect.ValueOf(value) + t := rv.Type() + k := t.Kind() + if k == reflect.Slice && rv.IsNil() { + return nil, nil + } + + switch k { + case reflect.Slice, reflect.Array: + buf := &bytes.Buffer{} + n := rv.Len() + + if err := writeCollectionSize(listInfo, n, buf); err != nil { + return nil, err + } + + for i := 0; i < n; i++ { + item, err := Marshal(listInfo.Elem, rv.Index(i).Interface()) + if err != nil { + return nil, err + } + itemLen := len(item) + // Set the value to null for supported protocols + if item == nil && listInfo.proto > protoVersion2 { + itemLen = -1 + } + if err := writeCollectionSize(listInfo, itemLen, buf); err != nil { + return nil, err + } + buf.Write(item) + } + return buf.Bytes(), nil + case reflect.Map: + elem := t.Elem() + if elem.Kind() == reflect.Struct && elem.NumField() == 0 { + rkeys := rv.MapKeys() + keys := make([]interface{}, len(rkeys)) + for i := 0; i < len(keys); i++ { + keys[i] = rkeys[i].Interface() + } + return marshalList(listInfo, keys) + } + } + return nil, marshalErrorf("can not marshal %T into %s", value, info) +} + +func readCollectionSize(info CollectionType, data []byte) (size, read int, err error) { + if info.proto > protoVersion2 { + if len(data) < 4 { + return 0, 0, unmarshalErrorf("unmarshal list: unexpected eof") + } + size = int(int32(data[0])<<24 | int32(data[1])<<16 | int32(data[2])<<8 | int32(data[3])) + read = 4 + } else { + if len(data) < 2 { + return 0, 0, unmarshalErrorf("unmarshal list: unexpected eof") + } + size = int(data[0])<<8 | int(data[1]) + read = 2 + } + return +} + +func unmarshalList(info TypeInfo, data []byte, value interface{}) error { + listInfo, ok := info.(CollectionType) + if !ok { + return unmarshalErrorf("unmarshal: can not unmarshal none collection type into 
list") + } + + rv := reflect.ValueOf(value) + if rv.Kind() != reflect.Ptr { + return unmarshalErrorf("can not unmarshal into non-pointer %T", value) + } + rv = rv.Elem() + t := rv.Type() + k := t.Kind() + + switch k { + case reflect.Slice, reflect.Array: + if data == nil { + if k == reflect.Array { + return unmarshalErrorf("unmarshal list: can not store nil in array value") + } + if rv.IsNil() { + return nil + } + rv.Set(reflect.Zero(t)) + return nil + } + n, p, err := readCollectionSize(listInfo, data) + if err != nil { + return err + } + data = data[p:] + if k == reflect.Array { + if rv.Len() != n { + return unmarshalErrorf("unmarshal list: array with wrong size") + } + } else { + rv.Set(reflect.MakeSlice(t, n, n)) + } + for i := 0; i < n; i++ { + m, p, err := readCollectionSize(listInfo, data) + if err != nil { + return err + } + data = data[p:] + // In case m < 0, the value is null, and unmarshalData should be nil. + var unmarshalData []byte + if m >= 0 { + if len(data) < m { + return unmarshalErrorf("unmarshal list: unexpected eof") + } + unmarshalData = data[:m] + data = data[m:] + } + if err := Unmarshal(listInfo.Elem, unmarshalData, rv.Index(i).Addr().Interface()); err != nil { + return err + } + } + return nil + } + return unmarshalErrorf("can not unmarshal %s into %T", info, value) +} + +func marshalMap(info TypeInfo, value interface{}) ([]byte, error) { + mapInfo, ok := info.(CollectionType) + if !ok { + return nil, marshalErrorf("marshal: can not marshal none collection type into map") + } + + if value == nil { + return nil, nil + } else if _, ok := value.(unsetColumn); ok { + return nil, nil + } + + rv := reflect.ValueOf(value) + + t := rv.Type() + if t.Kind() != reflect.Map { + return nil, marshalErrorf("can not marshal %T into %s", value, info) + } + + if rv.IsNil() { + return nil, nil + } + + buf := &bytes.Buffer{} + n := rv.Len() + + if err := writeCollectionSize(mapInfo, n, buf); err != nil { + return nil, err + } + + keys := rv.MapKeys() + for _, key := range keys { + item, err := Marshal(mapInfo.Key, key.Interface()) + if err != nil { + return nil, err + } + itemLen := len(item) + // Set the key to null for supported protocols + if item == nil && mapInfo.proto > protoVersion2 { + itemLen = -1 + } + if err := writeCollectionSize(mapInfo, itemLen, buf); err != nil { + return nil, err + } + buf.Write(item) + + item, err = Marshal(mapInfo.Elem, rv.MapIndex(key).Interface()) + if err != nil { + return nil, err + } + itemLen = len(item) + // Set the value to null for supported protocols + if item == nil && mapInfo.proto > protoVersion2 { + itemLen = -1 + } + if err := writeCollectionSize(mapInfo, itemLen, buf); err != nil { + return nil, err + } + buf.Write(item) + } + return buf.Bytes(), nil +} + +func unmarshalMap(info TypeInfo, data []byte, value interface{}) error { + mapInfo, ok := info.(CollectionType) + if !ok { + return unmarshalErrorf("unmarshal: can not unmarshal none collection type into map") + } + + rv := reflect.ValueOf(value) + if rv.Kind() != reflect.Ptr { + return unmarshalErrorf("can not unmarshal into non-pointer %T", value) + } + rv = rv.Elem() + t := rv.Type() + if t.Kind() != reflect.Map { + return unmarshalErrorf("can not unmarshal %s into %T", info, value) + } + if data == nil { + rv.Set(reflect.Zero(t)) + return nil + } + n, p, err := readCollectionSize(mapInfo, data) + if err != nil { + return err + } + if n < 0 { + return unmarshalErrorf("negative map size %d", n) + } + rv.Set(reflect.MakeMapWithSize(t, n)) + data = data[p:] + for i := 0; i < n; i++ 
{ + m, p, err := readCollectionSize(mapInfo, data) + if err != nil { + return err + } + data = data[p:] + key := reflect.New(t.Key()) + // In case m < 0, the key is null, and unmarshalData should be nil. + var unmarshalData []byte + if m >= 0 { + if len(data) < m { + return unmarshalErrorf("unmarshal map: unexpected eof") + } + unmarshalData = data[:m] + data = data[m:] + } + if err := Unmarshal(mapInfo.Key, unmarshalData, key.Interface()); err != nil { + return err + } + + m, p, err = readCollectionSize(mapInfo, data) + if err != nil { + return err + } + data = data[p:] + val := reflect.New(t.Elem()) + + // In case m < 0, the value is null, and unmarshalData should be nil. + unmarshalData = nil + if m >= 0 { + if len(data) < m { + return unmarshalErrorf("unmarshal map: unexpected eof") + } + unmarshalData = data[:m] + data = data[m:] + } + if err := Unmarshal(mapInfo.Elem, unmarshalData, val.Interface()); err != nil { + return err + } + + rv.SetMapIndex(key.Elem(), val.Elem()) + } + return nil +} + +func marshalUUID(info TypeInfo, value interface{}) ([]byte, error) { + switch val := value.(type) { + case unsetColumn: + return nil, nil + case UUID: + return val.Bytes(), nil + case [16]byte: + return val[:], nil + case []byte: + if len(val) != 16 { + return nil, marshalErrorf("can not marshal []byte %d bytes long into %s, must be exactly 16 bytes long", len(val), info) + } + return val, nil + case string: + b, err := ParseUUID(val) + if err != nil { + return nil, err + } + return b[:], nil + } + + if value == nil { + return nil, nil + } + + return nil, marshalErrorf("can not marshal %T into %s", value, info) +} + +func unmarshalUUID(info TypeInfo, data []byte, value interface{}) error { + if len(data) == 0 { + switch v := value.(type) { + case *string: + *v = "" + case *[]byte: + *v = nil + case *UUID: + *v = UUID{} + default: + return unmarshalErrorf("can not unmarshal %s into %T", info, value) + } + + return nil + } + + if len(data) != 16 { + return unmarshalErrorf("unable to parse UUID: UUIDs must be exactly 16 bytes long") + } + + switch v := value.(type) { + case *[16]byte: + copy((*v)[:], data) + return nil + case *UUID: + copy((*v)[:], data) + return nil + } + + u, err := UUIDFromBytes(data) + if err != nil { + return unmarshalErrorf("unable to parse UUID: %s", err) + } + + switch v := value.(type) { + case *string: + *v = u.String() + return nil + case *[]byte: + *v = u[:] + return nil + } + return unmarshalErrorf("can not unmarshal %s into %T", info, value) +} + +func unmarshalTimeUUID(info TypeInfo, data []byte, value interface{}) error { + switch v := value.(type) { + case Unmarshaler: + return v.UnmarshalCQL(info, data) + case *time.Time: + id, err := UUIDFromBytes(data) + if err != nil { + return err + } else if id.Version() != 1 { + return unmarshalErrorf("invalid timeuuid") + } + *v = id.Time() + return nil + default: + return unmarshalUUID(info, data, value) + } +} + +func marshalInet(info TypeInfo, value interface{}) ([]byte, error) { + // we return either the 4 or 16 byte representation of an + // ip address here otherwise the db value will be prefixed + // with the remaining byte values e.g. ::ffff:127.0.0.1 and not 127.0.0.1 + switch val := value.(type) { + case unsetColumn: + return nil, nil + case net.IP: + t := val.To4() + if t == nil { + return val.To16(), nil + } + return t, nil + case string: + b := net.ParseIP(val) + if b != nil { + t := b.To4() + if t == nil { + return b.To16(), nil + } + return t, nil + } + return nil, marshalErrorf("cannot marshal:
invalid IP string %s", val) + } + + if value == nil { + return nil, nil + } + + return nil, marshalErrorf("cannot marshal %T into %s", value, info) +} + +func unmarshalInet(info TypeInfo, data []byte, value interface{}) error { + switch v := value.(type) { + case Unmarshaler: + return v.UnmarshalCQL(info, data) + case *net.IP: + if x := len(data); !(x == 4 || x == 16) { + return unmarshalErrorf("cannot unmarshal %s into %T: invalid sized IP: got %d bytes, not 4 or 16", info, value, x) + } + buf := copyBytes(data) + ip := net.IP(buf) + if v4 := ip.To4(); v4 != nil { + *v = v4 + return nil + } + *v = ip + return nil + case *string: + if len(data) == 0 { + *v = "" + return nil + } + ip := net.IP(data) + if v4 := ip.To4(); v4 != nil { + *v = v4.String() + return nil + } + *v = ip.String() + return nil + } + return unmarshalErrorf("cannot unmarshal %s into %T", info, value) +} + +func marshalTuple(info TypeInfo, value interface{}) ([]byte, error) { + tuple := info.(TupleTypeInfo) + switch v := value.(type) { + case unsetColumn: + return nil, marshalErrorf("Invalid request: UnsetValue is unsupported for tuples") + case []interface{}: + if len(v) != len(tuple.Elems) { + return nil, marshalErrorf("cannot marshal tuple: wrong number of elements") + } + + var buf []byte + for i, elem := range v { + if elem == nil { + buf = appendInt(buf, int32(-1)) + continue + } + + data, err := Marshal(tuple.Elems[i], elem) + if err != nil { + return nil, err + } + + n := len(data) + buf = appendInt(buf, int32(n)) + buf = append(buf, data...) + } + + return buf, nil + } + + rv := reflect.ValueOf(value) + t := rv.Type() + k := t.Kind() + + switch k { + case reflect.Struct: + if v := t.NumField(); v != len(tuple.Elems) { + return nil, marshalErrorf("can not marshal tuple into struct %v, not enough fields: have %d, need %d", t, v, len(tuple.Elems)) + } + + var buf []byte + for i, elem := range tuple.Elems { + field := rv.Field(i) + + if field.Kind() == reflect.Ptr && field.IsNil() { + buf = appendInt(buf, int32(-1)) + continue + } + + data, err := Marshal(elem, field.Interface()) + if err != nil { + return nil, err + } + + n := len(data) + buf = appendInt(buf, int32(n)) + buf = append(buf, data...) + } + + return buf, nil + case reflect.Slice, reflect.Array: + size := rv.Len() + if size != len(tuple.Elems) { + return nil, marshalErrorf("can not marshal tuple into %v of length %d, need %d elements", k, size, len(tuple.Elems)) + } + + var buf []byte + for i, elem := range tuple.Elems { + item := rv.Index(i) + + if item.Kind() == reflect.Ptr && item.IsNil() { + buf = appendInt(buf, int32(-1)) + continue + } + + data, err := Marshal(elem, item.Interface()) + if err != nil { + return nil, err + } + + n := len(data) + buf = appendInt(buf, int32(n)) + buf = append(buf, data...) + } + + return buf, nil + } + + return nil, marshalErrorf("cannot marshal %T into %s", value, tuple) +} + +func readBytes(p []byte) ([]byte, []byte) { + // TODO: really should use a framer + size := readInt(p) + p = p[4:] + if size < 0 { + return nil, p + } + return p[:size], p[size:] +} + +// Currently we only support unmarshaling into a list of values; this makes it +// possible to support tuples without changing the query API. In the future this +// can be extended to allow unmarshalling into custom tuple types.
+func unmarshalTuple(info TypeInfo, data []byte, value interface{}) error { + if v, ok := value.(Unmarshaler); ok { + return v.UnmarshalCQL(info, data) + } + + tuple := info.(TupleTypeInfo) + switch v := value.(type) { + case []interface{}: + for i, elem := range tuple.Elems { + // each element inside data is a [bytes] + var p []byte + if len(data) >= 4 { + p, data = readBytes(data) + } + err := Unmarshal(elem, p, v[i]) + if err != nil { + return err + } + } + + return nil + } + + rv := reflect.ValueOf(value) + if rv.Kind() != reflect.Ptr { + return unmarshalErrorf("can not unmarshal into non-pointer %T", value) + } + + rv = rv.Elem() + t := rv.Type() + k := t.Kind() + + switch k { + case reflect.Struct: + if v := t.NumField(); v != len(tuple.Elems) { + return unmarshalErrorf("can not unmarshal tuple into struct %v, not enough fields: have %d, need %d", t, v, len(tuple.Elems)) + } + + for i, elem := range tuple.Elems { + var p []byte + if len(data) >= 4 { + p, data = readBytes(data) + } + + v, err := elem.NewWithError() + if err != nil { + return err + } + if err := Unmarshal(elem, p, v); err != nil { + return err + } + + switch rv.Field(i).Kind() { + case reflect.Ptr: + if p != nil { + rv.Field(i).Set(reflect.ValueOf(v)) + } else { + rv.Field(i).Set(reflect.Zero(reflect.TypeOf(v))) + } + default: + rv.Field(i).Set(reflect.ValueOf(v).Elem()) + } + } + + return nil + case reflect.Slice, reflect.Array: + if k == reflect.Array { + size := rv.Len() + if size != len(tuple.Elems) { + return unmarshalErrorf("can not unmarshal tuple into array of length %d, need %d elements", size, len(tuple.Elems)) + } + } else { + rv.Set(reflect.MakeSlice(t, len(tuple.Elems), len(tuple.Elems))) + } + + for i, elem := range tuple.Elems { + var p []byte + if len(data) >= 4 { + p, data = readBytes(data) + } + + v, err := elem.NewWithError() + if err != nil { + return err + } + if err := Unmarshal(elem, p, v); err != nil { + return err + } + + switch rv.Index(i).Kind() { + case reflect.Ptr: + if p != nil { + rv.Index(i).Set(reflect.ValueOf(v)) + } else { + rv.Index(i).Set(reflect.Zero(reflect.TypeOf(v))) + } + default: + rv.Index(i).Set(reflect.ValueOf(v).Elem()) + } + } + + return nil + } + + return unmarshalErrorf("cannot unmarshal %s into %T", info, value) +} + +// UDTMarshaler is an interface which should be implemented by users wishing to +// handle encoding UDT types to be sent to Cassandra. Note: due to current implementations, +// methods defined for this interface must be value receivers, not pointer receivers. +type UDTMarshaler interface { + // MarshalUDT will be called for each field in the UDT returned by Cassandra; + // the implementor should marshal the value to return, for example by calling + // Marshal. + MarshalUDT(name string, info TypeInfo) ([]byte, error) +} + +// UDTUnmarshaler should be implemented by users wanting to implement custom +// UDT unmarshaling. +type UDTUnmarshaler interface { + // UnmarshalUDT will be called for each field in the UDT returned by Cassandra; + // the implementor should unmarshal the data into the value of their choosing, + // for example by calling Unmarshal.
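+// For example (a sketch; tupleInfo stands for the column's TypeInfo and the +// variable names are illustrative): +// var x int +// var y string +// err := Unmarshal(tupleInfo, data, []interface{}{&x, &y})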
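+ // + // A minimal sketch (the coords struct is illustrative and not part of this + // package): + // + // func (c *coords) UnmarshalUDT(name string, info TypeInfo, data []byte) error { + // switch name { + // case "x": + // return Unmarshal(info, data, &c.X) + // case "y": + // return Unmarshal(info, data, &c.Y) + // } + // return nil + // }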
+ UnmarshalUDT(name string, info TypeInfo, data []byte) error +} + +func marshalUDT(info TypeInfo, value interface{}) ([]byte, error) { + udt := info.(UDTTypeInfo) + + switch v := value.(type) { + case Marshaler: + return v.MarshalCQL(info) + case unsetColumn: + return nil, unmarshalErrorf("invalid request: UnsetValue is unsupported for user defined types") + case UDTMarshaler: + var buf []byte + for _, e := range udt.Elements { + data, err := v.MarshalUDT(e.Name, e.Type) + if err != nil { + return nil, err + } + + buf = appendBytes(buf, data) + } + + return buf, nil + case map[string]interface{}: + var buf []byte + for _, e := range udt.Elements { + val, ok := v[e.Name] + + var data []byte + + if ok { + var err error + data, err = Marshal(e.Type, val) + if err != nil { + return nil, err + } + } + + buf = appendBytes(buf, data) + } + + return buf, nil + } + + k := reflect.ValueOf(value) + if k.Kind() == reflect.Ptr { + if k.IsNil() { + return nil, marshalErrorf("cannot marshal %T into %s", value, info) + } + k = k.Elem() + } + + if k.Kind() != reflect.Struct || !k.IsValid() { + return nil, marshalErrorf("cannot marshal %T into %s", value, info) + } + + fields := make(map[string]reflect.Value) + t := reflect.TypeOf(value) + for i := 0; i < t.NumField(); i++ { + sf := t.Field(i) + + if tag := sf.Tag.Get("cql"); tag != "" { + fields[tag] = k.Field(i) + } + } + + var buf []byte + for _, e := range udt.Elements { + f, ok := fields[e.Name] + if !ok { + f = k.FieldByName(e.Name) + } + + var data []byte + if f.IsValid() && f.CanInterface() { + var err error + data, err = Marshal(e.Type, f.Interface()) + if err != nil { + return nil, err + } + } + + buf = appendBytes(buf, data) + } + + return buf, nil +} + +func unmarshalUDT(info TypeInfo, data []byte, value interface{}) error { + switch v := value.(type) { + case Unmarshaler: + return v.UnmarshalCQL(info, data) + case UDTUnmarshaler: + udt := info.(UDTTypeInfo) + + for id, e := range udt.Elements { + if len(data) == 0 { + return nil + } + if len(data) < 4 { + return unmarshalErrorf("can not unmarshal %s: field [%d]%s: unexpected eof", info, id, e.Name) + } + + var p []byte + p, data = readBytes(data) + if err := v.UnmarshalUDT(e.Name, e.Type, p); err != nil { + return err + } + } + + return nil + case *map[string]interface{}: + udt := info.(UDTTypeInfo) + + rv := reflect.ValueOf(value) + if rv.Kind() != reflect.Ptr { + return unmarshalErrorf("can not unmarshal into non-pointer %T", value) + } + + rv = rv.Elem() + t := rv.Type() + if t.Kind() != reflect.Map { + return unmarshalErrorf("can not unmarshal %s into %T", info, value) + } else if data == nil { + rv.Set(reflect.Zero(t)) + return nil + } + + rv.Set(reflect.MakeMap(t)) + m := *v + + for id, e := range udt.Elements { + if len(data) == 0 { + return nil + } + if len(data) < 4 { + return unmarshalErrorf("can not unmarshal %s: field [%d]%s: unexpected eof", info, id, e.Name) + } + + valType, err := goType(e.Type) + if err != nil { + return unmarshalErrorf("can not unmarshal %s: %v", info, err) + } + + val := reflect.New(valType) + + var p []byte + p, data = readBytes(data) + + if err := Unmarshal(e.Type, p, val.Interface()); err != nil { + return err + } + + m[e.Name] = val.Elem().Interface() + } + + return nil + } + + rv := reflect.ValueOf(value) + if rv.Kind() != reflect.Ptr { + return unmarshalErrorf("can not unmarshal into non-pointer %T", value) + } + k := rv.Elem() + if k.Kind() != reflect.Struct || !k.IsValid() { + return unmarshalErrorf("cannot unmarshal %s into %T", info, value) + } + + 
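+ // an empty serialized UDT value resets the destination struct to its zero value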
if len(data) == 0 { + if k.CanSet() { + k.Set(reflect.Zero(k.Type())) + } + + return nil + } + + t := k.Type() + fields := make(map[string]reflect.Value, t.NumField()) + for i := 0; i < t.NumField(); i++ { + sf := t.Field(i) + + if tag := sf.Tag.Get("cql"); tag != "" { + fields[tag] = k.Field(i) + } + } + + udt := info.(UDTTypeInfo) + for id, e := range udt.Elements { + if len(data) == 0 { + return nil + } + if len(data) < 4 { + // UDT def does not match the column value + return unmarshalErrorf("can not unmarshal %s: field [%d]%s: unexpected eof", info, id, e.Name) + } + + var p []byte + p, data = readBytes(data) + + f, ok := fields[e.Name] + if !ok { + f = k.FieldByName(e.Name) + if f == emptyValue { + // skip fields which exist in the UDT but not in + // the struct passed in + continue + } + } + + if !f.IsValid() || !f.CanAddr() { + return unmarshalErrorf("cannot unmarshal %s into %T: field %v is not valid", info, value, e.Name) + } + + fk := f.Addr().Interface() + if err := Unmarshal(e.Type, p, fk); err != nil { + return err + } + } + + return nil +} + +// TypeInfo describes a Cassandra specific data type. +type TypeInfo interface { + Type() Type + Version() byte + Custom() string + + // New creates a pointer to an empty version of whatever type + // is referenced by the TypeInfo receiver. + // + // If there is no corresponding Go type for the CQL type, New panics. + // + // Deprecated: Use NewWithError instead. + New() interface{} + + // NewWithError creates a pointer to an empty version of whatever type + // is referenced by the TypeInfo receiver. + // + // If there is no corresponding Go type for the CQL type, NewWithError returns an error. + NewWithError() (interface{}, error) +} + +type NativeType struct { + proto byte + typ Type + custom string // only used for TypeCustom +} + +func NewNativeType(proto byte, typ Type, custom string) NativeType { + return NativeType{proto, typ, custom} +} + +func (t NativeType) NewWithError() (interface{}, error) { + typ, err := goType(t) + if err != nil { + return nil, err + } + return reflect.New(typ).Interface(), nil +} + +func (t NativeType) New() interface{} { + val, err := t.NewWithError() + if err != nil { + panic(err.Error()) + } + return val +} + +func (s NativeType) Type() Type { + return s.typ +} + +func (s NativeType) Version() byte { + return s.proto +} + +func (s NativeType) Custom() string { + return s.custom +} + +func (s NativeType) String() string { + switch s.typ { + case TypeCustom: + return fmt.Sprintf("%s(%s)", s.typ, s.custom) + default: + return s.typ.String() + } +} + +type CollectionType struct { + NativeType + Key TypeInfo // only used for TypeMap + Elem TypeInfo // only used for TypeMap, TypeList and TypeSet +} + +func (t CollectionType) NewWithError() (interface{}, error) { + typ, err := goType(t) + if err != nil { + return nil, err + } + return reflect.New(typ).Interface(), nil +} + +func (t CollectionType) New() interface{} { + val, err := t.NewWithError() + if err != nil { + panic(err.Error()) + } + return val +} + +func (c CollectionType) String() string { + switch c.typ { + case TypeMap: + return fmt.Sprintf("%s(%s, %s)", c.typ, c.Key, c.Elem) + case TypeList, TypeSet: + return fmt.Sprintf("%s(%s)", c.typ, c.Elem) + case TypeCustom: + return fmt.Sprintf("%s(%s)", c.typ, c.custom) + default: + return c.typ.String() + } +} + +type TupleTypeInfo struct { + NativeType + Elems []TypeInfo +} + +func (t TupleTypeInfo) String() string { + var buf bytes.Buffer + buf.WriteString(fmt.Sprintf("%s(", t.typ)) + for _, elem := 
range t.Elems { + buf.WriteString(fmt.Sprintf("%s, ", elem)) + } + buf.Truncate(buf.Len() - 2) + buf.WriteByte(')') + return buf.String() +} + +func (t TupleTypeInfo) NewWithError() (interface{}, error) { + typ, err := goType(t) + if err != nil { + return nil, err + } + return reflect.New(typ).Interface(), nil +} + +func (t TupleTypeInfo) New() interface{} { + val, err := t.NewWithError() + if err != nil { + panic(err.Error()) + } + return val +} + +type UDTField struct { + Name string + Type TypeInfo +} + +type UDTTypeInfo struct { + NativeType + KeySpace string + Name string + Elements []UDTField +} + +func (u UDTTypeInfo) NewWithError() (interface{}, error) { + typ, err := goType(u) + if err != nil { + return nil, err + } + return reflect.New(typ).Interface(), nil +} + +func (u UDTTypeInfo) New() interface{} { + val, err := u.NewWithError() + if err != nil { + panic(err.Error()) + } + return val +} + +func (u UDTTypeInfo) String() string { + buf := &bytes.Buffer{} + + fmt.Fprintf(buf, "%s.%s{", u.KeySpace, u.Name) + first := true + for _, e := range u.Elements { + if !first { + fmt.Fprint(buf, ",") + } else { + first = false + } + + fmt.Fprintf(buf, "%s=%v", e.Name, e.Type) + } + fmt.Fprint(buf, "}") + + return buf.String() +} + +// String returns a human readable name for the Cassandra datatype +// described by t. +// Type is the identifier of a Cassandra internal datatype. +type Type int + +const ( + TypeCustom Type = 0x0000 + TypeAscii Type = 0x0001 + TypeBigInt Type = 0x0002 + TypeBlob Type = 0x0003 + TypeBoolean Type = 0x0004 + TypeCounter Type = 0x0005 + TypeDecimal Type = 0x0006 + TypeDouble Type = 0x0007 + TypeFloat Type = 0x0008 + TypeInt Type = 0x0009 + TypeText Type = 0x000A + TypeTimestamp Type = 0x000B + TypeUUID Type = 0x000C + TypeVarchar Type = 0x000D + TypeVarint Type = 0x000E + TypeTimeUUID Type = 0x000F + TypeInet Type = 0x0010 + TypeDate Type = 0x0011 + TypeTime Type = 0x0012 + TypeSmallInt Type = 0x0013 + TypeTinyInt Type = 0x0014 + TypeDuration Type = 0x0015 + TypeList Type = 0x0020 + TypeMap Type = 0x0021 + TypeSet Type = 0x0022 + TypeUDT Type = 0x0030 + TypeTuple Type = 0x0031 +) + +// String returns the name of the identifier. 
+func (t Type) String() string { + switch t { + case TypeCustom: + return "custom" + case TypeAscii: + return "ascii" + case TypeBigInt: + return "bigint" + case TypeBlob: + return "blob" + case TypeBoolean: + return "boolean" + case TypeCounter: + return "counter" + case TypeDecimal: + return "decimal" + case TypeDouble: + return "double" + case TypeFloat: + return "float" + case TypeInt: + return "int" + case TypeText: + return "text" + case TypeTimestamp: + return "timestamp" + case TypeUUID: + return "uuid" + case TypeVarchar: + return "varchar" + case TypeTimeUUID: + return "timeuuid" + case TypeInet: + return "inet" + case TypeDate: + return "date" + case TypeDuration: + return "duration" + case TypeTime: + return "time" + case TypeSmallInt: + return "smallint" + case TypeTinyInt: + return "tinyint" + case TypeList: + return "list" + case TypeMap: + return "map" + case TypeSet: + return "set" + case TypeVarint: + return "varint" + case TypeTuple: + return "tuple" + default: + return fmt.Sprintf("unknown_type_%d", t) + } +} + +type MarshalError string + +func (m MarshalError) Error() string { + return string(m) +} + +func marshalErrorf(format string, args ...interface{}) MarshalError { + return MarshalError(fmt.Sprintf(format, args...)) +} + +type UnmarshalError string + +func (m UnmarshalError) Error() string { + return string(m) +} + +func unmarshalErrorf(format string, args ...interface{}) UnmarshalError { + return UnmarshalError(fmt.Sprintf(format, args...)) +} diff --git a/vendor/github.com/gocql/gocql/metadata.go b/vendor/github.com/gocql/gocql/metadata.go new file mode 100644 index 000000000..13345d937 --- /dev/null +++ b/vendor/github.com/gocql/gocql/metadata.go @@ -0,0 +1,1465 @@ +// Copyright (c) 2015 The gocql Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package gocql + +import ( + "encoding/hex" + "encoding/json" + "fmt" + "strconv" + "strings" + "sync" +) + +// schema metadata for a keyspace +type KeyspaceMetadata struct { + Name string + DurableWrites bool + StrategyClass string + StrategyOptions map[string]interface{} + Tables map[string]*TableMetadata + Functions map[string]*FunctionMetadata + Aggregates map[string]*AggregateMetadata + // Deprecated: use the MaterializedViews field for views and UserTypes field for udts instead. + Views map[string]*ViewMetadata + MaterializedViews map[string]*MaterializedViewMetadata + UserTypes map[string]*UserTypeMetadata +} + +// schema metadata for a table (a.k.a. 
column family) +type TableMetadata struct { + Keyspace string + Name string + KeyValidator string + Comparator string + DefaultValidator string + KeyAliases []string + ColumnAliases []string + ValueAlias string + PartitionKey []*ColumnMetadata + ClusteringColumns []*ColumnMetadata + Columns map[string]*ColumnMetadata + OrderedColumns []string +} + +// schema metadata for a column +type ColumnMetadata struct { + Keyspace string + Table string + Name string + ComponentIndex int + Kind ColumnKind + Validator string + Type TypeInfo + ClusteringOrder string + Order ColumnOrder + Index ColumnIndexMetadata +} + +// FunctionMetadata holds metadata for function constructs +type FunctionMetadata struct { + Keyspace string + Name string + ArgumentTypes []TypeInfo + ArgumentNames []string + Body string + CalledOnNullInput bool + Language string + ReturnType TypeInfo +} + +// AggregateMetadata holds metadata for aggregate constructs +type AggregateMetadata struct { + Keyspace string + Name string + ArgumentTypes []TypeInfo + FinalFunc FunctionMetadata + InitCond string + ReturnType TypeInfo + StateFunc FunctionMetadata + StateType TypeInfo + + stateFunc string + finalFunc string +} + +// ViewMetadata holds the metadata for views. +// Deprecated: this is kept for backwards compatibility reasons. Use MaterializedViewMetadata. +type ViewMetadata struct { + Keyspace string + Name string + FieldNames []string + FieldTypes []TypeInfo +} + +// MaterializedViewMetadata holds the metadata for materialized views. +type MaterializedViewMetadata struct { + Keyspace string + Name string + BaseTableId UUID + BaseTable *TableMetadata + BloomFilterFpChance float64 + Caching map[string]string + Comment string + Compaction map[string]string + Compression map[string]string + CrcCheckChance float64 + DcLocalReadRepairChance float64 + DefaultTimeToLive int + Extensions map[string]string + GcGraceSeconds int + Id UUID + IncludeAllColumns bool + MaxIndexInterval int + MemtableFlushPeriodInMs int + MinIndexInterval int + ReadRepairChance float64 + SpeculativeRetry string + + baseTableName string +} + +type UserTypeMetadata struct { + Keyspace string + Name string + FieldNames []string + FieldTypes []TypeInfo +} + +// the ordering of the column with regard to its comparator +type ColumnOrder bool + +const ( + ASC ColumnOrder = false + DESC ColumnOrder = true +) + +type ColumnIndexMetadata struct { + Name string + Type string + Options map[string]interface{} +} + +type ColumnKind int + +const ( + ColumnUnkownKind ColumnKind = iota + ColumnPartitionKey + ColumnClusteringKey + ColumnRegular + ColumnCompact + ColumnStatic +) + +func (c ColumnKind) String() string { + switch c { + case ColumnPartitionKey: + return "partition_key" + case ColumnClusteringKey: + return "clustering_key" + case ColumnRegular: + return "regular" + case ColumnCompact: + return "compact" + case ColumnStatic: + return "static" + default: + return fmt.Sprintf("unknown_column_%d", c) + } +} + +func (c *ColumnKind) UnmarshalCQL(typ TypeInfo, p []byte) error { + if typ.Type() != TypeVarchar { + return unmarshalErrorf("unable to unmarshal %s into ColumnKind, expected Varchar", typ) + } + + kind, err := columnKindFromSchema(string(p)) + if err != nil { + return err + } + *c = kind + + return nil +} + +func columnKindFromSchema(kind string) (ColumnKind, error) { + switch kind { + case "partition_key": + return ColumnPartitionKey, nil + case "clustering_key", "clustering": + return ColumnClusteringKey, nil + case "regular": + return ColumnRegular, nil + case
"compact_value": + return ColumnCompact, nil + case "static": + return ColumnStatic, nil + default: + return -1, fmt.Errorf("unknown column kind: %q", kind) + } +} + +// default alias values +const ( + DEFAULT_KEY_ALIAS = "key" + DEFAULT_COLUMN_ALIAS = "column" + DEFAULT_VALUE_ALIAS = "value" +) + +// queries the cluster for schema information for a specific keyspace +type schemaDescriber struct { + session *Session + mu sync.Mutex + + cache map[string]*KeyspaceMetadata +} + +// creates a session bound schema describer which will query and cache +// keyspace metadata +func newSchemaDescriber(session *Session) *schemaDescriber { + return &schemaDescriber{ + session: session, + cache: map[string]*KeyspaceMetadata{}, + } +} + +// returns the cached KeyspaceMetadata held by the describer for the named +// keyspace. +func (s *schemaDescriber) getSchema(keyspaceName string) (*KeyspaceMetadata, error) { + s.mu.Lock() + defer s.mu.Unlock() + + metadata, found := s.cache[keyspaceName] + if !found { + // refresh the cache for this keyspace + err := s.refreshSchema(keyspaceName) + if err != nil { + return nil, err + } + + metadata = s.cache[keyspaceName] + } + + return metadata, nil +} + +// clears the already cached keyspace metadata +func (s *schemaDescriber) clearSchema(keyspaceName string) { + s.mu.Lock() + defer s.mu.Unlock() + + delete(s.cache, keyspaceName) +} + +// forcibly updates the current KeyspaceMetadata held by the schema describer +// for a given named keyspace. +func (s *schemaDescriber) refreshSchema(keyspaceName string) error { + var err error + + // query the system keyspace for schema data + // TODO retrieve concurrently + keyspace, err := getKeyspaceMetadata(s.session, keyspaceName) + if err != nil { + return err + } + tables, err := getTableMetadata(s.session, keyspaceName) + if err != nil { + return err + } + columns, err := getColumnMetadata(s.session, keyspaceName) + if err != nil { + return err + } + functions, err := getFunctionsMetadata(s.session, keyspaceName) + if err != nil { + return err + } + aggregates, err := getAggregatesMetadata(s.session, keyspaceName) + if err != nil { + return err + } + views, err := getViewsMetadata(s.session, keyspaceName) + if err != nil { + return err + } + materializedViews, err := getMaterializedViewsMetadata(s.session, keyspaceName) + if err != nil { + return err + } + + // organize the schema data + compileMetadata(s.session.cfg.ProtoVersion, keyspace, tables, columns, functions, aggregates, views, + materializedViews, s.session.logger) + + // update the cache + s.cache[keyspaceName] = keyspace + + return nil +} + +// "compiles" derived information about keyspace, table, and column metadata +// for a keyspace from the basic queried metadata objects returned by +// getKeyspaceMetadata, getTableMetadata, and getColumnMetadata respectively; +// Links the metadata objects together and derives the column composition of +// the partition key and clustering key for a table. 
+func compileMetadata( + protoVersion int, + keyspace *KeyspaceMetadata, + tables []TableMetadata, + columns []ColumnMetadata, + functions []FunctionMetadata, + aggregates []AggregateMetadata, + views []ViewMetadata, + materializedViews []MaterializedViewMetadata, + logger StdLogger, +) { + keyspace.Tables = make(map[string]*TableMetadata) + for i := range tables { + tables[i].Columns = make(map[string]*ColumnMetadata) + + keyspace.Tables[tables[i].Name] = &tables[i] + } + keyspace.Functions = make(map[string]*FunctionMetadata, len(functions)) + for i := range functions { + keyspace.Functions[functions[i].Name] = &functions[i] + } + keyspace.Aggregates = make(map[string]*AggregateMetadata, len(aggregates)) + for i := range aggregates { + aggregates[i].FinalFunc = *keyspace.Functions[aggregates[i].finalFunc] + aggregates[i].StateFunc = *keyspace.Functions[aggregates[i].stateFunc] + keyspace.Aggregates[aggregates[i].Name] = &aggregates[i] + } + keyspace.Views = make(map[string]*ViewMetadata, len(views)) + for i := range views { + keyspace.Views[views[i].Name] = &views[i] + } + // Views currently holds the user-defined types and is kept only for backward compatibility. + // That is why it is safe to copy Views into Types here. For the real views, use MaterializedViews. + types := make([]UserTypeMetadata, len(views)) + for i := range views { + types[i].Keyspace = views[i].Keyspace + types[i].Name = views[i].Name + types[i].FieldNames = views[i].FieldNames + types[i].FieldTypes = views[i].FieldTypes + } + keyspace.UserTypes = make(map[string]*UserTypeMetadata, len(views)) + for i := range types { + keyspace.UserTypes[types[i].Name] = &types[i] + } + keyspace.MaterializedViews = make(map[string]*MaterializedViewMetadata, len(materializedViews)) + for i := range materializedViews { + materializedViews[i].BaseTable = keyspace.Tables[materializedViews[i].baseTableName] + keyspace.MaterializedViews[materializedViews[i].Name] = &materializedViews[i] + } + + // add columns from the schema data + for i := range columns { + col := &columns[i] + // decode the validator for TypeInfo and order + if col.ClusteringOrder != "" { // Cassandra 3.x+ + col.Type = getCassandraType(col.Validator, logger) + col.Order = ASC + if col.ClusteringOrder == "desc" { + col.Order = DESC + } + } else { + validatorParsed := parseType(col.Validator, logger) + col.Type = validatorParsed.types[0] + col.Order = ASC + if validatorParsed.reversed[0] { + col.Order = DESC + } + } + + table, ok := keyspace.Tables[col.Table] + if !ok { + // if the schema is being updated we may race with it and see + // incomplete metadata. Potentially we should check the schema + // version before and after reading the metadata and, if they + // don't match, try again. + continue + } + + table.Columns[col.Name] = col + table.OrderedColumns = append(table.OrderedColumns, col.Name) + } + + if protoVersion == protoVersion1 { + compileV1Metadata(tables, logger) + } else { + compileV2Metadata(tables, logger) + } +} + +// Compiles derived information from TableMetadata which have had +// ColumnMetadata added already. V1 protocol does not return as much +// column metadata as V2+ (because V1 doesn't support the "type" column in the +// system.schema_columns table) so determining PartitionKey and ClusteringColumns +// is more complex. 
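+//
+// As a concrete illustration (hypothetical table, not taken from this patch),
+// for
+//
+//	CREATE TABLE ks.events (tenant text, day text, seq int, payload blob,
+//	    PRIMARY KEY ((tenant, day), seq))
+//
+// the compiled metadata ends up with PartitionKey = [tenant, day] and
+// ClusteringColumns = [seq]; under the V1 protocol those names would instead
+// come from KeyAliases/ColumnAliases, falling back to DEFAULT_KEY_ALIAS and
+// DEFAULT_COLUMN_ALIAS.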
+func compileV1Metadata(tables []TableMetadata, logger StdLogger) { + for i := range tables { + table := &tables[i] + + // decode the key validator + keyValidatorParsed := parseType(table.KeyValidator, logger) + // decode the comparator + comparatorParsed := parseType(table.Comparator, logger) + + // the partition key length is the same as the number of types in the + // key validator + table.PartitionKey = make([]*ColumnMetadata, len(keyValidatorParsed.types)) + + // V1 protocol only returns "regular" columns from + // system.schema_columns (there is no type field for columns) + // so the alias information is used to + // create the partition key and clustering columns + + // construct the partition key from the alias + for i := range table.PartitionKey { + var alias string + if len(table.KeyAliases) > i { + alias = table.KeyAliases[i] + } else if i == 0 { + alias = DEFAULT_KEY_ALIAS + } else { + alias = DEFAULT_KEY_ALIAS + strconv.Itoa(i+1) + } + + column := &ColumnMetadata{ + Keyspace: table.Keyspace, + Table: table.Name, + Name: alias, + Type: keyValidatorParsed.types[i], + Kind: ColumnPartitionKey, + ComponentIndex: i, + } + + table.PartitionKey[i] = column + table.Columns[alias] = column + } + + // determine the number of clustering columns + size := len(comparatorParsed.types) + if comparatorParsed.isComposite { + if len(comparatorParsed.collections) != 0 || + (len(table.ColumnAliases) == size-1 && + comparatorParsed.types[size-1].Type() == TypeVarchar) { + size = size - 1 + } + } else { + if !(len(table.ColumnAliases) != 0 || len(table.Columns) == 0) { + size = 0 + } + } + + table.ClusteringColumns = make([]*ColumnMetadata, size) + + for i := range table.ClusteringColumns { + var alias string + if len(table.ColumnAliases) > i { + alias = table.ColumnAliases[i] + } else if i == 0 { + alias = DEFAULT_COLUMN_ALIAS + } else { + alias = DEFAULT_COLUMN_ALIAS + strconv.Itoa(i+1) + } + + order := ASC + if comparatorParsed.reversed[i] { + order = DESC + } + + column := &ColumnMetadata{ + Keyspace: table.Keyspace, + Table: table.Name, + Name: alias, + Type: comparatorParsed.types[i], + Order: order, + Kind: ColumnClusteringKey, + ComponentIndex: i, + } + + table.ClusteringColumns[i] = column + table.Columns[alias] = column + } + + if size != len(comparatorParsed.types)-1 { + alias := DEFAULT_VALUE_ALIAS + if len(table.ValueAlias) > 0 { + alias = table.ValueAlias + } + // decode the default validator + defaultValidatorParsed := parseType(table.DefaultValidator, logger) + column := &ColumnMetadata{ + Keyspace: table.Keyspace, + Table: table.Name, + Name: alias, + Type: defaultValidatorParsed.types[0], + Kind: ColumnRegular, + } + table.Columns[alias] = column + } + } +} + +// The simpler compile case for V2+ protocol +func compileV2Metadata(tables []TableMetadata, logger StdLogger) { + for i := range tables { + table := &tables[i] + + clusteringColumnCount := componentColumnCountOfType(table.Columns, ColumnClusteringKey) + table.ClusteringColumns = make([]*ColumnMetadata, clusteringColumnCount) + + if table.KeyValidator != "" { + keyValidatorParsed := parseType(table.KeyValidator, logger) + table.PartitionKey = make([]*ColumnMetadata, len(keyValidatorParsed.types)) + } else { // Cassandra 3.x+ + partitionKeyCount := componentColumnCountOfType(table.Columns, ColumnPartitionKey) + table.PartitionKey = make([]*ColumnMetadata, partitionKeyCount) + } + + for _, columnName := range table.OrderedColumns { + column := table.Columns[columnName] + if column.Kind == ColumnPartitionKey { + 
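+ // ComponentIndex is the column's position within the composite key,
+ // so it can be used directly as the index into the slices sized above.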
table.PartitionKey[column.ComponentIndex] = column + } else if column.Kind == ColumnClusteringKey { + table.ClusteringColumns[column.ComponentIndex] = column + } + } + } +} + +// returns the count of columns with the given "kind" value. +func componentColumnCountOfType(columns map[string]*ColumnMetadata, kind ColumnKind) int { + maxComponentIndex := -1 + for _, column := range columns { + if column.Kind == kind && column.ComponentIndex > maxComponentIndex { + maxComponentIndex = column.ComponentIndex + } + } + return maxComponentIndex + 1 +} + +// query only for the keyspace metadata for the specified keyspace from system.schema_keyspace +func getKeyspaceMetadata(session *Session, keyspaceName string) (*KeyspaceMetadata, error) { + keyspace := &KeyspaceMetadata{Name: keyspaceName} + + if session.useSystemSchema { // Cassandra 3.x+ + const stmt = ` + SELECT durable_writes, replication + FROM system_schema.keyspaces + WHERE keyspace_name = ?` + + var replication map[string]string + + iter := session.control.query(stmt, keyspaceName) + if iter.NumRows() == 0 { + return nil, ErrKeyspaceDoesNotExist + } + iter.Scan(&keyspace.DurableWrites, &replication) + err := iter.Close() + if err != nil { + return nil, fmt.Errorf("error querying keyspace schema: %v", err) + } + + keyspace.StrategyClass = replication["class"] + delete(replication, "class") + + keyspace.StrategyOptions = make(map[string]interface{}, len(replication)) + for k, v := range replication { + keyspace.StrategyOptions[k] = v + } + } else { + + const stmt = ` + SELECT durable_writes, strategy_class, strategy_options + FROM system.schema_keyspaces + WHERE keyspace_name = ?` + + var strategyOptionsJSON []byte + + iter := session.control.query(stmt, keyspaceName) + if iter.NumRows() == 0 { + return nil, ErrKeyspaceDoesNotExist + } + iter.Scan(&keyspace.DurableWrites, &keyspace.StrategyClass, &strategyOptionsJSON) + err := iter.Close() + if err != nil { + return nil, fmt.Errorf("error querying keyspace schema: %v", err) + } + + err = json.Unmarshal(strategyOptionsJSON, &keyspace.StrategyOptions) + if err != nil { + return nil, fmt.Errorf( + "invalid JSON value '%s' as strategy_options for keyspace '%s': %v", + strategyOptionsJSON, keyspace.Name, err, + ) + } + } + + return keyspace, nil +} + +// query for only the table metadata in the specified keyspace from system.schema_columnfamilies +func getTableMetadata(session *Session, keyspaceName string) ([]TableMetadata, error) { + + var ( + iter *Iter + scan func(iter *Iter, table *TableMetadata) bool + stmt string + + keyAliasesJSON []byte + columnAliasesJSON []byte + ) + + if session.useSystemSchema { // Cassandra 3.x+ + stmt = ` + SELECT + table_name + FROM system_schema.tables + WHERE keyspace_name = ?` + + switchIter := func() *Iter { + iter.Close() + stmt = ` + SELECT + view_name + FROM system_schema.views + WHERE keyspace_name = ?` + iter = session.control.query(stmt, keyspaceName) + return iter + } + + scan = func(iter *Iter, table *TableMetadata) bool { + r := iter.Scan( + &table.Name, + ) + if !r { + iter = switchIter() + if iter != nil { + switchIter = func() *Iter { return nil } + r = iter.Scan(&table.Name) + } + } + return r + } + } else if session.cfg.ProtoVersion == protoVersion1 { + // we have key aliases + stmt = ` + SELECT + columnfamily_name, + key_validator, + comparator, + default_validator, + key_aliases, + column_aliases, + value_alias + FROM system.schema_columnfamilies + WHERE keyspace_name = ?` + + scan = func(iter *Iter, table *TableMetadata) bool { + return 
iter.Scan( + &table.Name, + &table.KeyValidator, + &table.Comparator, + &table.DefaultValidator, + &keyAliasesJSON, + &columnAliasesJSON, + &table.ValueAlias, + ) + } + } else { + stmt = ` + SELECT + columnfamily_name, + key_validator, + comparator, + default_validator + FROM system.schema_columnfamilies + WHERE keyspace_name = ?` + + scan = func(iter *Iter, table *TableMetadata) bool { + return iter.Scan( + &table.Name, + &table.KeyValidator, + &table.Comparator, + &table.DefaultValidator, + ) + } + } + + iter = session.control.query(stmt, keyspaceName) + + tables := []TableMetadata{} + table := TableMetadata{Keyspace: keyspaceName} + + for scan(iter, &table) { + var err error + + // decode the key aliases + if keyAliasesJSON != nil { + table.KeyAliases = []string{} + err = json.Unmarshal(keyAliasesJSON, &table.KeyAliases) + if err != nil { + iter.Close() + return nil, fmt.Errorf( + "invalid JSON value '%s' as key_aliases for table '%s': %v", + keyAliasesJSON, table.Name, err, + ) + } + } + + // decode the column aliases + if columnAliasesJSON != nil { + table.ColumnAliases = []string{} + err = json.Unmarshal(columnAliasesJSON, &table.ColumnAliases) + if err != nil { + iter.Close() + return nil, fmt.Errorf( + "invalid JSON value '%s' as column_aliases for table '%s': %v", + columnAliasesJSON, table.Name, err, + ) + } + } + + tables = append(tables, table) + table = TableMetadata{Keyspace: keyspaceName} + } + + err := iter.Close() + if err != nil && err != ErrNotFound { + return nil, fmt.Errorf("error querying table schema: %v", err) + } + + return tables, nil +} + +func (s *Session) scanColumnMetadataV1(keyspace string) ([]ColumnMetadata, error) { + // V1 does not support the type column, and all returned rows are + // of kind "regular". + const stmt = ` + SELECT + columnfamily_name, + column_name, + component_index, + validator, + index_name, + index_type, + index_options + FROM system.schema_columns + WHERE keyspace_name = ?` + + var columns []ColumnMetadata + + rows := s.control.query(stmt, keyspace).Scanner() + for rows.Next() { + var ( + column = ColumnMetadata{Keyspace: keyspace} + indexOptionsJSON []byte + ) + + // all columns returned by V1 are regular + column.Kind = ColumnRegular + + err := rows.Scan(&column.Table, + &column.Name, + &column.ComponentIndex, + &column.Validator, + &column.Index.Name, + &column.Index.Type, + &indexOptionsJSON) + + if err != nil { + return nil, err + } + + if len(indexOptionsJSON) > 0 { + err := json.Unmarshal(indexOptionsJSON, &column.Index.Options) + if err != nil { + return nil, fmt.Errorf( + "invalid JSON value '%s' as index_options for column '%s' in table '%s': %v", + indexOptionsJSON, + column.Name, + column.Table, + err) + } + } + + columns = append(columns, column) + } + + if err := rows.Err(); err != nil { + return nil, err + } + + return columns, nil +} + +func (s *Session) scanColumnMetadataV2(keyspace string) ([]ColumnMetadata, error) { + // V2+ supports the type column + const stmt = ` + SELECT + columnfamily_name, + column_name, + component_index, + validator, + index_name, + index_type, + index_options, + type + FROM system.schema_columns + WHERE keyspace_name = ?` + + var columns []ColumnMetadata + + rows := s.control.query(stmt, keyspace).Scanner() + for rows.Next() { + var ( + column = ColumnMetadata{Keyspace: keyspace} + indexOptionsJSON []byte + ) + + err := rows.Scan(&column.Table, + &column.Name, + &column.ComponentIndex, + &column.Validator, + &column.Index.Name, + &column.Index.Type, + &indexOptionsJSON, + 
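+ // the V2+ "type" column; it is decoded into a ColumnKind via
+ // ColumnKind.UnmarshalCQL defined earlier in this file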
&column.Kind, + ) + + if err != nil { + return nil, err + } + + if len(indexOptionsJSON) > 0 { + err := json.Unmarshal(indexOptionsJSON, &column.Index.Options) + if err != nil { + return nil, fmt.Errorf( + "invalid JSON value '%s' as index_options for column '%s' in table '%s': %v", + indexOptionsJSON, + column.Name, + column.Table, + err) + } + } + + columns = append(columns, column) + } + + if err := rows.Err(); err != nil { + return nil, err + } + + return columns, nil + +} + +func (s *Session) scanColumnMetadataSystem(keyspace string) ([]ColumnMetadata, error) { + const stmt = ` + SELECT + table_name, + column_name, + clustering_order, + type, + kind, + position + FROM system_schema.columns + WHERE keyspace_name = ?` + + var columns []ColumnMetadata + + rows := s.control.query(stmt, keyspace).Scanner() + for rows.Next() { + column := ColumnMetadata{Keyspace: keyspace} + + err := rows.Scan(&column.Table, + &column.Name, + &column.ClusteringOrder, + &column.Validator, + &column.Kind, + &column.ComponentIndex, + ) + + if err != nil { + return nil, err + } + + columns = append(columns, column) + } + + if err := rows.Err(); err != nil { + return nil, err + } + + // TODO(zariel): get column index info from system_schema.indexes + + return columns, nil +} + +// query for only the column metadata in the specified keyspace from system.schema_columns +func getColumnMetadata(session *Session, keyspaceName string) ([]ColumnMetadata, error) { + var ( + columns []ColumnMetadata + err error + ) + + // Deal with differences in protocol versions + if session.cfg.ProtoVersion == 1 { + columns, err = session.scanColumnMetadataV1(keyspaceName) + } else if session.useSystemSchema { // Cassandra 3.x+ + columns, err = session.scanColumnMetadataSystem(keyspaceName) + } else { + columns, err = session.scanColumnMetadataV2(keyspaceName) + } + + if err != nil && err != ErrNotFound { + return nil, fmt.Errorf("error querying column schema: %v", err) + } + + return columns, nil +} + +func getTypeInfo(t string, logger StdLogger) TypeInfo { + if strings.HasPrefix(t, apacheCassandraTypePrefix) { + t = apacheToCassandraType(t) + } + return getCassandraType(t, logger) +} + +func getViewsMetadata(session *Session, keyspaceName string) ([]ViewMetadata, error) { + if session.cfg.ProtoVersion == protoVersion1 { + return nil, nil + } + var tableName string + if session.useSystemSchema { + tableName = "system_schema.types" + } else { + tableName = "system.schema_usertypes" + } + stmt := fmt.Sprintf(` + SELECT + type_name, + field_names, + field_types + FROM %s + WHERE keyspace_name = ?`, tableName) + + var views []ViewMetadata + + rows := session.control.query(stmt, keyspaceName).Scanner() + for rows.Next() { + view := ViewMetadata{Keyspace: keyspaceName} + var argumentTypes []string + err := rows.Scan(&view.Name, + &view.FieldNames, + &argumentTypes, + ) + if err != nil { + return nil, err + } + view.FieldTypes = make([]TypeInfo, len(argumentTypes)) + for i, argumentType := range argumentTypes { + view.FieldTypes[i] = getTypeInfo(argumentType, session.logger) + } + views = append(views, view) + } + + if err := rows.Err(); err != nil { + return nil, err + } + + return views, nil +} + +func getMaterializedViewsMetadata(session *Session, keyspaceName string) ([]MaterializedViewMetadata, error) { + if !session.useSystemSchema { + return nil, nil + } + var tableName = "system_schema.views" + stmt := fmt.Sprintf(` + SELECT + view_name, + base_table_id, + base_table_name, + bloom_filter_fp_chance, + caching, + comment, + 
compaction, + compression, + crc_check_chance, + dclocal_read_repair_chance, + default_time_to_live, + extensions, + gc_grace_seconds, + id, + include_all_columns, + max_index_interval, + memtable_flush_period_in_ms, + min_index_interval, + read_repair_chance, + speculative_retry + FROM %s + WHERE keyspace_name = ?`, tableName) + + var materializedViews []MaterializedViewMetadata + + rows := session.control.query(stmt, keyspaceName).Scanner() + for rows.Next() { + materializedView := MaterializedViewMetadata{Keyspace: keyspaceName} + err := rows.Scan(&materializedView.Name, + &materializedView.BaseTableId, + &materializedView.baseTableName, + &materializedView.BloomFilterFpChance, + &materializedView.Caching, + &materializedView.Comment, + &materializedView.Compaction, + &materializedView.Compression, + &materializedView.CrcCheckChance, + &materializedView.DcLocalReadRepairChance, + &materializedView.DefaultTimeToLive, + &materializedView.Extensions, + &materializedView.GcGraceSeconds, + &materializedView.Id, + &materializedView.IncludeAllColumns, + &materializedView.MaxIndexInterval, + &materializedView.MemtableFlushPeriodInMs, + &materializedView.MinIndexInterval, + &materializedView.ReadRepairChance, + &materializedView.SpeculativeRetry, + ) + if err != nil { + return nil, err + } + materializedViews = append(materializedViews, materializedView) + } + + if err := rows.Err(); err != nil { + return nil, err + } + + return materializedViews, nil +} + +func getFunctionsMetadata(session *Session, keyspaceName string) ([]FunctionMetadata, error) { + if session.cfg.ProtoVersion == protoVersion1 || !session.hasAggregatesAndFunctions { + return nil, nil + } + var tableName string + if session.useSystemSchema { + tableName = "system_schema.functions" + } else { + tableName = "system.schema_functions" + } + stmt := fmt.Sprintf(` + SELECT + function_name, + argument_types, + argument_names, + body, + called_on_null_input, + language, + return_type + FROM %s + WHERE keyspace_name = ?`, tableName) + + var functions []FunctionMetadata + + rows := session.control.query(stmt, keyspaceName).Scanner() + for rows.Next() { + function := FunctionMetadata{Keyspace: keyspaceName} + var argumentTypes []string + var returnType string + err := rows.Scan(&function.Name, + &argumentTypes, + &function.ArgumentNames, + &function.Body, + &function.CalledOnNullInput, + &function.Language, + &returnType, + ) + if err != nil { + return nil, err + } + function.ReturnType = getTypeInfo(returnType, session.logger) + function.ArgumentTypes = make([]TypeInfo, len(argumentTypes)) + for i, argumentType := range argumentTypes { + function.ArgumentTypes[i] = getTypeInfo(argumentType, session.logger) + } + functions = append(functions, function) + } + + if err := rows.Err(); err != nil { + return nil, err + } + + return functions, nil +} + +func getAggregatesMetadata(session *Session, keyspaceName string) ([]AggregateMetadata, error) { + if session.cfg.ProtoVersion == protoVersion1 || !session.hasAggregatesAndFunctions { + return nil, nil + } + var tableName string + if session.useSystemSchema { + tableName = "system_schema.aggregates" + } else { + tableName = "system.schema_aggregates" + } + + stmt := fmt.Sprintf(` + SELECT + aggregate_name, + argument_types, + final_func, + initcond, + return_type, + state_func, + state_type + FROM %s + WHERE keyspace_name = ?`, tableName) + + var aggregates []AggregateMetadata + + rows := session.control.query(stmt, keyspaceName).Scanner() + for rows.Next() { + aggregate := 
AggregateMetadata{Keyspace: keyspaceName} + var argumentTypes []string + var returnType string + var stateType string + err := rows.Scan(&aggregate.Name, + &argumentTypes, + &aggregate.finalFunc, + &aggregate.InitCond, + &returnType, + &aggregate.stateFunc, + &stateType, + ) + if err != nil { + return nil, err + } + aggregate.ReturnType = getTypeInfo(returnType, session.logger) + aggregate.StateType = getTypeInfo(stateType, session.logger) + aggregate.ArgumentTypes = make([]TypeInfo, len(argumentTypes)) + for i, argumentType := range argumentTypes { + aggregate.ArgumentTypes[i] = getTypeInfo(argumentType, session.logger) + } + aggregates = append(aggregates, aggregate) + } + + if err := rows.Err(); err != nil { + return nil, err + } + + return aggregates, nil +} + +// type definition parser state +type typeParser struct { + input string + index int + logger StdLogger +} + +// the type definition parser result +type typeParserResult struct { + isComposite bool + types []TypeInfo + reversed []bool + collections map[string]TypeInfo +} + +// Parse the type definition used for validator and comparator schema data +func parseType(def string, logger StdLogger) typeParserResult { + parser := &typeParser{input: def, logger: logger} + return parser.parse() +} + +const ( + REVERSED_TYPE = "org.apache.cassandra.db.marshal.ReversedType" + COMPOSITE_TYPE = "org.apache.cassandra.db.marshal.CompositeType" + COLLECTION_TYPE = "org.apache.cassandra.db.marshal.ColumnToCollectionType" + LIST_TYPE = "org.apache.cassandra.db.marshal.ListType" + SET_TYPE = "org.apache.cassandra.db.marshal.SetType" + MAP_TYPE = "org.apache.cassandra.db.marshal.MapType" +) + +// represents a class specification in the type def AST +type typeParserClassNode struct { + name string + params []typeParserParamNode + // this is the segment of the input string that defined this node + input string +} + +// represents a class parameter in the type def AST +type typeParserParamNode struct { + name *string + class typeParserClassNode +} + +func (t *typeParser) parse() typeParserResult { + // parse the AST + ast, ok := t.parseClassNode() + if !ok { + // treat this is a custom type + return typeParserResult{ + isComposite: false, + types: []TypeInfo{ + NativeType{ + typ: TypeCustom, + custom: t.input, + }, + }, + reversed: []bool{false}, + collections: nil, + } + } + + // interpret the AST + if strings.HasPrefix(ast.name, COMPOSITE_TYPE) { + count := len(ast.params) + + // look for a collections param + last := ast.params[count-1] + collections := map[string]TypeInfo{} + if strings.HasPrefix(last.class.name, COLLECTION_TYPE) { + count-- + + for _, param := range last.class.params { + // decode the name + var name string + decoded, err := hex.DecodeString(*param.name) + if err != nil { + t.logger.Printf( + "Error parsing type '%s', contains collection name '%s' with an invalid format: %v", + t.input, + *param.name, + err, + ) + // just use the provided name + name = *param.name + } else { + name = string(decoded) + } + collections[name] = param.class.asTypeInfo() + } + } + + types := make([]TypeInfo, count) + reversed := make([]bool, count) + + for i, param := range ast.params[:count] { + class := param.class + reversed[i] = strings.HasPrefix(class.name, REVERSED_TYPE) + if reversed[i] { + class = class.params[0].class + } + types[i] = class.asTypeInfo() + } + + return typeParserResult{ + isComposite: true, + types: types, + reversed: reversed, + collections: collections, + } + } else { + // not composite, so one type + class := *ast + 
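+ // e.g. "org.apache.cassandra.db.marshal.ReversedType(org.apache.cassandra.db.marshal.UTF8Type)"
+ // unwraps to the inner UTF8Type with reversed reported as true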
reversed := strings.HasPrefix(class.name, REVERSED_TYPE) + if reversed { + class = class.params[0].class + } + typeInfo := class.asTypeInfo() + + return typeParserResult{ + isComposite: false, + types: []TypeInfo{typeInfo}, + reversed: []bool{reversed}, + } + } +} + +func (class *typeParserClassNode) asTypeInfo() TypeInfo { + if strings.HasPrefix(class.name, LIST_TYPE) { + elem := class.params[0].class.asTypeInfo() + return CollectionType{ + NativeType: NativeType{ + typ: TypeList, + }, + Elem: elem, + } + } + if strings.HasPrefix(class.name, SET_TYPE) { + elem := class.params[0].class.asTypeInfo() + return CollectionType{ + NativeType: NativeType{ + typ: TypeSet, + }, + Elem: elem, + } + } + if strings.HasPrefix(class.name, MAP_TYPE) { + key := class.params[0].class.asTypeInfo() + elem := class.params[1].class.asTypeInfo() + return CollectionType{ + NativeType: NativeType{ + typ: TypeMap, + }, + Key: key, + Elem: elem, + } + } + + // must be a simple type or custom type + info := NativeType{typ: getApacheCassandraType(class.name)} + if info.typ == TypeCustom { + // add the entire class definition + info.custom = class.input + } + return info +} + +// CLASS := ID [ PARAMS ] +func (t *typeParser) parseClassNode() (node *typeParserClassNode, ok bool) { + t.skipWhitespace() + + startIndex := t.index + + name, ok := t.nextIdentifier() + if !ok { + return nil, false + } + + params, ok := t.parseParamNodes() + if !ok { + return nil, false + } + + endIndex := t.index + + node = &typeParserClassNode{ + name: name, + params: params, + input: t.input[startIndex:endIndex], + } + return node, true +} + +// PARAMS := "(" PARAM { "," PARAM } ")" +// PARAM := [ PARAM_NAME ":" ] CLASS +// PARAM_NAME := ID +func (t *typeParser) parseParamNodes() (params []typeParserParamNode, ok bool) { + t.skipWhitespace() + + // the params are optional + if t.index == len(t.input) || t.input[t.index] != '(' { + return nil, true + } + + params = []typeParserParamNode{} + + // consume the '(' + t.index++ + + t.skipWhitespace() + + for t.input[t.index] != ')' { + // look for a named param, but if no colon, then we want to backup + backupIndex := t.index + + // name will be a hex encoded version of a utf-8 string + name, ok := t.nextIdentifier() + if !ok { + return nil, false + } + hasName := true + + // TODO handle '=>' used for DynamicCompositeType + + t.skipWhitespace() + + if t.input[t.index] == ':' { + // there is a name for this parameter + + // consume the ':' + t.index++ + + t.skipWhitespace() + } else { + // no name, backup + hasName = false + t.index = backupIndex + } + + // parse the next full parameter + classNode, ok := t.parseClassNode() + if !ok { + return nil, false + } + + if hasName { + params = append( + params, + typeParserParamNode{name: &name, class: *classNode}, + ) + } else { + params = append( + params, + typeParserParamNode{class: *classNode}, + ) + } + + t.skipWhitespace() + + if t.input[t.index] == ',' { + // consume the comma + t.index++ + + t.skipWhitespace() + } + } + + // consume the ')' + t.index++ + + return params, true +} + +func (t *typeParser) skipWhitespace() { + for t.index < len(t.input) && isWhitespaceChar(t.input[t.index]) { + t.index++ + } +} + +func isWhitespaceChar(c byte) bool { + return c == ' ' || c == '\n' || c == '\t' +} + +// ID := LETTER { LETTER } +// LETTER := "0"..."9" | "a"..."z" | "A"..."Z" | "-" | "+" | "." 
| "_" | "&" +func (t *typeParser) nextIdentifier() (id string, found bool) { + startIndex := t.index + for t.index < len(t.input) && isIdentifierChar(t.input[t.index]) { + t.index++ + } + if startIndex == t.index { + return "", false + } + return t.input[startIndex:t.index], true +} + +func isIdentifierChar(c byte) bool { + return (c >= '0' && c <= '9') || + (c >= 'a' && c <= 'z') || + (c >= 'A' && c <= 'Z') || + c == '-' || + c == '+' || + c == '.' || + c == '_' || + c == '&' +} diff --git a/vendor/github.com/gocql/gocql/policies.go b/vendor/github.com/gocql/gocql/policies.go new file mode 100644 index 000000000..ffb4710e5 --- /dev/null +++ b/vendor/github.com/gocql/gocql/policies.go @@ -0,0 +1,1090 @@ +// Copyright (c) 2012 The gocql Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package gocql + +//This file will be the future home for more policies + +import ( + "context" + "errors" + "fmt" + "math" + "math/rand" + "net" + "sync" + "sync/atomic" + "time" + + "github.com/hailocab/go-hostpool" +) + +// cowHostList implements a copy on write host list, its equivalent type is []*HostInfo +type cowHostList struct { + list atomic.Value + mu sync.Mutex +} + +func (c *cowHostList) String() string { + return fmt.Sprintf("%+v", c.get()) +} + +func (c *cowHostList) get() []*HostInfo { + // TODO(zariel): should we replace this with []*HostInfo? + l, ok := c.list.Load().(*[]*HostInfo) + if !ok { + return nil + } + return *l +} + +// add will add a host if it not already in the list +func (c *cowHostList) add(host *HostInfo) bool { + c.mu.Lock() + l := c.get() + + if n := len(l); n == 0 { + l = []*HostInfo{host} + } else { + newL := make([]*HostInfo, n+1) + for i := 0; i < n; i++ { + if host.Equal(l[i]) { + c.mu.Unlock() + return false + } + newL[i] = l[i] + } + newL[n] = host + l = newL + } + + c.list.Store(&l) + c.mu.Unlock() + return true +} + +func (c *cowHostList) remove(ip net.IP) bool { + c.mu.Lock() + l := c.get() + size := len(l) + if size == 0 { + c.mu.Unlock() + return false + } + + found := false + newL := make([]*HostInfo, 0, size) + for i := 0; i < len(l); i++ { + if !l[i].ConnectAddress().Equal(ip) { + newL = append(newL, l[i]) + } else { + found = true + } + } + + if !found { + c.mu.Unlock() + return false + } + + newL = newL[: size-1 : size-1] + c.list.Store(&newL) + c.mu.Unlock() + + return true +} + +// RetryableQuery is an interface that represents a query or batch statement that +// exposes the correct functions for the retry policy logic to evaluate correctly. +type RetryableQuery interface { + Attempts() int + SetConsistency(c Consistency) + GetConsistency() Consistency + Context() context.Context +} + +type RetryType uint16 + +const ( + Retry RetryType = 0x00 // retry on same connection + RetryNextHost RetryType = 0x01 // retry on another connection + Ignore RetryType = 0x02 // ignore error and return result + Rethrow RetryType = 0x03 // raise error and stop retrying +) + +// ErrUnknownRetryType is returned if the retry policy returns a retry type +// unknown to the query executor. +var ErrUnknownRetryType = errors.New("unknown retry type returned by retry policy") + +// RetryPolicy interface is used by gocql to determine if a query can be attempted +// again after a retryable error has been received. The interface allows gocql +// users to implement their own logic to determine if a query can be attempted +// again. 
+// +// See SimpleRetryPolicy as an example of implementing and using a RetryPolicy +// interface. +type RetryPolicy interface { + Attempt(RetryableQuery) bool + GetRetryType(error) RetryType +} + +// SimpleRetryPolicy has simple logic for attempting a query a fixed number of times. +// +// See below for examples of usage: +// +// //Assign to the cluster +// cluster.RetryPolicy = &gocql.SimpleRetryPolicy{NumRetries: 3} +// +// //Assign to a query +// query.RetryPolicy(&gocql.SimpleRetryPolicy{NumRetries: 1}) +type SimpleRetryPolicy struct { + NumRetries int //Number of times to retry a query +} + +// Attempt tells gocql to attempt the query again based on query.Attempts being less +// than the NumRetries defined in the policy. +func (s *SimpleRetryPolicy) Attempt(q RetryableQuery) bool { + return q.Attempts() <= s.NumRetries +} + +func (s *SimpleRetryPolicy) GetRetryType(err error) RetryType { + return RetryNextHost +} + +// ExponentialBackoffRetryPolicy sleeps between attempts +type ExponentialBackoffRetryPolicy struct { + NumRetries int + Min, Max time.Duration +} + +func (e *ExponentialBackoffRetryPolicy) Attempt(q RetryableQuery) bool { + if q.Attempts() > e.NumRetries { + return false + } + time.Sleep(e.napTime(q.Attempts())) + return true +} + +// used to calculate exponentially growing time +func getExponentialTime(min time.Duration, max time.Duration, attempts int) time.Duration { + if min <= 0 { + min = 100 * time.Millisecond + } + if max <= 0 { + max = 10 * time.Second + } + minFloat := float64(min) + napDuration := minFloat * math.Pow(2, float64(attempts-1)) + // add some jitter + napDuration += rand.Float64()*minFloat - (minFloat / 2) + if napDuration > float64(max) { + return time.Duration(max) + } + return time.Duration(napDuration) +} + +func (e *ExponentialBackoffRetryPolicy) GetRetryType(err error) RetryType { + return RetryNextHost +} + +// DowngradingConsistencyRetryPolicy: Next retry will be with the next consistency level +// provided in the slice +// +// On a read timeout: the operation is retried with the next provided consistency +// level. +// +// On a write timeout: if the operation is an :attr:`~.UNLOGGED_BATCH` +// and at least one replica acknowledged the write, the operation is +// retried with the next consistency level. Furthermore, for other +// write types, if at least one replica acknowledged the write, the +// timeout is ignored. +// +// On an unavailable exception: if at least one replica is alive, the +// operation is retried with the next provided consistency level. 
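+//
+// A usage sketch (the consistency levels chosen here are illustrative):
+//
+//	cluster.RetryPolicy = &gocql.DowngradingConsistencyRetryPolicy{
+//		ConsistencyLevelsToTry: []gocql.Consistency{gocql.Quorum, gocql.One},
+//	}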
+ +type DowngradingConsistencyRetryPolicy struct { + ConsistencyLevelsToTry []Consistency +} + +func (d *DowngradingConsistencyRetryPolicy) Attempt(q RetryableQuery) bool { + currentAttempt := q.Attempts() + + if currentAttempt > len(d.ConsistencyLevelsToTry) { + return false + } else if currentAttempt > 0 { + q.SetConsistency(d.ConsistencyLevelsToTry[currentAttempt-1]) + } + return true +} + +func (d *DowngradingConsistencyRetryPolicy) GetRetryType(err error) RetryType { + switch t := err.(type) { + case *RequestErrUnavailable: + if t.Alive > 0 { + return Retry + } + return Rethrow + case *RequestErrWriteTimeout: + if t.WriteType == "SIMPLE" || t.WriteType == "BATCH" || t.WriteType == "COUNTER" { + if t.Received > 0 { + return Ignore + } + return Rethrow + } + if t.WriteType == "UNLOGGED_BATCH" { + return Retry + } + return Rethrow + case *RequestErrReadTimeout: + return Retry + default: + return RetryNextHost + } +} + +func (e *ExponentialBackoffRetryPolicy) napTime(attempts int) time.Duration { + return getExponentialTime(e.Min, e.Max, attempts) +} + +type HostStateNotifier interface { + AddHost(host *HostInfo) + RemoveHost(host *HostInfo) + HostUp(host *HostInfo) + HostDown(host *HostInfo) +} + +type KeyspaceUpdateEvent struct { + Keyspace string + Change string +} + +type HostTierer interface { + // HostTier returns an integer specifying how far a host is from the client. + // Tier must start at 0. + // The value is used to prioritize closer hosts during host selection. + // For example this could be: + // 0 - local rack, 1 - local DC, 2 - remote DC + // or: + // 0 - local DC, 1 - remote DC + HostTier(host *HostInfo) uint + + // This function returns the maximum possible host tier + MaxHostTier() uint +} + +// HostSelectionPolicy is an interface for selecting +// the most appropriate host to execute a given query. +// HostSelectionPolicy instances cannot be shared between sessions. +type HostSelectionPolicy interface { + HostStateNotifier + SetPartitioner + KeyspaceChanged(KeyspaceUpdateEvent) + Init(*Session) + IsLocal(host *HostInfo) bool + // Pick returns an iteration function over selected hosts. + // Multiple attempts of a single query execution won't call the returned NextHost function concurrently, + // so it's safe to have internal state without additional synchronization as long as every call to Pick returns + // a different instance of NextHost. + Pick(ExecutableQuery) NextHost +} + +// SelectedHost is an interface returned when picking a host from a host +// selection policy. +type SelectedHost interface { + Info() *HostInfo + Mark(error) +} + +type selectedHost HostInfo + +func (host *selectedHost) Info() *HostInfo { + return (*HostInfo)(host) +} + +func (host *selectedHost) Mark(err error) {} + +// NextHost is an iteration function over picked hosts +type NextHost func() SelectedHost + +// RoundRobinHostPolicy is a round-robin load balancing policy, where each host +// is tried sequentially for each query. 
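+//
+// It is commonly used on its own or as the fallback of a token aware policy,
+// for example (a sketch):
+//
+//	cluster.PoolConfig.HostSelectionPolicy = gocql.TokenAwareHostPolicy(gocql.RoundRobinHostPolicy())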
+func RoundRobinHostPolicy() HostSelectionPolicy { + return &roundRobinHostPolicy{} +} + +type roundRobinHostPolicy struct { + hosts cowHostList + lastUsedHostIdx uint64 +} + +func (r *roundRobinHostPolicy) IsLocal(*HostInfo) bool { return true } +func (r *roundRobinHostPolicy) KeyspaceChanged(KeyspaceUpdateEvent) {} +func (r *roundRobinHostPolicy) SetPartitioner(partitioner string) {} +func (r *roundRobinHostPolicy) Init(*Session) {} + +func (r *roundRobinHostPolicy) Pick(qry ExecutableQuery) NextHost { + nextStartOffset := atomic.AddUint64(&r.lastUsedHostIdx, 1) + return roundRobbin(int(nextStartOffset), r.hosts.get()) +} + +func (r *roundRobinHostPolicy) AddHost(host *HostInfo) { + r.hosts.add(host) +} + +func (r *roundRobinHostPolicy) RemoveHost(host *HostInfo) { + r.hosts.remove(host.ConnectAddress()) +} + +func (r *roundRobinHostPolicy) HostUp(host *HostInfo) { + r.AddHost(host) +} + +func (r *roundRobinHostPolicy) HostDown(host *HostInfo) { + r.RemoveHost(host) +} + +func ShuffleReplicas() func(*tokenAwareHostPolicy) { + return func(t *tokenAwareHostPolicy) { + t.shuffleReplicas = true + } +} + +// NonLocalReplicasFallback enables fallback to replicas that are not considered local. +// +// TokenAwareHostPolicy used with DCAwareHostPolicy fallback first selects replicas by partition key in local DC, then +// falls back to other nodes in the local DC. Enabling NonLocalReplicasFallback causes TokenAwareHostPolicy +// to first select replicas by partition key in local DC, then replicas by partition key in remote DCs and fall back +// to other nodes in local DC. +func NonLocalReplicasFallback() func(policy *tokenAwareHostPolicy) { + return func(t *tokenAwareHostPolicy) { + t.nonLocalReplicasFallback = true + } +} + +// TokenAwareHostPolicy is a token aware host selection policy, where hosts are +// selected based on the partition key, so queries are sent to the host which +// owns the partition. Fallback is used when routing information is not available. +func TokenAwareHostPolicy(fallback HostSelectionPolicy, opts ...func(*tokenAwareHostPolicy)) HostSelectionPolicy { + p := &tokenAwareHostPolicy{fallback: fallback} + for _, opt := range opts { + opt(p) + } + return p +} + +// clusterMeta holds metadata about cluster topology. +// It is used inside atomic.Value and shallow copies are used when replacing it, +// so fields should not be modified in-place. Instead, to modify a field a copy of the field should be made +// and the pointer in clusterMeta updated to point to the new value. +type clusterMeta struct { + // replicas is map[keyspace]map[token]hosts + replicas map[string]tokenRingReplicas + tokenRing *tokenRing +} + +type tokenAwareHostPolicy struct { + fallback HostSelectionPolicy + getKeyspaceMetadata func(keyspace string) (*KeyspaceMetadata, error) + getKeyspaceName func() string + + shuffleReplicas bool + nonLocalReplicasFallback bool + + // mu protects writes to hosts, partitioner, metadata. + // reads can be unlocked as long as they are not used for updating state later. + mu sync.Mutex + hosts cowHostList + partitioner string + metadata atomic.Value // *clusterMeta + + logger StdLogger +} + +func (t *tokenAwareHostPolicy) Init(s *Session) { + t.mu.Lock() + defer t.mu.Unlock() + if t.getKeyspaceMetadata != nil { + // Init was already called. + // See https://github.com/scylladb/gocql/issues/94. 
+ panic("sharing token aware host selection policy between sessions is not supported") + } + t.getKeyspaceMetadata = s.KeyspaceMetadata + t.getKeyspaceName = func() string { return s.cfg.Keyspace } + t.logger = s.logger +} + +func (t *tokenAwareHostPolicy) IsLocal(host *HostInfo) bool { + return t.fallback.IsLocal(host) +} + +func (t *tokenAwareHostPolicy) KeyspaceChanged(update KeyspaceUpdateEvent) { + t.mu.Lock() + defer t.mu.Unlock() + meta := t.getMetadataForUpdate() + t.updateReplicas(meta, update.Keyspace) + t.metadata.Store(meta) +} + +// updateReplicas updates replicas in clusterMeta. +// It must be called with t.mu mutex locked. +// meta must not be nil and it's replicas field will be updated. +func (t *tokenAwareHostPolicy) updateReplicas(meta *clusterMeta, keyspace string) { + newReplicas := make(map[string]tokenRingReplicas, len(meta.replicas)) + + ks, err := t.getKeyspaceMetadata(keyspace) + if err == nil { + strat := getStrategy(ks, t.logger) + if strat != nil { + if meta != nil && meta.tokenRing != nil { + newReplicas[keyspace] = strat.replicaMap(meta.tokenRing) + } + } + } + + for ks, replicas := range meta.replicas { + if ks != keyspace { + newReplicas[ks] = replicas + } + } + + meta.replicas = newReplicas +} + +func (t *tokenAwareHostPolicy) SetPartitioner(partitioner string) { + t.mu.Lock() + defer t.mu.Unlock() + + if t.partitioner != partitioner { + t.fallback.SetPartitioner(partitioner) + t.partitioner = partitioner + meta := t.getMetadataForUpdate() + meta.resetTokenRing(t.partitioner, t.hosts.get(), t.logger) + t.updateReplicas(meta, t.getKeyspaceName()) + t.metadata.Store(meta) + } +} + +func (t *tokenAwareHostPolicy) AddHost(host *HostInfo) { + t.mu.Lock() + if t.hosts.add(host) { + meta := t.getMetadataForUpdate() + meta.resetTokenRing(t.partitioner, t.hosts.get(), t.logger) + t.updateReplicas(meta, t.getKeyspaceName()) + t.metadata.Store(meta) + } + t.mu.Unlock() + + t.fallback.AddHost(host) +} + +func (t *tokenAwareHostPolicy) AddHosts(hosts []*HostInfo) { + t.mu.Lock() + + for _, host := range hosts { + t.hosts.add(host) + } + + meta := t.getMetadataForUpdate() + meta.resetTokenRing(t.partitioner, t.hosts.get(), t.logger) + t.updateReplicas(meta, t.getKeyspaceName()) + t.metadata.Store(meta) + + t.mu.Unlock() + + for _, host := range hosts { + t.fallback.AddHost(host) + } +} + +func (t *tokenAwareHostPolicy) RemoveHost(host *HostInfo) { + t.mu.Lock() + if t.hosts.remove(host.ConnectAddress()) { + meta := t.getMetadataForUpdate() + meta.resetTokenRing(t.partitioner, t.hosts.get(), t.logger) + t.updateReplicas(meta, t.getKeyspaceName()) + t.metadata.Store(meta) + } + t.mu.Unlock() + + t.fallback.RemoveHost(host) +} + +func (t *tokenAwareHostPolicy) HostUp(host *HostInfo) { + t.fallback.HostUp(host) +} + +func (t *tokenAwareHostPolicy) HostDown(host *HostInfo) { + t.fallback.HostDown(host) +} + +// getMetadataReadOnly returns current cluster metadata. +// Metadata uses copy on write, so the returned value should be only used for reading. +// To obtain a copy that could be updated, use getMetadataForUpdate instead. +func (t *tokenAwareHostPolicy) getMetadataReadOnly() *clusterMeta { + meta, _ := t.metadata.Load().(*clusterMeta) + return meta +} + +// getMetadataForUpdate returns clusterMeta suitable for updating. +// It is a SHALLOW copy of current metadata in case it was already set or new empty clusterMeta otherwise. +// This function should be called with t.mu mutex locked and the mutex should not be released before +// storing the new metadata. 
+func (t *tokenAwareHostPolicy) getMetadataForUpdate() *clusterMeta { + metaReadOnly := t.getMetadataReadOnly() + meta := new(clusterMeta) + if metaReadOnly != nil { + *meta = *metaReadOnly + } + return meta +} + +// resetTokenRing creates a new tokenRing. +// It must be called with t.mu locked. +func (m *clusterMeta) resetTokenRing(partitioner string, hosts []*HostInfo, logger StdLogger) { + if partitioner == "" { + // partitioner not yet set + return + } + + // create a new token ring + tokenRing, err := newTokenRing(partitioner, hosts) + if err != nil { + logger.Printf("Unable to update the token ring due to error: %s", err) + return + } + + // replace the token ring + m.tokenRing = tokenRing +} + +func (t *tokenAwareHostPolicy) Pick(qry ExecutableQuery) NextHost { + if qry == nil { + return t.fallback.Pick(qry) + } + + routingKey, err := qry.GetRoutingKey() + if err != nil { + return t.fallback.Pick(qry) + } else if routingKey == nil { + return t.fallback.Pick(qry) + } + + meta := t.getMetadataReadOnly() + if meta == nil || meta.tokenRing == nil { + return t.fallback.Pick(qry) + } + + token := meta.tokenRing.partitioner.Hash(routingKey) + ht := meta.replicas[qry.Keyspace()].replicasFor(token) + + var replicas []*HostInfo + if ht == nil { + host, _ := meta.tokenRing.GetHostForToken(token) + replicas = []*HostInfo{host} + } else { + replicas = ht.hosts + if t.shuffleReplicas { + replicas = shuffleHosts(replicas) + } + } + + var ( + fallbackIter NextHost + i, j, k int + remote [][]*HostInfo + tierer HostTierer + tiererOk bool + maxTier uint + ) + + if tierer, tiererOk = t.fallback.(HostTierer); tiererOk { + maxTier = tierer.MaxHostTier() + } else { + maxTier = 1 + } + + if t.nonLocalReplicasFallback { + remote = make([][]*HostInfo, maxTier) + } + + used := make(map[*HostInfo]bool, len(replicas)) + return func() SelectedHost { + for i < len(replicas) { + h := replicas[i] + i++ + + var tier uint + if tiererOk { + tier = tierer.HostTier(h) + } else if t.fallback.IsLocal(h) { + tier = 0 + } else { + tier = 1 + } + + if tier != 0 { + if t.nonLocalReplicasFallback { + remote[tier-1] = append(remote[tier-1], h) + } + continue + } + + if h.IsUp() { + used[h] = true + return (*selectedHost)(h) + } + } + + if t.nonLocalReplicasFallback { + for j < len(remote) && k < len(remote[j]) { + h := remote[j][k] + k++ + + if k >= len(remote[j]) { + j++ + k = 0 + } + + if h.IsUp() { + used[h] = true + return (*selectedHost)(h) + } + } + } + + if fallbackIter == nil { + // fallback + fallbackIter = t.fallback.Pick(qry) + } + + // filter the token aware selected hosts from the fallback hosts + for fallbackHost := fallbackIter(); fallbackHost != nil; fallbackHost = fallbackIter() { + if !used[fallbackHost.Info()] { + used[fallbackHost.Info()] = true + return fallbackHost + } + } + + return nil + } +} + +// HostPoolHostPolicy is a host policy which uses the bitly/go-hostpool library +// to distribute queries between hosts and prevent sending queries to +// unresponsive hosts. When creating the host pool that is passed to the policy +// use an empty slice of hosts as the hostpool will be populated later by gocql. 
+// See below for examples of usage: +// +// // Create host selection policy using a simple host pool +// cluster.PoolConfig.HostSelectionPolicy = HostPoolHostPolicy(hostpool.New(nil)) +// +// // Create host selection policy using an epsilon greedy pool +// cluster.PoolConfig.HostSelectionPolicy = HostPoolHostPolicy( +// hostpool.NewEpsilonGreedy(nil, 0, &hostpool.LinearEpsilonValueCalculator{}), +// ) +func HostPoolHostPolicy(hp hostpool.HostPool) HostSelectionPolicy { + return &hostPoolHostPolicy{hostMap: map[string]*HostInfo{}, hp: hp} +} + +type hostPoolHostPolicy struct { + hp hostpool.HostPool + mu sync.RWMutex + hostMap map[string]*HostInfo +} + +func (r *hostPoolHostPolicy) Init(*Session) {} +func (r *hostPoolHostPolicy) KeyspaceChanged(KeyspaceUpdateEvent) {} +func (r *hostPoolHostPolicy) SetPartitioner(string) {} +func (r *hostPoolHostPolicy) IsLocal(*HostInfo) bool { return true } + +func (r *hostPoolHostPolicy) SetHosts(hosts []*HostInfo) { + peers := make([]string, len(hosts)) + hostMap := make(map[string]*HostInfo, len(hosts)) + + for i, host := range hosts { + ip := host.ConnectAddress().String() + peers[i] = ip + hostMap[ip] = host + } + + r.mu.Lock() + r.hp.SetHosts(peers) + r.hostMap = hostMap + r.mu.Unlock() +} + +func (r *hostPoolHostPolicy) AddHost(host *HostInfo) { + ip := host.ConnectAddress().String() + + r.mu.Lock() + defer r.mu.Unlock() + + // If the host addr is present and isn't nil return + if h, ok := r.hostMap[ip]; ok && h != nil { + return + } + // otherwise, add the host to the map + r.hostMap[ip] = host + // and construct a new peer list to give to the HostPool + hosts := make([]string, 0, len(r.hostMap)) + for addr := range r.hostMap { + hosts = append(hosts, addr) + } + + r.hp.SetHosts(hosts) +} + +func (r *hostPoolHostPolicy) RemoveHost(host *HostInfo) { + ip := host.ConnectAddress().String() + + r.mu.Lock() + defer r.mu.Unlock() + + if _, ok := r.hostMap[ip]; !ok { + return + } + + delete(r.hostMap, ip) + hosts := make([]string, 0, len(r.hostMap)) + for _, host := range r.hostMap { + hosts = append(hosts, host.ConnectAddress().String()) + } + + r.hp.SetHosts(hosts) +} + +func (r *hostPoolHostPolicy) HostUp(host *HostInfo) { + r.AddHost(host) +} + +func (r *hostPoolHostPolicy) HostDown(host *HostInfo) { + r.RemoveHost(host) +} + +func (r *hostPoolHostPolicy) Pick(qry ExecutableQuery) NextHost { + return func() SelectedHost { + r.mu.RLock() + defer r.mu.RUnlock() + + if len(r.hostMap) == 0 { + return nil + } + + hostR := r.hp.Get() + host, ok := r.hostMap[hostR.Host()] + if !ok { + return nil + } + + return selectedHostPoolHost{ + policy: r, + info: host, + hostR: hostR, + } + } +} + +// selectedHostPoolHost is a host returned by the hostPoolHostPolicy and +// implements the SelectedHost interface +type selectedHostPoolHost struct { + policy *hostPoolHostPolicy + info *HostInfo + hostR hostpool.HostPoolResponse +} + +func (host selectedHostPoolHost) Info() *HostInfo { + return host.info +} + +func (host selectedHostPoolHost) Mark(err error) { + ip := host.info.ConnectAddress().String() + + host.policy.mu.RLock() + defer host.policy.mu.RUnlock() + + if _, ok := host.policy.hostMap[ip]; !ok { + // host was removed between pick and mark + return + } + + host.hostR.Mark(err) +} + +type dcAwareRR struct { + local string + localHosts cowHostList + remoteHosts cowHostList + lastUsedHostIdx uint64 +} + +// DCAwareRoundRobinPolicy is a host selection policy which will prioritize and +// return hosts which are in the local datacentre before returning hosts in all +// other datacentres +func DCAwareRoundRobinPolicy(localDC string) HostSelectionPolicy { + return &dcAwareRR{local: localDC} +} + +func (d *dcAwareRR) Init(*Session) {} +func (d *dcAwareRR) KeyspaceChanged(KeyspaceUpdateEvent) {} +func (d *dcAwareRR) SetPartitioner(p string) {} + +func (d *dcAwareRR) IsLocal(host *HostInfo) bool { + return host.DataCenter() == d.local +} + +func (d *dcAwareRR) AddHost(host *HostInfo) { + if d.IsLocal(host) { + d.localHosts.add(host) + } else { + d.remoteHosts.add(host) + } +} + +func (d *dcAwareRR) RemoveHost(host *HostInfo) { + if d.IsLocal(host) { + d.localHosts.remove(host.ConnectAddress()) + } else { + d.remoteHosts.remove(host.ConnectAddress()) + } +} + +func (d *dcAwareRR) HostUp(host *HostInfo) { d.AddHost(host) } +func (d *dcAwareRR) HostDown(host *HostInfo) { d.RemoveHost(host) } + +// This function is supposed to be called in a fashion +// roundRobbin(offset, hostsPriority1, hostsPriority2, hostsPriority3 ... ) +// +// E.g. for DC-naive strategy: +// roundRobbin(offset, allHosts) +// +// For tiered and DC-aware strategy: +// roundRobbin(offset, localHosts, remoteHosts) +func roundRobbin(shift int, hosts ...[]*HostInfo) NextHost { + currentLayer := 0 + currentlyObserved := 0 + + return func() SelectedHost { + + // iterate over layers + for { + if currentLayer == len(hosts) { + return nil + } + + currentLayerSize := len(hosts[currentLayer]) + + // iterate over hosts within a layer + for { + currentlyObserved++ + if currentlyObserved > currentLayerSize { + currentLayer++ + currentlyObserved = 0 + break + } + + h := hosts[currentLayer][(shift+currentlyObserved)%currentLayerSize] + + if h.IsUp() { + return (*selectedHost)(h) + } + + } + } + } +} + +func (d *dcAwareRR) Pick(q ExecutableQuery) NextHost { + nextStartOffset := atomic.AddUint64(&d.lastUsedHostIdx, 1) + return roundRobbin(int(nextStartOffset), d.localHosts.get(), d.remoteHosts.get()) +} + +// RackAwareRoundRobinPolicy is a host selection policy which will prioritize and +// return hosts which are in the local rack, before hosts in the local datacenter but +// a different rack, before hosts in all other datacentres + +type rackAwareRR struct { + // lastUsedHostIdx keeps the index of the last used host. + // It is accessed atomically and needs to be aligned to 64 bits, so we + // keep it first in the struct. Do not move it or add new struct members + // before it. 
+ lastUsedHostIdx uint64 + localDC string + localRack string + hosts []cowHostList +} + +func RackAwareRoundRobinPolicy(localDC string, localRack string) HostSelectionPolicy { + hosts := make([]cowHostList, 3) + return &rackAwareRR{localDC: localDC, localRack: localRack, hosts: hosts} +} + +func (d *rackAwareRR) Init(*Session) {} +func (d *rackAwareRR) KeyspaceChanged(KeyspaceUpdateEvent) {} +func (d *rackAwareRR) SetPartitioner(p string) {} + +func (d *rackAwareRR) MaxHostTier() uint { + return 2 +} + +func (d *rackAwareRR) HostTier(host *HostInfo) uint { + if host.DataCenter() == d.localDC { + if host.Rack() == d.localRack { + return 0 + } else { + return 1 + } + } else { + return 2 + } +} + +func (d *rackAwareRR) IsLocal(host *HostInfo) bool { + return d.HostTier(host) == 0 +} + +func (d *rackAwareRR) AddHost(host *HostInfo) { + dist := d.HostTier(host) + d.hosts[dist].add(host) +} + +func (d *rackAwareRR) RemoveHost(host *HostInfo) { + dist := d.HostTier(host) + d.hosts[dist].remove(host.ConnectAddress()) +} + +func (d *rackAwareRR) HostUp(host *HostInfo) { d.AddHost(host) } +func (d *rackAwareRR) HostDown(host *HostInfo) { d.RemoveHost(host) } + +func (d *rackAwareRR) Pick(q ExecutableQuery) NextHost { + nextStartOffset := atomic.AddUint64(&d.lastUsedHostIdx, 1) + return roundRobbin(int(nextStartOffset), d.hosts[0].get(), d.hosts[1].get(), d.hosts[2].get()) +} + +// ReadyPolicy defines a policy for when a HostSelectionPolicy can be used. After +// each host connects during session initialization, the Ready method will be +// called. If you only need a single Host to be up you can wrap a +// HostSelectionPolicy policy with SingleHostReadyPolicy. +type ReadyPolicy interface { + Ready() bool +} + +// SingleHostReadyPolicy wraps a HostSelectionPolicy and returns Ready after a +// single host has been added via HostUp +func SingleHostReadyPolicy(p HostSelectionPolicy) *singleHostReadyPolicy { + return &singleHostReadyPolicy{ + HostSelectionPolicy: p, + } +} + +type singleHostReadyPolicy struct { + HostSelectionPolicy + ready bool + readyMux sync.Mutex +} + +func (s *singleHostReadyPolicy) HostUp(host *HostInfo) { + s.HostSelectionPolicy.HostUp(host) + + s.readyMux.Lock() + s.ready = true + s.readyMux.Unlock() +} + +func (s *singleHostReadyPolicy) Ready() bool { + s.readyMux.Lock() + ready := s.ready + s.readyMux.Unlock() + if !ready { + return false + } + + // in case the wrapped policy is also a ReadyPolicy, defer to that + if rdy, ok := s.HostSelectionPolicy.(ReadyPolicy); ok { + return rdy.Ready() + } + return true +} + +// ConvictionPolicy interface is used by gocql to determine if a host should be +// marked as DOWN based on the error and host info +type ConvictionPolicy interface { + // Implementations should return `true` if the host should be convicted, `false` otherwise. + AddFailure(error error, host *HostInfo) bool + //Implementations should clear out any convictions or state regarding the host. + Reset(host *HostInfo) +} + +// SimpleConvictionPolicy implements a ConvictionPolicy which convicts all hosts +// regardless of error +type SimpleConvictionPolicy struct { +} + +func (e *SimpleConvictionPolicy) AddFailure(error error, host *HostInfo) bool { + return true +} + +func (e *SimpleConvictionPolicy) Reset(host *HostInfo) {} + +// ReconnectionPolicy interface is used by gocql to determine if reconnection +// can be attempted after connection error. The interface allows gocql users +// to implement their own logic to determine how to attempt reconnection. 
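+//
+// A configuration sketch (the intervals and retry count are illustrative):
+//
+//	cluster.ReconnectionPolicy = &gocql.ExponentialReconnectionPolicy{
+//		MaxRetries:      10,
+//		InitialInterval: time.Second,
+//		MaxInterval:     32 * time.Second,
+//	}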
+type ReconnectionPolicy interface { + GetInterval(currentRetry int) time.Duration + GetMaxRetries() int +} + +// ConstantReconnectionPolicy has simple logic for returning a fixed reconnection interval. +// +// Examples of usage: +// +// cluster.ReconnectionPolicy = &gocql.ConstantReconnectionPolicy{MaxRetries: 10, Interval: 8 * time.Second} +type ConstantReconnectionPolicy struct { + MaxRetries int + Interval time.Duration +} + +func (c *ConstantReconnectionPolicy) GetInterval(currentRetry int) time.Duration { + return c.Interval +} + +func (c *ConstantReconnectionPolicy) GetMaxRetries() int { + return c.MaxRetries +} + +// ExponentialReconnectionPolicy returns a growing reconnection interval. +type ExponentialReconnectionPolicy struct { + MaxRetries int + InitialInterval time.Duration + MaxInterval time.Duration +} + +func (e *ExponentialReconnectionPolicy) GetInterval(currentRetry int) time.Duration { + max := e.MaxInterval + if max < e.InitialInterval { + max = math.MaxInt16 * time.Second + } + return getExponentialTime(e.InitialInterval, max, currentRetry) +} + +func (e *ExponentialReconnectionPolicy) GetMaxRetries() int { + return e.MaxRetries +} + +type SpeculativeExecutionPolicy interface { + Attempts() int + Delay() time.Duration +} + +type NonSpeculativeExecution struct{} + +func (sp NonSpeculativeExecution) Attempts() int { return 0 } // No additional attempts +func (sp NonSpeculativeExecution) Delay() time.Duration { return 1 } // The delay. Must be positive to be used in a ticker. + +type SimpleSpeculativeExecution struct { + NumAttempts int + TimeoutDelay time.Duration +} + +func (sp *SimpleSpeculativeExecution) Attempts() int { return sp.NumAttempts } +func (sp *SimpleSpeculativeExecution) Delay() time.Duration { return sp.TimeoutDelay } diff --git a/vendor/github.com/gocql/gocql/prepared_cache.go b/vendor/github.com/gocql/gocql/prepared_cache.go new file mode 100644 index 000000000..bb7d6978a --- /dev/null +++ b/vendor/github.com/gocql/gocql/prepared_cache.go @@ -0,0 +1,77 @@ +package gocql + +import ( + "bytes" + "github.com/gocql/gocql/internal/lru" + "sync" +) + +const defaultMaxPreparedStmts = 1000 + +// preparedLRU is the prepared statement cache +type preparedLRU struct { + mu sync.Mutex + lru *lru.Cache +} + +func (p *preparedLRU) clear() { + p.mu.Lock() + defer p.mu.Unlock() + + for p.lru.Len() > 0 { + p.lru.RemoveOldest() + } +} + +func (p *preparedLRU) add(key string, val *inflightPrepare) { + p.mu.Lock() + defer p.mu.Unlock() + p.lru.Add(key, val) +} + +func (p *preparedLRU) remove(key string) bool { + p.mu.Lock() + defer p.mu.Unlock() + return p.lru.Remove(key) +} + +func (p *preparedLRU) execIfMissing(key string, fn func(lru *lru.Cache) *inflightPrepare) (*inflightPrepare, bool) { + p.mu.Lock() + defer p.mu.Unlock() + + val, ok := p.lru.Get(key) + if ok { + return val.(*inflightPrepare), true + } + + return fn(p.lru), false +} + +func (p *preparedLRU) keyFor(hostID, keyspace, statement string) string { + // TODO: we should just use a struct for the key in the map + return hostID + keyspace + statement +} + +func (p *preparedLRU) evictPreparedID(key string, id []byte) { + p.mu.Lock() + defer p.mu.Unlock() + + val, ok := p.lru.Get(key) + if !ok { + return + } + + ifp, ok := val.(*inflightPrepare) + if !ok { + return + } + + select { + case <-ifp.done: + if bytes.Equal(id, ifp.preparedStatment.id) { + p.lru.Remove(key) + } + default: + } + +} diff --git a/vendor/github.com/gocql/gocql/query_executor.go b/vendor/github.com/gocql/gocql/query_executor.go new file 
mode 100644
index 000000000..d5b53b0c8
--- /dev/null
+++ b/vendor/github.com/gocql/gocql/query_executor.go
@@ -0,0 +1,180 @@
+package gocql
+
+import (
+	"context"
+	"sync"
+	"time"
+)
+
+type ExecutableQuery interface {
+	borrowForExecution()    // Used to ensure that the query stays alive for lifetime of a particular execution goroutine.
+	releaseAfterExecution() // Used when a goroutine finishes its execution attempts, either with ok result or an error.
+	execute(ctx context.Context, conn *Conn) *Iter
+	attempt(keyspace string, end, start time.Time, iter *Iter, host *HostInfo)
+	retryPolicy() RetryPolicy
+	speculativeExecutionPolicy() SpeculativeExecutionPolicy
+	GetRoutingKey() ([]byte, error)
+	Keyspace() string
+	Table() string
+	IsIdempotent() bool
+
+	withContext(context.Context) ExecutableQuery
+
+	RetryableQuery
+}
+
+type queryExecutor struct {
+	pool   *policyConnPool
+	policy HostSelectionPolicy
+}
+
+func (q *queryExecutor) attemptQuery(ctx context.Context, qry ExecutableQuery, conn *Conn) *Iter {
+	start := time.Now()
+	iter := qry.execute(ctx, conn)
+	end := time.Now()
+
+	qry.attempt(q.pool.keyspace, end, start, iter, conn.host)
+
+	return iter
+}
+
+func (q *queryExecutor) speculate(ctx context.Context, qry ExecutableQuery, sp SpeculativeExecutionPolicy,
+	hostIter NextHost, results chan *Iter) *Iter {
+	ticker := time.NewTicker(sp.Delay())
+	defer ticker.Stop()
+
+	for i := 0; i < sp.Attempts(); i++ {
+		select {
+		case <-ticker.C:
+			qry.borrowForExecution() // ensure liveness in case of executing Query to prevent races with Query.Release().
+			go q.run(ctx, qry, hostIter, results)
+		case <-ctx.Done():
+			return &Iter{err: ctx.Err()}
+		case iter := <-results:
+			return iter
+		}
+	}
+
+	return nil
+}
+
+func (q *queryExecutor) executeQuery(qry ExecutableQuery) (*Iter, error) {
+	hostIter := q.policy.Pick(qry)
+
+	// If the query is not marked as idempotent, or no speculative attempts
+	// are configured, fall back to a plain, non-speculative execution.
+	sp := qry.speculativeExecutionPolicy()
+	if !qry.IsIdempotent() || sp.Attempts() == 0 {
+		return q.do(qry.Context(), qry, hostIter), nil
+	}
+
+	// When speculative execution is enabled, we could be accessing the host iterator from multiple goroutines below.
+	// To ensure we don't call it concurrently, we wrap the returned NextHost function here to synchronize access to it.
+	var mu sync.Mutex
+	origHostIter := hostIter
+	hostIter = func() SelectedHost {
+		mu.Lock()
+		defer mu.Unlock()
+		return origHostIter()
+	}
+
+	ctx, cancel := context.WithCancel(qry.Context())
+	defer cancel()
+
+	results := make(chan *Iter, 1)
+
+	// Launch the main execution
+	qry.borrowForExecution() // ensure liveness in case of executing Query to prevent races with Query.Release().
+	go q.run(ctx, qry, hostIter, results)
+
+	// The speculative executions are launched _in addition_ to the main
+	// execution, on a timer. So Speculation{2} would make 3 executions running
+	// in total.
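+	// Illustrative sketch (not code from this function): a caller opts in to
+	// speculation by marking the query idempotent and setting a policy, e.g.
+	//
+	//	qry := session.Query(stmt).
+	//		Idempotent(true).
+	//		SetSpeculativeExecutionPolicy(&SimpleSpeculativeExecution{
+	//			NumAttempts:  2,
+	//			TimeoutDelay: 100 * time.Millisecond,
+	//		})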
+	if iter := q.speculate(ctx, qry, sp, hostIter, results); iter != nil {
+		return iter, nil
+	}
+
+	select {
+	case iter := <-results:
+		return iter, nil
+	case <-ctx.Done():
+		return &Iter{err: ctx.Err()}, nil
+	}
+}
+
+func (q *queryExecutor) do(ctx context.Context, qry ExecutableQuery, hostIter NextHost) *Iter {
+	selectedHost := hostIter()
+	rt := qry.retryPolicy()
+
+	var lastErr error
+	var iter *Iter
+	for selectedHost != nil {
+		host := selectedHost.Info()
+		if host == nil || !host.IsUp() {
+			selectedHost = hostIter()
+			continue
+		}
+
+		pool, ok := q.pool.getPool(host)
+		if !ok {
+			selectedHost = hostIter()
+			continue
+		}
+
+		conn := pool.Pick()
+		if conn == nil {
+			selectedHost = hostIter()
+			continue
+		}
+
+		iter = q.attemptQuery(ctx, qry, conn)
+		iter.host = selectedHost.Info()
+		// Update host
+		switch iter.err {
+		case context.Canceled, context.DeadlineExceeded, ErrNotFound:
+			// these errors represent logical errors; they should not count
+			// toward removing a node from the pool
+			selectedHost.Mark(nil)
+			return iter
+		default:
+			selectedHost.Mark(iter.err)
+		}
+
+		// Exit if the query was successful
+		// or no retry policy defined or retry attempts were reached
+		if iter.err == nil || rt == nil || !rt.Attempt(qry) {
+			return iter
+		}
+		lastErr = iter.err
+
+		// If the query is unsuccessful, check the error with RetryPolicy to retry
+		switch rt.GetRetryType(iter.err) {
+		case Retry:
+			// retry on the same host
+			continue
+		case Rethrow, Ignore:
+			return iter
+		case RetryNextHost:
+			// retry on the next host
+			selectedHost = hostIter()
+			continue
+		default:
+			// Undefined? Return nil and error, this will panic in the requester
+			return &Iter{err: ErrUnknownRetryType}
+		}
+	}
+
+	if lastErr != nil {
+		return &Iter{err: lastErr}
+	}
+
+	return &Iter{err: ErrNoConnections}
+}
+
+func (q *queryExecutor) run(ctx context.Context, qry ExecutableQuery, hostIter NextHost, results chan<- *Iter) {
+	select {
+	case results <- q.do(ctx, qry, hostIter):
+	case <-ctx.Done():
+	}
+	qry.releaseAfterExecution()
+}
diff --git a/vendor/github.com/gocql/gocql/ring.go b/vendor/github.com/gocql/gocql/ring.go
new file mode 100644
index 000000000..5b77370a1
--- /dev/null
+++ b/vendor/github.com/gocql/gocql/ring.go
@@ -0,0 +1,143 @@
+package gocql
+
+import (
+	"fmt"
+	"sync"
+	"sync/atomic"
+)
+
+type ring struct {
+	// endpoints are the set of endpoints which the driver will attempt to connect
+	// to in the case it cannot reach any of its hosts. They are also used to
+	// bootstrap the initial connection.
+	endpoints []*HostInfo
+
+	mu sync.RWMutex
+	// hosts are the set of all hosts in the cassandra ring that we know of.
+	// key of map is host_id.
+	hosts map[string]*HostInfo
+	// hostIPToUUID maps host native address to host_id.
+	hostIPToUUID map[string]string
+
+	hostList []*HostInfo
+	pos      uint32
+
+	// TODO: we should store the ring metadata here also.
+} + +func (r *ring) rrHost() *HostInfo { + r.mu.RLock() + defer r.mu.RUnlock() + if len(r.hostList) == 0 { + return nil + } + + pos := int(atomic.AddUint32(&r.pos, 1) - 1) + return r.hostList[pos%len(r.hostList)] +} + +func (r *ring) getHostByIP(ip string) (*HostInfo, bool) { + r.mu.RLock() + defer r.mu.RUnlock() + hi, ok := r.hostIPToUUID[ip] + return r.hosts[hi], ok +} + +func (r *ring) getHost(hostID string) *HostInfo { + r.mu.RLock() + host := r.hosts[hostID] + r.mu.RUnlock() + return host +} + +func (r *ring) allHosts() []*HostInfo { + r.mu.RLock() + hosts := make([]*HostInfo, 0, len(r.hosts)) + for _, host := range r.hosts { + hosts = append(hosts, host) + } + r.mu.RUnlock() + return hosts +} + +func (r *ring) currentHosts() map[string]*HostInfo { + r.mu.RLock() + hosts := make(map[string]*HostInfo, len(r.hosts)) + for k, v := range r.hosts { + hosts[k] = v + } + r.mu.RUnlock() + return hosts +} + +func (r *ring) addOrUpdate(host *HostInfo) *HostInfo { + if existingHost, ok := r.addHostIfMissing(host); ok { + existingHost.update(host) + host = existingHost + } + return host +} + +func (r *ring) addHostIfMissing(host *HostInfo) (*HostInfo, bool) { + if host.invalidConnectAddr() { + panic(fmt.Sprintf("invalid host: %v", host)) + } + hostID := host.HostID() + + r.mu.Lock() + if r.hosts == nil { + r.hosts = make(map[string]*HostInfo) + } + if r.hostIPToUUID == nil { + r.hostIPToUUID = make(map[string]string) + } + + existing, ok := r.hosts[hostID] + if !ok { + r.hosts[hostID] = host + r.hostIPToUUID[host.nodeToNodeAddress().String()] = hostID + existing = host + r.hostList = append(r.hostList, host) + } + r.mu.Unlock() + return existing, ok +} + +func (r *ring) removeHost(hostID string) bool { + r.mu.Lock() + if r.hosts == nil { + r.hosts = make(map[string]*HostInfo) + } + if r.hostIPToUUID == nil { + r.hostIPToUUID = make(map[string]string) + } + + h, ok := r.hosts[hostID] + if ok { + for i, host := range r.hostList { + if host.HostID() == hostID { + r.hostList = append(r.hostList[:i], r.hostList[i+1:]...) + break + } + } + delete(r.hostIPToUUID, h.nodeToNodeAddress().String()) + } + delete(r.hosts, hostID) + r.mu.Unlock() + return ok +} + +type clusterMetadata struct { + mu sync.RWMutex + partitioner string +} + +func (c *clusterMetadata) setPartitioner(partitioner string) { + c.mu.Lock() + defer c.mu.Unlock() + + if c.partitioner != partitioner { + // TODO: update other things now + c.partitioner = partitioner + } +} diff --git a/vendor/github.com/gocql/gocql/session.go b/vendor/github.com/gocql/gocql/session.go new file mode 100644 index 000000000..0eac4cf0e --- /dev/null +++ b/vendor/github.com/gocql/gocql/session.go @@ -0,0 +1,2282 @@ +// Copyright (c) 2012 The gocql Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package gocql + +import ( + "bytes" + "context" + "encoding/binary" + "errors" + "fmt" + "io" + "net" + "strings" + "sync" + "sync/atomic" + "time" + "unicode" + + "github.com/gocql/gocql/internal/lru" +) + +// Session is the interface used by users to interact with the database. +// +// It's safe for concurrent use by multiple goroutines and a typical usage +// scenario is to have one global session object to interact with the +// whole Cassandra cluster. +// +// This type extends the Node interface by adding a convenient query builder +// and automatically sets a default consistency level on all operations +// that do not have a consistency level set. 
+type Session struct {
+	cons                Consistency
+	pageSize            int
+	prefetch            float64
+	routingKeyInfoCache routingKeyInfoLRU
+	schemaDescriber     *schemaDescriber
+	trace               Tracer
+	queryObserver       QueryObserver
+	batchObserver       BatchObserver
+	connectObserver     ConnectObserver
+	frameObserver       FrameHeaderObserver
+	streamObserver      StreamObserver
+	hostSource          *ringDescriber
+	ringRefresher       *refreshDebouncer
+	stmtsLRU            *preparedLRU
+
+	connCfg *ConnConfig
+
+	executor *queryExecutor
+	pool     *policyConnPool
+	policy   HostSelectionPolicy
+
+	ring     ring
+	metadata clusterMetadata
+
+	mu sync.RWMutex
+
+	control *controlConn
+
+	// event handlers
+	nodeEvents   *eventDebouncer
+	schemaEvents *eventDebouncer
+
+	// ring metadata
+	useSystemSchema           bool
+	hasAggregatesAndFunctions bool
+
+	cfg ClusterConfig
+
+	ctx    context.Context
+	cancel context.CancelFunc
+
+	// sessionStateMu protects isClosed and isInitialized.
+	sessionStateMu sync.RWMutex
+	// isClosed is true once Session.Close is finished.
+	isClosed bool
+	// isClosing is true once Session.Close is started.
+	isClosing bool
+	// isInitialized is true once Session.init succeeds.
+	// you can use initialized() to read the value.
+	isInitialized bool
+
+	logger StdLogger
+}
+
+var queryPool = &sync.Pool{
+	New: func() interface{} {
+		return &Query{routingInfo: &queryRoutingInfo{}, refCount: 1}
+	},
+}
+
+func addrsToHosts(addrs []string, defaultPort int, logger StdLogger) ([]*HostInfo, error) {
+	var hosts []*HostInfo
+	for _, hostaddr := range addrs {
+		resolvedHosts, err := hostInfo(hostaddr, defaultPort)
+		if err != nil {
+			// Try other hosts if unable to resolve DNS name
+			if _, ok := err.(*net.DNSError); ok {
+				logger.Printf("gocql: dns error: %v\n", err)
+				continue
+			}
+			return nil, err
+		}
+
+		hosts = append(hosts, resolvedHosts...)
+	}
+	if len(hosts) == 0 {
+		return nil, errors.New("failed to resolve any of the provided hostnames")
+	}
+	return hosts, nil
+}
+
+// NewSession creates a new Session from the given ClusterConfig.
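+//
+// Most callers build the config with NewCluster and then create the session;
+// a minimal sketch (the addresses and keyspace name are assumptions):
+//
+//	cluster := NewCluster("10.0.0.1", "10.0.0.2")
+//	cluster.Keyspace = "example"
+//	session, err := NewSession(*cluster)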
+func NewSession(cfg ClusterConfig) (*Session, error) {
+	// Check that the hosts in the ClusterConfig are not empty
+	if len(cfg.Hosts) < 1 {
+		return nil, ErrNoHosts
+	}
+
+	// Check that either Authenticator is set or AuthProvider, not both
+	if cfg.Authenticator != nil && cfg.AuthProvider != nil {
+		return nil, errors.New("Can't use both Authenticator and AuthProvider in cluster config.")
+	}
+
+	// TODO: we should take a context in here at some point
+	ctx, cancel := context.WithCancel(context.TODO())
+
+	s := &Session{
+		cons:            cfg.Consistency,
+		prefetch:        0.25,
+		cfg:             cfg,
+		pageSize:        cfg.PageSize,
+		stmtsLRU:        &preparedLRU{lru: lru.New(cfg.MaxPreparedStmts)},
+		connectObserver: cfg.ConnectObserver,
+		ctx:             ctx,
+		cancel:          cancel,
+		logger:          cfg.logger(),
+	}
+
+	s.schemaDescriber = newSchemaDescriber(s)
+
+	s.nodeEvents = newEventDebouncer("NodeEvents", s.handleNodeEvent, s.logger)
+	s.schemaEvents = newEventDebouncer("SchemaEvents", s.handleSchemaEvent, s.logger)
+
+	s.routingKeyInfoCache.lru = lru.New(cfg.MaxRoutingKeyInfo)
+
+	s.hostSource = &ringDescriber{session: s}
+	s.ringRefresher = newRefreshDebouncer(ringRefreshDebounceTime, func() error { return refreshRing(s.hostSource) })
+
+	if cfg.PoolConfig.HostSelectionPolicy == nil {
+		cfg.PoolConfig.HostSelectionPolicy = RoundRobinHostPolicy()
+	}
+	s.pool = cfg.PoolConfig.buildPool(s)
+
+	s.policy = cfg.PoolConfig.HostSelectionPolicy
+	s.policy.Init(s)
+
+	s.executor = &queryExecutor{
+		pool:   s.pool,
+		policy: cfg.PoolConfig.HostSelectionPolicy,
+	}
+
+	s.queryObserver = cfg.QueryObserver
+	s.batchObserver = cfg.BatchObserver
+	s.connectObserver = cfg.ConnectObserver
+	s.frameObserver = cfg.FrameHeaderObserver
+	s.streamObserver = cfg.StreamObserver
+
+	// Check the TLS Config before trying to connect to anything external
+	connCfg, err := connConfig(&s.cfg)
+	if err != nil {
+		// TODO: Return a typed error
+		return nil, fmt.Errorf("gocql: unable to create session: %v", err)
+	}
+	s.connCfg = connCfg
+
+	if err := s.init(); err != nil {
+		s.Close()
+		if err == ErrNoConnectionsStarted {
+			// This error used to be generated inside NewSession & returned directly
+			// Forward it on up to be backwards compatible
+			return nil, ErrNoConnectionsStarted
+		} else {
+			// TODO(zariel): don't wrap this error in fmt.Errorf, return a typed error
+			return nil, fmt.Errorf("gocql: unable to create session: %v", err)
+		}
+	}
+
+	return s, nil
+}
+
+func (s *Session) init() error {
+	hosts, err := addrsToHosts(s.cfg.Hosts, s.cfg.Port, s.logger)
+	if err != nil {
+		return err
+	}
+	s.ring.endpoints = hosts
+
+	if !s.cfg.disableControlConn {
+		s.control = createControlConn(s)
+		if s.cfg.ProtoVersion == 0 {
+			proto, err := s.control.discoverProtocol(hosts)
+			if err != nil {
+				return fmt.Errorf("unable to discover protocol version: %v", err)
+			} else if proto == 0 {
+				return errors.New("unable to discover protocol version")
+			}
+
+			// TODO(zariel): we really only need this in 1 place
+			s.cfg.ProtoVersion = proto
+			s.connCfg.ProtoVersion = proto
+		}
+
+		if err := s.control.connect(hosts); err != nil {
+			return err
+		}
+
+		if !s.cfg.DisableInitialHostLookup {
+			var partitioner string
+			newHosts, partitioner, err := s.hostSource.GetHosts()
+			if err != nil {
+				return err
+			}
+			s.policy.SetPartitioner(partitioner)
+			filteredHosts := make([]*HostInfo, 0, len(newHosts))
+			for _, host := range newHosts {
+				if !s.cfg.filterHost(host) {
+					filteredHosts = append(filteredHosts, host)
+				}
+			}
+
+			hosts = filteredHosts
+		}
+	}
+
+	for _, host := range hosts {
+		// In case host lookup is disabled, and when we are in unit tests,
+		// hosts are not discovered, and we are missing host ID information used
+		// by internal logic.
+		// Associate random UUIDs here with all hosts missing this information.
+		if len(host.HostID()) == 0 {
+			host.SetHostID(MustRandomUUID().String())
+		}
+	}
+
+	hostMap := make(map[string]*HostInfo, len(hosts))
+	for _, host := range hosts {
+		hostMap[host.HostID()] = host
+	}
+
+	hosts = hosts[:0]
+	// each host will increment left and decrement it after connecting and once
+	// there's none left, we'll close hostCh
+	var left int64
+	// we will receive up to len(hostMap) of messages so create a buffer so we
+	// don't end up stuck in a goroutine if we stopped listening
+	connectedCh := make(chan struct{}, len(hostMap))
+	// we add one here because we don't want to end up closing hostCh until we're
+	// done looping and the decrement code might be reached before we've looped
+	// again
+	atomic.AddInt64(&left, 1)
+	for _, host := range hostMap {
+		host := s.ring.addOrUpdate(host)
+		if s.cfg.filterHost(host) {
+			continue
+		}
+
+		atomic.AddInt64(&left, 1)
+		go func() {
+			s.pool.addHost(host)
+			connectedCh <- struct{}{}
+
+			// if there are no hosts left, then close the hostCh to unblock the loop
+			// below if it's still waiting
+			if atomic.AddInt64(&left, -1) == 0 {
+				close(connectedCh)
+			}
+		}()
+
+		hosts = append(hosts, host)
+	}
+	// once we're done looping we subtract the one we initially added and check
+	// to see if we should close
+	if atomic.AddInt64(&left, -1) == 0 {
+		close(connectedCh)
+	}
+
+	// before waiting for them to connect, add them all to the policy so we can
+	// utilize efficiencies by calling AddHosts if the policy supports it
+	type bulkAddHosts interface {
+		AddHosts([]*HostInfo)
+	}
+	if v, ok := s.policy.(bulkAddHosts); ok {
+		v.AddHosts(hosts)
+	} else {
+		for _, host := range hosts {
+			s.policy.AddHost(host)
+		}
+	}
+
+	readyPolicy, _ := s.policy.(ReadyPolicy)
+	// now loop over connectedCh until it's closed (meaning we've connected to all)
+	// or until the policy says we're ready
+	for range connectedCh {
+		if readyPolicy != nil && readyPolicy.Ready() {
+			break
+		}
+	}
+
+	// TODO(zariel): we probably don't need this anymore as we verify that we
+	// can connect to one of the endpoints supplied by using the control conn.
+	// See if there are any connections in the pool
+	if s.cfg.ReconnectInterval > 0 {
+		go s.reconnectDownedHosts(s.cfg.ReconnectInterval)
+	}
+
+	// If we disable the initial host lookup, we need to still check if the
+	// cluster is using the newer system schema or not... however, if the control
+	// connection is disabled, we really have no choice, so we just make our
+	// best guess...
+	if !s.cfg.disableControlConn && s.cfg.DisableInitialHostLookup {
+		newer, _ := checkSystemSchema(s.control)
+		s.useSystemSchema = newer
+	} else {
+		version := s.ring.rrHost().Version()
+		s.useSystemSchema = version.AtLeast(3, 0, 0)
+		s.hasAggregatesAndFunctions = version.AtLeast(2, 2, 0)
+	}
+
+	if s.pool.Size() == 0 {
+		return ErrNoConnectionsStarted
+	}
+
+	// Invoke KeyspaceChanged to let the policy cache the session keyspace
+	// parameters. This is used by tokenAwareHostPolicy to discover replicas.
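+	// Illustrative sketch (not code from this function): replica awareness is
+	// opted into on the cluster config before the session is created, e.g.
+	//
+	//	cluster.PoolConfig.HostSelectionPolicy =
+	//		TokenAwareHostPolicy(RoundRobinHostPolicy())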
+ if !s.cfg.disableControlConn && s.cfg.Keyspace != "" { + s.policy.KeyspaceChanged(KeyspaceUpdateEvent{Keyspace: s.cfg.Keyspace}) + } + + s.sessionStateMu.Lock() + s.isInitialized = true + s.sessionStateMu.Unlock() + + return nil +} + +// AwaitSchemaAgreement will wait until schema versions across all nodes in the +// cluster are the same (as seen from the point of view of the control connection). +// The maximum amount of time this takes is governed +// by the MaxWaitSchemaAgreement setting in the configuration (default: 60s). +// AwaitSchemaAgreement returns an error in case schema versions are not the same +// after the timeout specified in MaxWaitSchemaAgreement elapses. +func (s *Session) AwaitSchemaAgreement(ctx context.Context) error { + if s.cfg.disableControlConn { + return errNoControl + } + return s.control.withConn(func(conn *Conn) *Iter { + return &Iter{err: conn.awaitSchemaAgreement(ctx)} + }).err +} + +func (s *Session) reconnectDownedHosts(intv time.Duration) { + reconnectTicker := time.NewTicker(intv) + defer reconnectTicker.Stop() + + for { + select { + case <-reconnectTicker.C: + hosts := s.ring.allHosts() + + // Print session.ring for debug. + if gocqlDebug { + buf := bytes.NewBufferString("Session.ring:") + for _, h := range hosts { + buf.WriteString("[" + h.ConnectAddress().String() + ":" + h.State().String() + "]") + } + s.logger.Println(buf.String()) + } + + for _, h := range hosts { + if h.IsUp() { + continue + } + // we let the pool call handleNodeConnected to change the host state + s.pool.addHost(h) + } + case <-s.ctx.Done(): + return + } + } +} + +// SetConsistency sets the default consistency level for this session. This +// setting can also be changed on a per-query basis and the default value +// is Quorum. +func (s *Session) SetConsistency(cons Consistency) { + s.mu.Lock() + s.cons = cons + s.mu.Unlock() +} + +// SetPageSize sets the default page size for this session. A value <= 0 will +// disable paging. This setting can also be changed on a per-query basis. +func (s *Session) SetPageSize(n int) { + s.mu.Lock() + s.pageSize = n + s.mu.Unlock() +} + +// SetPrefetch sets the default threshold for pre-fetching new pages. If +// there are only p*pageSize rows remaining, the next page will be requested +// automatically. This value can also be changed on a per-query basis and +// the default value is 0.25. +func (s *Session) SetPrefetch(p float64) { + s.mu.Lock() + s.prefetch = p + s.mu.Unlock() +} + +// SetTrace sets the default tracer for this session. This setting can also +// be changed on a per-query basis. +func (s *Session) SetTrace(trace Tracer) { + s.mu.Lock() + s.trace = trace + s.mu.Unlock() +} + +// Query generates a new query object for interacting with the database. +// Further details of the query may be tweaked using the resulting query +// value before the query is executed. Query is automatically prepared +// if it has not previously been executed. +func (s *Session) Query(stmt string, values ...interface{}) *Query { + qry := queryPool.Get().(*Query) + qry.session = s + qry.stmt = stmt + qry.values = values + qry.defaultsFromSession() + return qry +} + +type QueryInfo struct { + Id []byte + Args []ColumnInfo + Rval []ColumnInfo + PKeyColumns []int +} + +// Bind generates a new query object based on the query statement passed in. +// The query is automatically prepared if it has not previously been executed. 
+// The binding callback allows the application to define which query argument +// values will be marshalled as part of the query execution. +// During execution, the meta data of the prepared query will be routed to the +// binding callback, which is responsible for producing the query argument values. +func (s *Session) Bind(stmt string, b func(q *QueryInfo) ([]interface{}, error)) *Query { + qry := queryPool.Get().(*Query) + qry.session = s + qry.stmt = stmt + qry.binding = b + qry.defaultsFromSession() + return qry +} + +// Close closes all connections. The session is unusable after this +// operation. +func (s *Session) Close() { + + s.sessionStateMu.Lock() + if s.isClosing { + s.sessionStateMu.Unlock() + return + } + s.isClosing = true + s.sessionStateMu.Unlock() + + if s.pool != nil { + s.pool.Close() + } + + if s.control != nil { + s.control.close() + } + + if s.nodeEvents != nil { + s.nodeEvents.stop() + } + + if s.schemaEvents != nil { + s.schemaEvents.stop() + } + + if s.ringRefresher != nil { + s.ringRefresher.stop() + } + + if s.cancel != nil { + s.cancel() + } + + s.sessionStateMu.Lock() + s.isClosed = true + s.sessionStateMu.Unlock() +} + +func (s *Session) Closed() bool { + s.sessionStateMu.RLock() + closed := s.isClosed + s.sessionStateMu.RUnlock() + return closed +} + +func (s *Session) initialized() bool { + s.sessionStateMu.RLock() + initialized := s.isInitialized + s.sessionStateMu.RUnlock() + return initialized +} + +func (s *Session) executeQuery(qry *Query) (it *Iter) { + // fail fast + if s.Closed() { + return &Iter{err: ErrSessionClosed} + } + + iter, err := s.executor.executeQuery(qry) + if err != nil { + return &Iter{err: err} + } + if iter == nil { + panic("nil iter") + } + + return iter +} + +func (s *Session) removeHost(h *HostInfo) { + s.policy.RemoveHost(h) + hostID := h.HostID() + s.pool.removeHost(hostID) + s.ring.removeHost(hostID) +} + +// KeyspaceMetadata returns the schema metadata for the keyspace specified. Returns an error if the keyspace does not exist. 
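+//
+// Illustrative sketch ("example" is an assumed keyspace name):
+//
+//	md, err := session.KeyspaceMetadata("example")
+//	if err == nil {
+//		for name := range md.Tables {
+//			// inspect the table metadata here
+//			_ = name
+//		}
+//	}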
+func (s *Session) KeyspaceMetadata(keyspace string) (*KeyspaceMetadata, error) {
+	// fail fast
+	if s.Closed() {
+		return nil, ErrSessionClosed
+	} else if keyspace == "" {
+		return nil, ErrNoKeyspace
+	}
+
+	return s.schemaDescriber.getSchema(keyspace)
+}
+
+func (s *Session) getConn() *Conn {
+	hosts := s.ring.allHosts()
+	for _, host := range hosts {
+		if !host.IsUp() {
+			continue
+		}
+
+		pool, ok := s.pool.getPool(host)
+		if !ok {
+			continue
+		} else if conn := pool.Pick(); conn != nil {
+			return conn
+		}
+	}
+
+	return nil
+}
+
+// returns routing key indexes and type info
+func (s *Session) routingKeyInfo(ctx context.Context, stmt string) (*routingKeyInfo, error) {
+	s.routingKeyInfoCache.mu.Lock()
+
+	entry, cached := s.routingKeyInfoCache.lru.Get(stmt)
+	if cached {
+		// done accessing the cache
+		s.routingKeyInfoCache.mu.Unlock()
+		// the entry is an inflight struct similar to that used by
+		// Conn to prepare statements
+		inflight := entry.(*inflightCachedEntry)
+
+		// wait for any inflight work
+		inflight.wg.Wait()
+
+		if inflight.err != nil {
+			return nil, inflight.err
+		}
+
+		key, _ := inflight.value.(*routingKeyInfo)
+
+		return key, nil
+	}
+
+	// create a new inflight entry while the data is created
+	inflight := new(inflightCachedEntry)
+	inflight.wg.Add(1)
+	defer inflight.wg.Done()
+	s.routingKeyInfoCache.lru.Add(stmt, inflight)
+	s.routingKeyInfoCache.mu.Unlock()
+
+	var (
+		info         *preparedStatment
+		partitionKey []*ColumnMetadata
+	)
+
+	conn := s.getConn()
+	if conn == nil {
+		// TODO: better error?
+		inflight.err = errors.New("gocql: unable to fetch prepared info: no connection available")
+		return nil, inflight.err
+	}
+
+	// get the query info for the statement
+	info, inflight.err = conn.prepareStatement(ctx, stmt, nil)
+	if inflight.err != nil {
+		// don't cache this error
+		s.routingKeyInfoCache.Remove(stmt)
+		return nil, inflight.err
+	}
+
+	// TODO: it would be nice to mark hosts here but as we are not using the policies
+	// to fetch hosts we can't
+
+	if info.request.colCount == 0 {
+		// no arguments, no routing key, and no error
+		return nil, nil
+	}
+
+	table := info.request.table
+	keyspace := info.request.keyspace
+
+	if len(info.request.pkeyColumns) > 0 {
+		// proto v4 doesn't need to calculate primary key columns
+		types := make([]TypeInfo, len(info.request.pkeyColumns))
+		for i, col := range info.request.pkeyColumns {
+			types[i] = info.request.columns[col].TypeInfo
+		}
+
+		routingKeyInfo := &routingKeyInfo{
+			indexes:  info.request.pkeyColumns,
+			types:    types,
+			keyspace: keyspace,
+			table:    table,
+		}
+
+		inflight.value = routingKeyInfo
+		return routingKeyInfo, nil
+	}
+
+	var keyspaceMetadata *KeyspaceMetadata
+	keyspaceMetadata, inflight.err = s.KeyspaceMetadata(info.request.columns[0].Keyspace)
+	if inflight.err != nil {
+		// don't cache this error
+		s.routingKeyInfoCache.Remove(stmt)
+		return nil, inflight.err
+	}
+
+	tableMetadata, found := keyspaceMetadata.Tables[table]
+	if !found {
+		// unlikely that the statement could be prepared and the metadata for
+		// the table couldn't be found, but this may indicate either a bug
+		// in the metadata code, or that the table was just dropped.
+		inflight.err = ErrNoMetadata
+		// don't cache this error
+		s.routingKeyInfoCache.Remove(stmt)
+		return nil, inflight.err
+	}
+
+	partitionKey = tableMetadata.PartitionKey
+
+	size := len(partitionKey)
+	routingKeyInfo := &routingKeyInfo{
+		indexes:  make([]int, size),
+		types:    make([]TypeInfo, size),
+		keyspace: keyspace,
+		table:    table,
+	}
+
+	for keyIndex, keyColumn := range partitionKey {
+		// set an indicator for checking if the mapping is missing
+		routingKeyInfo.indexes[keyIndex] = -1
+
+		// find the column in the query info
+		for argIndex, boundColumn := range info.request.columns {
+			if keyColumn.Name == boundColumn.Name {
+				// there may be many such bound columns, pick the first
+				routingKeyInfo.indexes[keyIndex] = argIndex
+				routingKeyInfo.types[keyIndex] = boundColumn.TypeInfo
+				break
+			}
+		}
+
+		if routingKeyInfo.indexes[keyIndex] == -1 {
+			// missing a routing key column mapping
+			// no routing key, and no error
+			return nil, nil
+		}
+	}
+
+	// cache this result
+	inflight.value = routingKeyInfo
+
+	return routingKeyInfo, nil
+}
+
+func (b *Batch) execute(ctx context.Context, conn *Conn) *Iter {
+	return conn.executeBatch(ctx, b)
+}
+
+func (s *Session) executeBatch(batch *Batch) *Iter {
+	// fail fast
+	if s.Closed() {
+		return &Iter{err: ErrSessionClosed}
+	}
+
+	// Prevent the execution of the batch if its size is greater than the limit.
+	// Currently batches have a limit of 65536 queries.
+	// https://datastax-oss.atlassian.net/browse/JAVA-229
+	if batch.Size() > BatchSizeMaximum {
+		return &Iter{err: ErrTooManyStmts}
+	}
+
+	iter, err := s.executor.executeQuery(batch)
+	if err != nil {
+		return &Iter{err: err}
+	}
+
+	return iter
+}
+
+// ExecuteBatch executes a batch operation and returns nil if successful,
+// otherwise an error is returned describing the failure.
+func (s *Session) ExecuteBatch(batch *Batch) error {
+	iter := s.executeBatch(batch)
+	return iter.Close()
+}
+
+// ExecuteBatchCAS executes a batch operation and returns true if successful and
+// an iterator (to scan additional rows if more than one conditional statement)
+// was sent.
+// Further scans on the iterator must also remember to include
+// the applied boolean as the first argument to *Iter.Scan
+func (s *Session) ExecuteBatchCAS(batch *Batch, dest ...interface{}) (applied bool, iter *Iter, err error) {
+	iter = s.executeBatch(batch)
+	if err := iter.checkErrAndNotFound(); err != nil {
+		iter.Close()
+		return false, nil, err
+	}
+
+	if len(iter.Columns()) > 1 {
+		dest = append([]interface{}{&applied}, dest...)
+		iter.Scan(dest...)
+	} else {
+		iter.Scan(&applied)
+	}
+
+	return applied, iter, nil
+}
+
+// MapExecuteBatchCAS executes a batch operation much like ExecuteBatchCAS,
+// however it accepts a map rather than a list of arguments for the initial
+// scan.
+func (s *Session) MapExecuteBatchCAS(batch *Batch, dest map[string]interface{}) (applied bool, iter *Iter, err error) {
+	iter = s.executeBatch(batch)
+	if err := iter.checkErrAndNotFound(); err != nil {
+		iter.Close()
+		return false, nil, err
+	}
+	iter.MapScan(dest)
+	applied = dest["[applied]"].(bool)
+	delete(dest, "[applied]")
+
+	// we usually close here, but instead of closing, just return an error
+	// if MapScan failed. Although Close just returns err, using Close
+	// here might be confusing as we are not actually closing the iter
+	return applied, iter, iter.err
+}
+
+type hostMetrics struct {
+	// Attempts is the count of how many times this query has been attempted for this host.
+ // An attempt is either a retry or fetching next page of results. + Attempts int + + // TotalLatency is the sum of attempt latencies for this host in nanoseconds. + TotalLatency int64 +} + +type queryMetrics struct { + l sync.RWMutex + m map[string]*hostMetrics + // totalAttempts is total number of attempts. + // Equal to sum of all hostMetrics' Attempts. + totalAttempts int +} + +// preFilledQueryMetrics initializes new queryMetrics based on per-host supplied data. +func preFilledQueryMetrics(m map[string]*hostMetrics) *queryMetrics { + qm := &queryMetrics{m: m} + for _, hm := range qm.m { + qm.totalAttempts += hm.Attempts + } + return qm +} + +// hostMetrics returns a snapshot of metrics for given host. +// If the metrics for host don't exist, they are created. +func (qm *queryMetrics) hostMetrics(host *HostInfo) *hostMetrics { + qm.l.Lock() + metrics := qm.hostMetricsLocked(host) + copied := new(hostMetrics) + *copied = *metrics + qm.l.Unlock() + return copied +} + +// hostMetricsLocked gets or creates host metrics for given host. +// It must be called only while holding qm.l lock. +func (qm *queryMetrics) hostMetricsLocked(host *HostInfo) *hostMetrics { + metrics, exists := qm.m[host.ConnectAddress().String()] + if !exists { + // if the host is not in the map, it means it's been accessed for the first time + metrics = &hostMetrics{} + qm.m[host.ConnectAddress().String()] = metrics + } + + return metrics +} + +// attempts returns the number of times the query was executed. +func (qm *queryMetrics) attempts() int { + qm.l.Lock() + attempts := qm.totalAttempts + qm.l.Unlock() + return attempts +} + +func (qm *queryMetrics) latency() int64 { + qm.l.Lock() + var ( + attempts int + latency int64 + ) + for _, metric := range qm.m { + attempts += metric.Attempts + latency += metric.TotalLatency + } + qm.l.Unlock() + if attempts > 0 { + return latency / int64(attempts) + } + return 0 +} + +// attempt adds given number of attempts and latency for given host. +// It returns previous total attempts. +// If needsHostMetrics is true, a copy of updated hostMetrics is returned. +func (qm *queryMetrics) attempt(addAttempts int, addLatency time.Duration, + host *HostInfo, needsHostMetrics bool) (int, *hostMetrics) { + qm.l.Lock() + + totalAttempts := qm.totalAttempts + qm.totalAttempts += addAttempts + + updateHostMetrics := qm.hostMetricsLocked(host) + updateHostMetrics.Attempts += addAttempts + updateHostMetrics.TotalLatency += addLatency.Nanoseconds() + + var hostMetricsCopy *hostMetrics + if needsHostMetrics { + hostMetricsCopy = new(hostMetrics) + *hostMetricsCopy = *updateHostMetrics + } + + qm.l.Unlock() + return totalAttempts, hostMetricsCopy +} + +// Query represents a CQL statement that can be executed. 
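+//
+// A minimal usage sketch (the table and column names are assumptions):
+//
+//	var name string
+//	err := session.Query(`SELECT name FROM users WHERE id = ?`, 42).
+//		Consistency(Quorum).
+//		Scan(&name)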
+type Query struct {
+	stmt                  string
+	values                []interface{}
+	cons                  Consistency
+	pageSize              int
+	routingKey            []byte
+	pageState             []byte
+	prefetch              float64
+	trace                 Tracer
+	observer              QueryObserver
+	session               *Session
+	conn                  *Conn
+	rt                    RetryPolicy
+	spec                  SpeculativeExecutionPolicy
+	binding               func(q *QueryInfo) ([]interface{}, error)
+	serialCons            SerialConsistency
+	defaultTimestamp      bool
+	defaultTimestampValue int64
+	disableSkipMetadata   bool
+	context               context.Context
+	idempotent            bool
+	customPayload         map[string][]byte
+	metrics               *queryMetrics
+	refCount              uint32
+
+	disableAutoPage bool
+
+	// getKeyspace is a field so that it can be overridden in tests
+	getKeyspace func() string
+
+	// used by control conn queries to prevent triggering a write to systems
+	// tables in AWS MCS see
+	skipPrepare bool
+
+	// routingInfo is a pointer because Query can be copied and a copyable struct can't hold a mutex.
+	routingInfo *queryRoutingInfo
+}
+
+type queryRoutingInfo struct {
+	// mu protects contents of queryRoutingInfo.
+	mu sync.RWMutex
+
+	keyspace string
+
+	table string
+}
+
+func (q *Query) defaultsFromSession() {
+	s := q.session
+
+	s.mu.RLock()
+	q.cons = s.cons
+	q.pageSize = s.pageSize
+	q.trace = s.trace
+	q.observer = s.queryObserver
+	q.prefetch = s.prefetch
+	q.rt = s.cfg.RetryPolicy
+	q.serialCons = s.cfg.SerialConsistency
+	q.defaultTimestamp = s.cfg.DefaultTimestamp
+	q.idempotent = s.cfg.DefaultIdempotence
+	q.metrics = &queryMetrics{m: make(map[string]*hostMetrics)}
+
+	q.spec = &NonSpeculativeExecution{}
+	s.mu.RUnlock()
+}
+
+// Statement returns the statement that was used to generate this query.
+func (q Query) Statement() string {
+	return q.stmt
+}
+
+// Values returns the values passed in via Bind.
+// This can be used by a wrapper type that needs to access the bound values.
+func (q Query) Values() []interface{} {
+	return q.values
+}
+
+// String implements the stringer interface.
+func (q Query) String() string {
+	return fmt.Sprintf("[query statement=%q values=%+v consistency=%s]", q.stmt, q.values, q.cons)
+}
+
+// Attempts returns the number of times the query was executed.
+func (q *Query) Attempts() int {
+	return q.metrics.attempts()
+}
+
+func (q *Query) AddAttempts(i int, host *HostInfo) {
+	q.metrics.attempt(i, 0, host, false)
+}
+
+// Latency returns the average amount of nanoseconds per attempt of the query.
+func (q *Query) Latency() int64 {
+	return q.metrics.latency()
+}
+
+func (q *Query) AddLatency(l int64, host *HostInfo) {
+	q.metrics.attempt(0, time.Duration(l)*time.Nanosecond, host, false)
+}
+
+// Consistency sets the consistency level for this query. If no consistency
+// level has been set, the default consistency level of the cluster
+// is used.
+func (q *Query) Consistency(c Consistency) *Query {
+	q.cons = c
+	return q
+}
+
+// GetConsistency returns the currently configured consistency level for
+// the query.
+func (q *Query) GetConsistency() Consistency {
+	return q.cons
+}
+
+// Same as Consistency but without a return value
+func (q *Query) SetConsistency(c Consistency) {
+	q.cons = c
+}
+
+// CustomPayload sets the custom payload level for this query.
+func (q *Query) CustomPayload(customPayload map[string][]byte) *Query {
+	q.customPayload = customPayload
+	return q
+}
+
+func (q *Query) Context() context.Context {
+	if q.context == nil {
+		return context.Background()
+	}
+	return q.context
+}
+
+// Trace enables tracing of this query. Look at the documentation of the
+// Tracer interface to learn more about tracing.
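+//
+// Illustrative sketch (NewTraceWriter and os.Stdout are assumed to be
+// available to the caller):
+//
+//	q := session.Query(`SELECT * FROM example`).
+//		Trace(NewTraceWriter(session, os.Stdout))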
+func (q *Query) Trace(trace Tracer) *Query {
+	q.trace = trace
+	return q
+}
+
+// Observer enables query-level observer on this query.
+// The provided observer will be called every time this query is executed.
+func (q *Query) Observer(observer QueryObserver) *Query {
+	q.observer = observer
+	return q
+}
+
+// PageSize will tell the iterator to fetch the result in pages of size n.
+// This is useful for iterating over large result sets, but setting the
+// page size too low might decrease the performance. This feature is only
+// available in Cassandra 2 and onwards.
+func (q *Query) PageSize(n int) *Query {
+	q.pageSize = n
+	return q
+}
+
+// DefaultTimestamp will enable the with default timestamp flag on the query.
+// If enabled, this will replace the server-side assigned
+// timestamp as the default timestamp. Note that a timestamp in the query itself
+// will still override this timestamp. This is entirely optional.
+//
+// Only available on protocol >= 3
+func (q *Query) DefaultTimestamp(enable bool) *Query {
+	q.defaultTimestamp = enable
+	return q
+}
+
+// WithTimestamp will enable the with default timestamp flag on the query
+// like DefaultTimestamp does, but also allows defining the value of the
+// timestamp. It works the same way as USING TIMESTAMP in the query itself, but
+// should not break prepared query optimization.
+//
+// Only available on protocol >= 3
+func (q *Query) WithTimestamp(timestamp int64) *Query {
+	q.DefaultTimestamp(true)
+	q.defaultTimestampValue = timestamp
+	return q
+}
+
+// RoutingKey sets the routing key to use when a token aware connection
+// pool is used to optimize the routing of this query.
+func (q *Query) RoutingKey(routingKey []byte) *Query {
+	q.routingKey = routingKey
+	return q
+}
+
+func (q *Query) withContext(ctx context.Context) ExecutableQuery {
+	// I really wish go had covariant types
+	return q.WithContext(ctx)
+}
+
+// WithContext returns a shallow copy of q with its context
+// set to ctx.
+//
+// The provided context controls the entire lifetime of executing a
+// query, queries will be canceled and return once the context is
+// canceled.
+func (q *Query) WithContext(ctx context.Context) *Query {
+	q2 := *q
+	q2.context = ctx
+	return &q2
+}
+
+// Deprecated: does nothing, cancel the context passed to WithContext
+func (q *Query) Cancel() {
+	// TODO: delete
+}
+
+func (q *Query) execute(ctx context.Context, conn *Conn) *Iter {
+	return conn.executeQuery(ctx, q)
+}
+
+func (q *Query) attempt(keyspace string, end, start time.Time, iter *Iter, host *HostInfo) {
+	latency := end.Sub(start)
+	attempt, metricsForHost := q.metrics.attempt(1, latency, host, q.observer != nil)
+
+	if q.observer != nil {
+		q.observer.ObserveQuery(q.Context(), ObservedQuery{
+			Keyspace:  keyspace,
+			Statement: q.stmt,
+			Values:    q.values,
+			Start:     start,
+			End:       end,
+			Rows:      iter.numRows,
+			Host:      host,
+			Metrics:   metricsForHost,
+			Err:       iter.err,
+			Attempt:   attempt,
+		})
+	}
+}
+
+func (q *Query) retryPolicy() RetryPolicy {
+	return q.rt
+}
+
+// Keyspace returns the keyspace the query will be executed against.
+func (q *Query) Keyspace() string {
+	if q.getKeyspace != nil {
+		return q.getKeyspace()
+	}
+	if q.routingInfo.keyspace != "" {
+		return q.routingInfo.keyspace
+	}
+
+	if q.session == nil {
+		return ""
+	}
+	// TODO(chbannis): this should be parsed from the query or we should let
+	// this be set by users.
+	return q.session.cfg.Keyspace
+}
+
+// Table returns the name of the table the query will be executed against.
+func (q *Query) Table() string {
+	return q.routingInfo.table
+}
+
+// GetRoutingKey gets the routing key to use for routing this query. If
+// a routing key has not been explicitly set, then the routing key will
+// be constructed if possible using the keyspace's schema and the query
+// info for this query statement. If the routing key cannot be determined
+// then nil will be returned with no error. On any error condition,
+// an error description will be returned.
+func (q *Query) GetRoutingKey() ([]byte, error) {
+	if q.routingKey != nil {
+		return q.routingKey, nil
+	} else if q.binding != nil && len(q.values) == 0 {
+		// If this query was created using session.Bind we won't have the query
+		// values yet, so we have to pass down to the next policy.
+		// TODO: Remove this and handle this case
+		return nil, nil
+	}
+
+	// try to determine the routing key
+	routingKeyInfo, err := q.session.routingKeyInfo(q.Context(), q.stmt)
+	if err != nil {
+		return nil, err
+	}
+
+	if routingKeyInfo != nil {
+		q.routingInfo.mu.Lock()
+		q.routingInfo.keyspace = routingKeyInfo.keyspace
+		q.routingInfo.table = routingKeyInfo.table
+		q.routingInfo.mu.Unlock()
+	}
+	return createRoutingKey(routingKeyInfo, q.values)
+}
+
+func (q *Query) shouldPrepare() bool {
+
+	stmt := strings.TrimLeftFunc(strings.TrimRightFunc(q.stmt, func(r rune) bool {
+		return unicode.IsSpace(r) || r == ';'
+	}), unicode.IsSpace)
+
+	var stmtType string
+	if n := strings.IndexFunc(stmt, unicode.IsSpace); n >= 0 {
+		stmtType = strings.ToLower(stmt[:n])
+	}
+	if stmtType == "begin" {
+		if n := strings.LastIndexFunc(stmt, unicode.IsSpace); n >= 0 {
+			stmtType = strings.ToLower(stmt[n+1:])
+		}
+	}
+	switch stmtType {
+	case "select", "insert", "update", "delete", "batch":
+		return true
+	}
+	return false
+}
+
+// Prefetch sets the default threshold for pre-fetching new pages. If
+// there are only p*pageSize rows remaining, the next page will be requested
+// automatically.
+func (q *Query) Prefetch(p float64) *Query {
+	q.prefetch = p
+	return q
+}
+
+// RetryPolicy sets the policy to use when retrying the query.
+func (q *Query) RetryPolicy(r RetryPolicy) *Query {
+	q.rt = r
+	return q
+}
+
+// SetSpeculativeExecutionPolicy sets the execution policy
+func (q *Query) SetSpeculativeExecutionPolicy(sp SpeculativeExecutionPolicy) *Query {
+	q.spec = sp
+	return q
+}
+
+// speculativeExecutionPolicy fetches the policy
+func (q *Query) speculativeExecutionPolicy() SpeculativeExecutionPolicy {
+	return q.spec
+}
+
+// IsIdempotent returns whether the query is marked as idempotent.
+// A non-idempotent query won't be retried.
+// See "Retries and speculative execution" in package docs for more details.
+func (q *Query) IsIdempotent() bool {
+	return q.idempotent
+}
+
+// Idempotent marks the query as being idempotent or not depending on
+// the value.
+// A non-idempotent query won't be retried.
+// See "Retries and speculative execution" in package docs for more details.
+func (q *Query) Idempotent(value bool) *Query {
+	q.idempotent = value
+	return q
+}
+
+// Bind sets the query arguments of the query. This can also be used to rebind new query arguments
+// to an existing query instance.
+func (q *Query) Bind(v ...interface{}) *Query {
+	q.values = v
+	q.pageState = nil
+	return q
+}
+
+// SerialConsistency sets the consistency level for the
+// serial phase of conditional updates. That consistency can only be
+// either SERIAL or LOCAL_SERIAL and if not present, it defaults to
+// SERIAL. This option will be ignored for anything other than a
+// conditional update/insert.
+func (q *Query) SerialConsistency(cons SerialConsistency) *Query {
+	q.serialCons = cons
+	return q
+}
+
+// PageState sets the paging state for the query to resume paging from a specific
+// point in time. Setting this will disable automatic query paging for this query,
+// and it must be used for all subsequent pages.
+func (q *Query) PageState(state []byte) *Query {
+	q.pageState = state
+	q.disableAutoPage = true
+	return q
+}
+
+// NoSkipMetadata will override the internal result metadata cache so that the driver does not
+// send skip_metadata for queries, this means that the result will always contain
+// the metadata to parse the rows and will not reuse the metadata from the prepared
+// statement. This should only be used to work around cassandra bugs, such as when using
+// CAS operations which do not end in Cas.
+//
+// See https://issues.apache.org/jira/browse/CASSANDRA-11099
+// https://github.com/gocql/gocql/issues/612
+func (q *Query) NoSkipMetadata() *Query {
+	q.disableSkipMetadata = true
+	return q
+}
+
+// Exec executes the query without returning any rows.
+func (q *Query) Exec() error {
+	return q.Iter().Close()
+}
+
+func isUseStatement(stmt string) bool {
+	if len(stmt) < 3 {
+		return false
+	}
+
+	return strings.EqualFold(stmt[0:3], "use")
+}
+
+// Iter executes the query and returns an iterator capable of iterating
+// over all results.
+func (q *Query) Iter() *Iter {
+	if isUseStatement(q.stmt) {
+		return &Iter{err: ErrUseStmt}
+	}
+	// if the query was specifically run on a connection then re-use that
+	// connection when fetching the next results
+	if q.conn != nil {
+		return q.conn.executeQuery(q.Context(), q)
+	}
+	return q.session.executeQuery(q)
+}
+
+// MapScan executes the query, copies the columns of the first selected
+// row into the map pointed at by m and discards the rest. If no rows
+// were selected, ErrNotFound is returned.
+func (q *Query) MapScan(m map[string]interface{}) error {
+	iter := q.Iter()
+	if err := iter.checkErrAndNotFound(); err != nil {
+		return err
+	}
+	iter.MapScan(m)
+	return iter.Close()
+}
+
+// Scan executes the query, copies the columns of the first selected
+// row into the values pointed at by dest and discards the rest. If no rows
+// were selected, ErrNotFound is returned.
+func (q *Query) Scan(dest ...interface{}) error {
+	iter := q.Iter()
+	if err := iter.checkErrAndNotFound(); err != nil {
+		return err
+	}
+	iter.Scan(dest...)
+	return iter.Close()
+}
+
+// ScanCAS executes a lightweight transaction (i.e. an UPDATE or INSERT
+// statement containing an IF clause). If the transaction fails because
+// the existing values did not match, the previous values will be stored
+// in dest.
+//
+// As for INSERT .. IF NOT EXISTS, previous values will be returned as if
+// SELECT * FROM. So using ScanCAS with INSERT is inherently prone to
+// column mismatching. Use MapScanCAS to capture them safely.
+func (q *Query) ScanCAS(dest ...interface{}) (applied bool, err error) {
+	q.disableSkipMetadata = true
+	iter := q.Iter()
+	if err := iter.checkErrAndNotFound(); err != nil {
+		return false, err
+	}
+	if len(iter.Columns()) > 1 {
+		dest = append([]interface{}{&applied}, dest...)
+		iter.Scan(dest...)
+	} else {
+		iter.Scan(&applied)
+	}
+	return applied, iter.Close()
+}
+
+// MapScanCAS executes a lightweight transaction (i.e. an UPDATE or INSERT
+// statement containing an IF clause). If the transaction fails because
+// the existing values did not match, the previous values will be stored
+// in the dest map.
+//
+// As for INSERT .. IF NOT EXISTS, previous values will be returned as if
+// SELECT * FROM. So using ScanCAS with INSERT is inherently prone to
+// column mismatching. MapScanCAS is added to capture them safely.
+func (q *Query) MapScanCAS(dest map[string]interface{}) (applied bool, err error) {
+	q.disableSkipMetadata = true
+	iter := q.Iter()
+	if err := iter.checkErrAndNotFound(); err != nil {
+		return false, err
+	}
+	iter.MapScan(dest)
+	applied = dest["[applied]"].(bool)
+	delete(dest, "[applied]")
+
+	return applied, iter.Close()
+}
+
+// Release releases a query back into a pool of queries. Released Queries
+// cannot be reused.
+//
+// Example:
+//
+//	qry := session.Query("SELECT * FROM my_table")
+//	qry.Exec()
+//	qry.Release()
+func (q *Query) Release() {
+	q.decRefCount()
+}
+
+// reset zeroes out all fields of a query so that it can be safely pooled.
+func (q *Query) reset() {
+	*q = Query{routingInfo: &queryRoutingInfo{}, refCount: 1}
+}
+
+func (q *Query) incRefCount() {
+	atomic.AddUint32(&q.refCount, 1)
+}
+
+func (q *Query) decRefCount() {
+	if res := atomic.AddUint32(&q.refCount, ^uint32(0)); res == 0 {
+		// do release
+		q.reset()
+		queryPool.Put(q)
+	}
+}
+
+func (q *Query) borrowForExecution() {
+	q.incRefCount()
+}
+
+func (q *Query) releaseAfterExecution() {
+	q.decRefCount()
+}
+
+// Iter represents an iterator that can be used to iterate over all rows that
+// were returned by a query. The iterator might send additional queries to the
+// database during the iteration if paging was enabled.
+type Iter struct {
+	err     error
+	pos     int
+	meta    resultMetadata
+	numRows int
+	next    *nextIter
+	host    *HostInfo
+
+	framer *framer
+	closed int32
+}
+
+// Host returns the host which the query was sent to.
+func (iter *Iter) Host() *HostInfo {
+	return iter.host
+}
+
+// Columns returns the name and type of the selected columns.
+func (iter *Iter) Columns() []ColumnInfo {
+	return iter.meta.columns
+}
+
+type Scanner interface {
+	// Next advances the row pointer to point at the next row, the row is valid until
+	// the next call of Next. It returns true if there is a row which is available to be
+	// scanned into with Scan.
+	// Next must be called before every call to Scan.
+	Next() bool
+
+	// Scan copies the current row's columns into dest. If the length of dest does not equal
+	// the number of columns returned in the row an error is returned. If an error is encountered
+	// when unmarshalling a column into the value in dest an error is returned and the row is invalidated
+	// until the next call to Next.
+	// Next must be called before calling Scan, if it is not an error is returned.
+	Scan(...interface{}) error
+
+	// Err returns the error, if any, that was encountered during iteration and caused
+	// iteration to be unable to complete.
+	// Err will also release resources held by the iterator; the Scanner should not be
+	// used after it has been called.
+	Err() error
+}
+
+type iterScanner struct {
+	iter  *Iter
+	cols  [][]byte
+	valid bool
+}
+
+func (is *iterScanner) Next() bool {
+	iter := is.iter
+	if iter.err != nil {
+		return false
+	}
+
+	if iter.pos >= iter.numRows {
+		if iter.next != nil {
+			is.iter = iter.next.fetch()
+			return is.Next()
+		}
+		return false
+	}
+
+	for i := 0; i < len(is.cols); i++ {
+		col, err := iter.readColumn()
+		if err != nil {
+			iter.err = err
+			return false
+		}
+		is.cols[i] = col
+	}
+	iter.pos++
+	is.valid = true
+
+	return true
+}
+
+func scanColumn(p []byte, col ColumnInfo, dest []interface{}) (int, error) {
+	if dest[0] == nil {
+		return 1, nil
+	}
+
+	if col.TypeInfo.Type() == TypeTuple {
+		// this will panic, actually a bug, please report
+		tuple := col.TypeInfo.(TupleTypeInfo)
+
+		count := len(tuple.Elems)
+		// here we pass in a slice of the struct which has the same number of
+		// values as elements in the tuple
+		if err := Unmarshal(col.TypeInfo, p, dest[:count]); err != nil {
+			return 0, err
+		}
+		return count, nil
+	} else {
+		if err := Unmarshal(col.TypeInfo, p, dest[0]); err != nil {
+			return 0, err
+		}
+		return 1, nil
+	}
+}
+
+func (is *iterScanner) Scan(dest ...interface{}) error {
+	if !is.valid {
+		return errors.New("gocql: Scan called without calling Next")
+	}
+
+	iter := is.iter
+	// currently only support scanning into an expanded tuple, such that it's the same
+	// as scanning in more values from a single column
+	if len(dest) != iter.meta.actualColCount {
+		return fmt.Errorf("gocql: not enough columns to scan into: have %d want %d", len(dest), iter.meta.actualColCount)
+	}
+
+	// i is the current position in dest; we could possibly replace it and just use
+	// slices of dest
+	i := 0
+	var err error
+	for _, col := range iter.meta.columns {
+		var n int
+		n, err = scanColumn(is.cols[i], col, dest[i:])
+		if err != nil {
+			break
+		}
+		i += n
+	}
+
+	is.valid = false
+	return err
+}
+
+func (is *iterScanner) Err() error {
+	iter := is.iter
+	is.iter = nil
+	is.cols = nil
+	is.valid = false
+	return iter.Close()
+}
+
+// Scanner returns a row Scanner which provides an interface to scan rows in a manner which is
+// similar to database/sql. The iter should NOT be used again after calling this method.
+func (iter *Iter) Scanner() Scanner {
+	if iter == nil {
+		return nil
+	}
+
+	return &iterScanner{iter: iter, cols: make([][]byte, len(iter.meta.columns))}
+}
+
+func (iter *Iter) readColumn() ([]byte, error) {
+	return iter.framer.readBytesInternal()
+}
+
+// Scan consumes the next row of the iterator and copies the columns of the
+// current row into the values pointed at by dest. Use nil as a dest value
+// to skip the corresponding column. Scan might send additional queries
+// to the database to retrieve the next set of rows if paging was enabled.
+//
+// Scan returns true if the row was successfully unmarshaled or false if the
+// end of the result set was reached or if an error occurred. Close should
+// be called afterwards to retrieve any potential errors.
+func (iter *Iter) Scan(dest ...interface{}) bool {
+	if iter.err != nil {
+		return false
+	}
+
+	if iter.pos >= iter.numRows {
+		if iter.next != nil {
+			*iter = *iter.next.fetch()
+			return iter.Scan(dest...)
+		}
+		return false
+	}
+
+	if iter.next != nil && iter.pos >= iter.next.pos {
+		iter.next.fetchAsync()
+	}
+
+	// currently only support scanning into an expanded tuple, such that it's the same
+	// as scanning in more values from a single column
+	if len(dest) != iter.meta.actualColCount {
+		iter.err = fmt.Errorf("gocql: not enough columns to scan into: have %d want %d", len(dest), iter.meta.actualColCount)
+		return false
+	}
+
+	// i is the current position in dest; we could possibly replace it and just use
+	// slices of dest
+	i := 0
+	for _, col := range iter.meta.columns {
+		colBytes, err := iter.readColumn()
+		if err != nil {
+			iter.err = err
+			return false
+		}
+
+		n, err := scanColumn(colBytes, col, dest[i:])
+		if err != nil {
+			iter.err = err
+			return false
+		}
+		i += n
+	}
+
+	iter.pos++
+	return true
+}
+
+// GetCustomPayload returns any parsed custom payload results if given in the
+// response from Cassandra. Note that the result is not a copy.
+//
+// This additional feature of CQL Protocol v4
+// allows additional results and query information to be returned by
+// custom QueryHandlers running in your C* cluster.
+// See https://datastax.github.io/java-driver/manual/custom_payloads/
+func (iter *Iter) GetCustomPayload() map[string][]byte {
+	if iter.framer != nil {
+		return iter.framer.customPayload
+	}
+	return nil
+}
+
+// Warnings returns any warnings generated if given in the response from Cassandra.
+//
+// This is only available starting with CQL Protocol v4.
+func (iter *Iter) Warnings() []string {
+	if iter.framer != nil {
+		return iter.framer.header.warnings
+	}
+	return nil
+}
+
+// Close closes the iterator and returns any errors that happened during
+// the query or the iteration.
+func (iter *Iter) Close() error {
+	if atomic.CompareAndSwapInt32(&iter.closed, 0, 1) {
+		if iter.framer != nil {
+			iter.framer = nil
+		}
+	}
+
+	return iter.err
+}
+
+// WillSwitchPage detects if the iterator reached the end of the current page
+// and the next page is available.
+func (iter *Iter) WillSwitchPage() bool {
+	return iter.pos >= iter.numRows && iter.next != nil
+}
+
+// checkErrAndNotFound handles error and NotFound in one method.
+func (iter *Iter) checkErrAndNotFound() error {
+	if iter.err != nil {
+		return iter.err
+	} else if iter.numRows == 0 {
+		return ErrNotFound
+	}
+	return nil
+}
+
+// PageState returns the current paging state for a query which can be used for
+// subsequent queries to resume paging from this point.
+func (iter *Iter) PageState() []byte {
+	return iter.meta.pagingState
+}
+
+// NumRows returns the number of rows in this pagination; it will update when new
+// pages are fetched. It is not the total number of rows this iter
+// will return unless there is only a single page returned.
+func (iter *Iter) NumRows() int {
+	return iter.numRows
+}
+
+// nextIter holds state for fetching a single page in an iterator.
+// A single page might be attempted multiple times due to retries.
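+//
+// For comparison, manual paging is driven from the query side. Illustrative
+// sketch (the statement text and page size are assumptions):
+//
+//	var pageState []byte
+//	for {
+//		iter := session.Query(`SELECT id FROM events`).
+//			PageSize(100).PageState(pageState).Iter()
+//		// ... scan the rows of this page ...
+//		pageState = iter.PageState()
+//		if err := iter.Close(); err != nil || len(pageState) == 0 {
+//			break
+//		}
+//	}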
+type nextIter struct {
+	qry   *Query
+	pos   int
+	oncea sync.Once
+	once  sync.Once
+	next  *Iter
+}
+
+func (n *nextIter) fetchAsync() {
+	n.oncea.Do(func() {
+		go n.fetch()
+	})
+}
+
+func (n *nextIter) fetch() *Iter {
+	n.once.Do(func() {
+		// if the query was specifically run on a connection then re-use that
+		// connection when fetching the next results
+		if n.qry.conn != nil {
+			n.next = n.qry.conn.executeQuery(n.qry.Context(), n.qry)
+		} else {
+			n.next = n.qry.session.executeQuery(n.qry)
+		}
+	})
+	return n.next
+}
+
+type Batch struct {
+	Type                  BatchType
+	Entries               []BatchEntry
+	Cons                  Consistency
+	routingKey            []byte
+	CustomPayload         map[string][]byte
+	rt                    RetryPolicy
+	spec                  SpeculativeExecutionPolicy
+	trace                 Tracer
+	observer              BatchObserver
+	session               *Session
+	serialCons            SerialConsistency
+	defaultTimestamp      bool
+	defaultTimestampValue int64
+	context               context.Context
+	cancelBatch           func()
+	keyspace              string
+	metrics               *queryMetrics
+
+	// routingInfo is a pointer because Batch can be copied and a copyable struct can't hold a mutex.
+	routingInfo *queryRoutingInfo
+}
+
+// NewBatch creates a new batch operation without defaults from the cluster
+//
+// Deprecated: use session.NewBatch instead
+func NewBatch(typ BatchType) *Batch {
+	return &Batch{
+		Type:        typ,
+		metrics:     &queryMetrics{m: make(map[string]*hostMetrics)},
+		spec:        &NonSpeculativeExecution{},
+		routingInfo: &queryRoutingInfo{},
+	}
+}
+
+// NewBatch creates a new batch operation using defaults defined in the cluster
+func (s *Session) NewBatch(typ BatchType) *Batch {
+	s.mu.RLock()
+	batch := &Batch{
+		Type:             typ,
+		rt:               s.cfg.RetryPolicy,
+		serialCons:       s.cfg.SerialConsistency,
+		trace:            s.trace,
+		observer:         s.batchObserver,
+		session:          s,
+		Cons:             s.cons,
+		defaultTimestamp: s.cfg.DefaultTimestamp,
+		keyspace:         s.cfg.Keyspace,
+		metrics:          &queryMetrics{m: make(map[string]*hostMetrics)},
+		spec:             &NonSpeculativeExecution{},
+		routingInfo:      &queryRoutingInfo{},
+	}
+
+	s.mu.RUnlock()
+	return batch
+}
+
+// Trace enables tracing of this batch. Look at the documentation of the
+// Tracer interface to learn more about tracing.
+func (b *Batch) Trace(trace Tracer) *Batch {
+	b.trace = trace
+	return b
+}
+
+// Observer enables batch-level observer on this batch.
+// The provided observer will be called every time this batched query is executed.
+func (b *Batch) Observer(observer BatchObserver) *Batch {
+	b.observer = observer
+	return b
+}
+
+func (b *Batch) Keyspace() string {
+	return b.keyspace
+}
+
+// Batch has no reasonable equivalent of Query.Table().
+func (b *Batch) Table() string {
+	return b.routingInfo.table
+}
+
+// Attempts returns the number of attempts made to execute the batch.
+func (b *Batch) Attempts() int {
+	return b.metrics.attempts()
+}
+
+func (b *Batch) AddAttempts(i int, host *HostInfo) {
+	b.metrics.attempt(i, 0, host, false)
+}
+
+// Latency returns the average number of nanoseconds to execute a single attempt of the batch.
+func (b *Batch) Latency() int64 {
+	return b.metrics.latency()
+}
+
+func (b *Batch) AddLatency(l int64, host *HostInfo) {
+	b.metrics.attempt(0, time.Duration(l)*time.Nanosecond, host, false)
+}
+
+// GetConsistency returns the currently configured consistency level for the batch
+// operation.
+func (b *Batch) GetConsistency() Consistency {
+	return b.Cons
+}
+
+// SetConsistency sets the currently configured consistency level for the batch
+// operation.
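+//
+// Like other batch options, consistency is set before the batch is executed.
+// A typical flow might look like this (illustrative sketch; the table and
+// variables are assumptions):
+//
+//	b := session.NewBatch(LoggedBatch)
+//	b.Query(`INSERT INTO events (id, payload) VALUES (?, ?)`, id1, p1)
+//	b.Query(`INSERT INTO events (id, payload) VALUES (?, ?)`, id2, p2)
+//	err := session.ExecuteBatch(b)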
+
+// SetConsistency sets the currently configured consistency level for the batch
+// operation.
+func (b *Batch) SetConsistency(c Consistency) {
+	b.Cons = c
+}
+
+func (b *Batch) Context() context.Context {
+	if b.context == nil {
+		return context.Background()
+	}
+	return b.context
+}
+
+func (b *Batch) IsIdempotent() bool {
+	for _, entry := range b.Entries {
+		if !entry.Idempotent {
+			return false
+		}
+	}
+	return true
+}
+
+func (b *Batch) speculativeExecutionPolicy() SpeculativeExecutionPolicy {
+	return b.spec
+}
+
+func (b *Batch) SpeculativeExecutionPolicy(sp SpeculativeExecutionPolicy) *Batch {
+	b.spec = sp
+	return b
+}
+
+// Query adds the query to the batch operation
+func (b *Batch) Query(stmt string, args ...interface{}) {
+	b.Entries = append(b.Entries, BatchEntry{Stmt: stmt, Args: args})
+}
+
+// Bind adds the query to the batch operation and correlates it with a binding callback
+// that will be invoked when the batch is executed. The binding callback allows the application
+// to define which query argument values will be marshalled as part of the batch execution.
+func (b *Batch) Bind(stmt string, bind func(q *QueryInfo) ([]interface{}, error)) {
+	b.Entries = append(b.Entries, BatchEntry{Stmt: stmt, binding: bind})
+}
+
+func (b *Batch) retryPolicy() RetryPolicy {
+	return b.rt
+}
+
+// RetryPolicy sets the retry policy to use when executing the batch operation
+func (b *Batch) RetryPolicy(r RetryPolicy) *Batch {
+	b.rt = r
+	return b
+}
+
+func (b *Batch) withContext(ctx context.Context) ExecutableQuery {
+	return b.WithContext(ctx)
+}
+
+// WithContext returns a shallow copy of b with its context
+// set to ctx.
+//
+// The provided context controls the entire lifetime of executing a
+// query; queries will be canceled and return once the context is
+// canceled.
+func (b *Batch) WithContext(ctx context.Context) *Batch {
+	b2 := *b
+	b2.context = ctx
+	return &b2
+}
+
+// Deprecated: does nothing; cancel the context passed to WithContext instead.
+func (*Batch) Cancel() {
+	// TODO: delete
+}
+
+// Size returns the number of batch statements to be executed by the batch operation.
+func (b *Batch) Size() int {
+	return len(b.Entries)
+}
+
+// SerialConsistency sets the consistency level for the
+// serial phase of conditional updates. That consistency can only be
+// either SERIAL or LOCAL_SERIAL and if not present, it defaults to
+// SERIAL. This option will be ignored for anything other than a
+// conditional update/insert.
+//
+// Only available for protocol 3 and above
+func (b *Batch) SerialConsistency(cons SerialConsistency) *Batch {
+	b.serialCons = cons
+	return b
+}
+
+// DefaultTimestamp will enable the with default timestamp flag on the query.
+// If enabled, this will replace the server-side assigned
+// timestamp as the default timestamp. Note that a timestamp in the query itself
+// will still override this timestamp. This is entirely optional.
+//
+// Only available on protocol >= 3
+func (b *Batch) DefaultTimestamp(enable bool) *Batch {
+	b.defaultTimestamp = enable
+	return b
+}
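The context and consistency knobs above compose via the builder-style returns. A hedged sketch of a deadline-bound conditional batch (table, statement, and argument values are hypothetical):

```go
package main

import (
	"context"
	"log"
	"time"

	"github.com/gocql/gocql"
)

// applyWithDeadline executes a conditional batch with a 2s deadline and a
// LOCAL_SERIAL paxos phase. Table and statement are hypothetical.
func applyWithDeadline(session *gocql.Session, id gocql.UUID) {
	ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second)
	defer cancel()

	b := session.NewBatch(gocql.LoggedBatch).
		SerialConsistency(gocql.LocalSerial).
		WithContext(ctx)
	b.Query(`UPDATE accounts SET state = ? WHERE id = ? IF state = ?`, "active", id, "pending")

	if err := session.ExecuteBatch(b); err != nil {
		log.Fatal(err)
	}
}
```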
+
+// WithTimestamp will enable the with default timestamp flag on the query
+// like DefaultTimestamp does, but also allows defining a value for the timestamp.
+// It works the same way as USING TIMESTAMP in the query itself, but
+// should not break prepared query optimization.
+//
+// Only available on protocol >= 3
+func (b *Batch) WithTimestamp(timestamp int64) *Batch {
+	b.DefaultTimestamp(true)
+	b.defaultTimestampValue = timestamp
+	return b
+}
+
+func (b *Batch) attempt(keyspace string, end, start time.Time, iter *Iter, host *HostInfo) {
+	latency := end.Sub(start)
+	attempt, metricsForHost := b.metrics.attempt(1, latency, host, b.observer != nil)
+
+	if b.observer == nil {
+		return
+	}
+
+	statements := make([]string, len(b.Entries))
+	values := make([][]interface{}, len(b.Entries))
+
+	for i, entry := range b.Entries {
+		statements[i] = entry.Stmt
+		values[i] = entry.Args
+	}
+
+	b.observer.ObserveBatch(b.Context(), ObservedBatch{
+		Keyspace:   keyspace,
+		Statements: statements,
+		Values:     values,
+		Start:      start,
+		End:        end,
+		// Rows not used in batch observations // TODO - might be able to support it when using BatchCAS
+		Host:    host,
+		Metrics: metricsForHost,
+		Err:     iter.err,
+		Attempt: attempt,
+	})
+}
+
+func (b *Batch) GetRoutingKey() ([]byte, error) {
+	if b.routingKey != nil {
+		return b.routingKey, nil
+	}
+
+	if len(b.Entries) == 0 {
+		return nil, nil
+	}
+
+	entry := b.Entries[0]
+	if entry.binding != nil {
+		// bindings do not have the values; skip it like Query does.
+		return nil, nil
+	}
+	// try to determine the routing key
+	routingKeyInfo, err := b.session.routingKeyInfo(b.Context(), entry.Stmt)
+	if err != nil {
+		return nil, err
+	}
+
+	return createRoutingKey(routingKeyInfo, entry.Args)
+}
+
+func createRoutingKey(routingKeyInfo *routingKeyInfo, values []interface{}) ([]byte, error) {
+	if routingKeyInfo == nil {
+		return nil, nil
+	}
+
+	if len(routingKeyInfo.indexes) == 1 {
+		// single column routing key
+		routingKey, err := Marshal(
+			routingKeyInfo.types[0],
+			values[routingKeyInfo.indexes[0]],
+		)
+		if err != nil {
+			return nil, err
+		}
+		return routingKey, nil
+	}
+
+	// composite routing key
+	buf := bytes.NewBuffer(make([]byte, 0, 256))
+	for i := range routingKeyInfo.indexes {
+		encoded, err := Marshal(
+			routingKeyInfo.types[i],
+			values[routingKeyInfo.indexes[i]],
+		)
+		if err != nil {
+			return nil, err
+		}
+		// each component is framed as: 2-byte big-endian length, bytes, 0x00 terminator
+		lenBuf := []byte{0x00, 0x00}
+		binary.BigEndian.PutUint16(lenBuf, uint16(len(encoded)))
+		buf.Write(lenBuf)
+		buf.Write(encoded)
+		buf.WriteByte(0x00)
+	}
+	routingKey := buf.Bytes()
+	return routingKey, nil
+}
+
+func (b *Batch) borrowForExecution() {
+	// empty, because Batch has no equivalent of Query.Release()
+	// that would race with speculative executions.
+}
+
+func (b *Batch) releaseAfterExecution() {
+	// empty, because Batch has no equivalent of Query.Release()
+	// that would race with speculative executions.
+} + +type BatchType byte + +const ( + LoggedBatch BatchType = 0 + UnloggedBatch BatchType = 1 + CounterBatch BatchType = 2 +) + +type BatchEntry struct { + Stmt string + Args []interface{} + Idempotent bool + binding func(q *QueryInfo) ([]interface{}, error) +} + +type ColumnInfo struct { + Keyspace string + Table string + Name string + TypeInfo TypeInfo +} + +func (c ColumnInfo) String() string { + return fmt.Sprintf("[column keyspace=%s table=%s name=%s type=%v]", c.Keyspace, c.Table, c.Name, c.TypeInfo) +} + +// routing key indexes LRU cache +type routingKeyInfoLRU struct { + lru *lru.Cache + mu sync.Mutex +} + +type routingKeyInfo struct { + indexes []int + types []TypeInfo + keyspace string + table string +} + +func (r *routingKeyInfo) String() string { + return fmt.Sprintf("routing key index=%v types=%v", r.indexes, r.types) +} + +func (r *routingKeyInfoLRU) Remove(key string) { + r.mu.Lock() + r.lru.Remove(key) + r.mu.Unlock() +} + +// Max adjusts the maximum size of the cache and cleans up the oldest records if +// the new max is lower than the previous value. Not concurrency safe. +func (r *routingKeyInfoLRU) Max(max int) { + r.mu.Lock() + for r.lru.Len() > max { + r.lru.RemoveOldest() + } + r.lru.MaxEntries = max + r.mu.Unlock() +} + +type inflightCachedEntry struct { + wg sync.WaitGroup + err error + value interface{} +} + +// Tracer is the interface implemented by query tracers. Tracers have the +// ability to obtain a detailed event log of all events that happened during +// the execution of a query from Cassandra. Gathering this information might +// be essential for debugging and optimizing queries, but this feature should +// not be used on production systems with very high load. +type Tracer interface { + Trace(traceId []byte) +} + +type traceWriter struct { + session *Session + w io.Writer + mu sync.Mutex +} + +// NewTraceWriter returns a simple Tracer implementation that outputs +// the event log in a textual format. +func NewTraceWriter(session *Session, w io.Writer) Tracer { + return &traceWriter{session: session, w: w} +} + +func (t *traceWriter) Trace(traceId []byte) { + var ( + coordinator string + duration int + ) + iter := t.session.control.query(`SELECT coordinator, duration + FROM system_traces.sessions + WHERE session_id = ?`, traceId) + + iter.Scan(&coordinator, &duration) + if err := iter.Close(); err != nil { + t.mu.Lock() + fmt.Fprintln(t.w, "Error:", err) + t.mu.Unlock() + return + } + + var ( + timestamp time.Time + activity string + source string + elapsed int + thread string + ) + + t.mu.Lock() + defer t.mu.Unlock() + + fmt.Fprintf(t.w, "Tracing session %016x (coordinator: %s, duration: %v):\n", + traceId, coordinator, time.Duration(duration)*time.Microsecond) + + iter = t.session.control.query(`SELECT event_id, activity, source, source_elapsed, thread + FROM system_traces.events + WHERE session_id = ?`, traceId) + + for iter.Scan(×tamp, &activity, &source, &elapsed, &thread) { + fmt.Fprintf(t.w, "%s: %s [%s] (source: %s, elapsed: %d)\n", + timestamp.Format("2006/01/02 15:04:05.999999"), activity, thread, source, elapsed) + } + + if err := iter.Close(); err != nil { + fmt.Fprintln(t.w, "Error:", err) + } +} + +type ObservedQuery struct { + Keyspace string + Statement string + + // Values holds a slice of bound values for the query. + // Do not modify the values here, they are shared with multiple goroutines. 
+	Values []interface{}
+
+	Start time.Time // time immediately before the query was called
+	End   time.Time // time immediately after the query returned
+
+	// Rows is the number of rows in the current iter.
+	// In paginated queries, rows from previous scans are not counted.
+	// Rows is not used in batch queries and remains at the default value.
+	Rows int
+
+	// Host is the information about the host that performed the query.
+	Host *HostInfo
+
+	// Metrics are the metrics for this host.
+	Metrics *hostMetrics
+
+	// Err is the error in the query.
+	// It only tracks network errors and bad Cassandra syntax errors; in
+	// particular, a SELECT that finds no matches returns a nil error.
+	Err error
+
+	// Attempt is the index of the attempt at executing this query.
+	// The first attempt is number zero and any retries have a non-zero attempt number.
+	Attempt int
+}
+
+// QueryObserver is the interface implemented by query observers / stat collectors.
+//
+// Experimental, this interface and use may change
+type QueryObserver interface {
+	// ObserveQuery gets called on every query to cassandra, including all queries in an iterator when paging is enabled.
+	// It doesn't get called if there is no query because the session is closed or there are no connections available.
+	// The error reported only shows query errors, i.e. if a SELECT is valid but finds no matches it will be nil.
+	ObserveQuery(context.Context, ObservedQuery)
+}
+
+type ObservedBatch struct {
+	Keyspace   string
+	Statements []string
+
+	// Values holds a slice of bound values for each statement.
+	// Values[i] are bound values passed to Statements[i].
+	// Do not modify the values here, they are shared with multiple goroutines.
+	Values [][]interface{}
+
+	Start time.Time // time immediately before the batch query was called
+	End   time.Time // time immediately after the batch query returned
+
+	// Host is the information about the host that performed the batch.
+	Host *HostInfo
+
+	// Err is the error in the batch query.
+	// It only tracks network errors and bad Cassandra syntax errors; in
+	// particular, a SELECT that finds no matches returns a nil error.
+	Err error
+
+	// Metrics are the metrics for this host.
+	Metrics *hostMetrics
+
+	// Attempt is the index of the attempt at executing this query.
+	// The first attempt is number zero and any retries have a non-zero attempt number.
+	Attempt int
+}
+
+// BatchObserver is the interface implemented by batch observers / stat collectors.
+type BatchObserver interface {
+	// ObserveBatch gets called on every batch query to cassandra.
+	// It also gets called once for each query in a batch.
+	// It doesn't get called if there is no query because the session is closed or there are no connections available.
+	// The error reported only shows query errors, i.e. if a SELECT is valid but finds no matches it will be nil.
+	// Unlike QueryObserver.ObserveQuery it does no reporting on rows read.
+	ObserveBatch(context.Context, ObservedBatch)
+}
+
+type ObservedConnect struct {
+	// Host is the information about the host about to connect
+	Host *HostInfo
+
+	Start time.Time // time immediately before the dial is called
+	End   time.Time // time immediately after the dial returned
+
+	// Err is the connection error (if any)
+	Err error
+}
+
+// ConnectObserver is the interface implemented by connect observers / stat collectors.
+type ConnectObserver interface {
+	// ObserveConnect gets called when a new connection to cassandra is made.
+ ObserveConnect(ObservedConnect) +} + +type Error struct { + Code int + Message string +} + +func (e Error) Error() string { + return e.Message +} + +var ( + ErrNotFound = errors.New("not found") + ErrUnavailable = errors.New("unavailable") + ErrUnsupported = errors.New("feature not supported") + ErrTooManyStmts = errors.New("too many statements") + ErrUseStmt = errors.New("use statements aren't supported. Please see https://github.com/gocql/gocql for explanation.") + ErrSessionClosed = errors.New("session has been closed") + ErrNoConnections = errors.New("gocql: no hosts available in the pool") + ErrNoKeyspace = errors.New("no keyspace provided") + ErrKeyspaceDoesNotExist = errors.New("keyspace does not exist") + ErrNoMetadata = errors.New("no metadata available") +) + +type ErrProtocol struct{ error } + +func NewErrProtocol(format string, args ...interface{}) error { + return ErrProtocol{fmt.Errorf(format, args...)} +} + +// BatchSizeMaximum is the maximum number of statements a batch operation can have. +// This limit is set by cassandra and could change in the future. +const BatchSizeMaximum = 65535 diff --git a/vendor/github.com/gocql/gocql/token.go b/vendor/github.com/gocql/gocql/token.go new file mode 100644 index 000000000..7471299a9 --- /dev/null +++ b/vendor/github.com/gocql/gocql/token.go @@ -0,0 +1,222 @@ +// Copyright (c) 2015 The gocql Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package gocql + +import ( + "bytes" + "crypto/md5" + "fmt" + "math/big" + "sort" + "strconv" + "strings" + + "github.com/gocql/gocql/internal/murmur" +) + +// a token partitioner +type partitioner interface { + Name() string + Hash([]byte) token + ParseString(string) token +} + +// a token +type token interface { + fmt.Stringer + Less(token) bool +} + +// murmur3 partitioner and token +type murmur3Partitioner struct{} +type murmur3Token int64 + +func (p murmur3Partitioner) Name() string { + return "Murmur3Partitioner" +} + +func (p murmur3Partitioner) Hash(partitionKey []byte) token { + h1 := murmur.Murmur3H1(partitionKey) + return murmur3Token(h1) +} + +// murmur3 little-endian, 128-bit hash, but returns only h1 +func (p murmur3Partitioner) ParseString(str string) token { + val, _ := strconv.ParseInt(str, 10, 64) + return murmur3Token(val) +} + +func (m murmur3Token) String() string { + return strconv.FormatInt(int64(m), 10) +} + +func (m murmur3Token) Less(token token) bool { + return m < token.(murmur3Token) +} + +// order preserving partitioner and token +type orderedPartitioner struct{} +type orderedToken string + +func (p orderedPartitioner) Name() string { + return "OrderedPartitioner" +} + +func (p orderedPartitioner) Hash(partitionKey []byte) token { + // the partition key is the token + return orderedToken(partitionKey) +} + +func (p orderedPartitioner) ParseString(str string) token { + return orderedToken(str) +} + +func (o orderedToken) String() string { + return string(o) +} + +func (o orderedToken) Less(token token) bool { + return o < token.(orderedToken) +} + +// random partitioner and token +type randomPartitioner struct{} +type randomToken big.Int + +func (r randomPartitioner) Name() string { + return "RandomPartitioner" +} + +// 2 ** 128 +var maxHashInt, _ = new(big.Int).SetString("340282366920938463463374607431768211456", 10) + +func (p randomPartitioner) Hash(partitionKey []byte) token { + sum := md5.Sum(partitionKey) + val := new(big.Int) + val.SetBytes(sum[:]) + if sum[0] > 127 { + 
val.Sub(val, maxHashInt)
+		val.Abs(val)
+	}
+
+	return (*randomToken)(val)
+}
+
+func (p randomPartitioner) ParseString(str string) token {
+	val := new(big.Int)
+	val.SetString(str, 10)
+	return (*randomToken)(val)
+}
+
+func (r *randomToken) String() string {
+	return (*big.Int)(r).String()
+}
+
+func (r *randomToken) Less(token token) bool {
+	return (*big.Int)(r).Cmp((*big.Int)(token.(*randomToken))) < 0
+}
+
+type hostToken struct {
+	token token
+	host  *HostInfo
+}
+
+func (ht hostToken) String() string {
+	return fmt.Sprintf("{token=%v host=%v}", ht.token, ht.host.HostID())
+}
+
+// a data structure for organizing the relationship between tokens and hosts
+type tokenRing struct {
+	partitioner partitioner
+
+	// tokens map a token range to its primary replica.
+	// The elements in tokens are sorted by token ascending.
+	// The range for a given item in tokens starts after the preceding range and ends with the token specified in
+	// token. The end token is part of the range.
+	// The lowest (i.e. index 0) range wraps around the ring (its preceding range is the one with the largest index).
+	tokens []hostToken
+
+	hosts []*HostInfo
+}
+
+func newTokenRing(partitioner string, hosts []*HostInfo) (*tokenRing, error) {
+	tokenRing := &tokenRing{
+		hosts: hosts,
+	}
+
+	if strings.HasSuffix(partitioner, "Murmur3Partitioner") {
+		tokenRing.partitioner = murmur3Partitioner{}
+	} else if strings.HasSuffix(partitioner, "OrderedPartitioner") {
+		tokenRing.partitioner = orderedPartitioner{}
+	} else if strings.HasSuffix(partitioner, "RandomPartitioner") {
+		tokenRing.partitioner = randomPartitioner{}
+	} else {
+		return nil, fmt.Errorf("unsupported partitioner '%s'", partitioner)
+	}
+
+	for _, host := range hosts {
+		for _, strToken := range host.Tokens() {
+			token := tokenRing.partitioner.ParseString(strToken)
+			tokenRing.tokens = append(tokenRing.tokens, hostToken{token, host})
+		}
+	}
+
+	sort.Sort(tokenRing)
+
+	return tokenRing, nil
+}
+
+func (t *tokenRing) Len() int {
+	return len(t.tokens)
+}
+
+func (t *tokenRing) Less(i, j int) bool {
+	return t.tokens[i].token.Less(t.tokens[j].token)
+}
+
+func (t *tokenRing) Swap(i, j int) {
+	t.tokens[i], t.tokens[j] = t.tokens[j], t.tokens[i]
+}
+
+func (t *tokenRing) String() string {
+	buf := &bytes.Buffer{}
+	buf.WriteString("TokenRing(")
+	if t.partitioner != nil {
+		buf.WriteString(t.partitioner.Name())
+	}
+	buf.WriteString("){")
+	sep := ""
+	for i, th := range t.tokens {
+		buf.WriteString(sep)
+		sep = ","
+		buf.WriteString("\n\t[")
+		buf.WriteString(strconv.Itoa(i))
+		buf.WriteString("]")
+		buf.WriteString(th.token.String())
+		buf.WriteString(":")
+		buf.WriteString(th.host.ConnectAddress().String())
+	}
+	buf.WriteString("\n}")
+	return buf.String()
+}
+
+func (t *tokenRing) GetHostForToken(token token) (host *HostInfo, endToken token) {
+	if t == nil || len(t.tokens) == 0 {
+		return nil, nil
+	}
+
+	// find the primary replica
+	p := sort.Search(len(t.tokens), func(i int) bool {
+		return !t.tokens[i].token.Less(token)
+	})
+
+	if p == len(t.tokens) {
+		// wrap around to the first in the ring
+		p = 0
+	}
+
+	v := t.tokens[p]
+	return v.host, v.token
+}
diff --git a/vendor/github.com/gocql/gocql/topology.go b/vendor/github.com/gocql/gocql/topology.go
new file mode 100644
index 000000000..a154f0fb9
--- /dev/null
+++ b/vendor/github.com/gocql/gocql/topology.go
@@ -0,0 +1,294 @@
+package gocql
+
+import (
+	"fmt"
+	"sort"
+	"strconv"
+	"strings"
+)
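The ring lookup used by `GetHostForToken` above, and by `replicasFor` below, reduces to a binary search with wraparound: the first token greater than or equal to the lookup token owns the range, and if no such token exists the lowest range wraps around the ring. A tiny standalone sketch of that idea, with `int64` values standing in for the unexported `token` interface:

```go
package main

import (
	"fmt"
	"sort"
)

// ownerIndex returns the index of the first ring token >= t, wrapping to 0
// when t is greater than every token (the ring's lowest range wraps around).
func ownerIndex(ring []int64, t int64) int {
	p := sort.Search(len(ring), func(i int) bool { return ring[i] >= t })
	if p == len(ring) {
		p = 0 // wrap around to the first token in the ring
	}
	return p
}

func main() {
	ring := []int64{-100, 0, 250, 900} // sorted tokens, one per (host, token) pair
	fmt.Println(ownerIndex(ring, 10))  // 2: token 250 owns the range (0, 250]
	fmt.Println(ownerIndex(ring, 901)) // 0: wraps around past the largest token
}
```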
+type hostTokens struct {
+	// token is the end (inclusive) of the token range these hosts belong to
+	token token
+	hosts []*HostInfo
+}
+
+// tokenRingReplicas maps token ranges to lists of replicas.
+// The elements in tokenRingReplicas are sorted by token ascending.
+// The range for a given item in tokenRingReplicas starts after the preceding range and ends with the token specified in
+// token. The end token is part of the range.
+// The lowest (i.e. index 0) range wraps around the ring (its preceding range is the one with the largest index).
+type tokenRingReplicas []hostTokens
+
+func (h tokenRingReplicas) Less(i, j int) bool { return h[i].token.Less(h[j].token) }
+func (h tokenRingReplicas) Len() int           { return len(h) }
+func (h tokenRingReplicas) Swap(i, j int)      { h[i], h[j] = h[j], h[i] }
+
+func (h tokenRingReplicas) replicasFor(t token) *hostTokens {
+	if len(h) == 0 {
+		return nil
+	}
+
+	p := sort.Search(len(h), func(i int) bool {
+		return !h[i].token.Less(t)
+	})
+
+	if p >= len(h) {
+		// rollover
+		p = 0
+	}
+
+	return &h[p]
+}
+
+type placementStrategy interface {
+	replicaMap(tokenRing *tokenRing) tokenRingReplicas
+	replicationFactor(dc string) int
+}
+
+func getReplicationFactorFromOpts(val interface{}) (int, error) {
+	switch v := val.(type) {
+	case int:
+		if v < 0 {
+			return 0, fmt.Errorf("invalid replication_factor %d", v)
+		}
+		return v, nil
+	case string:
+		n, err := strconv.Atoi(v)
+		if err != nil {
+			return 0, fmt.Errorf("invalid replication_factor %q: %v", v, err)
+		} else if n < 0 {
+			return 0, fmt.Errorf("invalid replication_factor %d", n)
+		}
+		return n, nil
+	default:
+		return 0, fmt.Errorf("unknown replication_factor type %T", v)
+	}
+}
+
+func getStrategy(ks *KeyspaceMetadata, logger StdLogger) placementStrategy {
+	switch {
+	case strings.Contains(ks.StrategyClass, "SimpleStrategy"):
+		rf, err := getReplicationFactorFromOpts(ks.StrategyOptions["replication_factor"])
+		if err != nil {
+			logger.Printf("parse rf for keyspace %q: %v", ks.Name, err)
+			return nil
+		}
+		return &simpleStrategy{rf: rf}
+	case strings.Contains(ks.StrategyClass, "NetworkTopologyStrategy"):
+		dcs := make(map[string]int)
+		for dc, rf := range ks.StrategyOptions {
+			if dc == "class" {
+				continue
+			}
+
+			rf, err := getReplicationFactorFromOpts(rf)
+			if err != nil {
+				logger.Printf("parse rf for keyspace %q, dc %q: %v", ks.Name, dc, err)
+				// skip DC if the rf is invalid/unsupported, so that we can at least work with other working DCs.
+				continue
+			}
+
+			dcs[dc] = rf
+		}
+		return &networkTopology{dcs: dcs}
+	case strings.Contains(ks.StrategyClass, "LocalStrategy"):
+		return nil
+	default:
+		logger.Printf("parse rf for keyspace %q: unsupported strategy class: %v", ks.Name, ks.StrategyClass)
+		return nil
+	}
+}
+
+type simpleStrategy struct {
+	rf int
+}
+
+func (s *simpleStrategy) replicationFactor(dc string) int {
+	return s.rf
+}
+
+func (s *simpleStrategy) replicaMap(tokenRing *tokenRing) tokenRingReplicas {
+	tokens := tokenRing.tokens
+	ring := make(tokenRingReplicas, len(tokens))
+
+	for i, th := range tokens {
+		replicas := make([]*HostInfo, 0, s.rf)
+		seen := make(map[*HostInfo]bool)
+
+		for j := 0; j < len(tokens) && len(replicas) < s.rf; j++ {
+			h := tokens[(i+j)%len(tokens)]
+			if !seen[h.host] {
+				replicas = append(replicas, h.host)
+				seen[h.host] = true
+			}
+		}
+
+		ring[i] = hostTokens{th.token, replicas}
+	}
+
+	sort.Sort(ring)
+
+	return ring
+}
+
+type networkTopology struct {
+	dcs map[string]int
+}
+
+func (n *networkTopology) replicationFactor(dc string) int {
+	return n.dcs[dc]
+}
+
+func (n *networkTopology) haveRF(replicaCounts map[string]int) bool {
+	if len(replicaCounts) != len(n.dcs) {
+		return false
+	}
+
+	for dc, rf := range n.dcs {
+		if rf != replicaCounts[dc] {
+			return false
+		}
+	}
+
+	return true
+}
+
+func (n *networkTopology) replicaMap(tokenRing *tokenRing) tokenRingReplicas {
+	dcRacks := make(map[string]map[string]struct{}, len(n.dcs))
+	// skipped hosts in a dc
+	skipped := make(map[string][]*HostInfo, len(n.dcs))
+	// number of replicas per dc
+	replicasInDC := make(map[string]int, len(n.dcs))
+	// dc -> racks
+	seenDCRacks := make(map[string]map[string]struct{}, len(n.dcs))
+
+	for _, h := range tokenRing.hosts {
+		dc := h.DataCenter()
+		rack := h.Rack()
+
+		racks, ok := dcRacks[dc]
+		if !ok {
+			racks = make(map[string]struct{})
+			dcRacks[dc] = racks
+		}
+		racks[rack] = struct{}{}
+	}
+
+	for dc, racks := range dcRacks {
+		replicasInDC[dc] = 0
+		seenDCRacks[dc] = make(map[string]struct{}, len(racks))
+	}
+
+	tokens := tokenRing.tokens
+	replicaRing := make(tokenRingReplicas, 0, len(tokens))
+
+	var totalRF int
+	for _, rf := range n.dcs {
+		totalRF += rf
+	}
+
+	for i, th := range tokenRing.tokens {
+		if rf := n.dcs[th.host.DataCenter()]; rf == 0 {
+			// skip this token since there is no replica in this datacenter.
+			continue
+		}
+
+		for k, v := range skipped {
+			skipped[k] = v[:0]
+		}
+
+		for dc := range n.dcs {
+			replicasInDC[dc] = 0
+			for rack := range seenDCRacks[dc] {
+				delete(seenDCRacks[dc], rack)
+			}
+		}
+
+		replicas := make([]*HostInfo, 0, totalRF)
+		for j := 0; j < len(tokens) && (len(replicas) < totalRF && !n.haveRF(replicasInDC)); j++ {
+			// TODO: ensure we don't add the same host twice
+			p := i + j
+			if p >= len(tokens) {
+				p -= len(tokens)
+			}
+			h := tokens[p].host
+
+			dc := h.DataCenter()
+			rack := h.Rack()
+
+			rf := n.dcs[dc]
+			if rf == 0 {
+				// skip this DC, we don't know about it or its replication factor is zero
+				continue
+			} else if replicasInDC[dc] >= rf {
+				if replicasInDC[dc] > rf {
+					panic(fmt.Sprintf("replica overflow. rf=%d have=%d in dc %q", rf, replicasInDC[dc], dc))
+				}
+
+				// have enough replicas in this DC
+				continue
+			} else if _, ok := dcRacks[dc][rack]; !ok {
+				// don't know about this rack
+				continue
+			}
+
+			racks := seenDCRacks[dc]
+			if _, ok := racks[rack]; ok && len(racks) == len(dcRacks[dc]) {
+				// we have been through all the racks and don't have RF yet, add this
+				replicas = append(replicas, h)
+				replicasInDC[dc]++
+			} else if !ok {
+				if racks == nil {
+					racks = make(map[string]struct{}, 1)
+					seenDCRacks[dc] = racks
+				}
+
+				// new rack
+				racks[rack] = struct{}{}
+				replicas = append(replicas, h)
+				r := replicasInDC[dc] + 1
+
+				if len(racks) == len(dcRacks[dc]) {
+					// if we have been through all the racks, drain the rest of the skipped
+					// hosts until we have RF. The next iteration will skip in the block
+					// above
+					skippedHosts := skipped[dc]
+					var k int
+					for ; k < len(skippedHosts) && r+k < rf; k++ {
+						sh := skippedHosts[k]
+						replicas = append(replicas, sh)
+					}
+					r += k
+					skipped[dc] = skippedHosts[k:]
+				}
+				replicasInDC[dc] = r
+			} else {
+				// already seen this rack; keep hold of this host in case
+				// we don't get enough for rf
+				skipped[dc] = append(skipped[dc], h)
+			}
+		}
+
+		if len(replicas) == 0 {
+			panic(fmt.Sprintf("no replicas for token: %v", th.token))
+		} else if !replicas[0].Equal(th.host) {
+			panic(fmt.Sprintf("first replica is not the primary replica for the token: expected %v got %v", replicas[0].ConnectAddress(), th.host.ConnectAddress()))
+		}
+
+		replicaRing = append(replicaRing, hostTokens{th.token, replicas})
+	}
+
+	dcsWithReplicas := 0
+	for _, dc := range n.dcs {
+		if dc > 0 {
+			dcsWithReplicas++
+		}
+	}
+
+	if dcsWithReplicas == len(dcRacks) && len(replicaRing) != len(tokens) {
+		panic(fmt.Sprintf("token map different size to token ring: got %d expected %d", len(replicaRing), len(tokens)))
+	}
+
+	return replicaRing
+}
diff --git a/vendor/github.com/gocql/gocql/uuid.go b/vendor/github.com/gocql/gocql/uuid.go
new file mode 100644
index 000000000..acdd81f94
--- /dev/null
+++ b/vendor/github.com/gocql/gocql/uuid.go
@@ -0,0 +1,324 @@
+// Copyright (c) 2012 The gocql Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package gocql
+
+// The uuid package can be used to generate and parse universally unique
+// identifiers, a standardized format in the form of a 128 bit number.
+//
+// http://tools.ietf.org/html/rfc4122
+
+import (
+	"crypto/rand"
+	"errors"
+	"fmt"
+	"io"
+	"net"
+	"strings"
+	"sync/atomic"
+	"time"
+)
+
+type UUID [16]byte
+
+var hardwareAddr []byte
+var clockSeq uint32
+
+const (
+	VariantNCSCompat = 0
+	VariantIETF      = 2
+	VariantMicrosoft = 6
+	VariantFuture    = 7
+)
+
+func init() {
+	if interfaces, err := net.Interfaces(); err == nil {
+		for _, i := range interfaces {
+			if i.Flags&net.FlagLoopback == 0 && len(i.HardwareAddr) > 0 {
+				hardwareAddr = i.HardwareAddr
+				break
+			}
+		}
+	}
+	if hardwareAddr == nil {
+		// If we failed to obtain the MAC address of the current computer,
+		// we will use a randomly generated 6 byte sequence instead and set
+		// the multicast bit as recommended in RFC 4122.
+ hardwareAddr = make([]byte, 6) + _, err := io.ReadFull(rand.Reader, hardwareAddr) + if err != nil { + panic(err) + } + hardwareAddr[0] = hardwareAddr[0] | 0x01 + } + + // initialize the clock sequence with a random number + var clockSeqRand [2]byte + io.ReadFull(rand.Reader, clockSeqRand[:]) + clockSeq = uint32(clockSeqRand[1])<<8 | uint32(clockSeqRand[0]) +} + +// ParseUUID parses a 32 digit hexadecimal number (that might contain hypens) +// representing an UUID. +func ParseUUID(input string) (UUID, error) { + var u UUID + j := 0 + for _, r := range input { + switch { + case r == '-' && j&1 == 0: + continue + case r >= '0' && r <= '9' && j < 32: + u[j/2] |= byte(r-'0') << uint(4-j&1*4) + case r >= 'a' && r <= 'f' && j < 32: + u[j/2] |= byte(r-'a'+10) << uint(4-j&1*4) + case r >= 'A' && r <= 'F' && j < 32: + u[j/2] |= byte(r-'A'+10) << uint(4-j&1*4) + default: + return UUID{}, fmt.Errorf("invalid UUID %q", input) + } + j += 1 + } + if j != 32 { + return UUID{}, fmt.Errorf("invalid UUID %q", input) + } + return u, nil +} + +// UUIDFromBytes converts a raw byte slice to an UUID. +func UUIDFromBytes(input []byte) (UUID, error) { + var u UUID + if len(input) != 16 { + return u, errors.New("UUIDs must be exactly 16 bytes long") + } + + copy(u[:], input) + return u, nil +} + +func MustRandomUUID() UUID { + uuid, err := RandomUUID() + if err != nil { + panic(err) + } + return uuid +} + +// RandomUUID generates a totally random UUID (version 4) as described in +// RFC 4122. +func RandomUUID() (UUID, error) { + var u UUID + _, err := io.ReadFull(rand.Reader, u[:]) + if err != nil { + return u, err + } + u[6] &= 0x0F // clear version + u[6] |= 0x40 // set version to 4 (random uuid) + u[8] &= 0x3F // clear variant + u[8] |= 0x80 // set to IETF variant + return u, nil +} + +var timeBase = time.Date(1582, time.October, 15, 0, 0, 0, 0, time.UTC).Unix() + +// getTimestamp converts time to UUID (version 1) timestamp. +// It must be an interval of 100-nanoseconds since timeBase. +func getTimestamp(t time.Time) int64 { + utcTime := t.In(time.UTC) + ts := int64(utcTime.Unix()-timeBase)*10000000 + int64(utcTime.Nanosecond()/100) + + return ts +} + +// TimeUUID generates a new time based UUID (version 1) using the current +// time as the timestamp. +func TimeUUID() UUID { + return UUIDFromTime(time.Now()) +} + +// The min and max clock values for a UUID. +// +// Cassandra's TimeUUIDType compares the lsb parts as signed byte arrays. +// Thus, the min value for each byte is -128 and the max is +127. +const ( + minClock = 0x8080 + maxClock = 0x7f7f +) + +// The min and max node values for a UUID. +// +// See explanation about Cassandra's TimeUUIDType comparison logic above. +var ( + minNode = []byte{0x80, 0x80, 0x80, 0x80, 0x80, 0x80} + maxNode = []byte{0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f} +) + +// MinTimeUUID generates a "fake" time based UUID (version 1) which will be +// the smallest possible UUID generated for the provided timestamp. +// +// UUIDs generated by this function are not unique and are mostly suitable only +// in queries to select a time range of a Cassandra's TimeUUID column. +func MinTimeUUID(t time.Time) UUID { + return TimeUUIDWith(getTimestamp(t), minClock, minNode) +} + +// MaxTimeUUID generates a "fake" time based UUID (version 1) which will be +// the biggest possible UUID generated for the provided timestamp. +// +// UUIDs generated by this function are not unique and are mostly suitable only +// in queries to select a time range of a Cassandra's TimeUUID column. 
+func MaxTimeUUID(t time.Time) UUID { + return TimeUUIDWith(getTimestamp(t), maxClock, maxNode) +} + +// UUIDFromTime generates a new time based UUID (version 1) as described in +// RFC 4122. This UUID contains the MAC address of the node that generated +// the UUID, the given timestamp and a sequence number. +func UUIDFromTime(t time.Time) UUID { + ts := getTimestamp(t) + clock := atomic.AddUint32(&clockSeq, 1) + + return TimeUUIDWith(ts, clock, hardwareAddr) +} + +// TimeUUIDWith generates a new time based UUID (version 1) as described in +// RFC4122 with given parameters. t is the number of 100's of nanoseconds +// since 15 Oct 1582 (60bits). clock is the number of clock sequence (14bits). +// node is a slice to gurarantee the uniqueness of the UUID (up to 6bytes). +// Note: calling this function does not increment the static clock sequence. +func TimeUUIDWith(t int64, clock uint32, node []byte) UUID { + var u UUID + + u[0], u[1], u[2], u[3] = byte(t>>24), byte(t>>16), byte(t>>8), byte(t) + u[4], u[5] = byte(t>>40), byte(t>>32) + u[6], u[7] = byte(t>>56)&0x0F, byte(t>>48) + + u[8] = byte(clock >> 8) + u[9] = byte(clock) + + copy(u[10:], node) + + u[6] |= 0x10 // set version to 1 (time based uuid) + u[8] &= 0x3F // clear variant + u[8] |= 0x80 // set to IETF variant + + return u +} + +// String returns the UUID in it's canonical form, a 32 digit hexadecimal +// number in the form of xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx. +func (u UUID) String() string { + var offsets = [...]int{0, 2, 4, 6, 9, 11, 14, 16, 19, 21, 24, 26, 28, 30, 32, 34} + const hexString = "0123456789abcdef" + r := make([]byte, 36) + for i, b := range u { + r[offsets[i]] = hexString[b>>4] + r[offsets[i]+1] = hexString[b&0xF] + } + r[8] = '-' + r[13] = '-' + r[18] = '-' + r[23] = '-' + return string(r) + +} + +// Bytes returns the raw byte slice for this UUID. A UUID is always 128 bits +// (16 bytes) long. +func (u UUID) Bytes() []byte { + return u[:] +} + +// Variant returns the variant of this UUID. This package will only generate +// UUIDs in the IETF variant. +func (u UUID) Variant() int { + x := u[8] + if x&0x80 == 0 { + return VariantNCSCompat + } + if x&0x40 == 0 { + return VariantIETF + } + if x&0x20 == 0 { + return VariantMicrosoft + } + return VariantFuture +} + +// Version extracts the version of this UUID variant. The RFC 4122 describes +// five kinds of UUIDs. +func (u UUID) Version() int { + return int(u[6] & 0xF0 >> 4) +} + +// Node extracts the MAC address of the node who generated this UUID. It will +// return nil if the UUID is not a time based UUID (version 1). +func (u UUID) Node() []byte { + if u.Version() != 1 { + return nil + } + return u[10:] +} + +// Clock extracts the clock sequence of this UUID. It will return zero if the +// UUID is not a time based UUID (version 1). +func (u UUID) Clock() uint32 { + if u.Version() != 1 { + return 0 + } + + // Clock sequence is the lower 14bits of u[8:10] + return uint32(u[8]&0x3F)<<8 | uint32(u[9]) +} + +// Timestamp extracts the timestamp information from a time based UUID +// (version 1). +func (u UUID) Timestamp() int64 { + if u.Version() != 1 { + return 0 + } + return int64(uint64(u[0])<<24|uint64(u[1])<<16| + uint64(u[2])<<8|uint64(u[3])) + + int64(uint64(u[4])<<40|uint64(u[5])<<32) + + int64(uint64(u[6]&0x0F)<<56|uint64(u[7])<<48) +} + +// Time is like Timestamp, except that it returns a time.Time. 
+func (u UUID) Time() time.Time { + if u.Version() != 1 { + return time.Time{} + } + t := u.Timestamp() + sec := t / 1e7 + nsec := (t % 1e7) * 100 + return time.Unix(sec+timeBase, nsec).UTC() +} + +// Marshaling for JSON +func (u UUID) MarshalJSON() ([]byte, error) { + return []byte(`"` + u.String() + `"`), nil +} + +// Unmarshaling for JSON +func (u *UUID) UnmarshalJSON(data []byte) error { + str := strings.Trim(string(data), `"`) + if len(str) > 36 { + return fmt.Errorf("invalid JSON UUID %s", str) + } + + parsed, err := ParseUUID(str) + if err == nil { + copy(u[:], parsed[:]) + } + + return err +} + +func (u UUID) MarshalText() ([]byte, error) { + return []byte(u.String()), nil +} + +func (u *UUID) UnmarshalText(text []byte) (err error) { + *u, err = ParseUUID(string(text)) + return +} diff --git a/vendor/github.com/gocql/gocql/version.go b/vendor/github.com/gocql/gocql/version.go new file mode 100644 index 000000000..015b40e1e --- /dev/null +++ b/vendor/github.com/gocql/gocql/version.go @@ -0,0 +1,28 @@ +package gocql + +import "runtime/debug" + +const ( + mainModule = "github.com/gocql/gocql" +) + +var driverName string + +var driverVersion string + +func init() { + buildInfo, ok := debug.ReadBuildInfo() + if ok { + for _, d := range buildInfo.Deps { + if d.Path == mainModule { + driverName = mainModule + driverVersion = d.Version + if d.Replace != nil { + driverName = d.Replace.Path + driverVersion = d.Replace.Version + } + break + } + } + } +} diff --git a/vendor/github.com/hailocab/go-hostpool/.gitignore b/vendor/github.com/hailocab/go-hostpool/.gitignore new file mode 100644 index 000000000..00268614f --- /dev/null +++ b/vendor/github.com/hailocab/go-hostpool/.gitignore @@ -0,0 +1,22 @@ +# Compiled Object files, Static and Dynamic libs (Shared Objects) +*.o +*.a +*.so + +# Folders +_obj +_test + +# Architecture specific extensions/prefixes +*.[568vq] +[568vq].out + +*.cgo1.go +*.cgo2.c +_cgo_defun.c +_cgo_gotypes.go +_cgo_export.* + +_testmain.go + +*.exe diff --git a/vendor/github.com/hailocab/go-hostpool/.travis.yml b/vendor/github.com/hailocab/go-hostpool/.travis.yml new file mode 100644 index 000000000..e69de29bb diff --git a/vendor/github.com/hailocab/go-hostpool/LICENSE b/vendor/github.com/hailocab/go-hostpool/LICENSE new file mode 100644 index 000000000..f24db89c4 --- /dev/null +++ b/vendor/github.com/hailocab/go-hostpool/LICENSE @@ -0,0 +1,21 @@ +The MIT License (MIT) + +Copyright (c) 2015 Bitly + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. 
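Before moving on to the vendored go-hostpool dependency, a short sketch tying the time-UUID helpers above together. The `MinTimeUUID`/`MaxTimeUUID` pair is typically used to bound a `timeuuid` column scan; the printed WHERE clause and column name are hypothetical:

```go
package main

import (
	"fmt"
	"time"

	"github.com/gocql/gocql"
)

func main() {
	end := time.Now()
	start := end.Add(-1 * time.Hour)

	// MinTimeUUID and MaxTimeUUID produce non-unique "fake" version 1 UUIDs
	// that are only meant for range comparisons against a timeuuid column.
	lo := gocql.MinTimeUUID(start)
	hi := gocql.MaxTimeUUID(end)
	fmt.Printf("WHERE id > %s AND id < %s\n", lo, hi)
}
```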
diff --git a/vendor/github.com/hailocab/go-hostpool/README.md b/vendor/github.com/hailocab/go-hostpool/README.md new file mode 100644 index 000000000..7f4437277 --- /dev/null +++ b/vendor/github.com/hailocab/go-hostpool/README.md @@ -0,0 +1,17 @@ +go-hostpool +=========== + +A Go package to intelligently and flexibly pool among multiple hosts from your Go application. +Host selection can operate in round robin or epsilon greedy mode, and unresponsive hosts are +avoided. +Usage example: + +```go +hp := hostpool.NewEpsilonGreedy([]string{"a", "b"}, 0, &hostpool.LinearEpsilonValueCalculator{}) +hostResponse := hp.Get() +hostname := hostResponse.Host() +err := _ // (make a request with hostname) +hostResponse.Mark(err) +``` + +View more detailed documentation on [godoc.org](http://godoc.org/github.com/bitly/go-hostpool) diff --git a/vendor/github.com/hailocab/go-hostpool/epsilon_greedy.go b/vendor/github.com/hailocab/go-hostpool/epsilon_greedy.go new file mode 100644 index 000000000..8627aa5cd --- /dev/null +++ b/vendor/github.com/hailocab/go-hostpool/epsilon_greedy.go @@ -0,0 +1,220 @@ +package hostpool + +import ( + "log" + "math/rand" + "time" +) + +type epsilonHostPoolResponse struct { + standardHostPoolResponse + started time.Time + ended time.Time +} + +func (r *epsilonHostPoolResponse) Mark(err error) { + r.Do(func() { + r.ended = time.Now() + doMark(err, r) + }) +} + +type epsilonGreedyHostPool struct { + standardHostPool // TODO - would be nifty if we could embed HostPool and Locker interfaces + epsilon float32 // this is our exploration factor + decayDuration time.Duration + EpsilonValueCalculator // embed the epsilonValueCalculator + timer + quit chan bool +} + +// Construct an Epsilon Greedy HostPool +// +// Epsilon Greedy is an algorithm that allows HostPool not only to track failure state, +// but also to learn about "better" options in terms of speed, and to pick from available hosts +// based on how well they perform. This gives a weighted request rate to better +// performing hosts, while still distributing requests to all hosts (proportionate to their performance). +// The interface is the same as the standard HostPool, but be sure to mark the HostResponse immediately +// after executing the request to the host, as that will stop the implicitly running request timer. +// +// A good overview of Epsilon Greedy is here http://stevehanov.ca/blog/index.php?id=132 +// +// To compute the weighting scores, we perform a weighted average of recent response times, over the course of +// `decayDuration`. decayDuration may be set to 0 to use the default value of 5 minutes +// We then use the supplied EpsilonValueCalculator to calculate a score from that weighted average response time. +func NewEpsilonGreedy(hosts []string, decayDuration time.Duration, calc EpsilonValueCalculator) HostPool { + + if decayDuration <= 0 { + decayDuration = defaultDecayDuration + } + stdHP := New(hosts).(*standardHostPool) + p := &epsilonGreedyHostPool{ + standardHostPool: *stdHP, + epsilon: float32(initialEpsilon), + decayDuration: decayDuration, + EpsilonValueCalculator: calc, + timer: &realTimer{}, + quit: make(chan bool), + } + + // allocate structures + for _, h := range p.hostList { + h.epsilonCounts = make([]int64, epsilonBuckets) + h.epsilonValues = make([]int64, epsilonBuckets) + } + go p.epsilonGreedyDecay() + return p +} + +func (p *epsilonGreedyHostPool) Close() { + // No need to do p.quit <- true as close(p.quit) does the trick. 
+ close(p.quit) +} + +func (p *epsilonGreedyHostPool) SetEpsilon(newEpsilon float32) { + p.Lock() + defer p.Unlock() + p.epsilon = newEpsilon +} + +func (p *epsilonGreedyHostPool) SetHosts(hosts []string) { + p.Lock() + defer p.Unlock() + p.standardHostPool.setHosts(hosts) + for _, h := range p.hostList { + h.epsilonCounts = make([]int64, epsilonBuckets) + h.epsilonValues = make([]int64, epsilonBuckets) + } +} + +func (p *epsilonGreedyHostPool) epsilonGreedyDecay() { + durationPerBucket := p.decayDuration / epsilonBuckets + ticker := time.NewTicker(durationPerBucket) + for { + select { + case <-p.quit: + ticker.Stop() + return + case <-ticker.C: + p.performEpsilonGreedyDecay() + } + } +} +func (p *epsilonGreedyHostPool) performEpsilonGreedyDecay() { + p.Lock() + for _, h := range p.hostList { + h.epsilonIndex += 1 + h.epsilonIndex = h.epsilonIndex % epsilonBuckets + h.epsilonCounts[h.epsilonIndex] = 0 + h.epsilonValues[h.epsilonIndex] = 0 + } + p.Unlock() +} + +func (p *epsilonGreedyHostPool) Get() HostPoolResponse { + p.Lock() + defer p.Unlock() + host := p.getEpsilonGreedy() + if host == "" { + return nil + } + + started := time.Now() + return &epsilonHostPoolResponse{ + standardHostPoolResponse: standardHostPoolResponse{host: host, pool: p}, + started: started, + } +} + +func (p *epsilonGreedyHostPool) getEpsilonGreedy() string { + var hostToUse *hostEntry + + // this is our exploration phase + if rand.Float32() < p.epsilon { + p.epsilon = p.epsilon * epsilonDecay + if p.epsilon < minEpsilon { + p.epsilon = minEpsilon + } + return p.getRoundRobin() + } + + // calculate values for each host in the 0..1 range (but not ormalized) + var possibleHosts []*hostEntry + now := time.Now() + var sumValues float64 + for _, h := range p.hostList { + if h.canTryHost(now) { + v := h.getWeightedAverageResponseTime() + if v > 0 { + ev := p.CalcValueFromAvgResponseTime(v) + h.epsilonValue = ev + sumValues += ev + possibleHosts = append(possibleHosts, h) + } + } + } + + if len(possibleHosts) != 0 { + // now normalize to the 0..1 range to get a percentage + for _, h := range possibleHosts { + h.epsilonPercentage = h.epsilonValue / sumValues + } + + // do a weighted random choice among hosts + ceiling := 0.0 + pickPercentage := rand.Float64() + for _, h := range possibleHosts { + ceiling += h.epsilonPercentage + if pickPercentage <= ceiling { + hostToUse = h + break + } + } + } + + if hostToUse == nil { + if len(possibleHosts) != 0 { + log.Println("Failed to randomly choose a host, Dan loses") + } + + return p.getRoundRobin() + } + + if hostToUse.dead { + hostToUse.willRetryHost(p.maxRetryInterval) + } + return hostToUse.host +} + +func (p *epsilonGreedyHostPool) markSuccess(hostR HostPoolResponse) { + // first do the base markSuccess - a little redundant with host lookup but cleaner than repeating logic + p.standardHostPool.markSuccess(hostR) + eHostR, ok := hostR.(*epsilonHostPoolResponse) + if !ok { + log.Printf("Incorrect type in eps markSuccess!") // TODO reflection to print out offending type + return + } + host := eHostR.host + duration := p.between(eHostR.started, eHostR.ended) + + p.Lock() + defer p.Unlock() + h, ok := p.hosts[host] + if !ok { + log.Fatalf("host %s not in HostPool %v", host, p.Hosts()) + } + h.epsilonCounts[h.epsilonIndex]++ + h.epsilonValues[h.epsilonIndex] += int64(duration.Seconds() * 1000) +} + +// --- timer: this just exists for testing + +type timer interface { + between(time.Time, time.Time) time.Duration +} + +type realTimer struct{} + +func (rt *realTimer) between(start 
time.Time, end time.Time) time.Duration { + return end.Sub(start) +} diff --git a/vendor/github.com/hailocab/go-hostpool/epsilon_value_calculators.go b/vendor/github.com/hailocab/go-hostpool/epsilon_value_calculators.go new file mode 100644 index 000000000..9bc3102a9 --- /dev/null +++ b/vendor/github.com/hailocab/go-hostpool/epsilon_value_calculators.go @@ -0,0 +1,40 @@ +package hostpool + +// --- Value Calculators ----------------- + +import ( + "math" +) + +// --- Definitions ----------------------- + +// Structs implementing this interface are used to convert the average response time for a host +// into a score that can be used to weight hosts in the epsilon greedy hostpool. Lower response +// times should yield higher scores (we want to select the faster hosts more often) The default +// LinearEpsilonValueCalculator just uses the reciprocal of the response time. In practice, any +// decreasing function from the positive reals to the positive reals should work. +type EpsilonValueCalculator interface { + CalcValueFromAvgResponseTime(float64) float64 +} + +type LinearEpsilonValueCalculator struct{} +type LogEpsilonValueCalculator struct{ LinearEpsilonValueCalculator } +type PolynomialEpsilonValueCalculator struct { + LinearEpsilonValueCalculator + Exp float64 // the exponent to which we will raise the value to reweight +} + +// -------- Methods ----------------------- + +func (c *LinearEpsilonValueCalculator) CalcValueFromAvgResponseTime(v float64) float64 { + return 1.0 / v +} + +func (c *LogEpsilonValueCalculator) CalcValueFromAvgResponseTime(v float64) float64 { + // we need to add 1 to v so that this will be defined on all positive floats + return c.LinearEpsilonValueCalculator.CalcValueFromAvgResponseTime(math.Log(v + 1.0)) +} + +func (c *PolynomialEpsilonValueCalculator) CalcValueFromAvgResponseTime(v float64) float64 { + return c.LinearEpsilonValueCalculator.CalcValueFromAvgResponseTime(math.Pow(v, c.Exp)) +} diff --git a/vendor/github.com/hailocab/go-hostpool/host_entry.go b/vendor/github.com/hailocab/go-hostpool/host_entry.go new file mode 100644 index 000000000..dcec9a0b7 --- /dev/null +++ b/vendor/github.com/hailocab/go-hostpool/host_entry.go @@ -0,0 +1,62 @@ +package hostpool + +import ( + "time" +) + +// --- hostEntry - this is due to get upgraded + +type hostEntry struct { + host string + nextRetry time.Time + retryCount int16 + retryDelay time.Duration + dead bool + epsilonCounts []int64 + epsilonValues []int64 + epsilonIndex int + epsilonValue float64 + epsilonPercentage float64 +} + +func (h *hostEntry) canTryHost(now time.Time) bool { + if !h.dead { + return true + } + if h.nextRetry.Before(now) { + return true + } + return false +} + +func (h *hostEntry) willRetryHost(maxRetryInterval time.Duration) { + h.retryCount += 1 + newDelay := h.retryDelay * 2 + if newDelay < maxRetryInterval { + h.retryDelay = newDelay + } else { + h.retryDelay = maxRetryInterval + } + h.nextRetry = time.Now().Add(h.retryDelay) +} + +func (h *hostEntry) getWeightedAverageResponseTime() float64 { + var value float64 + var lastValue float64 + + // start at 1 so we start with the oldest entry + for i := 1; i <= epsilonBuckets; i += 1 { + pos := (h.epsilonIndex + i) % epsilonBuckets + bucketCount := h.epsilonCounts[pos] + // Changing the line below to what I think it should be to get the weights right + weight := float64(i) / float64(epsilonBuckets) + if bucketCount > 0 { + currentValue := float64(h.epsilonValues[pos]) / float64(bucketCount) + value += currentValue * weight + lastValue = 
currentValue + } else { + value += lastValue * weight + } + } + return value +} diff --git a/vendor/github.com/hailocab/go-hostpool/hostpool.go b/vendor/github.com/hailocab/go-hostpool/hostpool.go new file mode 100644 index 000000000..702ca9276 --- /dev/null +++ b/vendor/github.com/hailocab/go-hostpool/hostpool.go @@ -0,0 +1,243 @@ +// A Go package to intelligently and flexibly pool among multiple hosts from your Go application. +// Host selection can operate in round robin or epsilon greedy mode, and unresponsive hosts are +// avoided. A good overview of Epsilon Greedy is here http://stevehanov.ca/blog/index.php?id=132 +package hostpool + +import ( + "log" + "sync" + "time" +) + +// Returns current version +func Version() string { + return "0.1" +} + +// --- Response interfaces and structs ---- + +// This interface represents the response from HostPool. You can retrieve the +// hostname by calling Host(), and after making a request to the host you should +// call Mark with any error encountered, which will inform the HostPool issuing +// the HostPoolResponse of what happened to the request and allow it to update. +type HostPoolResponse interface { + Host() string + Mark(error) + hostPool() HostPool +} + +type standardHostPoolResponse struct { + host string + sync.Once + pool HostPool +} + +// --- HostPool structs and interfaces ---- + +// This is the main HostPool interface. Structs implementing this interface +// allow you to Get a HostPoolResponse (which includes a hostname to use), +// get the list of all Hosts, and use ResetAll to reset state. +type HostPool interface { + Get() HostPoolResponse + // keep the marks separate so we can override independently + markSuccess(HostPoolResponse) + markFailed(HostPoolResponse) + + ResetAll() + // ReturnUnhealthy when called with true will prevent an unhealthy node from + // being returned and will instead return a nil HostPoolResponse. If using + // this feature then you should check the result of Get for nil + ReturnUnhealthy(v bool) + Hosts() []string + SetHosts([]string) + + // Close the hostpool and release all resources. 
+	Close()
+}
+
+type standardHostPool struct {
+	sync.RWMutex
+	hosts             map[string]*hostEntry
+	hostList          []*hostEntry
+	returnUnhealthy   bool
+	initialRetryDelay time.Duration
+	maxRetryInterval  time.Duration
+	nextHostIndex     int
+}
+
+// ------ constants -------------------
+
+const epsilonBuckets = 120
+const epsilonDecay = 0.90 // decay the exploration rate
+const minEpsilon = 0.01   // explore one percent of the time
+const initialEpsilon = 0.3
+const defaultDecayDuration = time.Duration(5) * time.Minute
+
+// Construct a basic HostPool using the hostnames provided
+func New(hosts []string) HostPool {
+	p := &standardHostPool{
+		returnUnhealthy:   true,
+		hosts:             make(map[string]*hostEntry, len(hosts)),
+		hostList:          make([]*hostEntry, len(hosts)),
+		initialRetryDelay: time.Duration(30) * time.Second,
+		maxRetryInterval:  time.Duration(900) * time.Second,
+	}
+
+	for i, h := range hosts {
+		e := &hostEntry{
+			host:       h,
+			retryDelay: p.initialRetryDelay,
+		}
+		p.hosts[h] = e
+		p.hostList[i] = e
+	}
+
+	return p
+}
+
+func (r *standardHostPoolResponse) Host() string {
+	return r.host
+}
+
+func (r *standardHostPoolResponse) hostPool() HostPool {
+	return r.pool
+}
+
+func (r *standardHostPoolResponse) Mark(err error) {
+	r.Do(func() {
+		doMark(err, r)
+	})
+}
+
+func doMark(err error, r HostPoolResponse) {
+	if err == nil {
+		r.hostPool().markSuccess(r)
+	} else {
+		r.hostPool().markFailed(r)
+	}
+}
+
+// return an entry from the HostPool
+func (p *standardHostPool) Get() HostPoolResponse {
+	p.Lock()
+	defer p.Unlock()
+	host := p.getRoundRobin()
+	if host == "" {
+		return nil
+	}
+
+	return &standardHostPoolResponse{host: host, pool: p}
+}
+
+func (p *standardHostPool) getRoundRobin() string {
+	now := time.Now()
+	hostCount := len(p.hostList)
+	for i := range p.hostList {
+		// iterate in sequence from where we last iterated
+		currentIndex := (i + p.nextHostIndex) % hostCount
+
+		h := p.hostList[currentIndex]
+		if !h.dead {
+			p.nextHostIndex = currentIndex + 1
+			return h.host
+		}
+		if h.nextRetry.Before(now) {
+			h.willRetryHost(p.maxRetryInterval)
+			p.nextHostIndex = currentIndex + 1
+			return h.host
+		}
+	}
+
+	// if all hosts are down and returnUnhealthy is false, return no host
+	if !p.returnUnhealthy {
+		return ""
+	}
+
+	// all hosts are down.
re-add them + p.doResetAll() + p.nextHostIndex = 0 + return p.hostList[0].host +} + +func (p *standardHostPool) ResetAll() { + p.Lock() + defer p.Unlock() + p.doResetAll() +} + +func (p *standardHostPool) SetHosts(hosts []string) { + p.Lock() + defer p.Unlock() + p.setHosts(hosts) +} + +func (p *standardHostPool) ReturnUnhealthy(v bool) { + p.Lock() + defer p.Unlock() + p.returnUnhealthy = v +} + +func (p *standardHostPool) setHosts(hosts []string) { + p.hosts = make(map[string]*hostEntry, len(hosts)) + p.hostList = make([]*hostEntry, len(hosts)) + + for i, h := range hosts { + e := &hostEntry{ + host: h, + retryDelay: p.initialRetryDelay, + } + p.hosts[h] = e + p.hostList[i] = e + } +} + +// this actually performs the logic to reset, +// and should only be called when the lock has +// already been acquired +func (p *standardHostPool) doResetAll() { + for _, h := range p.hosts { + h.dead = false + } +} + +func (p *standardHostPool) Close() { + for _, h := range p.hosts { + h.dead = true + } +} + +func (p *standardHostPool) markSuccess(hostR HostPoolResponse) { + host := hostR.Host() + p.Lock() + defer p.Unlock() + + h, ok := p.hosts[host] + if !ok { + log.Fatalf("host %s not in HostPool %v", host, p.Hosts()) + } + h.dead = false +} + +func (p *standardHostPool) markFailed(hostR HostPoolResponse) { + host := hostR.Host() + p.Lock() + defer p.Unlock() + h, ok := p.hosts[host] + if !ok { + log.Fatalf("host %s not in HostPool %v", host, p.Hosts()) + } + if !h.dead { + h.dead = true + h.retryCount = 0 + h.retryDelay = p.initialRetryDelay + h.nextRetry = time.Now().Add(h.retryDelay) + } + +} +func (p *standardHostPool) Hosts() []string { + hosts := make([]string, 0, len(p.hosts)) + for host := range p.hosts { + hosts = append(hosts, host) + } + return hosts +} diff --git a/vendor/kubedb.dev/apimachinery/apis/catalog/v1alpha1/cassandra_version_helpers.go b/vendor/kubedb.dev/apimachinery/apis/catalog/v1alpha1/cassandra_version_helpers.go new file mode 100644 index 000000000..121dce2fe --- /dev/null +++ b/vendor/kubedb.dev/apimachinery/apis/catalog/v1alpha1/cassandra_version_helpers.go @@ -0,0 +1,63 @@ +/* +Copyright AppsCode Inc. and Contributors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/
+
+package v1alpha1
+
+import (
+	"fmt"
+
+	"kubedb.dev/apimachinery/apis"
+	"kubedb.dev/apimachinery/apis/catalog"
+	"kubedb.dev/apimachinery/crds"
+
+	"kmodules.xyz/client-go/apiextensions"
+)
+
+func (_ CassandraVersion) CustomResourceDefinition() *apiextensions.CustomResourceDefinition {
+	return crds.MustCustomResourceDefinition(SchemeGroupVersion.WithResource(ResourcePluralCassandraVersion))
+}
+
+var _ apis.ResourceInfo = &CassandraVersion{}
+
+func (r CassandraVersion) ResourceFQN() string {
+	return fmt.Sprintf("%s.%s", ResourcePluralCassandraVersion, catalog.GroupName)
+}
+
+func (r CassandraVersion) ResourceShortCode() string {
+	return ResourceCodeCassandraVersion
+}
+
+func (r CassandraVersion) ResourceKind() string {
+	return ResourceKindCassandraVersion
+}
+
+func (r CassandraVersion) ResourceSingular() string {
+	return ResourceSingularCassandraVersion
+}
+
+func (r CassandraVersion) ResourcePlural() string {
+	return ResourcePluralCassandraVersion
+}
+
+func (r CassandraVersion) ValidateSpecs() error {
+	if r.Spec.Version == "" ||
+		r.Spec.DB.Image == "" {
+		return fmt.Errorf(`at least one of the following specs is not set for CassandraVersion "%v":
+spec.version,
+spec.db.image`, r.Name)
+	}
+	return nil
+}
diff --git a/vendor/kubedb.dev/apimachinery/apis/catalog/v1alpha1/cassandra_version_types.go b/vendor/kubedb.dev/apimachinery/apis/catalog/v1alpha1/cassandra_version_types.go
new file mode 100644
index 000000000..33d376064
--- /dev/null
+++ b/vendor/kubedb.dev/apimachinery/apis/catalog/v1alpha1/cassandra_version_types.go
@@ -0,0 +1,97 @@
+/*
+Copyright 2024.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1alpha1
+
+import (
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+)
+
+const (
+	ResourceCodeCassandraVersion     = "csversion"
+	ResourceKindCassandraVersion     = "CassandraVersion"
+	ResourceSingularCassandraVersion = "cassandraversion"
+	ResourcePluralCassandraVersion   = "cassandraversions"
+)
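A small illustrative sketch of how the `ValidateSpecs` helper above behaves, using only fields defined in the types below; the version string is hypothetical:

```go
package main

import (
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	catalogv1alpha1 "kubedb.dev/apimachinery/apis/catalog/v1alpha1"
)

func main() {
	v := catalogv1alpha1.CassandraVersion{
		ObjectMeta: metav1.ObjectMeta{Name: "5.0.0"}, // hypothetical version name
		Spec: catalogv1alpha1.CassandraVersionSpec{
			Version: "5.0.0",
			// Spec.DB.Image intentionally left empty to trigger validation.
		},
	}
	if err := v.ValidateSpecs(); err != nil {
		fmt.Println(err) // reports the missing spec fields
	}
}
```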
+ +// +genclient +// +genclient:nonNamespaced +// +genclient:skipVerbs=updateStatus +// +k8s:openapi-gen=true +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// +kubebuilder:object:root=true +// +kubebuilder:resource:path=cassandraversions,singular=cassandraversion,scope=Cluster,shortName=csversion,categories={catalog,kubedb,appscode} +// +kubebuilder:printcolumn:name="Version",type="string",JSONPath=".spec.version" +// +kubebuilder:printcolumn:name="DB_IMAGE",type="string",JSONPath=".spec.db.image" +// +kubebuilder:printcolumn:name="Deprecated",type="boolean",JSONPath=".spec.deprecated" +// +kubebuilder:printcolumn:name="Age",type="date",JSONPath=".metadata.creationTimestamp" +type CassandraVersion struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + + Spec CassandraVersionSpec `json:"spec,omitempty"` + Status CassandraVersionStatus `json:"status,omitempty"` +} + +// CassandraVersionSpec defines the desired state of CassandraVersion +type CassandraVersionSpec struct { + // INSERT ADDITIONAL SPEC FIELDS - desired state of cluster + // Important: Run "make" to regenerate code after modifying this file + + // Version + Version string `json:"version"` + + // Database Image + DB CassandraVersionDatabase `json:"db"` + + // Database Image + InitContainer CassandraInitContainer `json:"initContainer"` + + // SecurityContext is for the additional config for the DB container + // +optional + SecurityContext SecurityContext `json:"securityContext"` + + // +optional + UI []ChartInfo `json:"ui,omitempty"` +} + +// CassandraVersionDatabase is the Cassandra Database image +type CassandraVersionDatabase struct { + Image string `json:"image"` +} + +// CassandraInitContainer is the Cassandra init Container image +type CassandraInitContainer struct { + Image string `json:"image"` +} + +// CassandraVersionStatus defines the observed state of CassandraVersion +type CassandraVersionStatus struct { + // INSERT ADDITIONAL STATUS FIELD - define observed state of cluster + // Important: Run "make" to regenerate code after modifying this file +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// CassandraVersionList contains a list of CassandraVersion +type CassandraVersionList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []CassandraVersion `json:"items"` +} diff --git a/vendor/kubedb.dev/apimachinery/apis/catalog/v1alpha1/clickhouse_version_types.go b/vendor/kubedb.dev/apimachinery/apis/catalog/v1alpha1/clickhouse_version_types.go index 704ac56fc..88af5f06f 100644 --- a/vendor/kubedb.dev/apimachinery/apis/catalog/v1alpha1/clickhouse_version_types.go +++ b/vendor/kubedb.dev/apimachinery/apis/catalog/v1alpha1/clickhouse_version_types.go @@ -36,7 +36,7 @@ const ( // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object // +kubebuilder:object:root=true -// +kubebuilder:resource:path=clickhouseversions,singular=clickhouseversion,scope=Cluster,shortName=chversion,categories={datastore,kubedb,appscode} +// +kubebuilder:resource:path=clickhouseversions,singular=clickhouseversion,scope=Cluster,shortName=chversion,categories={catalog,kubedb,appscode} // +kubebuilder:printcolumn:name="Version",type="string",JSONPath=".spec.version" // +kubebuilder:printcolumn:name="DB_IMAGE",type="string",JSONPath=".spec.db.image" // +kubebuilder:printcolumn:name="Deprecated",type="boolean",JSONPath=".spec.deprecated" diff --git 
a/vendor/kubedb.dev/apimachinery/apis/catalog/v1alpha1/druid_version_types.go b/vendor/kubedb.dev/apimachinery/apis/catalog/v1alpha1/druid_version_types.go index 078761241..7e6579975 100644 --- a/vendor/kubedb.dev/apimachinery/apis/catalog/v1alpha1/druid_version_types.go +++ b/vendor/kubedb.dev/apimachinery/apis/catalog/v1alpha1/druid_version_types.go @@ -34,7 +34,7 @@ const ( // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object // +kubebuilder:object:root=true -// +kubebuilder:resource:path=druidversions,singular=druidversion,scope=Cluster,shortName=drversion,categories={datastore,kubedb,appscode} +// +kubebuilder:resource:path=druidversions,singular=druidversion,scope=Cluster,shortName=drversion,categories={catalog,kubedb,appscode} // +kubebuilder:printcolumn:name="Version",type="string",JSONPath=".spec.version" // +kubebuilder:printcolumn:name="DB_IMAGE",type="string",JSONPath=".spec.db.image" // +kubebuilder:printcolumn:name="Deprecated",type="boolean",JSONPath=".spec.deprecated" diff --git a/vendor/kubedb.dev/apimachinery/apis/catalog/v1alpha1/elasticsearch_version_types.go b/vendor/kubedb.dev/apimachinery/apis/catalog/v1alpha1/elasticsearch_version_types.go index f082208a9..f1a2fde8a 100644 --- a/vendor/kubedb.dev/apimachinery/apis/catalog/v1alpha1/elasticsearch_version_types.go +++ b/vendor/kubedb.dev/apimachinery/apis/catalog/v1alpha1/elasticsearch_version_types.go @@ -37,7 +37,7 @@ const ( // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object // +kubebuilder:object:root=true -// +kubebuilder:resource:path=elasticsearchversions,singular=elasticsearchversion,scope=Cluster,shortName=esversion,categories={datastore,kubedb,appscode} +// +kubebuilder:resource:path=elasticsearchversions,singular=elasticsearchversion,scope=Cluster,shortName=esversion,categories={catalog,kubedb,appscode} // +kubebuilder:printcolumn:name="Version",type="string",JSONPath=".spec.version" // +kubebuilder:printcolumn:name="Distribution",type="string",JSONPath=".spec.distribution" // +kubebuilder:printcolumn:name="DB_IMAGE",type="string",JSONPath=".spec.db.image" diff --git a/vendor/kubedb.dev/apimachinery/apis/catalog/v1alpha1/etcd_version_types.go b/vendor/kubedb.dev/apimachinery/apis/catalog/v1alpha1/etcd_version_types.go index dfd6db7c2..50292d9ce 100644 --- a/vendor/kubedb.dev/apimachinery/apis/catalog/v1alpha1/etcd_version_types.go +++ b/vendor/kubedb.dev/apimachinery/apis/catalog/v1alpha1/etcd_version_types.go @@ -37,7 +37,7 @@ const ( // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object // +kubebuilder:object:root=true -// +kubebuilder:resource:path=etcdversions,singular=etcdversion,scope=Cluster,shortName=etcversion,categories={datastore,kubedb,appscode} +// +kubebuilder:resource:path=etcdversions,singular=etcdversion,scope=Cluster,shortName=etcversion,categories={catalog,kubedb,appscode} // +kubebuilder:printcolumn:name="Version",type="string",JSONPath=".spec.version" // +kubebuilder:printcolumn:name="DB_IMAGE",type="string",JSONPath=".spec.db.image" // +kubebuilder:printcolumn:name="Deprecated",type="boolean",JSONPath=".spec.deprecated" diff --git a/vendor/kubedb.dev/apimachinery/apis/catalog/v1alpha1/ferretdb_version_types.go b/vendor/kubedb.dev/apimachinery/apis/catalog/v1alpha1/ferretdb_version_types.go index 22d0f66fd..c3d53e60e 100644 --- a/vendor/kubedb.dev/apimachinery/apis/catalog/v1alpha1/ferretdb_version_types.go +++ b/vendor/kubedb.dev/apimachinery/apis/catalog/v1alpha1/ferretdb_version_types.go @@ -32,7 +32,7 @@ const ( // 
+k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object // +kubebuilder:object:root=true -// +kubebuilder:resource:path=ferretdbversions,singular=ferretdbversion,scope=Cluster,shortName=frversion,categories={datastore,kubedb,appscode} +// +kubebuilder:resource:path=ferretdbversions,singular=ferretdbversion,scope=Cluster,shortName=frversion,categories={catalog,kubedb,appscode} // +kubebuilder:printcolumn:name="Version",type="string",JSONPath=".spec.version" // +kubebuilder:printcolumn:name="DB_IMAGE",type="string",JSONPath=".spec.db.image" // +kubebuilder:printcolumn:name="Deprecated",type="boolean",JSONPath=".spec.deprecated" diff --git a/vendor/kubedb.dev/apimachinery/apis/catalog/v1alpha1/kafka_connector_version_types.go b/vendor/kubedb.dev/apimachinery/apis/catalog/v1alpha1/kafka_connector_version_types.go index df4319f0f..5f4fd77a2 100644 --- a/vendor/kubedb.dev/apimachinery/apis/catalog/v1alpha1/kafka_connector_version_types.go +++ b/vendor/kubedb.dev/apimachinery/apis/catalog/v1alpha1/kafka_connector_version_types.go @@ -36,7 +36,7 @@ const ( // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object // +kubebuilder:object:root=true -// +kubebuilder:resource:path=kafkaconnectorversions,singular=kafkaconnectorversion,scope=Cluster,shortName=kcversion,categories={datastore,kubedb,appscode} +// +kubebuilder:resource:path=kafkaconnectorversions,singular=kafkaconnectorversion,scope=Cluster,shortName=kcversion,categories={catalog,kubedb,appscode} // +kubebuilder:printcolumn:name="Version",type="string",JSONPath=".spec.version" // +kubebuilder:printcolumn:name="Connector_Image",type="string",JSONPath=".spec.connectorPlugin.image" // +kubebuilder:printcolumn:name="Deprecated",type="boolean",JSONPath=".spec.deprecated" diff --git a/vendor/kubedb.dev/apimachinery/apis/catalog/v1alpha1/kafka_version_types.go b/vendor/kubedb.dev/apimachinery/apis/catalog/v1alpha1/kafka_version_types.go index 98ab8a4a0..2f572896f 100644 --- a/vendor/kubedb.dev/apimachinery/apis/catalog/v1alpha1/kafka_version_types.go +++ b/vendor/kubedb.dev/apimachinery/apis/catalog/v1alpha1/kafka_version_types.go @@ -37,7 +37,7 @@ const ( // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object // +kubebuilder:object:root=true -// +kubebuilder:resource:path=kafkaversions,singular=kafkaversion,scope=Cluster,shortName=kfversion,categories={datastore,kubedb,appscode} +// +kubebuilder:resource:path=kafkaversions,singular=kafkaversion,scope=Cluster,shortName=kfversion,categories={catalog,kubedb,appscode} // +kubebuilder:printcolumn:name="Version",type="string",JSONPath=".spec.version" // +kubebuilder:printcolumn:name="DB_IMAGE",type="string",JSONPath=".spec.db.image" // +kubebuilder:printcolumn:name="Deprecated",type="boolean",JSONPath=".spec.deprecated" diff --git a/vendor/kubedb.dev/apimachinery/apis/catalog/v1alpha1/mariadb_version_types.go b/vendor/kubedb.dev/apimachinery/apis/catalog/v1alpha1/mariadb_version_types.go index 736f12535..82e8dfdbe 100644 --- a/vendor/kubedb.dev/apimachinery/apis/catalog/v1alpha1/mariadb_version_types.go +++ b/vendor/kubedb.dev/apimachinery/apis/catalog/v1alpha1/mariadb_version_types.go @@ -37,7 +37,7 @@ const ( // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object // +kubebuilder:object:root=true -// +kubebuilder:resource:path=mariadbversions,singular=mariadbversion,scope=Cluster,shortName=mariaversion,categories={datastore,kubedb,appscode} +// 
+kubebuilder:resource:path=mariadbversions,singular=mariadbversion,scope=Cluster,shortName=mariaversion,categories={catalog,kubedb,appscode} // +kubebuilder:printcolumn:name="Version",type="string",JSONPath=".spec.version" // +kubebuilder:printcolumn:name="DB_IMAGE",type="string",JSONPath=".spec.db.image" // +kubebuilder:printcolumn:name="Deprecated",type="boolean",JSONPath=".spec.deprecated" diff --git a/vendor/kubedb.dev/apimachinery/apis/catalog/v1alpha1/memcached_version_types.go b/vendor/kubedb.dev/apimachinery/apis/catalog/v1alpha1/memcached_version_types.go index 9de5b8b43..1ab939be2 100644 --- a/vendor/kubedb.dev/apimachinery/apis/catalog/v1alpha1/memcached_version_types.go +++ b/vendor/kubedb.dev/apimachinery/apis/catalog/v1alpha1/memcached_version_types.go @@ -34,7 +34,7 @@ const ( // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object // +kubebuilder:object:root=true -// +kubebuilder:resource:path=memcachedversions,singular=memcachedversion,scope=Cluster,shortName=mcversion,categories={datastore,kubedb,appscode} +// +kubebuilder:resource:path=memcachedversions,singular=memcachedversion,scope=Cluster,shortName=mcversion,categories={catalog,kubedb,appscode} // +kubebuilder:printcolumn:name="Version",type="string",JSONPath=".spec.version" // +kubebuilder:printcolumn:name="DB_IMAGE",type="string",JSONPath=".spec.db.image" // +kubebuilder:printcolumn:name="Deprecated",type="boolean",JSONPath=".spec.deprecated" diff --git a/vendor/kubedb.dev/apimachinery/apis/catalog/v1alpha1/mongodb_version_types.go b/vendor/kubedb.dev/apimachinery/apis/catalog/v1alpha1/mongodb_version_types.go index 89901d759..6a0fbd311 100644 --- a/vendor/kubedb.dev/apimachinery/apis/catalog/v1alpha1/mongodb_version_types.go +++ b/vendor/kubedb.dev/apimachinery/apis/catalog/v1alpha1/mongodb_version_types.go @@ -37,7 +37,7 @@ const ( // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object // +kubebuilder:object:root=true -// +kubebuilder:resource:path=mongodbversions,singular=mongodbversion,scope=Cluster,shortName=mgversion,categories={datastore,kubedb,appscode} +// +kubebuilder:resource:path=mongodbversions,singular=mongodbversion,scope=Cluster,shortName=mgversion,categories={catalog,kubedb,appscode} // +kubebuilder:printcolumn:name="Version",type="string",JSONPath=".spec.version" // +kubebuilder:printcolumn:name="Distribution",type="string",JSONPath=".spec.distribution" // +kubebuilder:printcolumn:name="DB_IMAGE",type="string",JSONPath=".spec.db.image" diff --git a/vendor/kubedb.dev/apimachinery/apis/catalog/v1alpha1/mssql_version_types.go b/vendor/kubedb.dev/apimachinery/apis/catalog/v1alpha1/mssql_version_types.go index 47498d66a..696503229 100644 --- a/vendor/kubedb.dev/apimachinery/apis/catalog/v1alpha1/mssql_version_types.go +++ b/vendor/kubedb.dev/apimachinery/apis/catalog/v1alpha1/mssql_version_types.go @@ -35,7 +35,7 @@ const ( // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object // +kubebuilder:object:root=true -// +kubebuilder:resource:path=mssqlserverversions,singular=mssqlserverversion,scope=Cluster,shortName=msversion,categories={datastore,kubedb,appscode} +// +kubebuilder:resource:path=mssqlserverversions,singular=mssqlserverversion,scope=Cluster,shortName=msversion,categories={catalog,kubedb,appscode} // +kubebuilder:printcolumn:name="Version",type="string",JSONPath=".spec.version" // +kubebuilder:printcolumn:name="DB_IMAGE",type="string",JSONPath=".spec.db.image" // +kubebuilder:printcolumn:name="Deprecated",type="boolean",JSONPath=".spec.deprecated" 
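These hunks (and the ones that follow) are mechanical: each version CRD moves from the datastore category to catalog, so kubectl get catalog now lists the version objects together. Functionally these cluster-scoped objects are still fetched by name, which is how the Cassandra defaulting code later in this patch resolves its CassandraVersion. A sketch of that lookup, assuming a controller-runtime client is available; the package and helper name resolveVersion are invented:

    package example

    import (
        "context"
        "fmt"

        "k8s.io/apimachinery/pkg/types"
        "sigs.k8s.io/controller-runtime/pkg/client"

        catalog "kubedb.dev/apimachinery/apis/catalog/v1alpha1"
    )

    // resolveVersion mirrors the DefaultClient.Get call in Cassandra's
    // SetDefaults: catalog versions are cluster-scoped, so only Name is set.
    func resolveVersion(ctx context.Context, c client.Client, name string) (*catalog.CassandraVersion, error) {
        var cv catalog.CassandraVersion
        if err := c.Get(ctx, types.NamespacedName{Name: name}, &cv); err != nil {
            return nil, fmt.Errorf("can't get CassandraVersion %q: %w", name, err)
        }
        return &cv, nil
    }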
diff --git a/vendor/kubedb.dev/apimachinery/apis/catalog/v1alpha1/mysql_version_types.go b/vendor/kubedb.dev/apimachinery/apis/catalog/v1alpha1/mysql_version_types.go index fc0c34ad9..8b5399fe4 100644 --- a/vendor/kubedb.dev/apimachinery/apis/catalog/v1alpha1/mysql_version_types.go +++ b/vendor/kubedb.dev/apimachinery/apis/catalog/v1alpha1/mysql_version_types.go @@ -37,7 +37,7 @@ const ( // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object // +kubebuilder:object:root=true -// +kubebuilder:resource:path=mysqlversions,singular=mysqlversion,scope=Cluster,shortName=myversion,categories={datastore,kubedb,appscode} +// +kubebuilder:resource:path=mysqlversions,singular=mysqlversion,scope=Cluster,shortName=myversion,categories={catalog,kubedb,appscode} // +kubebuilder:printcolumn:name="Version",type="string",JSONPath=".spec.version" // +kubebuilder:printcolumn:name="Distribution",type="string",JSONPath=".spec.distribution" // +kubebuilder:printcolumn:name="DB_IMAGE",type="string",JSONPath=".spec.db.image" diff --git a/vendor/kubedb.dev/apimachinery/apis/catalog/v1alpha1/openapi_generated.go b/vendor/kubedb.dev/apimachinery/apis/catalog/v1alpha1/openapi_generated.go index 32f78c10b..67ce3010a 100644 --- a/vendor/kubedb.dev/apimachinery/apis/catalog/v1alpha1/openapi_generated.go +++ b/vendor/kubedb.dev/apimachinery/apis/catalog/v1alpha1/openapi_generated.go @@ -491,6 +491,12 @@ func GetOpenAPIDefinitions(ref common.ReferenceCallback) map[string]common.OpenA "kubedb.dev/apimachinery/apis/catalog/v1alpha1.AddonTasks": schema_apimachinery_apis_catalog_v1alpha1_AddonTasks(ref), "kubedb.dev/apimachinery/apis/catalog/v1alpha1.ApicurioInMemory": schema_apimachinery_apis_catalog_v1alpha1_ApicurioInMemory(ref), "kubedb.dev/apimachinery/apis/catalog/v1alpha1.ArchiverSpec": schema_apimachinery_apis_catalog_v1alpha1_ArchiverSpec(ref), + "kubedb.dev/apimachinery/apis/catalog/v1alpha1.CassandraInitContainer": schema_apimachinery_apis_catalog_v1alpha1_CassandraInitContainer(ref), + "kubedb.dev/apimachinery/apis/catalog/v1alpha1.CassandraVersion": schema_apimachinery_apis_catalog_v1alpha1_CassandraVersion(ref), + "kubedb.dev/apimachinery/apis/catalog/v1alpha1.CassandraVersionDatabase": schema_apimachinery_apis_catalog_v1alpha1_CassandraVersionDatabase(ref), + "kubedb.dev/apimachinery/apis/catalog/v1alpha1.CassandraVersionList": schema_apimachinery_apis_catalog_v1alpha1_CassandraVersionList(ref), + "kubedb.dev/apimachinery/apis/catalog/v1alpha1.CassandraVersionSpec": schema_apimachinery_apis_catalog_v1alpha1_CassandraVersionSpec(ref), + "kubedb.dev/apimachinery/apis/catalog/v1alpha1.CassandraVersionStatus": schema_apimachinery_apis_catalog_v1alpha1_CassandraVersionStatus(ref), "kubedb.dev/apimachinery/apis/catalog/v1alpha1.ChartInfo": schema_apimachinery_apis_catalog_v1alpha1_ChartInfo(ref), "kubedb.dev/apimachinery/apis/catalog/v1alpha1.ClickHouseInitContainer": schema_apimachinery_apis_catalog_v1alpha1_ClickHouseInitContainer(ref), "kubedb.dev/apimachinery/apis/catalog/v1alpha1.ClickHouseVersion": schema_apimachinery_apis_catalog_v1alpha1_ClickHouseVersion(ref), @@ -25375,6 +25381,212 @@ func schema_apimachinery_apis_catalog_v1alpha1_ArchiverSpec(ref common.Reference } } +func schema_apimachinery_apis_catalog_v1alpha1_CassandraInitContainer(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "CassandraInitContainer is the Cassandra init Container image", + Type: []string{"object"}, + 
Properties: map[string]spec.Schema{ + "image": { + SchemaProps: spec.SchemaProps{ + Default: "", + Type: []string{"string"}, + Format: "", + }, + }, + }, + Required: []string{"image"}, + }, + }, + } +} + +func schema_apimachinery_apis_catalog_v1alpha1_CassandraVersion(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "kind": { + SchemaProps: spec.SchemaProps{ + Description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + Type: []string{"string"}, + Format: "", + }, + }, + "apiVersion": { + SchemaProps: spec.SchemaProps{ + Description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources", + Type: []string{"string"}, + Format: "", + }, + }, + "metadata": { + SchemaProps: spec.SchemaProps{ + Default: map[string]interface{}{}, + Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta"), + }, + }, + "spec": { + SchemaProps: spec.SchemaProps{ + Default: map[string]interface{}{}, + Ref: ref("kubedb.dev/apimachinery/apis/catalog/v1alpha1.CassandraVersionSpec"), + }, + }, + "status": { + SchemaProps: spec.SchemaProps{ + Default: map[string]interface{}{}, + Ref: ref("kubedb.dev/apimachinery/apis/catalog/v1alpha1.CassandraVersionStatus"), + }, + }, + }, + }, + }, + Dependencies: []string{ + "k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta", "kubedb.dev/apimachinery/apis/catalog/v1alpha1.CassandraVersionSpec", "kubedb.dev/apimachinery/apis/catalog/v1alpha1.CassandraVersionStatus"}, + } +} + +func schema_apimachinery_apis_catalog_v1alpha1_CassandraVersionDatabase(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "CassandraVersionDatabase is the Cassandra Database image", + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "image": { + SchemaProps: spec.SchemaProps{ + Default: "", + Type: []string{"string"}, + Format: "", + }, + }, + }, + Required: []string{"image"}, + }, + }, + } +} + +func schema_apimachinery_apis_catalog_v1alpha1_CassandraVersionList(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "CassandraVersionList contains a list of CassandraVersion", + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "kind": { + SchemaProps: spec.SchemaProps{ + Description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + Type: []string{"string"}, + Format: "", + }, + }, + "apiVersion": { + SchemaProps: spec.SchemaProps{ + Description: "APIVersion defines the versioned schema of this representation of an object. 
Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources", + Type: []string{"string"}, + Format: "", + }, + }, + "metadata": { + SchemaProps: spec.SchemaProps{ + Default: map[string]interface{}{}, + Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.ListMeta"), + }, + }, + "items": { + SchemaProps: spec.SchemaProps{ + Type: []string{"array"}, + Items: &spec.SchemaOrArray{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Default: map[string]interface{}{}, + Ref: ref("kubedb.dev/apimachinery/apis/catalog/v1alpha1.CassandraVersion"), + }, + }, + }, + }, + }, + }, + Required: []string{"items"}, + }, + }, + Dependencies: []string{ + "k8s.io/apimachinery/pkg/apis/meta/v1.ListMeta", "kubedb.dev/apimachinery/apis/catalog/v1alpha1.CassandraVersion"}, + } +} + +func schema_apimachinery_apis_catalog_v1alpha1_CassandraVersionSpec(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "CassandraVersionSpec defines the desired state of CassandraVersion", + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "version": { + SchemaProps: spec.SchemaProps{ + Description: "Version", + Default: "", + Type: []string{"string"}, + Format: "", + }, + }, + "db": { + SchemaProps: spec.SchemaProps{ + Description: "Database Image", + Default: map[string]interface{}{}, + Ref: ref("kubedb.dev/apimachinery/apis/catalog/v1alpha1.CassandraVersionDatabase"), + }, + }, + "initContainer": { + SchemaProps: spec.SchemaProps{ + Description: "Database Image", + Default: map[string]interface{}{}, + Ref: ref("kubedb.dev/apimachinery/apis/catalog/v1alpha1.CassandraInitContainer"), + }, + }, + "securityContext": { + SchemaProps: spec.SchemaProps{ + Description: "SecurityContext is for the additional config for the DB container", + Default: map[string]interface{}{}, + Ref: ref("kubedb.dev/apimachinery/apis/catalog/v1alpha1.SecurityContext"), + }, + }, + "ui": { + SchemaProps: spec.SchemaProps{ + Type: []string{"array"}, + Items: &spec.SchemaOrArray{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Default: map[string]interface{}{}, + Ref: ref("kubedb.dev/apimachinery/apis/catalog/v1alpha1.ChartInfo"), + }, + }, + }, + }, + }, + }, + Required: []string{"version", "db", "initContainer"}, + }, + }, + Dependencies: []string{ + "kubedb.dev/apimachinery/apis/catalog/v1alpha1.CassandraInitContainer", "kubedb.dev/apimachinery/apis/catalog/v1alpha1.CassandraVersionDatabase", "kubedb.dev/apimachinery/apis/catalog/v1alpha1.ChartInfo", "kubedb.dev/apimachinery/apis/catalog/v1alpha1.SecurityContext"}, + } +} + +func schema_apimachinery_apis_catalog_v1alpha1_CassandraVersionStatus(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "CassandraVersionStatus defines the observed state of CassandraVersion", + Type: []string{"object"}, + }, + }, + } +} + func schema_apimachinery_apis_catalog_v1alpha1_ChartInfo(ref common.ReferenceCallback) common.OpenAPIDefinition { return common.OpenAPIDefinition{ Schema: spec.Schema{ diff --git a/vendor/kubedb.dev/apimachinery/apis/catalog/v1alpha1/perconaxtradb_version_types.go b/vendor/kubedb.dev/apimachinery/apis/catalog/v1alpha1/perconaxtradb_version_types.go index 4b95b21bb..372eb15aa 100644 --- 
a/vendor/kubedb.dev/apimachinery/apis/catalog/v1alpha1/perconaxtradb_version_types.go +++ b/vendor/kubedb.dev/apimachinery/apis/catalog/v1alpha1/perconaxtradb_version_types.go @@ -37,7 +37,7 @@ const ( // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object // +kubebuilder:object:root=true -// +kubebuilder:resource:path=perconaxtradbversions,singular=perconaxtradbversion,scope=Cluster,shortName=pxversion,categories={datastore,kubedb,appscode} +// +kubebuilder:resource:path=perconaxtradbversions,singular=perconaxtradbversion,scope=Cluster,shortName=pxversion,categories={catalog,kubedb,appscode} // +kubebuilder:printcolumn:name="Version",type="string",JSONPath=".spec.version" // +kubebuilder:printcolumn:name="DB_IMAGE",type="string",JSONPath=".spec.db.image" // +kubebuilder:printcolumn:name="Deprecated",type="boolean",JSONPath=".spec.deprecated" diff --git a/vendor/kubedb.dev/apimachinery/apis/catalog/v1alpha1/pgbouncer_version_types.go b/vendor/kubedb.dev/apimachinery/apis/catalog/v1alpha1/pgbouncer_version_types.go index 30b619947..8a16eb653 100644 --- a/vendor/kubedb.dev/apimachinery/apis/catalog/v1alpha1/pgbouncer_version_types.go +++ b/vendor/kubedb.dev/apimachinery/apis/catalog/v1alpha1/pgbouncer_version_types.go @@ -36,7 +36,7 @@ const ( // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object // +kubebuilder:object:root=true -// +kubebuilder:resource:path=pgbouncerversions,singular=pgbouncerversion,scope=Cluster,shortName=pbversion,categories={datastore,kubedb,appscode} +// +kubebuilder:resource:path=pgbouncerversions,singular=pgbouncerversion,scope=Cluster,shortName=pbversion,categories={catalog,kubedb,appscode} // +kubebuilder:printcolumn:name="Version",type="string",JSONPath=".spec.version" // +kubebuilder:printcolumn:name="PGBOUNCER_IMAGE",type="string",JSONPath=".spec.pgBouncer.image" // +kubebuilder:printcolumn:name="Deprecated",type="boolean",JSONPath=".spec.deprecated" diff --git a/vendor/kubedb.dev/apimachinery/apis/catalog/v1alpha1/pgpool_version_types.go b/vendor/kubedb.dev/apimachinery/apis/catalog/v1alpha1/pgpool_version_types.go index 30f795940..5eafc467b 100644 --- a/vendor/kubedb.dev/apimachinery/apis/catalog/v1alpha1/pgpool_version_types.go +++ b/vendor/kubedb.dev/apimachinery/apis/catalog/v1alpha1/pgpool_version_types.go @@ -34,7 +34,7 @@ const ( // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object // +kubebuilder:object:root=true -// +kubebuilder:resource:path=pgpoolversions,singular=pgpoolversion,scope=Cluster,shortName=ppversion,categories={datastore,kubedb,appscode} +// +kubebuilder:resource:path=pgpoolversions,singular=pgpoolversion,scope=Cluster,shortName=ppversion,categories={catalog,kubedb,appscode} // +kubebuilder:printcolumn:name="Version",type="string",JSONPath=".spec.version" // +kubebuilder:printcolumn:name="PGPOOL_IMAGE",type="string",JSONPath=".spec.pgpool.image" // +kubebuilder:printcolumn:name="Deprecated",type="boolean",JSONPath=".spec.deprecated" diff --git a/vendor/kubedb.dev/apimachinery/apis/catalog/v1alpha1/postgres_version_types.go b/vendor/kubedb.dev/apimachinery/apis/catalog/v1alpha1/postgres_version_types.go index 37a07c48c..2846e67ca 100644 --- a/vendor/kubedb.dev/apimachinery/apis/catalog/v1alpha1/postgres_version_types.go +++ b/vendor/kubedb.dev/apimachinery/apis/catalog/v1alpha1/postgres_version_types.go @@ -37,7 +37,7 @@ const ( // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object // +kubebuilder:object:root=true -// 
+kubebuilder:resource:path=postgresversions,singular=postgresversion,scope=Cluster,shortName=pgversion,categories={datastore,kubedb,appscode} +// +kubebuilder:resource:path=postgresversions,singular=postgresversion,scope=Cluster,shortName=pgversion,categories={catalog,kubedb,appscode} // +kubebuilder:printcolumn:name="Version",type="string",JSONPath=".spec.version" // +kubebuilder:printcolumn:name="Distribution",type="string",JSONPath=".spec.distribution" // +kubebuilder:printcolumn:name="DB_IMAGE",type="string",JSONPath=".spec.db.image" diff --git a/vendor/kubedb.dev/apimachinery/apis/catalog/v1alpha1/proxysql_version_types.go b/vendor/kubedb.dev/apimachinery/apis/catalog/v1alpha1/proxysql_version_types.go index 8266cd910..548373d03 100644 --- a/vendor/kubedb.dev/apimachinery/apis/catalog/v1alpha1/proxysql_version_types.go +++ b/vendor/kubedb.dev/apimachinery/apis/catalog/v1alpha1/proxysql_version_types.go @@ -33,7 +33,7 @@ const ( // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object // +kubebuilder:object:root=true -// +kubebuilder:resource:path=proxysqlversions,singular=proxysqlversion,scope=Cluster,categories={datastore,kubedb,appscode} +// +kubebuilder:resource:path=proxysqlversions,singular=proxysqlversion,scope=Cluster,categories={catalog,kubedb,appscode} // +kubebuilder:printcolumn:name="Version",type="string",JSONPath=".spec.version" // +kubebuilder:printcolumn:name="ProxySQL_IMAGE",type="string",JSONPath=".spec.proxysql.image" // +kubebuilder:printcolumn:name="Deprecated",type="boolean",JSONPath=".spec.deprecated" diff --git a/vendor/kubedb.dev/apimachinery/apis/catalog/v1alpha1/rabbitmqversion_types.go b/vendor/kubedb.dev/apimachinery/apis/catalog/v1alpha1/rabbitmqversion_types.go index b642a9f43..9142547cf 100644 --- a/vendor/kubedb.dev/apimachinery/apis/catalog/v1alpha1/rabbitmqversion_types.go +++ b/vendor/kubedb.dev/apimachinery/apis/catalog/v1alpha1/rabbitmqversion_types.go @@ -36,7 +36,7 @@ const ( // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object // +kubebuilder:object:root=true -// +kubebuilder:resource:path=rabbitmqversions,singular=rabbitmqversion,scope=Cluster,shortName=rmversion,categories={datastore,kubedb,appscode} +// +kubebuilder:resource:path=rabbitmqversions,singular=rabbitmqversion,scope=Cluster,shortName=rmversion,categories={catalog,kubedb,appscode} // +kubebuilder:printcolumn:name="Version",type="string",JSONPath=".spec.version" // +kubebuilder:printcolumn:name="DB_IMAGE",type="string",JSONPath=".spec.db.image" // +kubebuilder:printcolumn:name="Deprecated",type="boolean",JSONPath=".spec.deprecated" diff --git a/vendor/kubedb.dev/apimachinery/apis/catalog/v1alpha1/redis_version_types.go b/vendor/kubedb.dev/apimachinery/apis/catalog/v1alpha1/redis_version_types.go index abd590682..b0918e615 100644 --- a/vendor/kubedb.dev/apimachinery/apis/catalog/v1alpha1/redis_version_types.go +++ b/vendor/kubedb.dev/apimachinery/apis/catalog/v1alpha1/redis_version_types.go @@ -37,7 +37,7 @@ const ( // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object // +kubebuilder:object:root=true -// +kubebuilder:resource:path=redisversions,singular=redisversion,scope=Cluster,shortName=rdversion,categories={datastore,kubedb,appscode} +// +kubebuilder:resource:path=redisversions,singular=redisversion,scope=Cluster,shortName=rdversion,categories={catalog,kubedb,appscode} // +kubebuilder:printcolumn:name="Version",type="string",JSONPath=".spec.version" // +kubebuilder:printcolumn:name="DB_IMAGE",type="string",JSONPath=".spec.db.image" 
// +kubebuilder:printcolumn:name="Deprecated",type="boolean",JSONPath=".spec.deprecated" diff --git a/vendor/kubedb.dev/apimachinery/apis/catalog/v1alpha1/register.go b/vendor/kubedb.dev/apimachinery/apis/catalog/v1alpha1/register.go index 8bbab4a16..48f41efb9 100644 --- a/vendor/kubedb.dev/apimachinery/apis/catalog/v1alpha1/register.go +++ b/vendor/kubedb.dev/apimachinery/apis/catalog/v1alpha1/register.go @@ -54,6 +54,8 @@ func Resource(resource string) schema.GroupResource { // Adds the list of known types to api.Scheme. func addKnownTypes(scheme *runtime.Scheme) error { scheme.AddKnownTypes(SchemeGroupVersion, + &CassandraVersion{}, + &CassandraVersionList{}, &ClickHouseVersion{}, &ClickHouseVersionList{}, &DruidVersion{}, diff --git a/vendor/kubedb.dev/apimachinery/apis/catalog/v1alpha1/schemaregistry_version_types.go b/vendor/kubedb.dev/apimachinery/apis/catalog/v1alpha1/schemaregistry_version_types.go index cb873d7b8..4626563c9 100644 --- a/vendor/kubedb.dev/apimachinery/apis/catalog/v1alpha1/schemaregistry_version_types.go +++ b/vendor/kubedb.dev/apimachinery/apis/catalog/v1alpha1/schemaregistry_version_types.go @@ -36,7 +36,7 @@ const ( // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object // +kubebuilder:object:root=true -// +kubebuilder:resource:path=schemaregistryversions,singular=schemaregistryversion,scope=Cluster,shortName=ksrversion,categories={datastore,kubedb,appscode} +// +kubebuilder:resource:path=schemaregistryversions,singular=schemaregistryversion,scope=Cluster,shortName=ksrversion,categories={catalog,kubedb,appscode} // +kubebuilder:printcolumn:name="Version",type="string",JSONPath=".spec.version" // +kubebuilder:printcolumn:name="Distribution",type="string",JSONPath=".spec.distribution" // +kubebuilder:printcolumn:name="REGISTRY_IMAGE",type="string",JSONPath=".spec.registry.image" diff --git a/vendor/kubedb.dev/apimachinery/apis/catalog/v1alpha1/singlestore_version_types.go b/vendor/kubedb.dev/apimachinery/apis/catalog/v1alpha1/singlestore_version_types.go index b6b6818a4..f96d4ea59 100644 --- a/vendor/kubedb.dev/apimachinery/apis/catalog/v1alpha1/singlestore_version_types.go +++ b/vendor/kubedb.dev/apimachinery/apis/catalog/v1alpha1/singlestore_version_types.go @@ -35,7 +35,7 @@ const ( // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object // +kubebuilder:object:root=true -// +kubebuilder:resource:path=singlestoreversions,singular=singlestoreversion,scope=Cluster,shortName=sdbv,categories={datastore,kubedb,appscode} +// +kubebuilder:resource:path=singlestoreversions,singular=singlestoreversion,scope=Cluster,shortName=sdbv,categories={catalog,kubedb,appscode} // +kubebuilder:printcolumn:name="Version",type="string",JSONPath=".spec.version" // +kubebuilder:printcolumn:name="DB_IMAGE",type="string",JSONPath=".spec.db.image" // +kubebuilder:printcolumn:name="Deprecated",type="boolean",JSONPath=".spec.deprecated" diff --git a/vendor/kubedb.dev/apimachinery/apis/catalog/v1alpha1/solr_version_types.go b/vendor/kubedb.dev/apimachinery/apis/catalog/v1alpha1/solr_version_types.go index b3143686e..96848d0dd 100644 --- a/vendor/kubedb.dev/apimachinery/apis/catalog/v1alpha1/solr_version_types.go +++ b/vendor/kubedb.dev/apimachinery/apis/catalog/v1alpha1/solr_version_types.go @@ -36,7 +36,7 @@ const ( // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object // +kubebuilder:object:root=true -// +kubebuilder:resource:path=solrversions,singular=solrversion,scope=Cluster,shortName=slversion,categories={datastore,kubedb,appscode} +// 
+kubebuilder:resource:path=solrversions,singular=solrversion,scope=Cluster,shortName=slversion,categories={catalog,kubedb,appscode} // +kubebuilder:printcolumn:name="Version",type="string",JSONPath=".spec.version" // +kubebuilder:printcolumn:name="DB_IMAGE",type="string",JSONPath=".spec.db.image" // +kubebuilder:printcolumn:name="Deprecated",type="boolean",JSONPath=".spec.deprecated" diff --git a/vendor/kubedb.dev/apimachinery/apis/catalog/v1alpha1/zookeeper_version_types.go b/vendor/kubedb.dev/apimachinery/apis/catalog/v1alpha1/zookeeper_version_types.go index 53010dd90..84f64b383 100644 --- a/vendor/kubedb.dev/apimachinery/apis/catalog/v1alpha1/zookeeper_version_types.go +++ b/vendor/kubedb.dev/apimachinery/apis/catalog/v1alpha1/zookeeper_version_types.go @@ -37,7 +37,7 @@ const ( // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object // +kubebuilder:object:root=true -// +kubebuilder:resource:path=zookeeperversions,singular=zookeeperversion,scope=Cluster,shortName=zkversion,categories={datastore,kubedb,appscode} +// +kubebuilder:resource:path=zookeeperversions,singular=zookeeperversion,scope=Cluster,shortName=zkversion,categories={catalog,kubedb,appscode} // +kubebuilder:printcolumn:name="Version",type="string",JSONPath=".spec.version" // +kubebuilder:printcolumn:name="DB_IMAGE",type="string",JSONPath=".spec.db.image" // +kubebuilder:printcolumn:name="Deprecated",type="boolean",JSONPath=".spec.deprecated" diff --git a/vendor/kubedb.dev/apimachinery/apis/catalog/v1alpha1/zz_generated.deepcopy.go b/vendor/kubedb.dev/apimachinery/apis/catalog/v1alpha1/zz_generated.deepcopy.go index 292382643..90eac572d 100644 --- a/vendor/kubedb.dev/apimachinery/apis/catalog/v1alpha1/zz_generated.deepcopy.go +++ b/vendor/kubedb.dev/apimachinery/apis/catalog/v1alpha1/zz_generated.deepcopy.go @@ -98,6 +98,141 @@ func (in *ArchiverSpec) DeepCopy() *ArchiverSpec { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CassandraInitContainer) DeepCopyInto(out *CassandraInitContainer) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CassandraInitContainer. +func (in *CassandraInitContainer) DeepCopy() *CassandraInitContainer { + if in == nil { + return nil + } + out := new(CassandraInitContainer) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CassandraVersion) DeepCopyInto(out *CassandraVersion) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + out.Status = in.Status + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CassandraVersion. +func (in *CassandraVersion) DeepCopy() *CassandraVersion { + if in == nil { + return nil + } + out := new(CassandraVersion) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *CassandraVersion) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *CassandraVersionDatabase) DeepCopyInto(out *CassandraVersionDatabase) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CassandraVersionDatabase. +func (in *CassandraVersionDatabase) DeepCopy() *CassandraVersionDatabase { + if in == nil { + return nil + } + out := new(CassandraVersionDatabase) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CassandraVersionList) DeepCopyInto(out *CassandraVersionList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]CassandraVersion, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CassandraVersionList. +func (in *CassandraVersionList) DeepCopy() *CassandraVersionList { + if in == nil { + return nil + } + out := new(CassandraVersionList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *CassandraVersionList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CassandraVersionSpec) DeepCopyInto(out *CassandraVersionSpec) { + *out = *in + out.DB = in.DB + out.InitContainer = in.InitContainer + in.SecurityContext.DeepCopyInto(&out.SecurityContext) + if in.UI != nil { + in, out := &in.UI, &out.UI + *out = make([]ChartInfo, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CassandraVersionSpec. +func (in *CassandraVersionSpec) DeepCopy() *CassandraVersionSpec { + if in == nil { + return nil + } + out := new(CassandraVersionSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CassandraVersionStatus) DeepCopyInto(out *CassandraVersionStatus) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CassandraVersionStatus. +func (in *CassandraVersionStatus) DeepCopy() *CassandraVersionStatus { + if in == nil { + return nil + } + out := new(CassandraVersionStatus) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *ChartInfo) DeepCopyInto(out *ChartInfo) { *out = *in diff --git a/vendor/kubedb.dev/apimachinery/apis/elasticsearch/v1alpha1/elasticsearchdashboard_types.go b/vendor/kubedb.dev/apimachinery/apis/elasticsearch/v1alpha1/elasticsearchdashboard_types.go index 4a5b448c4..fcabcf41e 100644 --- a/vendor/kubedb.dev/apimachinery/apis/elasticsearch/v1alpha1/elasticsearchdashboard_types.go +++ b/vendor/kubedb.dev/apimachinery/apis/elasticsearch/v1alpha1/elasticsearchdashboard_types.go @@ -88,7 +88,7 @@ type ElasticsearchDashboardStatus struct { // +k8s:openapi-gen=true // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object // +kubebuilder:object:root=true -// +kubebuilder:resource:shortName=ed,scope=Namespaced +// +kubebuilder:resource:path=elasticsearchdashboards,singular=elasticsearchdashboard,shortName=ed,categories={esstore,kubedb,appscode} // +kubebuilder:subresource:status // +kubebuilder:subresource:scale:specpath=.spec.replicas,statuspath=.status.replicas // +kubebuilder:printcolumn:name="Type",type="string",JSONPath=".apiVersion" diff --git a/vendor/kubedb.dev/apimachinery/apis/kafka/v1alpha1/connect_cluster_types.go b/vendor/kubedb.dev/apimachinery/apis/kafka/v1alpha1/connect_cluster_types.go index 41b249d14..d2f3891dd 100644 --- a/vendor/kubedb.dev/apimachinery/apis/kafka/v1alpha1/connect_cluster_types.go +++ b/vendor/kubedb.dev/apimachinery/apis/kafka/v1alpha1/connect_cluster_types.go @@ -41,7 +41,7 @@ const ( // +kubebuilder:object:root=true // +kubebuilder:subresource:status -// +kubebuilder:resource:shortName=kcc,scope=Namespaced +// +kubebuilder:resource:path=connectclusters,singular=connectcluster,shortName=kcc,categories={kfstore,kubedb,appscode} // +kubebuilder:printcolumn:name="Type",type="string",JSONPath=".apiVersion" // +kubebuilder:printcolumn:name="Version",type="string",JSONPath=".spec.version" // +kubebuilder:printcolumn:name="Status",type="string",JSONPath=".status.phase" diff --git a/vendor/kubedb.dev/apimachinery/apis/kafka/v1alpha1/connector_types.go b/vendor/kubedb.dev/apimachinery/apis/kafka/v1alpha1/connector_types.go index 8b49060c5..1a1a6f80f 100644 --- a/vendor/kubedb.dev/apimachinery/apis/kafka/v1alpha1/connector_types.go +++ b/vendor/kubedb.dev/apimachinery/apis/kafka/v1alpha1/connector_types.go @@ -39,7 +39,7 @@ const ( // +kubebuilder:object:root=true // +kubebuilder:subresource:status -// +kubebuilder:resource:shortName=kc,scope=Namespaced +// +kubebuilder:resource:path=connectors,singular=connector,shortName=kc,categories={kfstore,kubedb,appscode} // +kubebuilder:printcolumn:name="Type",type="string",JSONPath=".apiVersion" // +kubebuilder:printcolumn:name="ConnectCluster",type="string",JSONPath=".spec.connectClusterRef.name" // +kubebuilder:printcolumn:name="Status",type="string",JSONPath=".status.phase" diff --git a/vendor/kubedb.dev/apimachinery/apis/kafka/v1alpha1/restproxy_types.go b/vendor/kubedb.dev/apimachinery/apis/kafka/v1alpha1/restproxy_types.go index 15a6d55c0..8941a850f 100644 --- a/vendor/kubedb.dev/apimachinery/apis/kafka/v1alpha1/restproxy_types.go +++ b/vendor/kubedb.dev/apimachinery/apis/kafka/v1alpha1/restproxy_types.go @@ -39,7 +39,7 @@ const ( // +kubebuilder:object:root=true // +kubebuilder:subresource:status -// +kubebuilder:resource:shortName=krp,scope=Namespaced +// +kubebuilder:resource:path=restproxies,singular=restproxy,shortName=krp,categories={kfstore,kubedb,appscode} // +kubebuilder:printcolumn:name="Type",type="string",JSONPath=".apiVersion" // 
+kubebuilder:printcolumn:name="Version",type="string",JSONPath=".spec.version" // +kubebuilder:printcolumn:name="Kafka",type="string",JSONPath=".spec.kafkaRef.name" diff --git a/vendor/kubedb.dev/apimachinery/apis/kafka/v1alpha1/schemaregistry_types.go b/vendor/kubedb.dev/apimachinery/apis/kafka/v1alpha1/schemaregistry_types.go index 3f9d3f63f..56c19b65f 100644 --- a/vendor/kubedb.dev/apimachinery/apis/kafka/v1alpha1/schemaregistry_types.go +++ b/vendor/kubedb.dev/apimachinery/apis/kafka/v1alpha1/schemaregistry_types.go @@ -39,7 +39,7 @@ const ( // +kubebuilder:object:root=true // +kubebuilder:subresource:status -// +kubebuilder:resource:shortName=ksr,scope=Namespaced +// +kubebuilder:resource:path=schemaregistries,singular=schemaregistry,shortName=ksr,categories={kfstore,kubedb,appscode} // +kubebuilder:printcolumn:name="Type",type="string",JSONPath=".apiVersion" // +kubebuilder:printcolumn:name="Version",type="string",JSONPath=".spec.version" // +kubebuilder:printcolumn:name="Status",type="string",JSONPath=".status.phase" diff --git a/vendor/kubedb.dev/apimachinery/apis/kubedb/constants.go b/vendor/kubedb.dev/apimachinery/apis/kubedb/constants.go index d732cfd24..f42ae03a3 100644 --- a/vendor/kubedb.dev/apimachinery/apis/kubedb/constants.go +++ b/vendor/kubedb.dev/apimachinery/apis/kubedb/constants.go @@ -1178,11 +1178,22 @@ const ( ) const ( - RabbitMQAMQPPort = 5672 - RabbitMQPeerDiscoveryPort = 4369 - RabbitMQManagementUIPort = 15672 - RabbitMQExporterPort = 15692 - RabbitMQInterNodePort = 25672 + RabbitMQAMQPPort = 5672 + RabbitMQAMQPSPort = 5671 + RabbitMQMQTTPort = 1883 + RabbitMQMQTTPortWithSSL = 8883 + RabbitMQSTOMPPort = 61613 + RabbitMQSTOMPPortWithSSL = 61614 + RabbitMQWebSTOMPPort = 15674 + RabbitMQWebSTOMPPortWithSSL = 15673 + RabbitMQWebMQTTPort = 15675 + RabbitMQWebMQTTPortWithSSL = 15676 + RabbitMQExporterPort = 15692 + RabbitMQExporterPortWithSSL = 15691 + RabbitMQManagementUIPort = 15672 + RabbitMQManagementUIPortWithSSL = 15671 + RabbitMQInterNodePort = 25672 + RabbitMQPeerDiscoveryPort = 4369 RabbitMQVolumeData = "data" RabbitMQVolumeConfig = "rabbitmqconfig" @@ -1209,10 +1220,16 @@ const ( RabbitMQShovelPlugin = "rabbitmq_shovel" RabbitMQShovelManagementPlugin = "rabbitmq_shovel_management" RabbitMQWebDispatchPlugin = "rabbitmq_web_dispatch" + RabbitMQMQTTPlugin = "rabbitmq_mqtt" + RabbitMQWebMQTTPlugin = "rabbitmq_stomp" + RabbitMQSTOMPPlugin = "rabbitmq_web_mqtt" + RabbitMQWebSTOMPPlugin = "rabbitmq_web_stomp" + RabbitMQPrometheusPlugin = "rabbitmq_prometheus" RabbitMQLoopBackUserKey = "loopback_users" RabbitMQLoopBackUserVal = "none" RabbitMQDefaultTCPListenerKey = "listeners.tcp.default" RabbitMQDefaultSSLListenerKey = "listeners.ssl.default" + RabbitMQDefaultSSLListener1Key = "listeners.ssl.1" RabbitMQDefaultTCPListenerVal = "5672" RabbitMQDefaultTLSListenerVal = "5671" RabbitMQQueueMasterLocatorKey = "queue_master_locator" @@ -1246,9 +1263,16 @@ const ( RabbitMQSSLOptionsPrivateKey = "ssl_options.keyfile" RabbitMQSSLOptionsVerifyKey = "ssl_options.verify" RabbitMQSSLOptionsFailIfNoPeerKey = "ssl_options.fail_if_no_peer_cert" - RabbitMQConfigFileName = "rabbitmq.conf" - RabbitMQEnabledPluginsFileName = "enabled_plugins" - RabbitMQHealthCheckerQueueName = "kubedb-system" + RabbitMQSSLPortKey = "ssl.port" + + RabbitMQSSLCAKey = "ssl.cacertfile" + RabbitMQSSLCertKey = "ssl.certfile" + RabbitMQSSLPrivateKey = "ssl.keyfile" + RabbitMQSSLVerifyKey = "ssl.verify" + RabbitMQSSLFailIfNoPeerKey = "ssl.fail_if_no_peer_cert" + RabbitMQConfigFileName = "rabbitmq.conf" + 
RabbitMQEnabledPluginsFileName = "enabled_plugins" + RabbitMQHealthCheckerQueueName = "kubedb-system" ) // =========================== FerretDB Constants ============================ @@ -1321,6 +1345,19 @@ const ( ClickHouseKeeperFileConfig = "keeper-config.yaml" ) +// =========================== Cassandra Constants ============================ + +const ( + CassandraNativeTCP = 9042 + CassandraVolumeData = "data" + CassandraDataDir = "/var/lib/cassandra" + CassandraContainerName = "cassandra" + CassandraInitContainerName = "cassandra-init" + CassandraRackConfigFile = "rack-config.yaml" + CassandraStandalone = "standalone" + CassandraServerConfigFile = "server-config.yaml" +) + // Resource kind related constants const ( ResourceKindStatefulSet = "StatefulSet" diff --git a/vendor/kubedb.dev/apimachinery/apis/kubedb/v1/kafka_helpers.go b/vendor/kubedb.dev/apimachinery/apis/kubedb/v1/kafka_helpers.go index 76db6ccbf..682a854a3 100644 --- a/vendor/kubedb.dev/apimachinery/apis/kubedb/v1/kafka_helpers.go +++ b/vendor/kubedb.dev/apimachinery/apis/kubedb/v1/kafka_helpers.go @@ -300,6 +300,14 @@ func (k *Kafka) SetHealthCheckerDefaults() { } func (k *Kafka) SetDefaults() { + if k.Spec.Halted { + if k.Spec.DeletionPolicy == DeletionPolicyDoNotTerminate { + klog.Errorf(`Can't halt, since deletion policy is 'DoNotTerminate'`) + return + } + k.Spec.DeletionPolicy = DeletionPolicyHalt + } + if k.Spec.DeletionPolicy == "" { k.Spec.DeletionPolicy = DeletionPolicyDelete } diff --git a/vendor/kubedb.dev/apimachinery/apis/kubedb/v1/kafka_types.go b/vendor/kubedb.dev/apimachinery/apis/kubedb/v1/kafka_types.go index 1f097c6c5..f526bc554 100644 --- a/vendor/kubedb.dev/apimachinery/apis/kubedb/v1/kafka_types.go +++ b/vendor/kubedb.dev/apimachinery/apis/kubedb/v1/kafka_types.go @@ -40,7 +40,7 @@ const ( // +kubebuilder:object:root=true // +kubebuilder:subresource:status // +kubebuilder:storageversion -// +kubebuilder:resource:shortName=kf,scope=Namespaced +// +kubebuilder:resource:path=kafkas,singular=kafka,shortName=kf,categories={datastore,kubedb,appscode,all} // +kubebuilder:printcolumn:name="Type",type="string",JSONPath=".apiVersion" // +kubebuilder:printcolumn:name="Version",type="string",JSONPath=".spec.version" // +kubebuilder:printcolumn:name="Status",type="string",JSONPath=".status.phase" @@ -93,6 +93,10 @@ type KafkaSpec struct { // +optional KeystoreCredSecret *SecretReference `json:"keystoreCredSecret,omitempty"` + // Indicates that the database is halted and all offshoot Kubernetes resources except PVCs are deleted. 
+ // +optional + Halted bool `json:"halted,omitempty"` + // TLS contains tls configurations // +optional TLS *kmapi.TLSConfig `json:"tls,omitempty"` diff --git a/vendor/kubedb.dev/apimachinery/apis/kubedb/v1/kafka_webhooks.go b/vendor/kubedb.dev/apimachinery/apis/kubedb/v1/kafka_webhooks.go index 93a0bbd2c..6a8937ee9 100644 --- a/vendor/kubedb.dev/apimachinery/apis/kubedb/v1/kafka_webhooks.go +++ b/vendor/kubedb.dev/apimachinery/apis/kubedb/v1/kafka_webhooks.go @@ -166,6 +166,12 @@ func (k *Kafka) ValidateCreateOrUpdate() error { } } + if k.Spec.Halted && k.Spec.DeletionPolicy == DeletionPolicyDoNotTerminate { + allErr = append(allErr, field.Invalid(field.NewPath("spec").Child("halted"), + k.Name, + `can't halt if deletionPolicy is set to "DoNotTerminate"`)) + } + err = k.validateVolumes(k) if err != nil { allErr = append(allErr, field.Invalid(field.NewPath("spec").Child("podTemplate").Child("spec").Child("volumes"), @@ -190,6 +196,11 @@ func (k *Kafka) ValidateCreateOrUpdate() error { k.Name, "StorageType should be either durable or ephemeral")) } + if k.Spec.StorageType == StorageTypeEphemeral && k.Spec.DeletionPolicy == DeletionPolicyHalt { + allErr = append(allErr, field.Invalid(field.NewPath("spec").Child("deletionPolicy"), + k.Name, + `'spec.deletionPolicy: Halt' can not be used for 'Ephemeral' storage`)) + } } if len(allErr) == 0 { diff --git a/vendor/kubedb.dev/apimachinery/apis/kubedb/v1/mongodb_helpers.go b/vendor/kubedb.dev/apimachinery/apis/kubedb/v1/mongodb_helpers.go index 5feb3fc1b..48c310d48 100644 --- a/vendor/kubedb.dev/apimachinery/apis/kubedb/v1/mongodb_helpers.go +++ b/vendor/kubedb.dev/apimachinery/apis/kubedb/v1/mongodb_helpers.go @@ -624,7 +624,11 @@ func (m *MongoDB) SetDefaults(mgVersion *v1alpha1.MongoDBVersion) { } if m.Spec.SSLMode == "" { - m.Spec.SSLMode = SSLModeDisabled + if m.Spec.TLS != nil { + m.Spec.SSLMode = SSLModeRequireSSL + } else { + m.Spec.SSLMode = SSLModeDisabled + } } if (m.Spec.ReplicaSet != nil || m.Spec.ShardTopology != nil) && m.Spec.ClusterAuthMode == "" { diff --git a/vendor/kubedb.dev/apimachinery/apis/kubedb/v1/openapi_generated.go b/vendor/kubedb.dev/apimachinery/apis/kubedb/v1/openapi_generated.go index 0a5041199..55b4f47b7 100644 --- a/vendor/kubedb.dev/apimachinery/apis/kubedb/v1/openapi_generated.go +++ b/vendor/kubedb.dev/apimachinery/apis/kubedb/v1/openapi_generated.go @@ -26681,6 +26681,13 @@ func schema_apimachinery_apis_kubedb_v1_KafkaSpec(ref common.ReferenceCallback) Ref: ref("kubedb.dev/apimachinery/apis/kubedb/v1.SecretReference"), }, }, + "halted": { + SchemaProps: spec.SchemaProps{ + Description: "Indicates that the database is halted and all offshoot Kubernetes resources except PVCs are deleted.", + Type: []string{"boolean"}, + Format: "", + }, + }, "tls": { SchemaProps: spec.SchemaProps{ Description: "TLS contains tls configurations", diff --git a/vendor/kubedb.dev/apimachinery/apis/kubedb/v1/pgbouncer_types.go b/vendor/kubedb.dev/apimachinery/apis/kubedb/v1/pgbouncer_types.go index 7718db295..d58ff1d54 100644 --- a/vendor/kubedb.dev/apimachinery/apis/kubedb/v1/pgbouncer_types.go +++ b/vendor/kubedb.dev/apimachinery/apis/kubedb/v1/pgbouncer_types.go @@ -39,7 +39,7 @@ const ( // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object // +kubebuilder:object:root=true -// +kubebuilder:resource:path=pgbouncers,singular=pgbouncer,shortName=pb,categories={proxy,kubedb,appscode,all} +// +kubebuilder:resource:path=pgbouncers,singular=pgbouncer,shortName=pb,categories={datastore,kubedb,appscode,all} // 
+kubebuilder:subresource:status // +kubebuilder:storageversion // +kubebuilder:printcolumn:name="Version",type="string",JSONPath=".spec.version" diff --git a/vendor/kubedb.dev/apimachinery/apis/kubedb/v1alpha2/cassandra_helpers.go b/vendor/kubedb.dev/apimachinery/apis/kubedb/v1alpha2/cassandra_helpers.go new file mode 100644 index 000000000..f82bd9913 --- /dev/null +++ b/vendor/kubedb.dev/apimachinery/apis/kubedb/v1alpha2/cassandra_helpers.go @@ -0,0 +1,322 @@ +/* +Copyright AppsCode Inc. and Contributors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1alpha2 + +import ( + "context" + "fmt" + "strconv" + "strings" + + "kubedb.dev/apimachinery/apis" + catalog "kubedb.dev/apimachinery/apis/catalog/v1alpha1" + "kubedb.dev/apimachinery/apis/kubedb" + "kubedb.dev/apimachinery/crds" + + "gomodules.xyz/pointer" + core "k8s.io/api/core/v1" + meta "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" + "k8s.io/klog/v2" + "kmodules.xyz/client-go/apiextensions" + coreutil "kmodules.xyz/client-go/core/v1" + meta_util "kmodules.xyz/client-go/meta" + "kmodules.xyz/client-go/policy/secomp" + appcat "kmodules.xyz/custom-resources/apis/appcatalog/v1alpha1" + ofst "kmodules.xyz/offshoot-api/api/v2" +) + +type CassandraApp struct { + *Cassandra +} + +func (r *Cassandra) CustomResourceDefinition() *apiextensions.CustomResourceDefinition { + return crds.MustCustomResourceDefinition(SchemeGroupVersion.WithResource(ResourcePluralCassandra)) +} + +func (r *Cassandra) AppBindingMeta() appcat.AppBindingMeta { + return &CassandraApp{r} +} + +func (r CassandraApp) Name() string { + return r.Cassandra.Name +} + +func (r CassandraApp) Type() appcat.AppType { + return appcat.AppType(fmt.Sprintf("%s/%s", kubedb.GroupName, ResourceSingularCassandra)) +} + +// Owner returns owner reference to resources +func (r *Cassandra) Owner() *meta.OwnerReference { + return meta.NewControllerRef(r, SchemeGroupVersion.WithKind(r.ResourceKind())) +} + +func (r *Cassandra) ResourceKind() string { + return ResourceKindCassandra +} + +func (r *Cassandra) OffshootName() string { + return r.Name +} + +func (r *Cassandra) OffshootRackName(value string) string { + return meta_util.NameWithSuffix(r.OffshootName(), value) +} + +func (r *Cassandra) OffshootRackPetSetName(rackName string) string { + rack := meta_util.NameWithSuffix("rack", rackName) + return meta_util.NameWithSuffix(r.OffshootName(), rack) +} + +func (r *Cassandra) OffshootLabels() map[string]string { + return r.offshootLabels(r.OffshootSelectors(), nil) +} + +func (r *Cassandra) OffshootRackLabels(petSetName string) map[string]string { + return r.offshootLabels(r.OffshootRackSelectors(petSetName), nil) +} + +func (r *Cassandra) offshootLabels(selector, override map[string]string) map[string]string { + selector[meta_util.ComponentLabelKey] = kubedb.ComponentDatabase + return meta_util.FilterKeys(kubedb.GroupName, selector, meta_util.OverwriteKeys(nil, r.Labels, override)) +} + +func (r *Cassandra)
OffshootSelectors(extraSelectors ...map[string]string) map[string]string { + selector := map[string]string{ + meta_util.NameLabelKey: r.ResourceFQN(), + meta_util.InstanceLabelKey: r.Name, + meta_util.ManagedByLabelKey: kubedb.GroupName, + } + return meta_util.OverwriteKeys(selector, extraSelectors...) +} + +func (r *Cassandra) OffshootRackSelectors(petSetName string, extraSelectors ...map[string]string) map[string]string { + selector := map[string]string{ + meta_util.NameLabelKey: r.ResourceFQN(), + meta_util.InstanceLabelKey: r.Name, + meta_util.ManagedByLabelKey: kubedb.GroupName, + meta_util.PartOfLabelKey: petSetName, + } + return meta_util.OverwriteKeys(selector, extraSelectors...) +} + +func (r *Cassandra) ResourceFQN() string { + return fmt.Sprintf("%s.%s", r.ResourcePlural(), kubedb.GroupName) +} + +func (r *Cassandra) ResourcePlural() string { + return ResourcePluralCassandra +} + +func (r *Cassandra) ServiceName() string { + return r.OffshootName() +} + +func (r *Cassandra) PrimaryServiceDNS() string { + return fmt.Sprintf("%s.%s.svc", r.ServiceName(), r.Namespace) +} + +func (r *Cassandra) GoverningServiceName() string { + return meta_util.NameWithSuffix(r.ServiceName(), "pods") +} + +func (r *Cassandra) RackGoverningServiceName(name string) string { + return meta_util.NameWithSuffix(name, "pods") +} + +func (r *Cassandra) RackGoverningServiceDNS(petSetName string, replicaNo int) string { + return fmt.Sprintf("%s-%d.%s.%s.svc", petSetName, replicaNo, r.RackGoverningServiceName(petSetName), r.GetNamespace()) +} + +func (r *Cassandra) GetAuthSecretName() string { + if r.Spec.AuthSecret != nil && r.Spec.AuthSecret.Name != "" { + return r.Spec.AuthSecret.Name + } + return r.DefaultUserCredSecretName("admin") +} + +func (r *Cassandra) ConfigSecretName() string { + return meta_util.NameWithSuffix(r.OffshootName(), "config") +} + +func (r *Cassandra) DefaultUserCredSecretName(username string) string { + return meta_util.NameWithSuffix(r.Name, strings.ReplaceAll(fmt.Sprintf("%s-cred", username), "_", "-")) +} + +func (r *Cassandra) PVCName(alias string) string { + return meta_util.NameWithSuffix(r.Name, alias) +} + +func (r *Cassandra) PetSetName() string { + return r.OffshootName() +} + +func (r *Cassandra) PodLabels(extraLabels ...map[string]string) map[string]string { + return r.offshootLabels(meta_util.OverwriteKeys(r.OffshootSelectors(), extraLabels...), r.Spec.PodTemplate.Labels) +} + +func (r *Cassandra) RackPodLabels(petSetName string, labels map[string]string, extraLabels ...map[string]string) map[string]string { + return r.offshootLabels(meta_util.OverwriteKeys(r.OffshootRackSelectors(petSetName), extraLabels...), labels) +} + +func (r *Cassandra) GetConnectionScheme() string { + scheme := "http" + return scheme +} + +func (r *Cassandra) SetHealthCheckerDefaults() { + if r.Spec.HealthChecker.PeriodSeconds == nil { + r.Spec.HealthChecker.PeriodSeconds = pointer.Int32P(10) + } + if r.Spec.HealthChecker.TimeoutSeconds == nil { + r.Spec.HealthChecker.TimeoutSeconds = pointer.Int32P(10) + } + if r.Spec.HealthChecker.FailureThreshold == nil { + r.Spec.HealthChecker.FailureThreshold = pointer.Int32P(3) + } +} + +func (r *Cassandra) Finalizer() string { + return fmt.Sprintf("%s/%s", apis.Finalizer, r.ResourceSingular()) +} + +func (r *Cassandra) ResourceSingular() string { + return ResourceSingularCassandra +} + +func (r *Cassandra) SetDefaults() { + var csVersion catalog.CassandraVersion + err := DefaultClient.Get(context.TODO(), types.NamespacedName{ + Name: r.Spec.Version, + }, 
&csVersion) + if err != nil { + klog.Errorf("can't get the cassandra version object %s: %s", r.Spec.Version, err.Error()) + return + } + if r.Spec.Topology != nil { + rackName := map[string]bool{} + racks := r.Spec.Topology.Rack + for index, rack := range racks { + if rack.Replicas == nil { + rack.Replicas = pointer.Int32P(1) + } + if rack.Name == "" { + for i := 1; ; i++ { + rack.Name = r.OffshootRackName(strconv.Itoa(i)) + if !rackName[rack.Name] { + rackName[rack.Name] = true + break + } + } + } else { + rackName[rack.Name] = true + } + if rack.StorageType == "" { + rack.StorageType = StorageTypeDurable + } + + if rack.PodTemplate == nil { + rack.PodTemplate = &ofst.PodTemplateSpec{} + } + + dbContainer := coreutil.GetContainerByName(rack.PodTemplate.Spec.Containers, kubedb.CassandraContainerName) + if dbContainer != nil && (dbContainer.Resources.Requests == nil && dbContainer.Resources.Limits == nil) { + apis.SetDefaultResourceLimits(&dbContainer.Resources, kubedb.DefaultResources) + } + r.setDefaultContainerSecurityContext(&csVersion, rack.PodTemplate) + racks[index] = rack + } + r.Spec.Topology.Rack = racks + } else { + if r.Spec.Replicas == nil { + r.Spec.Replicas = pointer.Int32P(1) + } + if r.Spec.StorageType == "" { + r.Spec.StorageType = StorageTypeDurable + } + + if r.Spec.PodTemplate == nil { + r.Spec.PodTemplate = &ofst.PodTemplateSpec{} + } + r.setDefaultContainerSecurityContext(&csVersion, r.Spec.PodTemplate) + dbContainer := coreutil.GetContainerByName(r.Spec.PodTemplate.Spec.Containers, kubedb.CassandraContainerName) + if dbContainer != nil && (dbContainer.Resources.Requests == nil && dbContainer.Resources.Limits == nil) { + apis.SetDefaultResourceLimits(&dbContainer.Resources, kubedb.DefaultResources) + } + } + // Spec-level defaults apply in both topology and standalone mode. + if r.Spec.DeletionPolicy == "" { + r.Spec.DeletionPolicy = TerminationPolicyDelete + } + r.SetHealthCheckerDefaults() +} + +func (r *Cassandra) setDefaultContainerSecurityContext(csVersion *catalog.CassandraVersion, podTemplate *ofst.PodTemplateSpec) { + if podTemplate == nil { + return + } + if podTemplate.Spec.SecurityContext == nil { + podTemplate.Spec.SecurityContext = &core.PodSecurityContext{} + } + if podTemplate.Spec.SecurityContext.FSGroup == nil { + podTemplate.Spec.SecurityContext.FSGroup = csVersion.Spec.SecurityContext.RunAsUser + } + + container := coreutil.GetContainerByName(podTemplate.Spec.Containers, kubedb.CassandraContainerName) + if container == nil { + container = &core.Container{ + Name: kubedb.CassandraContainerName, + } + } + if container.SecurityContext == nil { + container.SecurityContext = &core.SecurityContext{} + } + r.assignDefaultContainerSecurityContext(csVersion, container.SecurityContext) + // Upsert after mutating, so a newly created container keeps its security context. + podTemplate.Spec.Containers = coreutil.UpsertContainer(podTemplate.Spec.Containers, *container) + + initContainer := coreutil.GetContainerByName(podTemplate.Spec.InitContainers, kubedb.CassandraInitContainerName) + if initContainer == nil { + initContainer = &core.Container{ + Name: kubedb.CassandraInitContainerName, + } + } + if initContainer.SecurityContext == nil { + initContainer.SecurityContext = &core.SecurityContext{} + } + r.assignDefaultContainerSecurityContext(csVersion, initContainer.SecurityContext) + podTemplate.Spec.InitContainers = coreutil.UpsertContainer(podTemplate.Spec.InitContainers, *initContainer) +} + +func (r *Cassandra) assignDefaultContainerSecurityContext(csVersion *catalog.CassandraVersion, rc *core.SecurityContext) { + if rc.AllowPrivilegeEscalation == nil { + rc.AllowPrivilegeEscalation = 
pointer.BoolP(false) + } + if rc.Capabilities == nil { + rc.Capabilities = &core.Capabilities{ + Drop: []core.Capability{"ALL"}, + } + } + if rc.RunAsNonRoot == nil { + rc.RunAsNonRoot = pointer.BoolP(true) + } + if rc.RunAsUser == nil { + rc.RunAsUser = csVersion.Spec.SecurityContext.RunAsUser + } + if rc.SeccompProfile == nil { + rc.SeccompProfile = secomp.DefaultSeccompProfile() + } +} diff --git a/vendor/kubedb.dev/apimachinery/apis/kubedb/v1alpha2/cassandra_types.go b/vendor/kubedb.dev/apimachinery/apis/kubedb/v1alpha2/cassandra_types.go new file mode 100644 index 000000000..b842f7c47 --- /dev/null +++ b/vendor/kubedb.dev/apimachinery/apis/kubedb/v1alpha2/cassandra_types.go @@ -0,0 +1,147 @@ +/* +Copyright AppsCode Inc. and Contributors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1alpha2 + +import ( + core "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + kmapi "kmodules.xyz/client-go/api/v1" + ofst "kmodules.xyz/offshoot-api/api/v2" +) + +const ( + ResourceKindCassandra = "Cassandra" + ResourceSingularCassandra = "cassandra" + ResourcePluralCassandra = "cassandras" + ResourceCodeCassandra = "cs" +) + +// +genclient +// +k8s:openapi-gen=true +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status +// +kubebuilder:resource:path=cassandras,singular=cassandra,shortName=cs,categories={datastore,kubedb,appscode,all} +// +kubebuilder:printcolumn:name="Type",type="string",JSONPath=".apiVersion" +// +kubebuilder:printcolumn:name="Version",type="string",JSONPath=".spec.version" +// +kubebuilder:printcolumn:name="Status",type="string",JSONPath=".status.phase" +// +kubebuilder:printcolumn:name="Age",type="date",JSONPath=".metadata.creationTimestamp" +type Cassandra struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + + Spec CassandraSpec `json:"spec,omitempty"` + Status CassandraStatus `json:"status,omitempty"` +} + +// CassandraSpec defines the desired state of Cassandra +type CassandraSpec struct { + // Version of Cassandra to be deployed. + Version string `json:"version"` + + // Number of instances to deploy for a Cassandra database. + // +optional + Replicas *int32 `json:"replicas,omitempty"` + + // rack + // +optional + Topology *Topology `json:"topology,omitempty"` + + // StorageType can be durable (default) or ephemeral + StorageType StorageType `json:"storageType,omitempty"` + + // Storage to specify how storage shall be used. + Storage *core.PersistentVolumeClaimSpec `json:"storage,omitempty"` + + // disable security. It disables authentication security of user. + // If unset, default is false + // +optional + DisableSecurity bool `json:"disableSecurity,omitempty"` + + // Database authentication secret + // +optional + AuthSecret *SecretReference `json:"authSecret,omitempty"` + + // ConfigSecret is an optional field to provide custom configuration file for database (i.e. config.properties). 
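The rack-name defaulting loop in SetDefaults above claims names in list order: an unnamed rack gets the first "<db>-<i>" suffix not seen so far, while explicit names are registered as they are encountered. A standalone worked example, assuming meta_util.NameWithSuffix joins its arguments with "-":

package main

import "fmt"

// defaultRackNames mirrors the naming loop in Cassandra.SetDefaults.
func defaultRackNames(dbName string, names []string) []string {
	used := map[string]bool{}
	for idx, name := range names {
		if name == "" {
			for i := 1; ; i++ {
				candidate := fmt.Sprintf("%s-%d", dbName, i)
				if !used[candidate] {
					name = candidate
					used[candidate] = true
					break
				}
			}
		} else {
			used[name] = true
		}
		names[idx] = name
	}
	return names
}

func main() {
	fmt.Println(defaultRackNames("cass", []string{"", "dc1", ""}))
	// Output: [cass-1 dc1 cass-2]
}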
+ // If specified, this file will be used as configuration file otherwise default configuration file will be used. + // +optional + ConfigSecret *core.LocalObjectReference `json:"configSecret,omitempty"` + + // PodTemplate is an optional configuration for pods used to expose database + // +optional + PodTemplate *ofst.PodTemplateSpec `json:"podTemplate,omitempty"` + + // ServiceTemplates is an optional configuration for services used to expose database + // +optional + ServiceTemplates []NamedServiceTemplateSpec `json:"serviceTemplates,omitempty"` + + // DeletionPolicy controls the delete operation for database + // +optional + DeletionPolicy TerminationPolicy `json:"deletionPolicy,omitempty"` + + // HealthChecker defines attributes of the health checker + // +optional + // +kubebuilder:default={periodSeconds: 20, timeoutSeconds: 10, failureThreshold: 3} + HealthChecker kmapi.HealthCheckSpec `json:"healthChecker"` +} + +type Topology struct { + // cassandra rack Structure + Rack []RackSpec `json:"rack,omitempty"` +} + +type RackSpec struct { + // rack Name + Name string `json:"name,omitempty"` + // Number of replica for each shard to deploy for a rack. + // +optional + Replicas *int32 `json:"replicas,omitempty"` + + // PodTemplate is an optional configuration for pods used to expose database + // +optional + PodTemplate *ofst.PodTemplateSpec `json:"podTemplate,omitempty"` + + // Storage to specify how storage shall be used. + Storage *core.PersistentVolumeClaimSpec `json:"storage,omitempty"` + + // StorageType can be durable (default) or ephemeral + StorageType StorageType `json:"storageType,omitempty"` +} + +// CassandraStatus defines the observed state of Cassandra +type CassandraStatus struct { + // Specifies the current phase of the database + // +optional + Phase DatabasePhase `json:"phase,omitempty"` + // observedGeneration is the most recent generation observed for this resource. It corresponds to the + // resource's generation, which is updated on mutation by the API Server. + // +optional + ObservedGeneration int64 `json:"observedGeneration,omitempty"` + // Conditions applied to the database, such as approval or denial. 
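For orientation, a hypothetical minimal object for the spec above, written as a Go literal; the version string and namespace are placeholders, and SetDefaults fills in replicas, storage type, deletion policy, and security contexts:

package main

import (
	"gomodules.xyz/pointer"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

	"kubedb.dev/apimachinery/apis/kubedb/v1alpha2"
)

func newDemoCassandra() *v1alpha2.Cassandra {
	return &v1alpha2.Cassandra{
		ObjectMeta: metav1.ObjectMeta{Name: "cass-demo", Namespace: "demo"},
		Spec: v1alpha2.CassandraSpec{
			Version: "4.1.3", // assumes a CassandraVersion object of this name exists
			Topology: &v1alpha2.Topology{
				Rack: []v1alpha2.RackSpec{
					{Name: "rack-east", Replicas: pointer.Int32P(3)},
					{Replicas: pointer.Int32P(3)}, // unnamed: SetDefaults assigns "cass-demo-1"
				},
			},
			DeletionPolicy: v1alpha2.TerminationPolicyWipeOut,
		},
	}
}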
+ // +optional + Conditions []kmapi.Condition `json:"conditions,omitempty"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// CassandraList contains a list of Cassandra +type CassandraList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []Cassandra `json:"items"` +} diff --git a/vendor/kubedb.dev/apimachinery/apis/kubedb/v1alpha2/clickhouse_types.go b/vendor/kubedb.dev/apimachinery/apis/kubedb/v1alpha2/clickhouse_types.go index 9f83c03dd..ca60ca04b 100644 --- a/vendor/kubedb.dev/apimachinery/apis/kubedb/v1alpha2/clickhouse_types.go +++ b/vendor/kubedb.dev/apimachinery/apis/kubedb/v1alpha2/clickhouse_types.go @@ -36,7 +36,7 @@ const ( // +kubebuilder:object:root=true // +kubebuilder:subresource:status -// +kubebuilder:resource:shortName=ch,scope=Namespaced +// +kubebuilder:resource:path=clickhouses,singular=clickhouse,shortName=ch,categories={datastore,kubedb,appscode,all} // +kubebuilder:printcolumn:name="Type",type="string",JSONPath=".apiVersion" // +kubebuilder:printcolumn:name="Version",type="string",JSONPath=".spec.version" // +kubebuilder:printcolumn:name="Status",type="string",JSONPath=".status.phase" diff --git a/vendor/kubedb.dev/apimachinery/apis/kubedb/v1alpha2/druid_types.go b/vendor/kubedb.dev/apimachinery/apis/kubedb/v1alpha2/druid_types.go index cd8c297b1..62860264b 100644 --- a/vendor/kubedb.dev/apimachinery/apis/kubedb/v1alpha2/druid_types.go +++ b/vendor/kubedb.dev/apimachinery/apis/kubedb/v1alpha2/druid_types.go @@ -39,7 +39,7 @@ const ( // +kubebuilder:object:root=true // +kubebuilder:subresource:status -// +kubebuilder:resource:shortName=dr,scope=Namespaced +// +kubebuilder:resource:path=druids,singular=druid,shortName=dr,categories={datastore,kubedb,appscode,all} // +kubebuilder:printcolumn:name="Type",type="string",JSONPath=".apiVersion" // +kubebuilder:printcolumn:name="Version",type="string",JSONPath=".spec.version" // +kubebuilder:printcolumn:name="Status",type="string",JSONPath=".status.phase" diff --git a/vendor/kubedb.dev/apimachinery/apis/kubedb/v1alpha2/ferretdb_types.go b/vendor/kubedb.dev/apimachinery/apis/kubedb/v1alpha2/ferretdb_types.go index 64fb16550..cdfd9e276 100644 --- a/vendor/kubedb.dev/apimachinery/apis/kubedb/v1alpha2/ferretdb_types.go +++ b/vendor/kubedb.dev/apimachinery/apis/kubedb/v1alpha2/ferretdb_types.go @@ -37,7 +37,7 @@ const ( // +kubebuilder:object:root=true // +kubebuilder:subresource:status -// +kubebuilder:resource:shortName=fr,scope=Namespaced +// +kubebuilder:resource:path=ferretdbs,singular=ferretdb,shortName=fr,categories={datastore,kubedb,appscode,all} // +kubebuilder:printcolumn:name="Namespace",type="string",JSONPath=".metadata.namespace" // +kubebuilder:printcolumn:name="Version",type="string",JSONPath=".spec.version" // +kubebuilder:printcolumn:name="Status",type="string",JSONPath=".status.phase" diff --git a/vendor/kubedb.dev/apimachinery/apis/kubedb/v1alpha2/kafka_helpers.go b/vendor/kubedb.dev/apimachinery/apis/kubedb/v1alpha2/kafka_helpers.go index a17e9ea28..f550ac339 100644 --- a/vendor/kubedb.dev/apimachinery/apis/kubedb/v1alpha2/kafka_helpers.go +++ b/vendor/kubedb.dev/apimachinery/apis/kubedb/v1alpha2/kafka_helpers.go @@ -299,6 +299,14 @@ func (k *Kafka) SetHealthCheckerDefaults() { } func (k *Kafka) SetDefaults() { + if k.Spec.Halted { + if k.Spec.DeletionPolicy == TerminationPolicyDoNotTerminate { + klog.Errorf(`Can't halt, since deletion policy is 'DoNotTerminate'`) + return + } + k.Spec.DeletionPolicy = TerminationPolicyHalt + } + 
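// Worked example (sketch): with spec.halted=true and no pinned deletionPolicy, the block above rewrites the policy, conceptually: + // + //	k := Kafka{Spec: KafkaSpec{Halted: true}} + //	k.SetDefaults() + //	k.Spec.DeletionPolicy == TerminationPolicyHalt // true + // + // so the operator scales offshoot resources down while PVCs survive; if the user pinned DoNotTerminate, the halt request is refused and logged above, mirroring the webhook guard that rejects the same combination at admission time. + 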
if k.Spec.DeletionPolicy == "" { k.Spec.DeletionPolicy = TerminationPolicyDelete } diff --git a/vendor/kubedb.dev/apimachinery/apis/kubedb/v1alpha2/kafka_types.go b/vendor/kubedb.dev/apimachinery/apis/kubedb/v1alpha2/kafka_types.go index 378b4ec02..b4770a0ba 100644 --- a/vendor/kubedb.dev/apimachinery/apis/kubedb/v1alpha2/kafka_types.go +++ b/vendor/kubedb.dev/apimachinery/apis/kubedb/v1alpha2/kafka_types.go @@ -39,7 +39,7 @@ const ( // +kubebuilder:object:root=true // +kubebuilder:subresource:status -// +kubebuilder:resource:shortName=kf,scope=Namespaced +// +kubebuilder:resource:path=kafkas,singular=kafka,shortName=kf,categories={datastore,kubedb,appscode,all} // +kubebuilder:printcolumn:name="Type",type="string",JSONPath=".apiVersion" // +kubebuilder:printcolumn:name="Version",type="string",JSONPath=".spec.version" // +kubebuilder:printcolumn:name="Status",type="string",JSONPath=".status.phase" @@ -92,6 +92,10 @@ type KafkaSpec struct { // +optional KeystoreCredSecret *SecretReference `json:"keystoreCredSecret,omitempty"` + // Indicates that the database is halted and all offshoot Kubernetes resources except PVCs are deleted. + // +optional + Halted bool `json:"halted,omitempty"` + // TLS contains tls configurations // +optional TLS *kmapi.TLSConfig `json:"tls,omitempty"` diff --git a/vendor/kubedb.dev/apimachinery/apis/kubedb/v1alpha2/kafka_webhook.go b/vendor/kubedb.dev/apimachinery/apis/kubedb/v1alpha2/kafka_webhook.go index 367ea5f85..b76005bb9 100644 --- a/vendor/kubedb.dev/apimachinery/apis/kubedb/v1alpha2/kafka_webhook.go +++ b/vendor/kubedb.dev/apimachinery/apis/kubedb/v1alpha2/kafka_webhook.go @@ -170,6 +170,12 @@ func (k *Kafka) ValidateCreateOrUpdate() error { } } + if k.Spec.Halted && k.Spec.DeletionPolicy == TerminationPolicyDoNotTerminate { + allErr = append(allErr, field.Invalid(field.NewPath("spec").Child("halted"), + k.Name, + `can't halt if deletionPolicy is set to "DoNotTerminate"`)) + } + err := k.validateVersion(k) if err != nil { allErr = append(allErr, field.Invalid(field.NewPath("spec").Child("version"), @@ -201,6 +207,11 @@ func (k *Kafka) ValidateCreateOrUpdate() error { k.Name, "StorageType should be either durable or ephemeral")) } + if k.Spec.StorageType == StorageTypeEphemeral && k.Spec.DeletionPolicy == TerminationPolicyHalt { + allErr = append(allErr, field.Invalid(field.NewPath("spec").Child("deletionPolicy"), + k.Name, + `'spec.deletionPolicy: Halt' can not be used for 'Ephemeral' storage`)) + } } if len(allErr) == 0 { diff --git a/vendor/kubedb.dev/apimachinery/apis/kubedb/v1alpha2/openapi_generated.go b/vendor/kubedb.dev/apimachinery/apis/kubedb/v1alpha2/openapi_generated.go index 6dc46a631..c9ff26270 100644 --- a/vendor/kubedb.dev/apimachinery/apis/kubedb/v1alpha2/openapi_generated.go +++ b/vendor/kubedb.dev/apimachinery/apis/kubedb/v1alpha2/openapi_generated.go @@ -493,6 +493,11 @@ func GetOpenAPIDefinitions(ref common.ReferenceCallback) map[string]common.OpenA "kubedb.dev/apimachinery/apis/kubedb/v1alpha2.Archiver": schema_apimachinery_apis_kubedb_v1alpha2_Archiver(ref), "kubedb.dev/apimachinery/apis/kubedb/v1alpha2.ArchiverRecovery": schema_apimachinery_apis_kubedb_v1alpha2_ArchiverRecovery(ref), "kubedb.dev/apimachinery/apis/kubedb/v1alpha2.AutoOpsSpec": schema_apimachinery_apis_kubedb_v1alpha2_AutoOpsSpec(ref), + "kubedb.dev/apimachinery/apis/kubedb/v1alpha2.Cassandra": schema_apimachinery_apis_kubedb_v1alpha2_Cassandra(ref), + "kubedb.dev/apimachinery/apis/kubedb/v1alpha2.CassandraApp": 
schema_apimachinery_apis_kubedb_v1alpha2_CassandraApp(ref), + "kubedb.dev/apimachinery/apis/kubedb/v1alpha2.CassandraList": schema_apimachinery_apis_kubedb_v1alpha2_CassandraList(ref), + "kubedb.dev/apimachinery/apis/kubedb/v1alpha2.CassandraSpec": schema_apimachinery_apis_kubedb_v1alpha2_CassandraSpec(ref), + "kubedb.dev/apimachinery/apis/kubedb/v1alpha2.CassandraStatus": schema_apimachinery_apis_kubedb_v1alpha2_CassandraStatus(ref), "kubedb.dev/apimachinery/apis/kubedb/v1alpha2.ClickHouse": schema_apimachinery_apis_kubedb_v1alpha2_ClickHouse(ref), "kubedb.dev/apimachinery/apis/kubedb/v1alpha2.ClickHouseKeeperConfig": schema_apimachinery_apis_kubedb_v1alpha2_ClickHouseKeeperConfig(ref), "kubedb.dev/apimachinery/apis/kubedb/v1alpha2.ClickHouseKeeperNode": schema_apimachinery_apis_kubedb_v1alpha2_ClickHouseKeeperNode(ref), @@ -619,6 +624,7 @@ func GetOpenAPIDefinitions(ref common.ReferenceCallback) map[string]common.OpenA "kubedb.dev/apimachinery/apis/kubedb/v1alpha2.RabbitMQStatus": schema_apimachinery_apis_kubedb_v1alpha2_RabbitMQStatus(ref), "kubedb.dev/apimachinery/apis/kubedb/v1alpha2.RabbitmqApp": schema_apimachinery_apis_kubedb_v1alpha2_RabbitmqApp(ref), "kubedb.dev/apimachinery/apis/kubedb/v1alpha2.RabbitmqStatsService": schema_apimachinery_apis_kubedb_v1alpha2_RabbitmqStatsService(ref), + "kubedb.dev/apimachinery/apis/kubedb/v1alpha2.RackSpec": schema_apimachinery_apis_kubedb_v1alpha2_RackSpec(ref), "kubedb.dev/apimachinery/apis/kubedb/v1alpha2.RecoveryTarget": schema_apimachinery_apis_kubedb_v1alpha2_RecoveryTarget(ref), "kubedb.dev/apimachinery/apis/kubedb/v1alpha2.Redis": schema_apimachinery_apis_kubedb_v1alpha2_Redis(ref), "kubedb.dev/apimachinery/apis/kubedb/v1alpha2.RedisClusterSpec": schema_apimachinery_apis_kubedb_v1alpha2_RedisClusterSpec(ref), @@ -650,6 +656,7 @@ func GetOpenAPIDefinitions(ref common.ReferenceCallback) map[string]common.OpenA "kubedb.dev/apimachinery/apis/kubedb/v1alpha2.SolrStatus": schema_apimachinery_apis_kubedb_v1alpha2_SolrStatus(ref), "kubedb.dev/apimachinery/apis/kubedb/v1alpha2.SystemUserSecretsSpec": schema_apimachinery_apis_kubedb_v1alpha2_SystemUserSecretsSpec(ref), "kubedb.dev/apimachinery/apis/kubedb/v1alpha2.TLSPolicy": schema_apimachinery_apis_kubedb_v1alpha2_TLSPolicy(ref), + "kubedb.dev/apimachinery/apis/kubedb/v1alpha2.Topology": schema_apimachinery_apis_kubedb_v1alpha2_Topology(ref), "kubedb.dev/apimachinery/apis/kubedb/v1alpha2.ZooKeeper": schema_apimachinery_apis_kubedb_v1alpha2_ZooKeeper(ref), "kubedb.dev/apimachinery/apis/kubedb/v1alpha2.ZooKeeperApp": schema_apimachinery_apis_kubedb_v1alpha2_ZooKeeperApp(ref), "kubedb.dev/apimachinery/apis/kubedb/v1alpha2.ZooKeeperList": schema_apimachinery_apis_kubedb_v1alpha2_ZooKeeperList(ref), @@ -25473,6 +25480,267 @@ func schema_apimachinery_apis_kubedb_v1alpha2_AutoOpsSpec(ref common.ReferenceCa } } +func schema_apimachinery_apis_kubedb_v1alpha2_Cassandra(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "kind": { + SchemaProps: spec.SchemaProps{ + Description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + Type: []string{"string"}, + Format: "", + }, + }, + "apiVersion": { + SchemaProps: spec.SchemaProps{ + Description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources", + Type: []string{"string"}, + Format: "", + }, + }, + "metadata": { + SchemaProps: spec.SchemaProps{ + Default: map[string]interface{}{}, + Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta"), + }, + }, + "spec": { + SchemaProps: spec.SchemaProps{ + Default: map[string]interface{}{}, + Ref: ref("kubedb.dev/apimachinery/apis/kubedb/v1alpha2.CassandraSpec"), + }, + }, + "status": { + SchemaProps: spec.SchemaProps{ + Default: map[string]interface{}{}, + Ref: ref("kubedb.dev/apimachinery/apis/kubedb/v1alpha2.CassandraStatus"), + }, + }, + }, + }, + }, + Dependencies: []string{ + "k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta", "kubedb.dev/apimachinery/apis/kubedb/v1alpha2.CassandraSpec", "kubedb.dev/apimachinery/apis/kubedb/v1alpha2.CassandraStatus"}, + } +} + +func schema_apimachinery_apis_kubedb_v1alpha2_CassandraApp(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "Cassandra": { + SchemaProps: spec.SchemaProps{ + Ref: ref("kubedb.dev/apimachinery/apis/kubedb/v1alpha2.Cassandra"), + }, + }, + }, + Required: []string{"Cassandra"}, + }, + }, + Dependencies: []string{ + "kubedb.dev/apimachinery/apis/kubedb/v1alpha2.Cassandra"}, + } +} + +func schema_apimachinery_apis_kubedb_v1alpha2_CassandraList(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "CassandraList contains a list of Cassandra", + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "kind": { + SchemaProps: spec.SchemaProps{ + Description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + Type: []string{"string"}, + Format: "", + }, + }, + "apiVersion": { + SchemaProps: spec.SchemaProps{ + Description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources", + Type: []string{"string"}, + Format: "", + }, + }, + "metadata": { + SchemaProps: spec.SchemaProps{ + Default: map[string]interface{}{}, + Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.ListMeta"), + }, + }, + "items": { + SchemaProps: spec.SchemaProps{ + Type: []string{"array"}, + Items: &spec.SchemaOrArray{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Default: map[string]interface{}{}, + Ref: ref("kubedb.dev/apimachinery/apis/kubedb/v1alpha2.Cassandra"), + }, + }, + }, + }, + }, + }, + Required: []string{"items"}, + }, + }, + Dependencies: []string{ + "k8s.io/apimachinery/pkg/apis/meta/v1.ListMeta", "kubedb.dev/apimachinery/apis/kubedb/v1alpha2.Cassandra"}, + } +} + +func schema_apimachinery_apis_kubedb_v1alpha2_CassandraSpec(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "CassandraSpec defines the desired state of Cassandra", + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "version": { + SchemaProps: spec.SchemaProps{ + Description: "Version of Cassandra to be deployed.", + Default: "", + Type: []string{"string"}, + Format: "", + }, + }, + "replicas": { + SchemaProps: spec.SchemaProps{ + Description: "Number of instances to deploy for a Cassandra database.", + Type: []string{"integer"}, + Format: "int32", + }, + }, + "topology": { + SchemaProps: spec.SchemaProps{ + Description: "rack", + Ref: ref("kubedb.dev/apimachinery/apis/kubedb/v1alpha2.Topology"), + }, + }, + "storageType": { + SchemaProps: spec.SchemaProps{ + Description: "StorageType can be durable (default) or ephemeral", + Type: []string{"string"}, + Format: "", + }, + }, + "storage": { + SchemaProps: spec.SchemaProps{ + Description: "Storage to specify how storage shall be used.", + Ref: ref("k8s.io/api/core/v1.PersistentVolumeClaimSpec"), + }, + }, + "disableSecurity": { + SchemaProps: spec.SchemaProps{ + Description: "disable security. It disables authentication security of user. If unset, default is false", + Type: []string{"boolean"}, + Format: "", + }, + }, + "authSecret": { + SchemaProps: spec.SchemaProps{ + Description: "Database authentication secret", + Ref: ref("kubedb.dev/apimachinery/apis/kubedb/v1alpha2.SecretReference"), + }, + }, + "configSecret": { + SchemaProps: spec.SchemaProps{ + Description: "ConfigSecret is an optional field to provide custom configuration file for database (i.e. config.properties). 
If specified, this file will be used as configuration file otherwise default configuration file will be used.", + Ref: ref("k8s.io/api/core/v1.LocalObjectReference"), + }, + }, + "podTemplate": { + SchemaProps: spec.SchemaProps{ + Description: "PodTemplate is an optional configuration for pods used to expose database", + Ref: ref("kmodules.xyz/offshoot-api/api/v2.PodTemplateSpec"), + }, + }, + "serviceTemplates": { + SchemaProps: spec.SchemaProps{ + Description: "ServiceTemplates is an optional configuration for services used to expose database", + Type: []string{"array"}, + Items: &spec.SchemaOrArray{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Default: map[string]interface{}{}, + Ref: ref("kubedb.dev/apimachinery/apis/kubedb/v1alpha2.NamedServiceTemplateSpec"), + }, + }, + }, + }, + }, + "deletionPolicy": { + SchemaProps: spec.SchemaProps{ + Description: "DeletionPolicy controls the delete operation for database", + Type: []string{"string"}, + Format: "", + }, + }, + "healthChecker": { + SchemaProps: spec.SchemaProps{ + Description: "HealthChecker defines attributes of the health checker", + Default: map[string]interface{}{}, + Ref: ref("kmodules.xyz/client-go/api/v1.HealthCheckSpec"), + }, + }, + }, + Required: []string{"version"}, + }, + }, + Dependencies: []string{ + "k8s.io/api/core/v1.LocalObjectReference", "k8s.io/api/core/v1.PersistentVolumeClaimSpec", "kmodules.xyz/client-go/api/v1.HealthCheckSpec", "kmodules.xyz/offshoot-api/api/v2.PodTemplateSpec", "kubedb.dev/apimachinery/apis/kubedb/v1alpha2.NamedServiceTemplateSpec", "kubedb.dev/apimachinery/apis/kubedb/v1alpha2.SecretReference", "kubedb.dev/apimachinery/apis/kubedb/v1alpha2.Topology"}, + } +} + +func schema_apimachinery_apis_kubedb_v1alpha2_CassandraStatus(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "CassandraStatus defines the observed state of Cassandra", + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "phase": { + SchemaProps: spec.SchemaProps{ + Description: "Specifies the current phase of the database", + Type: []string{"string"}, + Format: "", + }, + }, + "observedGeneration": { + SchemaProps: spec.SchemaProps{ + Description: "observedGeneration is the most recent generation observed for this resource. 
It corresponds to the resource's generation, which is updated on mutation by the API Server.", + Type: []string{"integer"}, + Format: "int64", + }, + }, + "conditions": { + SchemaProps: spec.SchemaProps{ + Description: "Conditions applied to the database, such as approval or denial.", + Type: []string{"array"}, + Items: &spec.SchemaOrArray{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Default: map[string]interface{}{}, + Ref: ref("kmodules.xyz/client-go/api/v1.Condition"), + }, + }, + }, + }, + }, + }, + }, + }, + Dependencies: []string{ + "kmodules.xyz/client-go/api/v1.Condition"}, + } +} + func schema_apimachinery_apis_kubedb_v1alpha2_ClickHouse(ref common.ReferenceCallback) common.OpenAPIDefinition { return common.OpenAPIDefinition{ Schema: spec.Schema{ @@ -28295,6 +28563,13 @@ func schema_apimachinery_apis_kubedb_v1alpha2_KafkaSpec(ref common.ReferenceCall Ref: ref("kubedb.dev/apimachinery/apis/kubedb/v1alpha2.SecretReference"), }, }, + "halted": { + SchemaProps: spec.SchemaProps{ + Description: "Indicates that the database is halted and all offshoot Kubernetes resources except PVCs are deleted.", + Type: []string{"boolean"}, + Format: "", + }, + }, "tls": { SchemaProps: spec.SchemaProps{ Description: "TLS contains tls configurations", @@ -32468,6 +32743,21 @@ func schema_apimachinery_apis_kubedb_v1alpha2_RabbitMQSpec(ref common.ReferenceC Format: "", }, }, + "disabledProtocols": { + SchemaProps: spec.SchemaProps{ + Description: "Indicates that the RabbitMQ Protocols that are required to be disabled on bootstrap.", + Type: []string{"array"}, + Items: &spec.SchemaOrArray{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Default: "", + Type: []string{"string"}, + Format: "", + }, + }, + }, + }, + }, "monitor": { SchemaProps: spec.SchemaProps{ Description: "Monitor is used monitor database instance", @@ -32580,6 +32870,53 @@ func schema_apimachinery_apis_kubedb_v1alpha2_RabbitmqStatsService(ref common.Re } } +func schema_apimachinery_apis_kubedb_v1alpha2_RackSpec(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "name": { + SchemaProps: spec.SchemaProps{ + Description: "rack Name", + Type: []string{"string"}, + Format: "", + }, + }, + "replicas": { + SchemaProps: spec.SchemaProps{ + Description: "Number of replica for each shard to deploy for a rack.", + Type: []string{"integer"}, + Format: "int32", + }, + }, + "podTemplate": { + SchemaProps: spec.SchemaProps{ + Description: "PodTemplate is an optional configuration for pods used to expose database", + Ref: ref("kmodules.xyz/offshoot-api/api/v2.PodTemplateSpec"), + }, + }, + "storage": { + SchemaProps: spec.SchemaProps{ + Description: "Storage to specify how storage shall be used.", + Ref: ref("k8s.io/api/core/v1.PersistentVolumeClaimSpec"), + }, + }, + "storageType": { + SchemaProps: spec.SchemaProps{ + Description: "StorageType can be durable (default) or ephemeral", + Type: []string{"string"}, + Format: "", + }, + }, + }, + }, + }, + Dependencies: []string{ + "k8s.io/api/core/v1.PersistentVolumeClaimSpec", "kmodules.xyz/offshoot-api/api/v2.PodTemplateSpec"}, + } +} + func schema_apimachinery_apis_kubedb_v1alpha2_RecoveryTarget(ref common.ReferenceCallback) common.OpenAPIDefinition { return common.OpenAPIDefinition{ Schema: spec.Schema{ @@ -34331,6 +34668,34 @@ func schema_apimachinery_apis_kubedb_v1alpha2_TLSPolicy(ref common.ReferenceCall } } +func 
schema_apimachinery_apis_kubedb_v1alpha2_Topology(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "rack": { + SchemaProps: spec.SchemaProps{ + Description: "cassandra rack Structure", + Type: []string{"array"}, + Items: &spec.SchemaOrArray{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Default: map[string]interface{}{}, + Ref: ref("kubedb.dev/apimachinery/apis/kubedb/v1alpha2.RackSpec"), + }, + }, + }, + }, + }, + }, + }, + }, + Dependencies: []string{ + "kubedb.dev/apimachinery/apis/kubedb/v1alpha2.RackSpec"}, + } +} + func schema_apimachinery_apis_kubedb_v1alpha2_ZooKeeper(ref common.ReferenceCallback) common.OpenAPIDefinition { return common.OpenAPIDefinition{ Schema: spec.Schema{ diff --git a/vendor/kubedb.dev/apimachinery/apis/kubedb/v1alpha2/pgbouncer_types.go b/vendor/kubedb.dev/apimachinery/apis/kubedb/v1alpha2/pgbouncer_types.go index 11c0b5d37..f618d4725 100644 --- a/vendor/kubedb.dev/apimachinery/apis/kubedb/v1alpha2/pgbouncer_types.go +++ b/vendor/kubedb.dev/apimachinery/apis/kubedb/v1alpha2/pgbouncer_types.go @@ -39,7 +39,7 @@ const ( // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object // +kubebuilder:object:root=true -// +kubebuilder:resource:path=pgbouncers,singular=pgbouncer,shortName=pb,categories={proxy,kubedb,appscode,all} +// +kubebuilder:resource:path=pgbouncers,singular=pgbouncer,shortName=pb,categories={datastore,kubedb,appscode,all} // +kubebuilder:subresource:status // +kubebuilder:printcolumn:name="Version",type="string",JSONPath=".spec.version" // +kubebuilder:printcolumn:name="Status",type="string",JSONPath=".status.phase" diff --git a/vendor/kubedb.dev/apimachinery/apis/kubedb/v1alpha2/rabbitmq_helpers.go b/vendor/kubedb.dev/apimachinery/apis/kubedb/v1alpha2/rabbitmq_helpers.go index 4a49fac32..44a5e1698 100644 --- a/vendor/kubedb.dev/apimachinery/apis/kubedb/v1alpha2/rabbitmq_helpers.go +++ b/vendor/kubedb.dev/apimachinery/apis/kubedb/v1alpha2/rabbitmq_helpers.go @@ -20,6 +20,7 @@ import ( "context" "fmt" "path/filepath" + "slices" "strings" "kubedb.dev/apimachinery/apis" @@ -128,8 +129,8 @@ func (r *RabbitMQ) GoverningServiceName() string { return meta_util.NameWithSuffix(r.ServiceName(), "pods") } -func (r *RabbitMQ) StandbyServiceName() string { - return meta_util.NameWithPrefix(r.ServiceName(), kubedb.KafkaStandbyServiceSuffix) +func (r *RabbitMQ) DashboardServiceName() string { + return meta_util.NameWithSuffix(r.ServiceName(), "dashboard") } func (r *RabbitMQ) offshootLabels(selector, override map[string]string) map[string]string { @@ -413,6 +414,10 @@ func (r *RabbitMQ) SetHealthCheckerDefaults() { } } +func (r *RabbitMQ) IsProtocolDisabled(protocol RabbitMQProtocol) bool { + return slices.Contains(r.Spec.DisabledProtocols, protocol) +} + func (r *RabbitMQ) ReplicasAreReady(lister pslister.PetSetLister) (bool, string, error) { // Desire number of petSets expectedItems := 1 diff --git a/vendor/kubedb.dev/apimachinery/apis/kubedb/v1alpha2/rabbitmq_types.go b/vendor/kubedb.dev/apimachinery/apis/kubedb/v1alpha2/rabbitmq_types.go index 200804218..050c8a954 100644 --- a/vendor/kubedb.dev/apimachinery/apis/kubedb/v1alpha2/rabbitmq_types.go +++ b/vendor/kubedb.dev/apimachinery/apis/kubedb/v1alpha2/rabbitmq_types.go @@ -39,7 +39,7 @@ const ( // +kubebuilder:object:root=true // +kubebuilder:subresource:status -// +kubebuilder:resource:shortName=rm,scope=Namespaced 
+// +kubebuilder:resource:path=rabbitmqs,singular=rabbitmq,shortName=rm,categories={datastore,kubedb,appscode,all} // +kubebuilder:printcolumn:name="Type",type="string",JSONPath=".apiVersion" // +kubebuilder:printcolumn:name="Version",type="string",JSONPath=".spec.version" // +kubebuilder:printcolumn:name="Status",type="string",JSONPath=".status.phase" @@ -100,6 +100,10 @@ type RabbitMQSpec struct { // +optional Halted bool `json:"halted,omitempty"` + // Indicates that the RabbitMQ Protocols that are required to be disabled on bootstrap. + // +optional + DisabledProtocols []RabbitMQProtocol `json:"disabledProtocols,omitempty"` + // Monitor is used monitor database instance // +optional Monitor *mona.AgentSpec `json:"monitor,omitempty"` @@ -147,6 +151,18 @@ const ( RabbitmqServerCert RabbitMQCertificateAlias = "server" ) +// +kubebuilder:validation:Enum=http;amqp;mqtt;stomp;web_mqtt;web_stomp +type RabbitMQProtocol string + +const ( + RabbitmqProtocolHTTP RabbitMQProtocol = "http" + RabbitmqProtocolAMQP RabbitMQProtocol = "amqp" + RabbitmqProtocolMQTT RabbitMQProtocol = "mqtt" + RabbitmqProtocolSTOMP RabbitMQProtocol = "stomp" + RabbitmqProtocolWEBMQTT RabbitMQProtocol = "web_mqtt" + RabbitmqProtocolWEBSTOMP RabbitMQProtocol = "web_stomp" +) + // RabbitMQList contains a list of RabbitMQ // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object diff --git a/vendor/kubedb.dev/apimachinery/apis/kubedb/v1alpha2/register.go b/vendor/kubedb.dev/apimachinery/apis/kubedb/v1alpha2/register.go index e7385bda5..b25ef77fd 100644 --- a/vendor/kubedb.dev/apimachinery/apis/kubedb/v1alpha2/register.go +++ b/vendor/kubedb.dev/apimachinery/apis/kubedb/v1alpha2/register.go @@ -54,6 +54,8 @@ func Resource(resource string) schema.GroupResource { // Adds the list of known types to api.Scheme. 
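Together, the DisabledProtocols field and the IsProtocolDisabled helper above give callers a one-line check before wiring up a listener. A short usage sketch against the v1alpha2 types from this package:

package main

import (
	"fmt"

	"kubedb.dev/apimachinery/apis/kubedb/v1alpha2"
)

func main() {
	rm := v1alpha2.RabbitMQ{}
	rm.Spec.DisabledProtocols = []v1alpha2.RabbitMQProtocol{
		v1alpha2.RabbitmqProtocolMQTT,
		v1alpha2.RabbitmqProtocolSTOMP,
	}

	// An operator can skip listener and port setup for disabled protocols.
	fmt.Println(rm.IsProtocolDisabled(v1alpha2.RabbitmqProtocolMQTT)) // true
	fmt.Println(rm.IsProtocolDisabled(v1alpha2.RabbitmqProtocolAMQP)) // false
}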
func addKnownTypes(scheme *runtime.Scheme) error { scheme.AddKnownTypes(SchemeGroupVersion, + &Cassandra{}, + &CassandraList{}, &ClickHouse{}, &ClickHouseList{}, &Druid{}, diff --git a/vendor/kubedb.dev/apimachinery/apis/kubedb/v1alpha2/singlestore_types.go b/vendor/kubedb.dev/apimachinery/apis/kubedb/v1alpha2/singlestore_types.go index af13b06d3..da7375461 100644 --- a/vendor/kubedb.dev/apimachinery/apis/kubedb/v1alpha2/singlestore_types.go +++ b/vendor/kubedb.dev/apimachinery/apis/kubedb/v1alpha2/singlestore_types.go @@ -39,7 +39,7 @@ const ( // +kubebuilder:object:root=true // +kubebuilder:subresource:status -// +kubebuilder:resource:shortName=sdb,scope=Namespaced +// +kubebuilder:resource:path=singlestores,singular=singlestore,shortName=sdb,categories={datastore,kubedb,appscode,all} // +kubebuilder:printcolumn:name="Type",type="string",JSONPath=".apiVersion" // +kubebuilder:printcolumn:name="Version",type="string",JSONPath=".spec.version" // +kubebuilder:printcolumn:name="Status",type="string",JSONPath=".status.phase" diff --git a/vendor/kubedb.dev/apimachinery/apis/kubedb/v1alpha2/solr_helpers.go b/vendor/kubedb.dev/apimachinery/apis/kubedb/v1alpha2/solr_helpers.go index 0cccf01a2..a54476ff1 100644 --- a/vendor/kubedb.dev/apimachinery/apis/kubedb/v1alpha2/solr_helpers.go +++ b/vendor/kubedb.dev/apimachinery/apis/kubedb/v1alpha2/solr_helpers.go @@ -19,6 +19,7 @@ package v1alpha2 import ( "context" "fmt" + "sort" "strings" "kubedb.dev/apimachinery/apis" @@ -113,13 +114,18 @@ func (s *Solr) Merge(opt map[string]string) map[string]string { } func (s *Solr) Append(opt map[string]string) string { + key := make([]string, 0) + for x := range opt { + key = append(key, x) + } + sort.Strings(key) fl := 0 as := "" - for x, y := range opt { + for _, x := range key { if fl == 1 { as += " " } - as += fmt.Sprintf("%s=%s", x, y) + as += fmt.Sprintf("%s=%s", x, opt[x]) fl = 1 } diff --git a/vendor/kubedb.dev/apimachinery/apis/kubedb/v1alpha2/solr_types.go b/vendor/kubedb.dev/apimachinery/apis/kubedb/v1alpha2/solr_types.go index 75a3b3c9a..950edb5de 100644 --- a/vendor/kubedb.dev/apimachinery/apis/kubedb/v1alpha2/solr_types.go +++ b/vendor/kubedb.dev/apimachinery/apis/kubedb/v1alpha2/solr_types.go @@ -39,7 +39,7 @@ const ( // +kubebuilder:object:root=true // +kubebuilder:subresource:status -// +kubebuilder:resource:shortName=sl,scope=Namespaced +// +kubebuilder:resource:path=solrs,singular=solr,shortName=sl,categories={datastore,kubedb,appscode,all} // +kubebuilder:printcolumn:name="Type",type="string",JSONPath=".apiVersion" // +kubebuilder:printcolumn:name="Version",type="string",JSONPath=".spec.version" // +kubebuilder:printcolumn:name="Status",type="string",JSONPath=".status.phase" diff --git a/vendor/kubedb.dev/apimachinery/apis/kubedb/v1alpha2/types.go b/vendor/kubedb.dev/apimachinery/apis/kubedb/v1alpha2/types.go index b8353244f..8e3d41bb2 100644 --- a/vendor/kubedb.dev/apimachinery/apis/kubedb/v1alpha2/types.go +++ b/vendor/kubedb.dev/apimachinery/apis/kubedb/v1alpha2/types.go @@ -124,13 +124,14 @@ const ( TerminationPolicyDoNotTerminate TerminationPolicy = "DoNotTerminate" ) -// +kubebuilder:validation:Enum=primary;standby;stats +// +kubebuilder:validation:Enum=primary;standby;stats;dashboard type ServiceAlias string const ( - PrimaryServiceAlias ServiceAlias = "primary" - StandbyServiceAlias ServiceAlias = "standby" - StatsServiceAlias ServiceAlias = "stats" + PrimaryServiceAlias ServiceAlias = "primary" + StandbyServiceAlias ServiceAlias = "standby" + StatsServiceAlias ServiceAlias = "stats" + 
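The sort.Strings call added to Solr.Append above makes the output independent of Go's randomized map iteration order, which matters whenever the result feeds a config checksum or is diffed across reconciles. A quick sketch:

package main

import (
	"fmt"

	"kubedb.dev/apimachinery/apis/kubedb/v1alpha2"
)

func main() {
	s := &v1alpha2.Solr{}
	// Keys are now emitted in sorted order, so the result is stable:
	fmt.Println(s.Append(map[string]string{"b": "2", "c": "3", "a": "1"}))
	// Output: a=1 b=2 c=3
}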
DashboardServiceAlias ServiceAlias = "dashboard" ) // +kubebuilder:validation:Enum=DNS;IP;IPv4;IPv6 diff --git a/vendor/kubedb.dev/apimachinery/apis/kubedb/v1alpha2/zookeeper_types.go b/vendor/kubedb.dev/apimachinery/apis/kubedb/v1alpha2/zookeeper_types.go index a60b5a470..2e09dd241 100644 --- a/vendor/kubedb.dev/apimachinery/apis/kubedb/v1alpha2/zookeeper_types.go +++ b/vendor/kubedb.dev/apimachinery/apis/kubedb/v1alpha2/zookeeper_types.go @@ -39,7 +39,7 @@ const ( // +kubebuilder:object:root=true // +kubebuilder:subresource:status -// +kubebuilder:resource:shortName=zk,scope=Namespaced +// +kubebuilder:resource:path=zookeepers,singular=zookeeper,shortName=zk,categories={datastore,kubedb,appscode,all} // +kubebuilder:printcolumn:name="Type",type="string",JSONPath=".apiVersion" // +kubebuilder:printcolumn:name="Version",type="string",JSONPath=".spec.version" // +kubebuilder:printcolumn:name="Status",type="string",JSONPath=".status.phase" diff --git a/vendor/kubedb.dev/apimachinery/apis/kubedb/v1alpha2/zz_generated.conversion.go b/vendor/kubedb.dev/apimachinery/apis/kubedb/v1alpha2/zz_generated.conversion.go index 2fcd347bc..75a0fcdfc 100644 --- a/vendor/kubedb.dev/apimachinery/apis/kubedb/v1alpha2/zz_generated.conversion.go +++ b/vendor/kubedb.dev/apimachinery/apis/kubedb/v1alpha2/zz_generated.conversion.go @@ -2004,6 +2004,7 @@ func autoConvert_v1alpha2_KafkaSpec_To_v1_KafkaSpec(in *KafkaSpec, out *v1.Kafka out.AuthSecret = (*v1.SecretReference)(unsafe.Pointer(in.AuthSecret)) out.ConfigSecret = (*corev1.LocalObjectReference)(unsafe.Pointer(in.ConfigSecret)) out.KeystoreCredSecret = (*v1.SecretReference)(unsafe.Pointer(in.KeystoreCredSecret)) + out.Halted = in.Halted out.TLS = (*clientgoapiv1.TLSConfig)(unsafe.Pointer(in.TLS)) out.PodTemplate = in.PodTemplate out.ServiceTemplates = *(*[]v1.NamedServiceTemplateSpec)(unsafe.Pointer(&in.ServiceTemplates)) @@ -2038,6 +2039,7 @@ func autoConvert_v1_KafkaSpec_To_v1alpha2_KafkaSpec(in *v1.KafkaSpec, out *Kafka out.AuthSecret = (*SecretReference)(unsafe.Pointer(in.AuthSecret)) out.ConfigSecret = (*corev1.LocalObjectReference)(unsafe.Pointer(in.ConfigSecret)) out.KeystoreCredSecret = (*SecretReference)(unsafe.Pointer(in.KeystoreCredSecret)) + out.Halted = in.Halted out.TLS = (*clientgoapiv1.TLSConfig)(unsafe.Pointer(in.TLS)) out.PodTemplate = in.PodTemplate out.ServiceTemplates = *(*[]NamedServiceTemplateSpec)(unsafe.Pointer(&in.ServiceTemplates)) diff --git a/vendor/kubedb.dev/apimachinery/apis/kubedb/v1alpha2/zz_generated.deepcopy.go b/vendor/kubedb.dev/apimachinery/apis/kubedb/v1alpha2/zz_generated.deepcopy.go index 5fb64b38d..ec9f6e8e0 100644 --- a/vendor/kubedb.dev/apimachinery/apis/kubedb/v1alpha2/zz_generated.deepcopy.go +++ b/vendor/kubedb.dev/apimachinery/apis/kubedb/v1alpha2/zz_generated.deepcopy.go @@ -171,6 +171,165 @@ func (in *AutoOpsSpec) DeepCopy() *AutoOpsSpec { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Cassandra) DeepCopyInto(out *Cassandra) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Cassandra. 
+func (in *Cassandra) DeepCopy() *Cassandra { + if in == nil { + return nil + } + out := new(Cassandra) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *Cassandra) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CassandraApp) DeepCopyInto(out *CassandraApp) { + *out = *in + if in.Cassandra != nil { + in, out := &in.Cassandra, &out.Cassandra + *out = new(Cassandra) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CassandraApp. +func (in *CassandraApp) DeepCopy() *CassandraApp { + if in == nil { + return nil + } + out := new(CassandraApp) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CassandraList) DeepCopyInto(out *CassandraList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Cassandra, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CassandraList. +func (in *CassandraList) DeepCopy() *CassandraList { + if in == nil { + return nil + } + out := new(CassandraList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *CassandraList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CassandraSpec) DeepCopyInto(out *CassandraSpec) { + *out = *in + if in.Replicas != nil { + in, out := &in.Replicas, &out.Replicas + *out = new(int32) + **out = **in + } + if in.Topology != nil { + in, out := &in.Topology, &out.Topology + *out = new(Topology) + (*in).DeepCopyInto(*out) + } + if in.Storage != nil { + in, out := &in.Storage, &out.Storage + *out = new(corev1.PersistentVolumeClaimSpec) + (*in).DeepCopyInto(*out) + } + if in.AuthSecret != nil { + in, out := &in.AuthSecret, &out.AuthSecret + *out = new(SecretReference) + **out = **in + } + if in.ConfigSecret != nil { + in, out := &in.ConfigSecret, &out.ConfigSecret + *out = new(corev1.LocalObjectReference) + **out = **in + } + if in.PodTemplate != nil { + in, out := &in.PodTemplate, &out.PodTemplate + *out = new(v2.PodTemplateSpec) + (*in).DeepCopyInto(*out) + } + if in.ServiceTemplates != nil { + in, out := &in.ServiceTemplates, &out.ServiceTemplates + *out = make([]NamedServiceTemplateSpec, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + in.HealthChecker.DeepCopyInto(&out.HealthChecker) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CassandraSpec. +func (in *CassandraSpec) DeepCopy() *CassandraSpec { + if in == nil { + return nil + } + out := new(CassandraSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *CassandraStatus) DeepCopyInto(out *CassandraStatus) { + *out = *in + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]apiv1.Condition, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CassandraStatus. +func (in *CassandraStatus) DeepCopy() *CassandraStatus { + if in == nil { + return nil + } + out := new(CassandraStatus) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *ClickHouse) DeepCopyInto(out *ClickHouse) { *out = *in @@ -4340,6 +4499,11 @@ func (in *RabbitMQSpec) DeepCopyInto(out *RabbitMQSpec) { (*in)[i].DeepCopyInto(&(*out)[i]) } } + if in.DisabledProtocols != nil { + in, out := &in.DisabledProtocols, &out.DisabledProtocols + *out = make([]RabbitMQProtocol, len(*in)) + copy(*out, *in) + } if in.Monitor != nil { in, out := &in.Monitor, &out.Monitor *out = new(monitoringagentapiapiv1.AgentSpec) @@ -4424,6 +4588,37 @@ func (in *RabbitmqStatsService) DeepCopy() *RabbitmqStatsService { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RackSpec) DeepCopyInto(out *RackSpec) { + *out = *in + if in.Replicas != nil { + in, out := &in.Replicas, &out.Replicas + *out = new(int32) + **out = **in + } + if in.PodTemplate != nil { + in, out := &in.PodTemplate, &out.PodTemplate + *out = new(v2.PodTemplateSpec) + (*in).DeepCopyInto(*out) + } + if in.Storage != nil { + in, out := &in.Storage, &out.Storage + *out = new(corev1.PersistentVolumeClaimSpec) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RackSpec. +func (in *RackSpec) DeepCopy() *RackSpec { + if in == nil { + return nil + } + out := new(RackSpec) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *RecoveryTarget) DeepCopyInto(out *RecoveryTarget) { *out = *in @@ -5403,6 +5598,29 @@ func (in *TLSPolicy) DeepCopy() *TLSPolicy { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Topology) DeepCopyInto(out *Topology) { + *out = *in + if in.Rack != nil { + in, out := &in.Rack, &out.Rack + *out = make([]RackSpec, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Topology. +func (in *Topology) DeepCopy() *Topology { + if in == nil { + return nil + } + out := new(Topology) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
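These generated DeepCopy helpers allocate fresh memory for every pointer-, map-, and slice-typed field, so mutating a copy never leaks back into the original. A quick sketch with the CassandraSpec copier above:

package main

import (
	"fmt"

	"gomodules.xyz/pointer"
	"kubedb.dev/apimachinery/apis/kubedb/v1alpha2"
)

func main() {
	orig := &v1alpha2.CassandraSpec{Replicas: pointer.Int32P(3)}
	cp := orig.DeepCopy()
	*cp.Replicas = 5 // the copy owns its own *int32
	fmt.Println(*orig.Replicas, *cp.Replicas)
	// Output: 3 5
}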
func (in *ZooKeeper) DeepCopyInto(out *ZooKeeper) { *out = *in diff --git a/vendor/kubedb.dev/apimachinery/crds/autoscaling.kubedb.com_clickhouseautoscalers.yaml b/vendor/kubedb.dev/apimachinery/crds/autoscaling.kubedb.com_clickhouseautoscalers.yaml index 194c5665c..da55e7e86 100644 --- a/vendor/kubedb.dev/apimachinery/crds/autoscaling.kubedb.com_clickhouseautoscalers.yaml +++ b/vendor/kubedb.dev/apimachinery/crds/autoscaling.kubedb.com_clickhouseautoscalers.yaml @@ -9,7 +9,7 @@ spec: group: autoscaling.kubedb.com names: categories: - - datastore + - autoscaler - kubedb - appscode kind: ClickHouseAutoscaler diff --git a/vendor/kubedb.dev/apimachinery/crds/autoscaling.kubedb.com_druidautoscalers.yaml b/vendor/kubedb.dev/apimachinery/crds/autoscaling.kubedb.com_druidautoscalers.yaml index e1d99e74c..9815fdae6 100644 --- a/vendor/kubedb.dev/apimachinery/crds/autoscaling.kubedb.com_druidautoscalers.yaml +++ b/vendor/kubedb.dev/apimachinery/crds/autoscaling.kubedb.com_druidautoscalers.yaml @@ -9,7 +9,7 @@ spec: group: autoscaling.kubedb.com names: categories: - - datastore + - autoscaler - kubedb - appscode kind: DruidAutoscaler diff --git a/vendor/kubedb.dev/apimachinery/crds/autoscaling.kubedb.com_elasticsearchautoscalers.yaml b/vendor/kubedb.dev/apimachinery/crds/autoscaling.kubedb.com_elasticsearchautoscalers.yaml index ae47357e6..2ab79abd0 100644 --- a/vendor/kubedb.dev/apimachinery/crds/autoscaling.kubedb.com_elasticsearchautoscalers.yaml +++ b/vendor/kubedb.dev/apimachinery/crds/autoscaling.kubedb.com_elasticsearchautoscalers.yaml @@ -9,7 +9,7 @@ spec: group: autoscaling.kubedb.com names: categories: - - datastore + - autoscaler - kubedb - appscode kind: ElasticsearchAutoscaler diff --git a/vendor/kubedb.dev/apimachinery/crds/autoscaling.kubedb.com_etcdautoscalers.yaml b/vendor/kubedb.dev/apimachinery/crds/autoscaling.kubedb.com_etcdautoscalers.yaml index 3b1bc7267..853b224f1 100644 --- a/vendor/kubedb.dev/apimachinery/crds/autoscaling.kubedb.com_etcdautoscalers.yaml +++ b/vendor/kubedb.dev/apimachinery/crds/autoscaling.kubedb.com_etcdautoscalers.yaml @@ -9,7 +9,7 @@ spec: group: autoscaling.kubedb.com names: categories: - - datastore + - autoscaler - kubedb - appscode kind: EtcdAutoscaler diff --git a/vendor/kubedb.dev/apimachinery/crds/autoscaling.kubedb.com_ferretdbautoscalers.yaml b/vendor/kubedb.dev/apimachinery/crds/autoscaling.kubedb.com_ferretdbautoscalers.yaml index 2a56973ba..b51130a18 100644 --- a/vendor/kubedb.dev/apimachinery/crds/autoscaling.kubedb.com_ferretdbautoscalers.yaml +++ b/vendor/kubedb.dev/apimachinery/crds/autoscaling.kubedb.com_ferretdbautoscalers.yaml @@ -9,7 +9,7 @@ spec: group: autoscaling.kubedb.com names: categories: - - datastore + - autoscaler - kubedb - appscode kind: FerretDBAutoscaler diff --git a/vendor/kubedb.dev/apimachinery/crds/autoscaling.kubedb.com_kafkaautoscalers.yaml b/vendor/kubedb.dev/apimachinery/crds/autoscaling.kubedb.com_kafkaautoscalers.yaml index 0f8c1d6b9..091aee7ea 100644 --- a/vendor/kubedb.dev/apimachinery/crds/autoscaling.kubedb.com_kafkaautoscalers.yaml +++ b/vendor/kubedb.dev/apimachinery/crds/autoscaling.kubedb.com_kafkaautoscalers.yaml @@ -9,7 +9,7 @@ spec: group: autoscaling.kubedb.com names: categories: - - datastore + - autoscaler - kubedb - appscode kind: KafkaAutoscaler diff --git a/vendor/kubedb.dev/apimachinery/crds/autoscaling.kubedb.com_mariadbautoscalers.yaml b/vendor/kubedb.dev/apimachinery/crds/autoscaling.kubedb.com_mariadbautoscalers.yaml index 2e9946e98..9d0252a87 100644 --- 
a/vendor/kubedb.dev/apimachinery/crds/autoscaling.kubedb.com_mariadbautoscalers.yaml +++ b/vendor/kubedb.dev/apimachinery/crds/autoscaling.kubedb.com_mariadbautoscalers.yaml @@ -9,7 +9,7 @@ spec: group: autoscaling.kubedb.com names: categories: - - datastore + - autoscaler - kubedb - appscode kind: MariaDBAutoscaler diff --git a/vendor/kubedb.dev/apimachinery/crds/autoscaling.kubedb.com_memcachedautoscalers.yaml b/vendor/kubedb.dev/apimachinery/crds/autoscaling.kubedb.com_memcachedautoscalers.yaml index 4d8ffedd1..09b5b3854 100644 --- a/vendor/kubedb.dev/apimachinery/crds/autoscaling.kubedb.com_memcachedautoscalers.yaml +++ b/vendor/kubedb.dev/apimachinery/crds/autoscaling.kubedb.com_memcachedautoscalers.yaml @@ -9,7 +9,7 @@ spec: group: autoscaling.kubedb.com names: categories: - - datastore + - autoscaler - kubedb - appscode kind: MemcachedAutoscaler diff --git a/vendor/kubedb.dev/apimachinery/crds/autoscaling.kubedb.com_mongodbautoscalers.yaml b/vendor/kubedb.dev/apimachinery/crds/autoscaling.kubedb.com_mongodbautoscalers.yaml index fda6184b8..8343eae29 100644 --- a/vendor/kubedb.dev/apimachinery/crds/autoscaling.kubedb.com_mongodbautoscalers.yaml +++ b/vendor/kubedb.dev/apimachinery/crds/autoscaling.kubedb.com_mongodbautoscalers.yaml @@ -9,7 +9,7 @@ spec: group: autoscaling.kubedb.com names: categories: - - datastore + - autoscaler - kubedb - appscode kind: MongoDBAutoscaler diff --git a/vendor/kubedb.dev/apimachinery/crds/autoscaling.kubedb.com_mssqlserverautoscalers.yaml b/vendor/kubedb.dev/apimachinery/crds/autoscaling.kubedb.com_mssqlserverautoscalers.yaml index 32dbce09a..839b853be 100644 --- a/vendor/kubedb.dev/apimachinery/crds/autoscaling.kubedb.com_mssqlserverautoscalers.yaml +++ b/vendor/kubedb.dev/apimachinery/crds/autoscaling.kubedb.com_mssqlserverautoscalers.yaml @@ -9,7 +9,7 @@ spec: group: autoscaling.kubedb.com names: categories: - - datastore + - autoscaler - kubedb - appscode kind: MSSQLServerAutoscaler diff --git a/vendor/kubedb.dev/apimachinery/crds/autoscaling.kubedb.com_mysqlautoscalers.yaml b/vendor/kubedb.dev/apimachinery/crds/autoscaling.kubedb.com_mysqlautoscalers.yaml index bd6ca6977..b63a38497 100644 --- a/vendor/kubedb.dev/apimachinery/crds/autoscaling.kubedb.com_mysqlautoscalers.yaml +++ b/vendor/kubedb.dev/apimachinery/crds/autoscaling.kubedb.com_mysqlautoscalers.yaml @@ -9,7 +9,7 @@ spec: group: autoscaling.kubedb.com names: categories: - - datastore + - autoscaler - kubedb - appscode kind: MySQLAutoscaler diff --git a/vendor/kubedb.dev/apimachinery/crds/autoscaling.kubedb.com_perconaxtradbautoscalers.yaml b/vendor/kubedb.dev/apimachinery/crds/autoscaling.kubedb.com_perconaxtradbautoscalers.yaml index 6ed3e6e24..74313acdd 100644 --- a/vendor/kubedb.dev/apimachinery/crds/autoscaling.kubedb.com_perconaxtradbautoscalers.yaml +++ b/vendor/kubedb.dev/apimachinery/crds/autoscaling.kubedb.com_perconaxtradbautoscalers.yaml @@ -9,7 +9,7 @@ spec: group: autoscaling.kubedb.com names: categories: - - datastore + - autoscaler - kubedb - appscode kind: PerconaXtraDBAutoscaler diff --git a/vendor/kubedb.dev/apimachinery/crds/autoscaling.kubedb.com_pgbouncerautoscalers.yaml b/vendor/kubedb.dev/apimachinery/crds/autoscaling.kubedb.com_pgbouncerautoscalers.yaml index b717ba8d5..d9a393e4d 100644 --- a/vendor/kubedb.dev/apimachinery/crds/autoscaling.kubedb.com_pgbouncerautoscalers.yaml +++ b/vendor/kubedb.dev/apimachinery/crds/autoscaling.kubedb.com_pgbouncerautoscalers.yaml @@ -9,7 +9,7 @@ spec: group: autoscaling.kubedb.com names: categories: - - datastore + - autoscaler 
- kubedb - appscode kind: PgBouncerAutoscaler diff --git a/vendor/kubedb.dev/apimachinery/crds/autoscaling.kubedb.com_pgpoolautoscalers.yaml b/vendor/kubedb.dev/apimachinery/crds/autoscaling.kubedb.com_pgpoolautoscalers.yaml index 8d614ff53..9674aa398 100644 --- a/vendor/kubedb.dev/apimachinery/crds/autoscaling.kubedb.com_pgpoolautoscalers.yaml +++ b/vendor/kubedb.dev/apimachinery/crds/autoscaling.kubedb.com_pgpoolautoscalers.yaml @@ -9,7 +9,7 @@ spec: group: autoscaling.kubedb.com names: categories: - - datastore + - autoscaler - kubedb - appscode kind: PgpoolAutoscaler diff --git a/vendor/kubedb.dev/apimachinery/crds/autoscaling.kubedb.com_postgresautoscalers.yaml b/vendor/kubedb.dev/apimachinery/crds/autoscaling.kubedb.com_postgresautoscalers.yaml index 79541fd2c..8db44c9c6 100644 --- a/vendor/kubedb.dev/apimachinery/crds/autoscaling.kubedb.com_postgresautoscalers.yaml +++ b/vendor/kubedb.dev/apimachinery/crds/autoscaling.kubedb.com_postgresautoscalers.yaml @@ -9,7 +9,7 @@ spec: group: autoscaling.kubedb.com names: categories: - - datastore + - autoscaler - kubedb - appscode kind: PostgresAutoscaler diff --git a/vendor/kubedb.dev/apimachinery/crds/autoscaling.kubedb.com_proxysqlautoscalers.yaml b/vendor/kubedb.dev/apimachinery/crds/autoscaling.kubedb.com_proxysqlautoscalers.yaml index ba3b60cf4..3ba471d48 100644 --- a/vendor/kubedb.dev/apimachinery/crds/autoscaling.kubedb.com_proxysqlautoscalers.yaml +++ b/vendor/kubedb.dev/apimachinery/crds/autoscaling.kubedb.com_proxysqlautoscalers.yaml @@ -9,7 +9,7 @@ spec: group: autoscaling.kubedb.com names: categories: - - datastore + - autoscaler - kubedb - appscode kind: ProxySQLAutoscaler @@ -92,6 +92,13 @@ spec: type: string type: object type: object + databaseRef: + properties: + name: + default: "" + type: string + type: object + x-kubernetes-map-type: atomic opsRequestOptions: properties: apply: @@ -103,15 +110,8 @@ spec: timeout: type: string type: object - proxyRef: - properties: - name: - default: "" - type: string - type: object - x-kubernetes-map-type: atomic required: - - proxyRef + - databaseRef type: object status: properties: diff --git a/vendor/kubedb.dev/apimachinery/crds/autoscaling.kubedb.com_rabbitmqautoscalers.yaml b/vendor/kubedb.dev/apimachinery/crds/autoscaling.kubedb.com_rabbitmqautoscalers.yaml index 7d8eeff61..73a3acba7 100644 --- a/vendor/kubedb.dev/apimachinery/crds/autoscaling.kubedb.com_rabbitmqautoscalers.yaml +++ b/vendor/kubedb.dev/apimachinery/crds/autoscaling.kubedb.com_rabbitmqautoscalers.yaml @@ -9,7 +9,7 @@ spec: group: autoscaling.kubedb.com names: categories: - - datastore + - autoscaler - kubedb - appscode kind: RabbitMQAutoscaler diff --git a/vendor/kubedb.dev/apimachinery/crds/autoscaling.kubedb.com_redisautoscalers.yaml b/vendor/kubedb.dev/apimachinery/crds/autoscaling.kubedb.com_redisautoscalers.yaml index e1e394029..33cbd50ee 100644 --- a/vendor/kubedb.dev/apimachinery/crds/autoscaling.kubedb.com_redisautoscalers.yaml +++ b/vendor/kubedb.dev/apimachinery/crds/autoscaling.kubedb.com_redisautoscalers.yaml @@ -9,7 +9,7 @@ spec: group: autoscaling.kubedb.com names: categories: - - datastore + - autoscaler - kubedb - appscode kind: RedisAutoscaler diff --git a/vendor/kubedb.dev/apimachinery/crds/autoscaling.kubedb.com_redissentinelautoscalers.yaml b/vendor/kubedb.dev/apimachinery/crds/autoscaling.kubedb.com_redissentinelautoscalers.yaml index eb1b73b06..638db82ea 100644 --- a/vendor/kubedb.dev/apimachinery/crds/autoscaling.kubedb.com_redissentinelautoscalers.yaml +++ 
b/vendor/kubedb.dev/apimachinery/crds/autoscaling.kubedb.com_redissentinelautoscalers.yaml @@ -9,7 +9,7 @@ spec: group: autoscaling.kubedb.com names: categories: - - datastore + - autoscaler - kubedb - appscode kind: RedisSentinelAutoscaler diff --git a/vendor/kubedb.dev/apimachinery/crds/autoscaling.kubedb.com_singlestoreautoscalers.yaml b/vendor/kubedb.dev/apimachinery/crds/autoscaling.kubedb.com_singlestoreautoscalers.yaml index 7ca240ab3..a01e9d8fd 100644 --- a/vendor/kubedb.dev/apimachinery/crds/autoscaling.kubedb.com_singlestoreautoscalers.yaml +++ b/vendor/kubedb.dev/apimachinery/crds/autoscaling.kubedb.com_singlestoreautoscalers.yaml @@ -9,7 +9,7 @@ spec: group: autoscaling.kubedb.com names: categories: - - datastore + - autoscaler - kubedb - appscode kind: SinglestoreAutoscaler diff --git a/vendor/kubedb.dev/apimachinery/crds/autoscaling.kubedb.com_solrautoscalers.yaml b/vendor/kubedb.dev/apimachinery/crds/autoscaling.kubedb.com_solrautoscalers.yaml index e23f3c80c..c6483647b 100644 --- a/vendor/kubedb.dev/apimachinery/crds/autoscaling.kubedb.com_solrautoscalers.yaml +++ b/vendor/kubedb.dev/apimachinery/crds/autoscaling.kubedb.com_solrautoscalers.yaml @@ -9,7 +9,7 @@ spec: group: autoscaling.kubedb.com names: categories: - - datastore + - autoscaler - kubedb - appscode kind: SolrAutoscaler diff --git a/vendor/kubedb.dev/apimachinery/crds/autoscaling.kubedb.com_zookeeperautoscalers.yaml b/vendor/kubedb.dev/apimachinery/crds/autoscaling.kubedb.com_zookeeperautoscalers.yaml index 278334b4b..2d07425fa 100644 --- a/vendor/kubedb.dev/apimachinery/crds/autoscaling.kubedb.com_zookeeperautoscalers.yaml +++ b/vendor/kubedb.dev/apimachinery/crds/autoscaling.kubedb.com_zookeeperautoscalers.yaml @@ -9,7 +9,7 @@ spec: group: autoscaling.kubedb.com names: categories: - - datastore + - autoscaler - kubedb - appscode kind: ZooKeeperAutoscaler diff --git a/vendor/kubedb.dev/apimachinery/crds/catalog.kubedb.com_cassandraversions.yaml b/vendor/kubedb.dev/apimachinery/crds/catalog.kubedb.com_cassandraversions.yaml new file mode 100644 index 000000000..11a67b702 --- /dev/null +++ b/vendor/kubedb.dev/apimachinery/crds/catalog.kubedb.com_cassandraversions.yaml @@ -0,0 +1,95 @@ +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + creationTimestamp: null + labels: + app.kubernetes.io/name: kubedb + name: cassandraversions.catalog.kubedb.com +spec: + group: catalog.kubedb.com + names: + categories: + - catalog + - kubedb + - appscode + kind: CassandraVersion + listKind: CassandraVersionList + plural: cassandraversions + shortNames: + - csversion + singular: cassandraversion + scope: Cluster + versions: + - additionalPrinterColumns: + - jsonPath: .spec.version + name: Version + type: string + - jsonPath: .spec.db.image + name: DB_IMAGE + type: string + - jsonPath: .spec.deprecated + name: Deprecated + type: boolean + - jsonPath: .metadata.creationTimestamp + name: Age + type: date + name: v1alpha1 + schema: + openAPIV3Schema: + properties: + apiVersion: + type: string + kind: + type: string + metadata: + type: object + spec: + properties: + db: + properties: + image: + type: string + required: + - image + type: object + initContainer: + properties: + image: + type: string + required: + - image + type: object + securityContext: + properties: + runAsUser: + format: int64 + type: integer + type: object + ui: + items: + properties: + disable: + type: boolean + name: + type: string + values: + x-kubernetes-preserve-unknown-fields: true + version: + type: string + required: + - name + 
type: object + type: array + version: + type: string + required: + - db + - initContainer + - version + type: object + status: + type: object + type: object + served: true + storage: true + subresources: {} diff --git a/vendor/kubedb.dev/apimachinery/crds/catalog.kubedb.com_clickhouseversions.yaml b/vendor/kubedb.dev/apimachinery/crds/catalog.kubedb.com_clickhouseversions.yaml index 16e119cbc..4de232c6a 100644 --- a/vendor/kubedb.dev/apimachinery/crds/catalog.kubedb.com_clickhouseversions.yaml +++ b/vendor/kubedb.dev/apimachinery/crds/catalog.kubedb.com_clickhouseversions.yaml @@ -9,7 +9,7 @@ spec: group: catalog.kubedb.com names: categories: - - datastore + - catalog - kubedb - appscode kind: ClickHouseVersion diff --git a/vendor/kubedb.dev/apimachinery/crds/catalog.kubedb.com_druidversions.yaml b/vendor/kubedb.dev/apimachinery/crds/catalog.kubedb.com_druidversions.yaml index 412c56b0f..a7617cf43 100644 --- a/vendor/kubedb.dev/apimachinery/crds/catalog.kubedb.com_druidversions.yaml +++ b/vendor/kubedb.dev/apimachinery/crds/catalog.kubedb.com_druidversions.yaml @@ -9,7 +9,7 @@ spec: group: catalog.kubedb.com names: categories: - - datastore + - catalog - kubedb - appscode kind: DruidVersion diff --git a/vendor/kubedb.dev/apimachinery/crds/catalog.kubedb.com_elasticsearchversions.yaml b/vendor/kubedb.dev/apimachinery/crds/catalog.kubedb.com_elasticsearchversions.yaml index 73d030c5d..7bf1424dd 100644 --- a/vendor/kubedb.dev/apimachinery/crds/catalog.kubedb.com_elasticsearchversions.yaml +++ b/vendor/kubedb.dev/apimachinery/crds/catalog.kubedb.com_elasticsearchversions.yaml @@ -9,7 +9,7 @@ spec: group: catalog.kubedb.com names: categories: - - datastore + - catalog - kubedb - appscode kind: ElasticsearchVersion diff --git a/vendor/kubedb.dev/apimachinery/crds/catalog.kubedb.com_etcdversions.yaml b/vendor/kubedb.dev/apimachinery/crds/catalog.kubedb.com_etcdversions.yaml index 910c6f496..a364d7f1a 100644 --- a/vendor/kubedb.dev/apimachinery/crds/catalog.kubedb.com_etcdversions.yaml +++ b/vendor/kubedb.dev/apimachinery/crds/catalog.kubedb.com_etcdversions.yaml @@ -9,7 +9,7 @@ spec: group: catalog.kubedb.com names: categories: - - datastore + - catalog - kubedb - appscode kind: EtcdVersion diff --git a/vendor/kubedb.dev/apimachinery/crds/catalog.kubedb.com_ferretdbversions.yaml b/vendor/kubedb.dev/apimachinery/crds/catalog.kubedb.com_ferretdbversions.yaml index 6ccc2c36e..0fc0fc8d6 100644 --- a/vendor/kubedb.dev/apimachinery/crds/catalog.kubedb.com_ferretdbversions.yaml +++ b/vendor/kubedb.dev/apimachinery/crds/catalog.kubedb.com_ferretdbversions.yaml @@ -9,7 +9,7 @@ spec: group: catalog.kubedb.com names: categories: - - datastore + - catalog - kubedb - appscode kind: FerretDBVersion diff --git a/vendor/kubedb.dev/apimachinery/crds/catalog.kubedb.com_kafkaconnectorversions.yaml b/vendor/kubedb.dev/apimachinery/crds/catalog.kubedb.com_kafkaconnectorversions.yaml index 03d1c33a2..a6a94d3eb 100644 --- a/vendor/kubedb.dev/apimachinery/crds/catalog.kubedb.com_kafkaconnectorversions.yaml +++ b/vendor/kubedb.dev/apimachinery/crds/catalog.kubedb.com_kafkaconnectorversions.yaml @@ -9,7 +9,7 @@ spec: group: catalog.kubedb.com names: categories: - - datastore + - catalog - kubedb - appscode kind: KafkaConnectorVersion diff --git a/vendor/kubedb.dev/apimachinery/crds/catalog.kubedb.com_kafkaversions.yaml b/vendor/kubedb.dev/apimachinery/crds/catalog.kubedb.com_kafkaversions.yaml index 269f78c1e..8ffc32535 100644 --- a/vendor/kubedb.dev/apimachinery/crds/catalog.kubedb.com_kafkaversions.yaml +++ 
b/vendor/kubedb.dev/apimachinery/crds/catalog.kubedb.com_kafkaversions.yaml @@ -9,7 +9,7 @@ spec: group: catalog.kubedb.com names: categories: - - datastore + - catalog - kubedb - appscode kind: KafkaVersion diff --git a/vendor/kubedb.dev/apimachinery/crds/catalog.kubedb.com_mariadbversions.yaml b/vendor/kubedb.dev/apimachinery/crds/catalog.kubedb.com_mariadbversions.yaml index 61d21dc5b..43d6b1497 100644 --- a/vendor/kubedb.dev/apimachinery/crds/catalog.kubedb.com_mariadbversions.yaml +++ b/vendor/kubedb.dev/apimachinery/crds/catalog.kubedb.com_mariadbversions.yaml @@ -9,7 +9,7 @@ spec: group: catalog.kubedb.com names: categories: - - datastore + - catalog - kubedb - appscode kind: MariaDBVersion diff --git a/vendor/kubedb.dev/apimachinery/crds/catalog.kubedb.com_memcachedversions.yaml b/vendor/kubedb.dev/apimachinery/crds/catalog.kubedb.com_memcachedversions.yaml index 086a3b89f..ad7910b71 100644 --- a/vendor/kubedb.dev/apimachinery/crds/catalog.kubedb.com_memcachedversions.yaml +++ b/vendor/kubedb.dev/apimachinery/crds/catalog.kubedb.com_memcachedversions.yaml @@ -9,7 +9,7 @@ spec: group: catalog.kubedb.com names: categories: - - datastore + - catalog - kubedb - appscode kind: MemcachedVersion diff --git a/vendor/kubedb.dev/apimachinery/crds/catalog.kubedb.com_mongodbversions.yaml b/vendor/kubedb.dev/apimachinery/crds/catalog.kubedb.com_mongodbversions.yaml index b36a2bb80..6e80ac8c5 100644 --- a/vendor/kubedb.dev/apimachinery/crds/catalog.kubedb.com_mongodbversions.yaml +++ b/vendor/kubedb.dev/apimachinery/crds/catalog.kubedb.com_mongodbversions.yaml @@ -9,7 +9,7 @@ spec: group: catalog.kubedb.com names: categories: - - datastore + - catalog - kubedb - appscode kind: MongoDBVersion diff --git a/vendor/kubedb.dev/apimachinery/crds/catalog.kubedb.com_mssqlserverversions.yaml b/vendor/kubedb.dev/apimachinery/crds/catalog.kubedb.com_mssqlserverversions.yaml index 2bd7ea243..034a5e3b5 100644 --- a/vendor/kubedb.dev/apimachinery/crds/catalog.kubedb.com_mssqlserverversions.yaml +++ b/vendor/kubedb.dev/apimachinery/crds/catalog.kubedb.com_mssqlserverversions.yaml @@ -9,7 +9,7 @@ spec: group: catalog.kubedb.com names: categories: - - datastore + - catalog - kubedb - appscode kind: MSSQLServerVersion diff --git a/vendor/kubedb.dev/apimachinery/crds/catalog.kubedb.com_mysqlversions.yaml b/vendor/kubedb.dev/apimachinery/crds/catalog.kubedb.com_mysqlversions.yaml index 281bff963..e36a48d22 100644 --- a/vendor/kubedb.dev/apimachinery/crds/catalog.kubedb.com_mysqlversions.yaml +++ b/vendor/kubedb.dev/apimachinery/crds/catalog.kubedb.com_mysqlversions.yaml @@ -9,7 +9,7 @@ spec: group: catalog.kubedb.com names: categories: - - datastore + - catalog - kubedb - appscode kind: MySQLVersion diff --git a/vendor/kubedb.dev/apimachinery/crds/catalog.kubedb.com_perconaxtradbversions.yaml b/vendor/kubedb.dev/apimachinery/crds/catalog.kubedb.com_perconaxtradbversions.yaml index 8ce688750..6ad716491 100644 --- a/vendor/kubedb.dev/apimachinery/crds/catalog.kubedb.com_perconaxtradbversions.yaml +++ b/vendor/kubedb.dev/apimachinery/crds/catalog.kubedb.com_perconaxtradbversions.yaml @@ -9,7 +9,7 @@ spec: group: catalog.kubedb.com names: categories: - - datastore + - catalog - kubedb - appscode kind: PerconaXtraDBVersion diff --git a/vendor/kubedb.dev/apimachinery/crds/catalog.kubedb.com_pgbouncerversions.yaml b/vendor/kubedb.dev/apimachinery/crds/catalog.kubedb.com_pgbouncerversions.yaml index 14e3f34fc..e4fedcc4c 100644 --- a/vendor/kubedb.dev/apimachinery/crds/catalog.kubedb.com_pgbouncerversions.yaml +++ 
b/vendor/kubedb.dev/apimachinery/crds/catalog.kubedb.com_pgbouncerversions.yaml @@ -9,7 +9,7 @@ spec: group: catalog.kubedb.com names: categories: - - datastore + - catalog - kubedb - appscode kind: PgBouncerVersion diff --git a/vendor/kubedb.dev/apimachinery/crds/catalog.kubedb.com_pgpoolversions.yaml b/vendor/kubedb.dev/apimachinery/crds/catalog.kubedb.com_pgpoolversions.yaml index a210f8680..116fbdc1a 100644 --- a/vendor/kubedb.dev/apimachinery/crds/catalog.kubedb.com_pgpoolversions.yaml +++ b/vendor/kubedb.dev/apimachinery/crds/catalog.kubedb.com_pgpoolversions.yaml @@ -9,7 +9,7 @@ spec: group: catalog.kubedb.com names: categories: - - datastore + - catalog - kubedb - appscode kind: PgpoolVersion diff --git a/vendor/kubedb.dev/apimachinery/crds/catalog.kubedb.com_postgresversions.yaml b/vendor/kubedb.dev/apimachinery/crds/catalog.kubedb.com_postgresversions.yaml index e391a69e7..fe4a99650 100644 --- a/vendor/kubedb.dev/apimachinery/crds/catalog.kubedb.com_postgresversions.yaml +++ b/vendor/kubedb.dev/apimachinery/crds/catalog.kubedb.com_postgresversions.yaml @@ -9,7 +9,7 @@ spec: group: catalog.kubedb.com names: categories: - - datastore + - catalog - kubedb - appscode kind: PostgresVersion diff --git a/vendor/kubedb.dev/apimachinery/crds/catalog.kubedb.com_proxysqlversions.yaml b/vendor/kubedb.dev/apimachinery/crds/catalog.kubedb.com_proxysqlversions.yaml index e7f7d7da5..3fc42065b 100644 --- a/vendor/kubedb.dev/apimachinery/crds/catalog.kubedb.com_proxysqlversions.yaml +++ b/vendor/kubedb.dev/apimachinery/crds/catalog.kubedb.com_proxysqlversions.yaml @@ -9,7 +9,7 @@ spec: group: catalog.kubedb.com names: categories: - - datastore + - catalog - kubedb - appscode kind: ProxySQLVersion diff --git a/vendor/kubedb.dev/apimachinery/crds/catalog.kubedb.com_rabbitmqversions.yaml b/vendor/kubedb.dev/apimachinery/crds/catalog.kubedb.com_rabbitmqversions.yaml index de42dd183..6cec02f08 100644 --- a/vendor/kubedb.dev/apimachinery/crds/catalog.kubedb.com_rabbitmqversions.yaml +++ b/vendor/kubedb.dev/apimachinery/crds/catalog.kubedb.com_rabbitmqversions.yaml @@ -9,7 +9,7 @@ spec: group: catalog.kubedb.com names: categories: - - datastore + - catalog - kubedb - appscode kind: RabbitMQVersion diff --git a/vendor/kubedb.dev/apimachinery/crds/catalog.kubedb.com_redisversions.yaml b/vendor/kubedb.dev/apimachinery/crds/catalog.kubedb.com_redisversions.yaml index 491600223..e8c967406 100644 --- a/vendor/kubedb.dev/apimachinery/crds/catalog.kubedb.com_redisversions.yaml +++ b/vendor/kubedb.dev/apimachinery/crds/catalog.kubedb.com_redisversions.yaml @@ -9,7 +9,7 @@ spec: group: catalog.kubedb.com names: categories: - - datastore + - catalog - kubedb - appscode kind: RedisVersion diff --git a/vendor/kubedb.dev/apimachinery/crds/catalog.kubedb.com_schemaregistryversions.yaml b/vendor/kubedb.dev/apimachinery/crds/catalog.kubedb.com_schemaregistryversions.yaml index 02ca97d01..dc4756de6 100644 --- a/vendor/kubedb.dev/apimachinery/crds/catalog.kubedb.com_schemaregistryversions.yaml +++ b/vendor/kubedb.dev/apimachinery/crds/catalog.kubedb.com_schemaregistryversions.yaml @@ -9,7 +9,7 @@ spec: group: catalog.kubedb.com names: categories: - - datastore + - catalog - kubedb - appscode kind: SchemaRegistryVersion diff --git a/vendor/kubedb.dev/apimachinery/crds/catalog.kubedb.com_singlestoreversions.yaml b/vendor/kubedb.dev/apimachinery/crds/catalog.kubedb.com_singlestoreversions.yaml index 009f45182..1b42d9c04 100644 --- a/vendor/kubedb.dev/apimachinery/crds/catalog.kubedb.com_singlestoreversions.yaml +++ 
b/vendor/kubedb.dev/apimachinery/crds/catalog.kubedb.com_singlestoreversions.yaml @@ -9,7 +9,7 @@ spec: group: catalog.kubedb.com names: categories: - - datastore + - catalog - kubedb - appscode kind: SinglestoreVersion diff --git a/vendor/kubedb.dev/apimachinery/crds/catalog.kubedb.com_solrversions.yaml b/vendor/kubedb.dev/apimachinery/crds/catalog.kubedb.com_solrversions.yaml index 64c7213d2..16bc33b5f 100644 --- a/vendor/kubedb.dev/apimachinery/crds/catalog.kubedb.com_solrversions.yaml +++ b/vendor/kubedb.dev/apimachinery/crds/catalog.kubedb.com_solrversions.yaml @@ -9,7 +9,7 @@ spec: group: catalog.kubedb.com names: categories: - - datastore + - catalog - kubedb - appscode kind: SolrVersion diff --git a/vendor/kubedb.dev/apimachinery/crds/catalog.kubedb.com_zookeeperversions.yaml b/vendor/kubedb.dev/apimachinery/crds/catalog.kubedb.com_zookeeperversions.yaml index 71d521571..f6c55d77e 100644 --- a/vendor/kubedb.dev/apimachinery/crds/catalog.kubedb.com_zookeeperversions.yaml +++ b/vendor/kubedb.dev/apimachinery/crds/catalog.kubedb.com_zookeeperversions.yaml @@ -9,7 +9,7 @@ spec: group: catalog.kubedb.com names: categories: - - datastore + - catalog - kubedb - appscode kind: ZooKeeperVersion diff --git a/vendor/kubedb.dev/apimachinery/crds/elasticsearch.kubedb.com_elasticsearchdashboards.yaml b/vendor/kubedb.dev/apimachinery/crds/elasticsearch.kubedb.com_elasticsearchdashboards.yaml index 57638c327..76fc5366b 100644 --- a/vendor/kubedb.dev/apimachinery/crds/elasticsearch.kubedb.com_elasticsearchdashboards.yaml +++ b/vendor/kubedb.dev/apimachinery/crds/elasticsearch.kubedb.com_elasticsearchdashboards.yaml @@ -8,6 +8,10 @@ metadata: spec: group: elasticsearch.kubedb.com names: + categories: + - esstore + - kubedb + - appscode kind: ElasticsearchDashboard listKind: ElasticsearchDashboardList plural: elasticsearchdashboards diff --git a/vendor/kubedb.dev/apimachinery/crds/kafka.kubedb.com_connectclusters.yaml b/vendor/kubedb.dev/apimachinery/crds/kafka.kubedb.com_connectclusters.yaml index bf7f67511..f466abe10 100644 --- a/vendor/kubedb.dev/apimachinery/crds/kafka.kubedb.com_connectclusters.yaml +++ b/vendor/kubedb.dev/apimachinery/crds/kafka.kubedb.com_connectclusters.yaml @@ -8,6 +8,10 @@ metadata: spec: group: kafka.kubedb.com names: + categories: + - kfstore + - kubedb + - appscode kind: ConnectCluster listKind: ConnectClusterList plural: connectclusters diff --git a/vendor/kubedb.dev/apimachinery/crds/kafka.kubedb.com_connectors.yaml b/vendor/kubedb.dev/apimachinery/crds/kafka.kubedb.com_connectors.yaml index 98c38328a..30b881662 100644 --- a/vendor/kubedb.dev/apimachinery/crds/kafka.kubedb.com_connectors.yaml +++ b/vendor/kubedb.dev/apimachinery/crds/kafka.kubedb.com_connectors.yaml @@ -8,6 +8,10 @@ metadata: spec: group: kafka.kubedb.com names: + categories: + - kfstore + - kubedb + - appscode kind: Connector listKind: ConnectorList plural: connectors diff --git a/vendor/kubedb.dev/apimachinery/crds/kafka.kubedb.com_restproxies.yaml b/vendor/kubedb.dev/apimachinery/crds/kafka.kubedb.com_restproxies.yaml index 957e1274c..eb1c9668f 100644 --- a/vendor/kubedb.dev/apimachinery/crds/kafka.kubedb.com_restproxies.yaml +++ b/vendor/kubedb.dev/apimachinery/crds/kafka.kubedb.com_restproxies.yaml @@ -8,6 +8,10 @@ metadata: spec: group: kafka.kubedb.com names: + categories: + - kfstore + - kubedb + - appscode kind: RestProxy listKind: RestProxyList plural: restproxies diff --git a/vendor/kubedb.dev/apimachinery/crds/kafka.kubedb.com_schemaregistries.yaml 
b/vendor/kubedb.dev/apimachinery/crds/kafka.kubedb.com_schemaregistries.yaml index e0224644d..febb4ff43 100644 --- a/vendor/kubedb.dev/apimachinery/crds/kafka.kubedb.com_schemaregistries.yaml +++ b/vendor/kubedb.dev/apimachinery/crds/kafka.kubedb.com_schemaregistries.yaml @@ -8,6 +8,10 @@ metadata: spec: group: kafka.kubedb.com names: + categories: + - kfstore + - kubedb + - appscode kind: SchemaRegistry listKind: SchemaRegistryList plural: schemaregistries diff --git a/vendor/kubedb.dev/apimachinery/crds/kubedb.com_cassandras.yaml b/vendor/kubedb.dev/apimachinery/crds/kubedb.com_cassandras.yaml new file mode 100644 index 000000000..b395ba98f --- /dev/null +++ b/vendor/kubedb.dev/apimachinery/crds/kubedb.com_cassandras.yaml @@ -0,0 +1,6546 @@ +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + creationTimestamp: null + labels: + app.kubernetes.io/name: kubedb + name: cassandras.kubedb.com +spec: + group: kubedb.com + names: + categories: + - datastore + - kubedb + - appscode + - all + kind: Cassandra + listKind: CassandraList + plural: cassandras + shortNames: + - cs + singular: cassandra + scope: Namespaced + versions: + - additionalPrinterColumns: + - jsonPath: .apiVersion + name: Type + type: string + - jsonPath: .spec.version + name: Version + type: string + - jsonPath: .status.phase + name: Status + type: string + - jsonPath: .metadata.creationTimestamp + name: Age + type: date + name: v1alpha2 + schema: + openAPIV3Schema: + properties: + apiVersion: + type: string + kind: + type: string + metadata: + type: object + spec: + properties: + authSecret: + properties: + externallyManaged: + type: boolean + name: + default: "" + type: string + type: object + x-kubernetes-map-type: atomic + configSecret: + properties: + name: + default: "" + type: string + type: object + x-kubernetes-map-type: atomic + deletionPolicy: + enum: + - Halt + - Delete + - WipeOut + - DoNotTerminate + type: string + disableSecurity: + type: boolean + healthChecker: + default: + failureThreshold: 3 + periodSeconds: 20 + timeoutSeconds: 10 + properties: + disableWriteCheck: + type: boolean + failureThreshold: + default: 1 + format: int32 + type: integer + periodSeconds: + default: 10 + format: int32 + type: integer + timeoutSeconds: + default: 10 + format: int32 + type: integer + type: object + podTemplate: + properties: + controller: + properties: + annotations: + additionalProperties: + type: string + type: object + labels: + additionalProperties: + type: string + type: object + type: object + metadata: + properties: + annotations: + additionalProperties: + type: string + type: object + labels: + additionalProperties: + type: string + type: object + type: object + spec: + properties: + activeDeadlineSeconds: + format: int64 + type: integer + automountServiceAccountToken: + type: boolean + containers: + items: + properties: + args: + items: + type: string + type: array + x-kubernetes-list-type: atomic + command: + items: + type: string + type: array + x-kubernetes-list-type: atomic + env: + items: + properties: + name: + type: string + value: + type: string + valueFrom: + properties: + configMapKeyRef: + properties: + key: + type: string + name: + default: "" + type: string + optional: + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + fieldRef: + properties: + apiVersion: + type: string + fieldPath: + type: string + required: + - fieldPath + type: object + x-kubernetes-map-type: atomic + resourceFieldRef: + properties: + containerName: + type: string + 
divisor: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + type: string + required: + - resource + type: object + x-kubernetes-map-type: atomic + secretKeyRef: + properties: + key: + type: string + name: + default: "" + type: string + optional: + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: object + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + envFrom: + items: + properties: + configMapRef: + properties: + name: + default: "" + type: string + optional: + type: boolean + type: object + x-kubernetes-map-type: atomic + prefix: + type: string + secretRef: + properties: + name: + default: "" + type: string + optional: + type: boolean + type: object + x-kubernetes-map-type: atomic + type: object + type: array + x-kubernetes-list-type: atomic + image: + type: string + imagePullPolicy: + type: string + lifecycle: + properties: + postStart: + properties: + exec: + properties: + command: + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + httpGet: + properties: + host: + type: string + httpHeaders: + items: + properties: + name: + type: string + value: + type: string + required: + - name + - value + type: object + type: array + x-kubernetes-list-type: atomic + path: + type: string + port: + anyOf: + - type: integer + - type: string + x-kubernetes-int-or-string: true + scheme: + type: string + required: + - port + type: object + sleep: + properties: + seconds: + format: int64 + type: integer + required: + - seconds + type: object + tcpSocket: + properties: + host: + type: string + port: + anyOf: + - type: integer + - type: string + x-kubernetes-int-or-string: true + required: + - port + type: object + type: object + preStop: + properties: + exec: + properties: + command: + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + httpGet: + properties: + host: + type: string + httpHeaders: + items: + properties: + name: + type: string + value: + type: string + required: + - name + - value + type: object + type: array + x-kubernetes-list-type: atomic + path: + type: string + port: + anyOf: + - type: integer + - type: string + x-kubernetes-int-or-string: true + scheme: + type: string + required: + - port + type: object + sleep: + properties: + seconds: + format: int64 + type: integer + required: + - seconds + type: object + tcpSocket: + properties: + host: + type: string + port: + anyOf: + - type: integer + - type: string + x-kubernetes-int-or-string: true + required: + - port + type: object + type: object + type: object + livenessProbe: + properties: + exec: + properties: + command: + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + failureThreshold: + format: int32 + type: integer + grpc: + properties: + port: + format: int32 + type: integer + service: + type: string + required: + - port + type: object + httpGet: + properties: + host: + type: string + httpHeaders: + items: + properties: + name: + type: string + value: + type: string + required: + - name + - value + type: object + type: array + x-kubernetes-list-type: atomic + path: + type: string + port: + anyOf: + - type: integer + - type: string + x-kubernetes-int-or-string: true + scheme: + type: string + required: + - port + type: object + initialDelaySeconds: + 
format: int32 + type: integer + periodSeconds: + format: int32 + type: integer + successThreshold: + format: int32 + type: integer + tcpSocket: + properties: + host: + type: string + port: + anyOf: + - type: integer + - type: string + x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + format: int64 + type: integer + timeoutSeconds: + format: int32 + type: integer + type: object + name: + type: string + ports: + items: + properties: + containerPort: + format: int32 + type: integer + hostIP: + type: string + hostPort: + format: int32 + type: integer + name: + type: string + protocol: + default: TCP + type: string + required: + - containerPort + type: object + type: array + x-kubernetes-list-map-keys: + - containerPort + - protocol + x-kubernetes-list-type: map + readinessProbe: + properties: + exec: + properties: + command: + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + failureThreshold: + format: int32 + type: integer + grpc: + properties: + port: + format: int32 + type: integer + service: + type: string + required: + - port + type: object + httpGet: + properties: + host: + type: string + httpHeaders: + items: + properties: + name: + type: string + value: + type: string + required: + - name + - value + type: object + type: array + x-kubernetes-list-type: atomic + path: + type: string + port: + anyOf: + - type: integer + - type: string + x-kubernetes-int-or-string: true + scheme: + type: string + required: + - port + type: object + initialDelaySeconds: + format: int32 + type: integer + periodSeconds: + format: int32 + type: integer + successThreshold: + format: int32 + type: integer + tcpSocket: + properties: + host: + type: string + port: + anyOf: + - type: integer + - type: string + x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + format: int64 + type: integer + timeoutSeconds: + format: int32 + type: integer + type: object + resizePolicy: + items: + properties: + resourceName: + type: string + restartPolicy: + type: string + required: + - resourceName + - restartPolicy + type: object + type: array + x-kubernetes-list-type: atomic + resources: + properties: + claims: + items: + properties: + name: + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: object + type: object + restartPolicy: + type: string + securityContext: + properties: + allowPrivilegeEscalation: + type: boolean + appArmorProfile: + properties: + localhostProfile: + type: string + type: + type: string + required: + - type + type: object + capabilities: + properties: + add: + items: + type: string + type: array + x-kubernetes-list-type: atomic + drop: + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + privileged: + type: boolean + procMount: + type: string + readOnlyRootFilesystem: + type: boolean + runAsGroup: + format: int64 + type: integer + runAsNonRoot: + type: 
boolean + runAsUser: + format: int64 + type: integer + seLinuxOptions: + properties: + level: + type: string + role: + type: string + type: + type: string + user: + type: string + type: object + seccompProfile: + properties: + localhostProfile: + type: string + type: + type: string + required: + - type + type: object + windowsOptions: + properties: + gmsaCredentialSpec: + type: string + gmsaCredentialSpecName: + type: string + hostProcess: + type: boolean + runAsUserName: + type: string + type: object + type: object + startupProbe: + properties: + exec: + properties: + command: + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + failureThreshold: + format: int32 + type: integer + grpc: + properties: + port: + format: int32 + type: integer + service: + type: string + required: + - port + type: object + httpGet: + properties: + host: + type: string + httpHeaders: + items: + properties: + name: + type: string + value: + type: string + required: + - name + - value + type: object + type: array + x-kubernetes-list-type: atomic + path: + type: string + port: + anyOf: + - type: integer + - type: string + x-kubernetes-int-or-string: true + scheme: + type: string + required: + - port + type: object + initialDelaySeconds: + format: int32 + type: integer + periodSeconds: + format: int32 + type: integer + successThreshold: + format: int32 + type: integer + tcpSocket: + properties: + host: + type: string + port: + anyOf: + - type: integer + - type: string + x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + format: int64 + type: integer + timeoutSeconds: + format: int32 + type: integer + type: object + stdin: + type: boolean + stdinOnce: + type: boolean + terminationMessagePath: + type: string + terminationMessagePolicy: + type: string + tty: + type: boolean + volumeDevices: + items: + properties: + devicePath: + type: string + name: + type: string + required: + - devicePath + - name + type: object + type: array + x-kubernetes-list-map-keys: + - devicePath + x-kubernetes-list-type: map + volumeMounts: + items: + properties: + mountPath: + type: string + mountPropagation: + type: string + name: + type: string + readOnly: + type: boolean + recursiveReadOnly: + type: string + subPath: + type: string + subPathExpr: + type: string + required: + - mountPath + - name + type: object + type: array + x-kubernetes-list-map-keys: + - mountPath + x-kubernetes-list-type: map + workingDir: + type: string + required: + - name + type: object + type: array + dnsConfig: + properties: + nameservers: + items: + type: string + type: array + x-kubernetes-list-type: atomic + options: + items: + properties: + name: + type: string + value: + type: string + type: object + type: array + x-kubernetes-list-type: atomic + searches: + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + dnsPolicy: + type: string + enableServiceLinks: + type: boolean + ephemeralContainers: + items: + properties: + args: + items: + type: string + type: array + x-kubernetes-list-type: atomic + command: + items: + type: string + type: array + x-kubernetes-list-type: atomic + env: + items: + properties: + name: + type: string + value: + type: string + valueFrom: + properties: + configMapKeyRef: + properties: + key: + type: string + name: + default: "" + type: string + optional: + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + fieldRef: + properties: + apiVersion: + type: string + fieldPath: + type: 
string + required: + - fieldPath + type: object + x-kubernetes-map-type: atomic + resourceFieldRef: + properties: + containerName: + type: string + divisor: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + type: string + required: + - resource + type: object + x-kubernetes-map-type: atomic + secretKeyRef: + properties: + key: + type: string + name: + default: "" + type: string + optional: + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: object + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + envFrom: + items: + properties: + configMapRef: + properties: + name: + default: "" + type: string + optional: + type: boolean + type: object + x-kubernetes-map-type: atomic + prefix: + type: string + secretRef: + properties: + name: + default: "" + type: string + optional: + type: boolean + type: object + x-kubernetes-map-type: atomic + type: object + type: array + x-kubernetes-list-type: atomic + image: + type: string + imagePullPolicy: + type: string + lifecycle: + properties: + postStart: + properties: + exec: + properties: + command: + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + httpGet: + properties: + host: + type: string + httpHeaders: + items: + properties: + name: + type: string + value: + type: string + required: + - name + - value + type: object + type: array + x-kubernetes-list-type: atomic + path: + type: string + port: + anyOf: + - type: integer + - type: string + x-kubernetes-int-or-string: true + scheme: + type: string + required: + - port + type: object + sleep: + properties: + seconds: + format: int64 + type: integer + required: + - seconds + type: object + tcpSocket: + properties: + host: + type: string + port: + anyOf: + - type: integer + - type: string + x-kubernetes-int-or-string: true + required: + - port + type: object + type: object + preStop: + properties: + exec: + properties: + command: + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + httpGet: + properties: + host: + type: string + httpHeaders: + items: + properties: + name: + type: string + value: + type: string + required: + - name + - value + type: object + type: array + x-kubernetes-list-type: atomic + path: + type: string + port: + anyOf: + - type: integer + - type: string + x-kubernetes-int-or-string: true + scheme: + type: string + required: + - port + type: object + sleep: + properties: + seconds: + format: int64 + type: integer + required: + - seconds + type: object + tcpSocket: + properties: + host: + type: string + port: + anyOf: + - type: integer + - type: string + x-kubernetes-int-or-string: true + required: + - port + type: object + type: object + type: object + livenessProbe: + properties: + exec: + properties: + command: + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + failureThreshold: + format: int32 + type: integer + grpc: + properties: + port: + format: int32 + type: integer + service: + type: string + required: + - port + type: object + httpGet: + properties: + host: + type: string + httpHeaders: + items: + properties: + name: + type: string + value: + type: string + required: + - name + - value + type: object + type: array + x-kubernetes-list-type: atomic + path: + type: string + port: + anyOf: + - type: 
integer + - type: string + x-kubernetes-int-or-string: true + scheme: + type: string + required: + - port + type: object + initialDelaySeconds: + format: int32 + type: integer + periodSeconds: + format: int32 + type: integer + successThreshold: + format: int32 + type: integer + tcpSocket: + properties: + host: + type: string + port: + anyOf: + - type: integer + - type: string + x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + format: int64 + type: integer + timeoutSeconds: + format: int32 + type: integer + type: object + name: + type: string + ports: + items: + properties: + containerPort: + format: int32 + type: integer + hostIP: + type: string + hostPort: + format: int32 + type: integer + name: + type: string + protocol: + default: TCP + type: string + required: + - containerPort + type: object + type: array + x-kubernetes-list-map-keys: + - containerPort + - protocol + x-kubernetes-list-type: map + readinessProbe: + properties: + exec: + properties: + command: + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + failureThreshold: + format: int32 + type: integer + grpc: + properties: + port: + format: int32 + type: integer + service: + type: string + required: + - port + type: object + httpGet: + properties: + host: + type: string + httpHeaders: + items: + properties: + name: + type: string + value: + type: string + required: + - name + - value + type: object + type: array + x-kubernetes-list-type: atomic + path: + type: string + port: + anyOf: + - type: integer + - type: string + x-kubernetes-int-or-string: true + scheme: + type: string + required: + - port + type: object + initialDelaySeconds: + format: int32 + type: integer + periodSeconds: + format: int32 + type: integer + successThreshold: + format: int32 + type: integer + tcpSocket: + properties: + host: + type: string + port: + anyOf: + - type: integer + - type: string + x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + format: int64 + type: integer + timeoutSeconds: + format: int32 + type: integer + type: object + resizePolicy: + items: + properties: + resourceName: + type: string + restartPolicy: + type: string + required: + - resourceName + - restartPolicy + type: object + type: array + x-kubernetes-list-type: atomic + resources: + properties: + claims: + items: + properties: + name: + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: object + type: object + restartPolicy: + type: string + securityContext: + properties: + allowPrivilegeEscalation: + type: boolean + appArmorProfile: + properties: + localhostProfile: + type: string + type: + type: string + required: + - type + type: object + capabilities: + properties: + add: + items: + type: string + type: array + x-kubernetes-list-type: atomic + drop: + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + privileged: + type: boolean 
+ procMount: + type: string + readOnlyRootFilesystem: + type: boolean + runAsGroup: + format: int64 + type: integer + runAsNonRoot: + type: boolean + runAsUser: + format: int64 + type: integer + seLinuxOptions: + properties: + level: + type: string + role: + type: string + type: + type: string + user: + type: string + type: object + seccompProfile: + properties: + localhostProfile: + type: string + type: + type: string + required: + - type + type: object + windowsOptions: + properties: + gmsaCredentialSpec: + type: string + gmsaCredentialSpecName: + type: string + hostProcess: + type: boolean + runAsUserName: + type: string + type: object + type: object + startupProbe: + properties: + exec: + properties: + command: + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + failureThreshold: + format: int32 + type: integer + grpc: + properties: + port: + format: int32 + type: integer + service: + type: string + required: + - port + type: object + httpGet: + properties: + host: + type: string + httpHeaders: + items: + properties: + name: + type: string + value: + type: string + required: + - name + - value + type: object + type: array + x-kubernetes-list-type: atomic + path: + type: string + port: + anyOf: + - type: integer + - type: string + x-kubernetes-int-or-string: true + scheme: + type: string + required: + - port + type: object + initialDelaySeconds: + format: int32 + type: integer + periodSeconds: + format: int32 + type: integer + successThreshold: + format: int32 + type: integer + tcpSocket: + properties: + host: + type: string + port: + anyOf: + - type: integer + - type: string + x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + format: int64 + type: integer + timeoutSeconds: + format: int32 + type: integer + type: object + stdin: + type: boolean + stdinOnce: + type: boolean + targetContainerName: + type: string + terminationMessagePath: + type: string + terminationMessagePolicy: + type: string + tty: + type: boolean + volumeDevices: + items: + properties: + devicePath: + type: string + name: + type: string + required: + - devicePath + - name + type: object + type: array + x-kubernetes-list-map-keys: + - devicePath + x-kubernetes-list-type: map + volumeMounts: + items: + properties: + mountPath: + type: string + mountPropagation: + type: string + name: + type: string + readOnly: + type: boolean + recursiveReadOnly: + type: string + subPath: + type: string + subPathExpr: + type: string + required: + - mountPath + - name + type: object + type: array + x-kubernetes-list-map-keys: + - mountPath + x-kubernetes-list-type: map + workingDir: + type: string + required: + - name + type: object + type: array + hostAliases: + items: + properties: + hostnames: + items: + type: string + type: array + x-kubernetes-list-type: atomic + ip: + type: string + required: + - ip + type: object + type: array + hostIPC: + type: boolean + hostNetwork: + type: boolean + hostPID: + type: boolean + hostUsers: + type: boolean + imagePullSecrets: + items: + properties: + name: + default: "" + type: string + type: object + x-kubernetes-map-type: atomic + type: array + initContainers: + items: + properties: + args: + items: + type: string + type: array + x-kubernetes-list-type: atomic + command: + items: + type: string + type: array + x-kubernetes-list-type: atomic + env: + items: + properties: + name: + type: string + value: + type: string + valueFrom: + properties: + configMapKeyRef: + properties: + key: + type: string + name: + 
default: "" + type: string + optional: + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + fieldRef: + properties: + apiVersion: + type: string + fieldPath: + type: string + required: + - fieldPath + type: object + x-kubernetes-map-type: atomic + resourceFieldRef: + properties: + containerName: + type: string + divisor: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + type: string + required: + - resource + type: object + x-kubernetes-map-type: atomic + secretKeyRef: + properties: + key: + type: string + name: + default: "" + type: string + optional: + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: object + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + envFrom: + items: + properties: + configMapRef: + properties: + name: + default: "" + type: string + optional: + type: boolean + type: object + x-kubernetes-map-type: atomic + prefix: + type: string + secretRef: + properties: + name: + default: "" + type: string + optional: + type: boolean + type: object + x-kubernetes-map-type: atomic + type: object + type: array + x-kubernetes-list-type: atomic + image: + type: string + imagePullPolicy: + type: string + lifecycle: + properties: + postStart: + properties: + exec: + properties: + command: + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + httpGet: + properties: + host: + type: string + httpHeaders: + items: + properties: + name: + type: string + value: + type: string + required: + - name + - value + type: object + type: array + x-kubernetes-list-type: atomic + path: + type: string + port: + anyOf: + - type: integer + - type: string + x-kubernetes-int-or-string: true + scheme: + type: string + required: + - port + type: object + sleep: + properties: + seconds: + format: int64 + type: integer + required: + - seconds + type: object + tcpSocket: + properties: + host: + type: string + port: + anyOf: + - type: integer + - type: string + x-kubernetes-int-or-string: true + required: + - port + type: object + type: object + preStop: + properties: + exec: + properties: + command: + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + httpGet: + properties: + host: + type: string + httpHeaders: + items: + properties: + name: + type: string + value: + type: string + required: + - name + - value + type: object + type: array + x-kubernetes-list-type: atomic + path: + type: string + port: + anyOf: + - type: integer + - type: string + x-kubernetes-int-or-string: true + scheme: + type: string + required: + - port + type: object + sleep: + properties: + seconds: + format: int64 + type: integer + required: + - seconds + type: object + tcpSocket: + properties: + host: + type: string + port: + anyOf: + - type: integer + - type: string + x-kubernetes-int-or-string: true + required: + - port + type: object + type: object + type: object + livenessProbe: + properties: + exec: + properties: + command: + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + failureThreshold: + format: int32 + type: integer + grpc: + properties: + port: + format: int32 + type: integer + service: + type: string + required: + - port + type: object + httpGet: + properties: + host: + type: string + httpHeaders: + items: + 
properties: + name: + type: string + value: + type: string + required: + - name + - value + type: object + type: array + x-kubernetes-list-type: atomic + path: + type: string + port: + anyOf: + - type: integer + - type: string + x-kubernetes-int-or-string: true + scheme: + type: string + required: + - port + type: object + initialDelaySeconds: + format: int32 + type: integer + periodSeconds: + format: int32 + type: integer + successThreshold: + format: int32 + type: integer + tcpSocket: + properties: + host: + type: string + port: + anyOf: + - type: integer + - type: string + x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + format: int64 + type: integer + timeoutSeconds: + format: int32 + type: integer + type: object + name: + type: string + ports: + items: + properties: + containerPort: + format: int32 + type: integer + hostIP: + type: string + hostPort: + format: int32 + type: integer + name: + type: string + protocol: + default: TCP + type: string + required: + - containerPort + type: object + type: array + x-kubernetes-list-map-keys: + - containerPort + - protocol + x-kubernetes-list-type: map + readinessProbe: + properties: + exec: + properties: + command: + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + failureThreshold: + format: int32 + type: integer + grpc: + properties: + port: + format: int32 + type: integer + service: + type: string + required: + - port + type: object + httpGet: + properties: + host: + type: string + httpHeaders: + items: + properties: + name: + type: string + value: + type: string + required: + - name + - value + type: object + type: array + x-kubernetes-list-type: atomic + path: + type: string + port: + anyOf: + - type: integer + - type: string + x-kubernetes-int-or-string: true + scheme: + type: string + required: + - port + type: object + initialDelaySeconds: + format: int32 + type: integer + periodSeconds: + format: int32 + type: integer + successThreshold: + format: int32 + type: integer + tcpSocket: + properties: + host: + type: string + port: + anyOf: + - type: integer + - type: string + x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + format: int64 + type: integer + timeoutSeconds: + format: int32 + type: integer + type: object + resizePolicy: + items: + properties: + resourceName: + type: string + restartPolicy: + type: string + required: + - resourceName + - restartPolicy + type: object + type: array + x-kubernetes-list-type: atomic + resources: + properties: + claims: + items: + properties: + name: + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: object + type: object + restartPolicy: + type: string + securityContext: + properties: + allowPrivilegeEscalation: + type: boolean + appArmorProfile: + properties: + localhostProfile: + type: string + type: + type: string + required: + - type + type: object + capabilities: + properties: 
+ add: + items: + type: string + type: array + x-kubernetes-list-type: atomic + drop: + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + privileged: + type: boolean + procMount: + type: string + readOnlyRootFilesystem: + type: boolean + runAsGroup: + format: int64 + type: integer + runAsNonRoot: + type: boolean + runAsUser: + format: int64 + type: integer + seLinuxOptions: + properties: + level: + type: string + role: + type: string + type: + type: string + user: + type: string + type: object + seccompProfile: + properties: + localhostProfile: + type: string + type: + type: string + required: + - type + type: object + windowsOptions: + properties: + gmsaCredentialSpec: + type: string + gmsaCredentialSpecName: + type: string + hostProcess: + type: boolean + runAsUserName: + type: string + type: object + type: object + startupProbe: + properties: + exec: + properties: + command: + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + failureThreshold: + format: int32 + type: integer + grpc: + properties: + port: + format: int32 + type: integer + service: + type: string + required: + - port + type: object + httpGet: + properties: + host: + type: string + httpHeaders: + items: + properties: + name: + type: string + value: + type: string + required: + - name + - value + type: object + type: array + x-kubernetes-list-type: atomic + path: + type: string + port: + anyOf: + - type: integer + - type: string + x-kubernetes-int-or-string: true + scheme: + type: string + required: + - port + type: object + initialDelaySeconds: + format: int32 + type: integer + periodSeconds: + format: int32 + type: integer + successThreshold: + format: int32 + type: integer + tcpSocket: + properties: + host: + type: string + port: + anyOf: + - type: integer + - type: string + x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + format: int64 + type: integer + timeoutSeconds: + format: int32 + type: integer + type: object + stdin: + type: boolean + stdinOnce: + type: boolean + terminationMessagePath: + type: string + terminationMessagePolicy: + type: string + tty: + type: boolean + volumeDevices: + items: + properties: + devicePath: + type: string + name: + type: string + required: + - devicePath + - name + type: object + type: array + x-kubernetes-list-map-keys: + - devicePath + x-kubernetes-list-type: map + volumeMounts: + items: + properties: + mountPath: + type: string + mountPropagation: + type: string + name: + type: string + readOnly: + type: boolean + recursiveReadOnly: + type: string + subPath: + type: string + subPathExpr: + type: string + required: + - mountPath + - name + type: object + type: array + x-kubernetes-list-map-keys: + - mountPath + x-kubernetes-list-type: map + workingDir: + type: string + required: + - name + type: object + type: array + nodeName: + type: string + nodeSelector: + additionalProperties: + type: string + type: object + x-kubernetes-map-type: atomic + os: + properties: + name: + type: string + required: + - name + type: object + overhead: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: object + podPlacementPolicy: + default: + name: default + properties: + name: + default: "" + type: string + type: object + x-kubernetes-map-type: atomic + preemptionPolicy: + type: string + 
priority: + format: int32 + type: integer + priorityClassName: + type: string + readinessGates: + items: + properties: + conditionType: + type: string + required: + - conditionType + type: object + type: array + restartPolicy: + type: string + runtimeClassName: + type: string + schedulerName: + type: string + securityContext: + properties: + appArmorProfile: + properties: + localhostProfile: + type: string + type: + type: string + required: + - type + type: object + fsGroup: + format: int64 + type: integer + fsGroupChangePolicy: + type: string + runAsGroup: + format: int64 + type: integer + runAsNonRoot: + type: boolean + runAsUser: + format: int64 + type: integer + seLinuxOptions: + properties: + level: + type: string + role: + type: string + type: + type: string + user: + type: string + type: object + seccompProfile: + properties: + localhostProfile: + type: string + type: + type: string + required: + - type + type: object + supplementalGroups: + items: + format: int64 + type: integer + type: array + x-kubernetes-list-type: atomic + sysctls: + items: + properties: + name: + type: string + value: + type: string + required: + - name + - value + type: object + type: array + x-kubernetes-list-type: atomic + windowsOptions: + properties: + gmsaCredentialSpec: + type: string + gmsaCredentialSpecName: + type: string + hostProcess: + type: boolean + runAsUserName: + type: string + type: object + type: object + serviceAccountName: + type: string + setHostnameAsFQDN: + type: boolean + shareProcessNamespace: + type: boolean + terminationGracePeriodSeconds: + format: int64 + type: integer + tolerations: + items: + properties: + effect: + type: string + key: + type: string + operator: + type: string + tolerationSeconds: + format: int64 + type: integer + value: + type: string + type: object + type: array + volumes: + items: + properties: + awsElasticBlockStore: + properties: + fsType: + type: string + partition: + format: int32 + type: integer + readOnly: + type: boolean + volumeID: + type: string + required: + - volumeID + type: object + azureDisk: + properties: + cachingMode: + type: string + diskName: + type: string + diskURI: + type: string + fsType: + type: string + kind: + type: string + readOnly: + type: boolean + required: + - diskName + - diskURI + type: object + azureFile: + properties: + readOnly: + type: boolean + secretName: + type: string + shareName: + type: string + required: + - secretName + - shareName + type: object + cephfs: + properties: + monitors: + items: + type: string + type: array + x-kubernetes-list-type: atomic + path: + type: string + readOnly: + type: boolean + secretFile: + type: string + secretRef: + properties: + name: + default: "" + type: string + type: object + x-kubernetes-map-type: atomic + user: + type: string + required: + - monitors + type: object + cinder: + properties: + fsType: + type: string + readOnly: + type: boolean + secretRef: + properties: + name: + default: "" + type: string + type: object + x-kubernetes-map-type: atomic + volumeID: + type: string + required: + - volumeID + type: object + configMap: + properties: + defaultMode: + format: int32 + type: integer + items: + items: + properties: + key: + type: string + mode: + format: int32 + type: integer + path: + type: string + required: + - key + - path + type: object + type: array + x-kubernetes-list-type: atomic + name: + default: "" + type: string + optional: + type: boolean + type: object + x-kubernetes-map-type: atomic + csi: + properties: + driver: + type: string + fsType: + type: string + 
nodePublishSecretRef: + properties: + name: + default: "" + type: string + type: object + x-kubernetes-map-type: atomic + readOnly: + type: boolean + volumeAttributes: + additionalProperties: + type: string + type: object + required: + - driver + type: object + downwardAPI: + properties: + defaultMode: + format: int32 + type: integer + items: + items: + properties: + fieldRef: + properties: + apiVersion: + type: string + fieldPath: + type: string + required: + - fieldPath + type: object + x-kubernetes-map-type: atomic + mode: + format: int32 + type: integer + path: + type: string + resourceFieldRef: + properties: + containerName: + type: string + divisor: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + type: string + required: + - resource + type: object + x-kubernetes-map-type: atomic + required: + - path + type: object + type: array + x-kubernetes-list-type: atomic + type: object + emptyDir: + properties: + medium: + type: string + sizeLimit: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: object + ephemeral: + properties: + volumeClaimTemplate: + properties: + metadata: + properties: + annotations: + additionalProperties: + type: string + type: object + generateName: + type: string + labels: + additionalProperties: + type: string + type: object + name: + type: string + namespace: + type: string + ownerReferences: + items: + properties: + apiVersion: + type: string + blockOwnerDeletion: + type: boolean + controller: + type: boolean + kind: + type: string + name: + type: string + uid: + type: string + required: + - apiVersion + - kind + - name + - uid + type: object + x-kubernetes-map-type: atomic + type: array + type: object + spec: + properties: + accessModes: + items: + type: string + type: array + x-kubernetes-list-type: atomic + dataSource: + properties: + apiGroup: + type: string + kind: + type: string + name: + type: string + required: + - kind + - name + type: object + x-kubernetes-map-type: atomic + dataSourceRef: + properties: + apiGroup: + type: string + kind: + type: string + name: + type: string + namespace: + type: string + required: + - kind + - name + type: object + resources: + properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: object + type: object + selector: + properties: + matchExpressions: + items: + properties: + key: + type: string + operator: + type: string + values: + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + type: object + type: object + x-kubernetes-map-type: atomic + storageClassName: + type: string + volumeAttributesClassName: + type: string + volumeMode: + type: string + volumeName: + type: string + 
type: object + required: + - spec + type: object + type: object + fc: + properties: + fsType: + type: string + lun: + format: int32 + type: integer + readOnly: + type: boolean + targetWWNs: + items: + type: string + type: array + x-kubernetes-list-type: atomic + wwids: + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + flexVolume: + properties: + driver: + type: string + fsType: + type: string + options: + additionalProperties: + type: string + type: object + readOnly: + type: boolean + secretRef: + properties: + name: + default: "" + type: string + type: object + x-kubernetes-map-type: atomic + required: + - driver + type: object + flocker: + properties: + datasetName: + type: string + datasetUUID: + type: string + type: object + gcePersistentDisk: + properties: + fsType: + type: string + partition: + format: int32 + type: integer + pdName: + type: string + readOnly: + type: boolean + required: + - pdName + type: object + glusterfs: + properties: + endpoints: + type: string + path: + type: string + readOnly: + type: boolean + required: + - endpoints + - path + type: object + hostPath: + properties: + path: + type: string + type: + type: string + required: + - path + type: object + iscsi: + properties: + chapAuthDiscovery: + type: boolean + chapAuthSession: + type: boolean + fsType: + type: string + initiatorName: + type: string + iqn: + type: string + iscsiInterface: + type: string + lun: + format: int32 + type: integer + portals: + items: + type: string + type: array + x-kubernetes-list-type: atomic + readOnly: + type: boolean + secretRef: + properties: + name: + default: "" + type: string + type: object + x-kubernetes-map-type: atomic + targetPortal: + type: string + required: + - iqn + - lun + - targetPortal + type: object + name: + type: string + nfs: + properties: + path: + type: string + readOnly: + type: boolean + server: + type: string + required: + - path + - server + type: object + persistentVolumeClaim: + properties: + claimName: + type: string + readOnly: + type: boolean + required: + - claimName + type: object + photonPersistentDisk: + properties: + fsType: + type: string + pdID: + type: string + required: + - pdID + type: object + portworxVolume: + properties: + fsType: + type: string + readOnly: + type: boolean + volumeID: + type: string + required: + - volumeID + type: object + projected: + properties: + defaultMode: + format: int32 + type: integer + sources: + items: + properties: + clusterTrustBundle: + properties: + labelSelector: + properties: + matchExpressions: + items: + properties: + key: + type: string + operator: + type: string + values: + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + type: object + type: object + x-kubernetes-map-type: atomic + name: + type: string + optional: + type: boolean + path: + type: string + signerName: + type: string + required: + - path + type: object + configMap: + properties: + items: + items: + properties: + key: + type: string + mode: + format: int32 + type: integer + path: + type: string + required: + - key + - path + type: object + type: array + x-kubernetes-list-type: atomic + name: + default: "" + type: string + optional: + type: boolean + type: object + x-kubernetes-map-type: atomic + downwardAPI: + properties: + items: + items: + properties: + fieldRef: + properties: + apiVersion: + type: string + fieldPath: + type: 
string + required: + - fieldPath + type: object + x-kubernetes-map-type: atomic + mode: + format: int32 + type: integer + path: + type: string + resourceFieldRef: + properties: + containerName: + type: string + divisor: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + type: string + required: + - resource + type: object + x-kubernetes-map-type: atomic + required: + - path + type: object + type: array + x-kubernetes-list-type: atomic + type: object + secret: + properties: + items: + items: + properties: + key: + type: string + mode: + format: int32 + type: integer + path: + type: string + required: + - key + - path + type: object + type: array + x-kubernetes-list-type: atomic + name: + default: "" + type: string + optional: + type: boolean + type: object + x-kubernetes-map-type: atomic + serviceAccountToken: + properties: + audience: + type: string + expirationSeconds: + format: int64 + type: integer + path: + type: string + required: + - path + type: object + type: object + type: array + x-kubernetes-list-type: atomic + type: object + quobyte: + properties: + group: + type: string + readOnly: + type: boolean + registry: + type: string + tenant: + type: string + user: + type: string + volume: + type: string + required: + - registry + - volume + type: object + rbd: + properties: + fsType: + type: string + image: + type: string + keyring: + type: string + monitors: + items: + type: string + type: array + x-kubernetes-list-type: atomic + pool: + type: string + readOnly: + type: boolean + secretRef: + properties: + name: + default: "" + type: string + type: object + x-kubernetes-map-type: atomic + user: + type: string + required: + - image + - monitors + type: object + scaleIO: + properties: + fsType: + type: string + gateway: + type: string + protectionDomain: + type: string + readOnly: + type: boolean + secretRef: + properties: + name: + default: "" + type: string + type: object + x-kubernetes-map-type: atomic + sslEnabled: + type: boolean + storageMode: + type: string + storagePool: + type: string + system: + type: string + volumeName: + type: string + required: + - gateway + - secretRef + - system + type: object + secret: + properties: + defaultMode: + format: int32 + type: integer + items: + items: + properties: + key: + type: string + mode: + format: int32 + type: integer + path: + type: string + required: + - key + - path + type: object + type: array + x-kubernetes-list-type: atomic + optional: + type: boolean + secretName: + type: string + type: object + storageos: + properties: + fsType: + type: string + readOnly: + type: boolean + secretRef: + properties: + name: + default: "" + type: string + type: object + x-kubernetes-map-type: atomic + volumeName: + type: string + volumeNamespace: + type: string + type: object + vsphereVolume: + properties: + fsType: + type: string + storagePolicyID: + type: string + storagePolicyName: + type: string + volumePath: + type: string + required: + - volumePath + type: object + required: + - name + type: object + type: array + type: object + type: object + replicas: + format: int32 + type: integer + serviceTemplates: + items: + properties: + alias: + enum: + - primary + - standby + - stats + - dashboard + type: string + metadata: + properties: + annotations: + additionalProperties: + type: string + type: object + labels: + additionalProperties: + type: string + type: object + type: 
object + spec: + properties: + clusterIP: + type: string + externalIPs: + items: + type: string + type: array + externalTrafficPolicy: + type: string + healthCheckNodePort: + format: int32 + type: integer + loadBalancerIP: + type: string + loadBalancerSourceRanges: + items: + type: string + type: array + ports: + items: + properties: + name: + type: string + nodePort: + format: int32 + type: integer + port: + format: int32 + type: integer + required: + - port + type: object + type: array + sessionAffinityConfig: + properties: + clientIP: + properties: + timeoutSeconds: + format: int32 + type: integer + type: object + type: object + type: + type: string + type: object + required: + - alias + type: object + type: array + storage: + properties: + accessModes: + items: + type: string + type: array + x-kubernetes-list-type: atomic + dataSource: + properties: + apiGroup: + type: string + kind: + type: string + name: + type: string + required: + - kind + - name + type: object + x-kubernetes-map-type: atomic + dataSourceRef: + properties: + apiGroup: + type: string + kind: + type: string + name: + type: string + namespace: + type: string + required: + - kind + - name + type: object + resources: + properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: object + type: object + selector: + properties: + matchExpressions: + items: + properties: + key: + type: string + operator: + type: string + values: + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + type: object + type: object + x-kubernetes-map-type: atomic + storageClassName: + type: string + volumeAttributesClassName: + type: string + volumeMode: + type: string + volumeName: + type: string + type: object + storageType: + enum: + - Durable + - Ephemeral + type: string + topology: + properties: + rack: + items: + properties: + name: + type: string + podTemplate: + properties: + controller: + properties: + annotations: + additionalProperties: + type: string + type: object + labels: + additionalProperties: + type: string + type: object + type: object + metadata: + properties: + annotations: + additionalProperties: + type: string + type: object + labels: + additionalProperties: + type: string + type: object + type: object + spec: + properties: + activeDeadlineSeconds: + format: int64 + type: integer + automountServiceAccountToken: + type: boolean + containers: + items: + properties: + args: + items: + type: string + type: array + x-kubernetes-list-type: atomic + command: + items: + type: string + type: array + x-kubernetes-list-type: atomic + env: + items: + properties: + name: + type: string + value: + type: string + valueFrom: + properties: + configMapKeyRef: + properties: + key: + type: string + name: + default: "" + type: string + optional: + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + fieldRef: + properties: + apiVersion: + type: string + fieldPath: + type: string + required: + - fieldPath + 
type: object + x-kubernetes-map-type: atomic + resourceFieldRef: + properties: + containerName: + type: string + divisor: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + type: string + required: + - resource + type: object + x-kubernetes-map-type: atomic + secretKeyRef: + properties: + key: + type: string + name: + default: "" + type: string + optional: + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: object + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + envFrom: + items: + properties: + configMapRef: + properties: + name: + default: "" + type: string + optional: + type: boolean + type: object + x-kubernetes-map-type: atomic + prefix: + type: string + secretRef: + properties: + name: + default: "" + type: string + optional: + type: boolean + type: object + x-kubernetes-map-type: atomic + type: object + type: array + x-kubernetes-list-type: atomic + image: + type: string + imagePullPolicy: + type: string + lifecycle: + properties: + postStart: + properties: + exec: + properties: + command: + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + httpGet: + properties: + host: + type: string + httpHeaders: + items: + properties: + name: + type: string + value: + type: string + required: + - name + - value + type: object + type: array + x-kubernetes-list-type: atomic + path: + type: string + port: + anyOf: + - type: integer + - type: string + x-kubernetes-int-or-string: true + scheme: + type: string + required: + - port + type: object + sleep: + properties: + seconds: + format: int64 + type: integer + required: + - seconds + type: object + tcpSocket: + properties: + host: + type: string + port: + anyOf: + - type: integer + - type: string + x-kubernetes-int-or-string: true + required: + - port + type: object + type: object + preStop: + properties: + exec: + properties: + command: + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + httpGet: + properties: + host: + type: string + httpHeaders: + items: + properties: + name: + type: string + value: + type: string + required: + - name + - value + type: object + type: array + x-kubernetes-list-type: atomic + path: + type: string + port: + anyOf: + - type: integer + - type: string + x-kubernetes-int-or-string: true + scheme: + type: string + required: + - port + type: object + sleep: + properties: + seconds: + format: int64 + type: integer + required: + - seconds + type: object + tcpSocket: + properties: + host: + type: string + port: + anyOf: + - type: integer + - type: string + x-kubernetes-int-or-string: true + required: + - port + type: object + type: object + type: object + livenessProbe: + properties: + exec: + properties: + command: + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + failureThreshold: + format: int32 + type: integer + grpc: + properties: + port: + format: int32 + type: integer + service: + type: string + required: + - port + type: object + httpGet: + properties: + host: + type: string + httpHeaders: + items: + properties: + name: + type: string + value: + type: string + required: + - name + - value + type: object + type: array + x-kubernetes-list-type: atomic + path: + type: string + port: + anyOf: + - type: integer + - type: string + 
x-kubernetes-int-or-string: true + scheme: + type: string + required: + - port + type: object + initialDelaySeconds: + format: int32 + type: integer + periodSeconds: + format: int32 + type: integer + successThreshold: + format: int32 + type: integer + tcpSocket: + properties: + host: + type: string + port: + anyOf: + - type: integer + - type: string + x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + format: int64 + type: integer + timeoutSeconds: + format: int32 + type: integer + type: object + name: + type: string + ports: + items: + properties: + containerPort: + format: int32 + type: integer + hostIP: + type: string + hostPort: + format: int32 + type: integer + name: + type: string + protocol: + default: TCP + type: string + required: + - containerPort + type: object + type: array + x-kubernetes-list-map-keys: + - containerPort + - protocol + x-kubernetes-list-type: map + readinessProbe: + properties: + exec: + properties: + command: + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + failureThreshold: + format: int32 + type: integer + grpc: + properties: + port: + format: int32 + type: integer + service: + type: string + required: + - port + type: object + httpGet: + properties: + host: + type: string + httpHeaders: + items: + properties: + name: + type: string + value: + type: string + required: + - name + - value + type: object + type: array + x-kubernetes-list-type: atomic + path: + type: string + port: + anyOf: + - type: integer + - type: string + x-kubernetes-int-or-string: true + scheme: + type: string + required: + - port + type: object + initialDelaySeconds: + format: int32 + type: integer + periodSeconds: + format: int32 + type: integer + successThreshold: + format: int32 + type: integer + tcpSocket: + properties: + host: + type: string + port: + anyOf: + - type: integer + - type: string + x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + format: int64 + type: integer + timeoutSeconds: + format: int32 + type: integer + type: object + resizePolicy: + items: + properties: + resourceName: + type: string + restartPolicy: + type: string + required: + - resourceName + - restartPolicy + type: object + type: array + x-kubernetes-list-type: atomic + resources: + properties: + claims: + items: + properties: + name: + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: object + type: object + restartPolicy: + type: string + securityContext: + properties: + allowPrivilegeEscalation: + type: boolean + appArmorProfile: + properties: + localhostProfile: + type: string + type: + type: string + required: + - type + type: object + capabilities: + properties: + add: + items: + type: string + type: array + x-kubernetes-list-type: atomic + drop: + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + privileged: + type: boolean + procMount: + type: 
string + readOnlyRootFilesystem: + type: boolean + runAsGroup: + format: int64 + type: integer + runAsNonRoot: + type: boolean + runAsUser: + format: int64 + type: integer + seLinuxOptions: + properties: + level: + type: string + role: + type: string + type: + type: string + user: + type: string + type: object + seccompProfile: + properties: + localhostProfile: + type: string + type: + type: string + required: + - type + type: object + windowsOptions: + properties: + gmsaCredentialSpec: + type: string + gmsaCredentialSpecName: + type: string + hostProcess: + type: boolean + runAsUserName: + type: string + type: object + type: object + startupProbe: + properties: + exec: + properties: + command: + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + failureThreshold: + format: int32 + type: integer + grpc: + properties: + port: + format: int32 + type: integer + service: + type: string + required: + - port + type: object + httpGet: + properties: + host: + type: string + httpHeaders: + items: + properties: + name: + type: string + value: + type: string + required: + - name + - value + type: object + type: array + x-kubernetes-list-type: atomic + path: + type: string + port: + anyOf: + - type: integer + - type: string + x-kubernetes-int-or-string: true + scheme: + type: string + required: + - port + type: object + initialDelaySeconds: + format: int32 + type: integer + periodSeconds: + format: int32 + type: integer + successThreshold: + format: int32 + type: integer + tcpSocket: + properties: + host: + type: string + port: + anyOf: + - type: integer + - type: string + x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + format: int64 + type: integer + timeoutSeconds: + format: int32 + type: integer + type: object + stdin: + type: boolean + stdinOnce: + type: boolean + terminationMessagePath: + type: string + terminationMessagePolicy: + type: string + tty: + type: boolean + volumeDevices: + items: + properties: + devicePath: + type: string + name: + type: string + required: + - devicePath + - name + type: object + type: array + x-kubernetes-list-map-keys: + - devicePath + x-kubernetes-list-type: map + volumeMounts: + items: + properties: + mountPath: + type: string + mountPropagation: + type: string + name: + type: string + readOnly: + type: boolean + recursiveReadOnly: + type: string + subPath: + type: string + subPathExpr: + type: string + required: + - mountPath + - name + type: object + type: array + x-kubernetes-list-map-keys: + - mountPath + x-kubernetes-list-type: map + workingDir: + type: string + required: + - name + type: object + type: array + dnsConfig: + properties: + nameservers: + items: + type: string + type: array + x-kubernetes-list-type: atomic + options: + items: + properties: + name: + type: string + value: + type: string + type: object + type: array + x-kubernetes-list-type: atomic + searches: + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + dnsPolicy: + type: string + enableServiceLinks: + type: boolean + ephemeralContainers: + items: + properties: + args: + items: + type: string + type: array + x-kubernetes-list-type: atomic + command: + items: + type: string + type: array + x-kubernetes-list-type: atomic + env: + items: + properties: + name: + type: string + value: + type: string + valueFrom: + properties: + configMapKeyRef: + properties: + key: + type: string + name: + default: "" + type: string + optional: + type: boolean + required: + - key + 
type: object + x-kubernetes-map-type: atomic + fieldRef: + properties: + apiVersion: + type: string + fieldPath: + type: string + required: + - fieldPath + type: object + x-kubernetes-map-type: atomic + resourceFieldRef: + properties: + containerName: + type: string + divisor: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + type: string + required: + - resource + type: object + x-kubernetes-map-type: atomic + secretKeyRef: + properties: + key: + type: string + name: + default: "" + type: string + optional: + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: object + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + envFrom: + items: + properties: + configMapRef: + properties: + name: + default: "" + type: string + optional: + type: boolean + type: object + x-kubernetes-map-type: atomic + prefix: + type: string + secretRef: + properties: + name: + default: "" + type: string + optional: + type: boolean + type: object + x-kubernetes-map-type: atomic + type: object + type: array + x-kubernetes-list-type: atomic + image: + type: string + imagePullPolicy: + type: string + lifecycle: + properties: + postStart: + properties: + exec: + properties: + command: + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + httpGet: + properties: + host: + type: string + httpHeaders: + items: + properties: + name: + type: string + value: + type: string + required: + - name + - value + type: object + type: array + x-kubernetes-list-type: atomic + path: + type: string + port: + anyOf: + - type: integer + - type: string + x-kubernetes-int-or-string: true + scheme: + type: string + required: + - port + type: object + sleep: + properties: + seconds: + format: int64 + type: integer + required: + - seconds + type: object + tcpSocket: + properties: + host: + type: string + port: + anyOf: + - type: integer + - type: string + x-kubernetes-int-or-string: true + required: + - port + type: object + type: object + preStop: + properties: + exec: + properties: + command: + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + httpGet: + properties: + host: + type: string + httpHeaders: + items: + properties: + name: + type: string + value: + type: string + required: + - name + - value + type: object + type: array + x-kubernetes-list-type: atomic + path: + type: string + port: + anyOf: + - type: integer + - type: string + x-kubernetes-int-or-string: true + scheme: + type: string + required: + - port + type: object + sleep: + properties: + seconds: + format: int64 + type: integer + required: + - seconds + type: object + tcpSocket: + properties: + host: + type: string + port: + anyOf: + - type: integer + - type: string + x-kubernetes-int-or-string: true + required: + - port + type: object + type: object + type: object + livenessProbe: + properties: + exec: + properties: + command: + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + failureThreshold: + format: int32 + type: integer + grpc: + properties: + port: + format: int32 + type: integer + service: + type: string + required: + - port + type: object + httpGet: + properties: + host: + type: string + httpHeaders: + items: + properties: + name: + type: string + value: + type: string + required: + - name + - 
value + type: object + type: array + x-kubernetes-list-type: atomic + path: + type: string + port: + anyOf: + - type: integer + - type: string + x-kubernetes-int-or-string: true + scheme: + type: string + required: + - port + type: object + initialDelaySeconds: + format: int32 + type: integer + periodSeconds: + format: int32 + type: integer + successThreshold: + format: int32 + type: integer + tcpSocket: + properties: + host: + type: string + port: + anyOf: + - type: integer + - type: string + x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + format: int64 + type: integer + timeoutSeconds: + format: int32 + type: integer + type: object + name: + type: string + ports: + items: + properties: + containerPort: + format: int32 + type: integer + hostIP: + type: string + hostPort: + format: int32 + type: integer + name: + type: string + protocol: + default: TCP + type: string + required: + - containerPort + type: object + type: array + x-kubernetes-list-map-keys: + - containerPort + - protocol + x-kubernetes-list-type: map + readinessProbe: + properties: + exec: + properties: + command: + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + failureThreshold: + format: int32 + type: integer + grpc: + properties: + port: + format: int32 + type: integer + service: + type: string + required: + - port + type: object + httpGet: + properties: + host: + type: string + httpHeaders: + items: + properties: + name: + type: string + value: + type: string + required: + - name + - value + type: object + type: array + x-kubernetes-list-type: atomic + path: + type: string + port: + anyOf: + - type: integer + - type: string + x-kubernetes-int-or-string: true + scheme: + type: string + required: + - port + type: object + initialDelaySeconds: + format: int32 + type: integer + periodSeconds: + format: int32 + type: integer + successThreshold: + format: int32 + type: integer + tcpSocket: + properties: + host: + type: string + port: + anyOf: + - type: integer + - type: string + x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + format: int64 + type: integer + timeoutSeconds: + format: int32 + type: integer + type: object + resizePolicy: + items: + properties: + resourceName: + type: string + restartPolicy: + type: string + required: + - resourceName + - restartPolicy + type: object + type: array + x-kubernetes-list-type: atomic + resources: + properties: + claims: + items: + properties: + name: + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: object + type: object + restartPolicy: + type: string + securityContext: + properties: + allowPrivilegeEscalation: + type: boolean + appArmorProfile: + properties: + localhostProfile: + type: string + type: + type: string + required: + - type + type: object + capabilities: + properties: + add: + items: + type: string + type: array + x-kubernetes-list-type: atomic + 
drop: + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + privileged: + type: boolean + procMount: + type: string + readOnlyRootFilesystem: + type: boolean + runAsGroup: + format: int64 + type: integer + runAsNonRoot: + type: boolean + runAsUser: + format: int64 + type: integer + seLinuxOptions: + properties: + level: + type: string + role: + type: string + type: + type: string + user: + type: string + type: object + seccompProfile: + properties: + localhostProfile: + type: string + type: + type: string + required: + - type + type: object + windowsOptions: + properties: + gmsaCredentialSpec: + type: string + gmsaCredentialSpecName: + type: string + hostProcess: + type: boolean + runAsUserName: + type: string + type: object + type: object + startupProbe: + properties: + exec: + properties: + command: + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + failureThreshold: + format: int32 + type: integer + grpc: + properties: + port: + format: int32 + type: integer + service: + type: string + required: + - port + type: object + httpGet: + properties: + host: + type: string + httpHeaders: + items: + properties: + name: + type: string + value: + type: string + required: + - name + - value + type: object + type: array + x-kubernetes-list-type: atomic + path: + type: string + port: + anyOf: + - type: integer + - type: string + x-kubernetes-int-or-string: true + scheme: + type: string + required: + - port + type: object + initialDelaySeconds: + format: int32 + type: integer + periodSeconds: + format: int32 + type: integer + successThreshold: + format: int32 + type: integer + tcpSocket: + properties: + host: + type: string + port: + anyOf: + - type: integer + - type: string + x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + format: int64 + type: integer + timeoutSeconds: + format: int32 + type: integer + type: object + stdin: + type: boolean + stdinOnce: + type: boolean + targetContainerName: + type: string + terminationMessagePath: + type: string + terminationMessagePolicy: + type: string + tty: + type: boolean + volumeDevices: + items: + properties: + devicePath: + type: string + name: + type: string + required: + - devicePath + - name + type: object + type: array + x-kubernetes-list-map-keys: + - devicePath + x-kubernetes-list-type: map + volumeMounts: + items: + properties: + mountPath: + type: string + mountPropagation: + type: string + name: + type: string + readOnly: + type: boolean + recursiveReadOnly: + type: string + subPath: + type: string + subPathExpr: + type: string + required: + - mountPath + - name + type: object + type: array + x-kubernetes-list-map-keys: + - mountPath + x-kubernetes-list-type: map + workingDir: + type: string + required: + - name + type: object + type: array + hostAliases: + items: + properties: + hostnames: + items: + type: string + type: array + x-kubernetes-list-type: atomic + ip: + type: string + required: + - ip + type: object + type: array + hostIPC: + type: boolean + hostNetwork: + type: boolean + hostPID: + type: boolean + hostUsers: + type: boolean + imagePullSecrets: + items: + properties: + name: + default: "" + type: string + type: object + x-kubernetes-map-type: atomic + type: array + initContainers: + items: + properties: + args: + items: + type: string + type: array + x-kubernetes-list-type: atomic + command: + items: + type: string + type: array + x-kubernetes-list-type: atomic + env: + items: + properties: + name: + type: 
string + value: + type: string + valueFrom: + properties: + configMapKeyRef: + properties: + key: + type: string + name: + default: "" + type: string + optional: + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + fieldRef: + properties: + apiVersion: + type: string + fieldPath: + type: string + required: + - fieldPath + type: object + x-kubernetes-map-type: atomic + resourceFieldRef: + properties: + containerName: + type: string + divisor: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + type: string + required: + - resource + type: object + x-kubernetes-map-type: atomic + secretKeyRef: + properties: + key: + type: string + name: + default: "" + type: string + optional: + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: object + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + envFrom: + items: + properties: + configMapRef: + properties: + name: + default: "" + type: string + optional: + type: boolean + type: object + x-kubernetes-map-type: atomic + prefix: + type: string + secretRef: + properties: + name: + default: "" + type: string + optional: + type: boolean + type: object + x-kubernetes-map-type: atomic + type: object + type: array + x-kubernetes-list-type: atomic + image: + type: string + imagePullPolicy: + type: string + lifecycle: + properties: + postStart: + properties: + exec: + properties: + command: + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + httpGet: + properties: + host: + type: string + httpHeaders: + items: + properties: + name: + type: string + value: + type: string + required: + - name + - value + type: object + type: array + x-kubernetes-list-type: atomic + path: + type: string + port: + anyOf: + - type: integer + - type: string + x-kubernetes-int-or-string: true + scheme: + type: string + required: + - port + type: object + sleep: + properties: + seconds: + format: int64 + type: integer + required: + - seconds + type: object + tcpSocket: + properties: + host: + type: string + port: + anyOf: + - type: integer + - type: string + x-kubernetes-int-or-string: true + required: + - port + type: object + type: object + preStop: + properties: + exec: + properties: + command: + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + httpGet: + properties: + host: + type: string + httpHeaders: + items: + properties: + name: + type: string + value: + type: string + required: + - name + - value + type: object + type: array + x-kubernetes-list-type: atomic + path: + type: string + port: + anyOf: + - type: integer + - type: string + x-kubernetes-int-or-string: true + scheme: + type: string + required: + - port + type: object + sleep: + properties: + seconds: + format: int64 + type: integer + required: + - seconds + type: object + tcpSocket: + properties: + host: + type: string + port: + anyOf: + - type: integer + - type: string + x-kubernetes-int-or-string: true + required: + - port + type: object + type: object + type: object + livenessProbe: + properties: + exec: + properties: + command: + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + failureThreshold: + format: int32 + type: integer + grpc: + properties: + port: + format: int32 + type: integer + service: + type: 
string + required: + - port + type: object + httpGet: + properties: + host: + type: string + httpHeaders: + items: + properties: + name: + type: string + value: + type: string + required: + - name + - value + type: object + type: array + x-kubernetes-list-type: atomic + path: + type: string + port: + anyOf: + - type: integer + - type: string + x-kubernetes-int-or-string: true + scheme: + type: string + required: + - port + type: object + initialDelaySeconds: + format: int32 + type: integer + periodSeconds: + format: int32 + type: integer + successThreshold: + format: int32 + type: integer + tcpSocket: + properties: + host: + type: string + port: + anyOf: + - type: integer + - type: string + x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + format: int64 + type: integer + timeoutSeconds: + format: int32 + type: integer + type: object + name: + type: string + ports: + items: + properties: + containerPort: + format: int32 + type: integer + hostIP: + type: string + hostPort: + format: int32 + type: integer + name: + type: string + protocol: + default: TCP + type: string + required: + - containerPort + type: object + type: array + x-kubernetes-list-map-keys: + - containerPort + - protocol + x-kubernetes-list-type: map + readinessProbe: + properties: + exec: + properties: + command: + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + failureThreshold: + format: int32 + type: integer + grpc: + properties: + port: + format: int32 + type: integer + service: + type: string + required: + - port + type: object + httpGet: + properties: + host: + type: string + httpHeaders: + items: + properties: + name: + type: string + value: + type: string + required: + - name + - value + type: object + type: array + x-kubernetes-list-type: atomic + path: + type: string + port: + anyOf: + - type: integer + - type: string + x-kubernetes-int-or-string: true + scheme: + type: string + required: + - port + type: object + initialDelaySeconds: + format: int32 + type: integer + periodSeconds: + format: int32 + type: integer + successThreshold: + format: int32 + type: integer + tcpSocket: + properties: + host: + type: string + port: + anyOf: + - type: integer + - type: string + x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + format: int64 + type: integer + timeoutSeconds: + format: int32 + type: integer + type: object + resizePolicy: + items: + properties: + resourceName: + type: string + restartPolicy: + type: string + required: + - resourceName + - restartPolicy + type: object + type: array + x-kubernetes-list-type: atomic + resources: + properties: + claims: + items: + properties: + name: + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: object + type: object + restartPolicy: + type: string + securityContext: + properties: + allowPrivilegeEscalation: + type: boolean + appArmorProfile: + properties: + 
localhostProfile: + type: string + type: + type: string + required: + - type + type: object + capabilities: + properties: + add: + items: + type: string + type: array + x-kubernetes-list-type: atomic + drop: + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + privileged: + type: boolean + procMount: + type: string + readOnlyRootFilesystem: + type: boolean + runAsGroup: + format: int64 + type: integer + runAsNonRoot: + type: boolean + runAsUser: + format: int64 + type: integer + seLinuxOptions: + properties: + level: + type: string + role: + type: string + type: + type: string + user: + type: string + type: object + seccompProfile: + properties: + localhostProfile: + type: string + type: + type: string + required: + - type + type: object + windowsOptions: + properties: + gmsaCredentialSpec: + type: string + gmsaCredentialSpecName: + type: string + hostProcess: + type: boolean + runAsUserName: + type: string + type: object + type: object + startupProbe: + properties: + exec: + properties: + command: + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + failureThreshold: + format: int32 + type: integer + grpc: + properties: + port: + format: int32 + type: integer + service: + type: string + required: + - port + type: object + httpGet: + properties: + host: + type: string + httpHeaders: + items: + properties: + name: + type: string + value: + type: string + required: + - name + - value + type: object + type: array + x-kubernetes-list-type: atomic + path: + type: string + port: + anyOf: + - type: integer + - type: string + x-kubernetes-int-or-string: true + scheme: + type: string + required: + - port + type: object + initialDelaySeconds: + format: int32 + type: integer + periodSeconds: + format: int32 + type: integer + successThreshold: + format: int32 + type: integer + tcpSocket: + properties: + host: + type: string + port: + anyOf: + - type: integer + - type: string + x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + format: int64 + type: integer + timeoutSeconds: + format: int32 + type: integer + type: object + stdin: + type: boolean + stdinOnce: + type: boolean + terminationMessagePath: + type: string + terminationMessagePolicy: + type: string + tty: + type: boolean + volumeDevices: + items: + properties: + devicePath: + type: string + name: + type: string + required: + - devicePath + - name + type: object + type: array + x-kubernetes-list-map-keys: + - devicePath + x-kubernetes-list-type: map + volumeMounts: + items: + properties: + mountPath: + type: string + mountPropagation: + type: string + name: + type: string + readOnly: + type: boolean + recursiveReadOnly: + type: string + subPath: + type: string + subPathExpr: + type: string + required: + - mountPath + - name + type: object + type: array + x-kubernetes-list-map-keys: + - mountPath + x-kubernetes-list-type: map + workingDir: + type: string + required: + - name + type: object + type: array + nodeName: + type: string + nodeSelector: + additionalProperties: + type: string + type: object + x-kubernetes-map-type: atomic + os: + properties: + name: + type: string + required: + - name + type: object + overhead: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: object + podPlacementPolicy: + default: + name: default + properties: + 
name: + default: "" + type: string + type: object + x-kubernetes-map-type: atomic + preemptionPolicy: + type: string + priority: + format: int32 + type: integer + priorityClassName: + type: string + readinessGates: + items: + properties: + conditionType: + type: string + required: + - conditionType + type: object + type: array + restartPolicy: + type: string + runtimeClassName: + type: string + schedulerName: + type: string + securityContext: + properties: + appArmorProfile: + properties: + localhostProfile: + type: string + type: + type: string + required: + - type + type: object + fsGroup: + format: int64 + type: integer + fsGroupChangePolicy: + type: string + runAsGroup: + format: int64 + type: integer + runAsNonRoot: + type: boolean + runAsUser: + format: int64 + type: integer + seLinuxOptions: + properties: + level: + type: string + role: + type: string + type: + type: string + user: + type: string + type: object + seccompProfile: + properties: + localhostProfile: + type: string + type: + type: string + required: + - type + type: object + supplementalGroups: + items: + format: int64 + type: integer + type: array + x-kubernetes-list-type: atomic + sysctls: + items: + properties: + name: + type: string + value: + type: string + required: + - name + - value + type: object + type: array + x-kubernetes-list-type: atomic + windowsOptions: + properties: + gmsaCredentialSpec: + type: string + gmsaCredentialSpecName: + type: string + hostProcess: + type: boolean + runAsUserName: + type: string + type: object + type: object + serviceAccountName: + type: string + setHostnameAsFQDN: + type: boolean + shareProcessNamespace: + type: boolean + terminationGracePeriodSeconds: + format: int64 + type: integer + tolerations: + items: + properties: + effect: + type: string + key: + type: string + operator: + type: string + tolerationSeconds: + format: int64 + type: integer + value: + type: string + type: object + type: array + volumes: + items: + properties: + awsElasticBlockStore: + properties: + fsType: + type: string + partition: + format: int32 + type: integer + readOnly: + type: boolean + volumeID: + type: string + required: + - volumeID + type: object + azureDisk: + properties: + cachingMode: + type: string + diskName: + type: string + diskURI: + type: string + fsType: + type: string + kind: + type: string + readOnly: + type: boolean + required: + - diskName + - diskURI + type: object + azureFile: + properties: + readOnly: + type: boolean + secretName: + type: string + shareName: + type: string + required: + - secretName + - shareName + type: object + cephfs: + properties: + monitors: + items: + type: string + type: array + x-kubernetes-list-type: atomic + path: + type: string + readOnly: + type: boolean + secretFile: + type: string + secretRef: + properties: + name: + default: "" + type: string + type: object + x-kubernetes-map-type: atomic + user: + type: string + required: + - monitors + type: object + cinder: + properties: + fsType: + type: string + readOnly: + type: boolean + secretRef: + properties: + name: + default: "" + type: string + type: object + x-kubernetes-map-type: atomic + volumeID: + type: string + required: + - volumeID + type: object + configMap: + properties: + defaultMode: + format: int32 + type: integer + items: + items: + properties: + key: + type: string + mode: + format: int32 + type: integer + path: + type: string + required: + - key + - path + type: object + type: array + x-kubernetes-list-type: atomic + name: + default: "" + type: string + optional: + type: boolean + 
type: object + x-kubernetes-map-type: atomic + csi: + properties: + driver: + type: string + fsType: + type: string + nodePublishSecretRef: + properties: + name: + default: "" + type: string + type: object + x-kubernetes-map-type: atomic + readOnly: + type: boolean + volumeAttributes: + additionalProperties: + type: string + type: object + required: + - driver + type: object + downwardAPI: + properties: + defaultMode: + format: int32 + type: integer + items: + items: + properties: + fieldRef: + properties: + apiVersion: + type: string + fieldPath: + type: string + required: + - fieldPath + type: object + x-kubernetes-map-type: atomic + mode: + format: int32 + type: integer + path: + type: string + resourceFieldRef: + properties: + containerName: + type: string + divisor: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + type: string + required: + - resource + type: object + x-kubernetes-map-type: atomic + required: + - path + type: object + type: array + x-kubernetes-list-type: atomic + type: object + emptyDir: + properties: + medium: + type: string + sizeLimit: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: object + ephemeral: + properties: + volumeClaimTemplate: + properties: + metadata: + properties: + annotations: + additionalProperties: + type: string + type: object + generateName: + type: string + labels: + additionalProperties: + type: string + type: object + name: + type: string + namespace: + type: string + ownerReferences: + items: + properties: + apiVersion: + type: string + blockOwnerDeletion: + type: boolean + controller: + type: boolean + kind: + type: string + name: + type: string + uid: + type: string + required: + - apiVersion + - kind + - name + - uid + type: object + x-kubernetes-map-type: atomic + type: array + type: object + spec: + properties: + accessModes: + items: + type: string + type: array + x-kubernetes-list-type: atomic + dataSource: + properties: + apiGroup: + type: string + kind: + type: string + name: + type: string + required: + - kind + - name + type: object + x-kubernetes-map-type: atomic + dataSourceRef: + properties: + apiGroup: + type: string + kind: + type: string + name: + type: string + namespace: + type: string + required: + - kind + - name + type: object + resources: + properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: object + type: object + selector: + properties: + matchExpressions: + items: + properties: + key: + type: string + operator: + type: string + values: + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + type: object + type: object + x-kubernetes-map-type: atomic + storageClassName: + 
type: string + volumeAttributesClassName: + type: string + volumeMode: + type: string + volumeName: + type: string + type: object + required: + - spec + type: object + type: object + fc: + properties: + fsType: + type: string + lun: + format: int32 + type: integer + readOnly: + type: boolean + targetWWNs: + items: + type: string + type: array + x-kubernetes-list-type: atomic + wwids: + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + flexVolume: + properties: + driver: + type: string + fsType: + type: string + options: + additionalProperties: + type: string + type: object + readOnly: + type: boolean + secretRef: + properties: + name: + default: "" + type: string + type: object + x-kubernetes-map-type: atomic + required: + - driver + type: object + flocker: + properties: + datasetName: + type: string + datasetUUID: + type: string + type: object + gcePersistentDisk: + properties: + fsType: + type: string + partition: + format: int32 + type: integer + pdName: + type: string + readOnly: + type: boolean + required: + - pdName + type: object + glusterfs: + properties: + endpoints: + type: string + path: + type: string + readOnly: + type: boolean + required: + - endpoints + - path + type: object + hostPath: + properties: + path: + type: string + type: + type: string + required: + - path + type: object + iscsi: + properties: + chapAuthDiscovery: + type: boolean + chapAuthSession: + type: boolean + fsType: + type: string + initiatorName: + type: string + iqn: + type: string + iscsiInterface: + type: string + lun: + format: int32 + type: integer + portals: + items: + type: string + type: array + x-kubernetes-list-type: atomic + readOnly: + type: boolean + secretRef: + properties: + name: + default: "" + type: string + type: object + x-kubernetes-map-type: atomic + targetPortal: + type: string + required: + - iqn + - lun + - targetPortal + type: object + name: + type: string + nfs: + properties: + path: + type: string + readOnly: + type: boolean + server: + type: string + required: + - path + - server + type: object + persistentVolumeClaim: + properties: + claimName: + type: string + readOnly: + type: boolean + required: + - claimName + type: object + photonPersistentDisk: + properties: + fsType: + type: string + pdID: + type: string + required: + - pdID + type: object + portworxVolume: + properties: + fsType: + type: string + readOnly: + type: boolean + volumeID: + type: string + required: + - volumeID + type: object + projected: + properties: + defaultMode: + format: int32 + type: integer + sources: + items: + properties: + clusterTrustBundle: + properties: + labelSelector: + properties: + matchExpressions: + items: + properties: + key: + type: string + operator: + type: string + values: + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + type: object + type: object + x-kubernetes-map-type: atomic + name: + type: string + optional: + type: boolean + path: + type: string + signerName: + type: string + required: + - path + type: object + configMap: + properties: + items: + items: + properties: + key: + type: string + mode: + format: int32 + type: integer + path: + type: string + required: + - key + - path + type: object + type: array + x-kubernetes-list-type: atomic + name: + default: "" + type: string + optional: + type: boolean + type: object + x-kubernetes-map-type: atomic + downwardAPI: + 
properties: + items: + items: + properties: + fieldRef: + properties: + apiVersion: + type: string + fieldPath: + type: string + required: + - fieldPath + type: object + x-kubernetes-map-type: atomic + mode: + format: int32 + type: integer + path: + type: string + resourceFieldRef: + properties: + containerName: + type: string + divisor: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + type: string + required: + - resource + type: object + x-kubernetes-map-type: atomic + required: + - path + type: object + type: array + x-kubernetes-list-type: atomic + type: object + secret: + properties: + items: + items: + properties: + key: + type: string + mode: + format: int32 + type: integer + path: + type: string + required: + - key + - path + type: object + type: array + x-kubernetes-list-type: atomic + name: + default: "" + type: string + optional: + type: boolean + type: object + x-kubernetes-map-type: atomic + serviceAccountToken: + properties: + audience: + type: string + expirationSeconds: + format: int64 + type: integer + path: + type: string + required: + - path + type: object + type: object + type: array + x-kubernetes-list-type: atomic + type: object + quobyte: + properties: + group: + type: string + readOnly: + type: boolean + registry: + type: string + tenant: + type: string + user: + type: string + volume: + type: string + required: + - registry + - volume + type: object + rbd: + properties: + fsType: + type: string + image: + type: string + keyring: + type: string + monitors: + items: + type: string + type: array + x-kubernetes-list-type: atomic + pool: + type: string + readOnly: + type: boolean + secretRef: + properties: + name: + default: "" + type: string + type: object + x-kubernetes-map-type: atomic + user: + type: string + required: + - image + - monitors + type: object + scaleIO: + properties: + fsType: + type: string + gateway: + type: string + protectionDomain: + type: string + readOnly: + type: boolean + secretRef: + properties: + name: + default: "" + type: string + type: object + x-kubernetes-map-type: atomic + sslEnabled: + type: boolean + storageMode: + type: string + storagePool: + type: string + system: + type: string + volumeName: + type: string + required: + - gateway + - secretRef + - system + type: object + secret: + properties: + defaultMode: + format: int32 + type: integer + items: + items: + properties: + key: + type: string + mode: + format: int32 + type: integer + path: + type: string + required: + - key + - path + type: object + type: array + x-kubernetes-list-type: atomic + optional: + type: boolean + secretName: + type: string + type: object + storageos: + properties: + fsType: + type: string + readOnly: + type: boolean + secretRef: + properties: + name: + default: "" + type: string + type: object + x-kubernetes-map-type: atomic + volumeName: + type: string + volumeNamespace: + type: string + type: object + vsphereVolume: + properties: + fsType: + type: string + storagePolicyID: + type: string + storagePolicyName: + type: string + volumePath: + type: string + required: + - volumePath + type: object + required: + - name + type: object + type: array + type: object + type: object + replicas: + format: int32 + type: integer + storage: + properties: + accessModes: + items: + type: string + type: array + x-kubernetes-list-type: atomic + dataSource: + properties: + apiGroup: + type: string + kind: + 
type: string + name: + type: string + required: + - kind + - name + type: object + x-kubernetes-map-type: atomic + dataSourceRef: + properties: + apiGroup: + type: string + kind: + type: string + name: + type: string + namespace: + type: string + required: + - kind + - name + type: object + resources: + properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: object + type: object + selector: + properties: + matchExpressions: + items: + properties: + key: + type: string + operator: + type: string + values: + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + type: object + type: object + x-kubernetes-map-type: atomic + storageClassName: + type: string + volumeAttributesClassName: + type: string + volumeMode: + type: string + volumeName: + type: string + type: object + storageType: + enum: + - Durable + - Ephemeral + type: string + type: object + type: array + type: object + version: + type: string + required: + - version + type: object + status: + properties: + conditions: + items: + properties: + lastTransitionTime: + format: date-time + type: string + message: + type: string + observedGeneration: + format: int64 + type: integer + reason: + type: string + severity: + type: string + status: + type: string + type: + type: string + required: + - lastTransitionTime + - status + - type + type: object + type: array + observedGeneration: + format: int64 + type: integer + phase: + enum: + - Provisioning + - DataRestoring + - Ready + - Critical + - NotReady + - Halted + - Unknown + type: string + type: object + type: object + served: true + storage: true + subresources: + status: {} diff --git a/vendor/kubedb.dev/apimachinery/crds/kubedb.com_clickhouses.yaml b/vendor/kubedb.dev/apimachinery/crds/kubedb.com_clickhouses.yaml index adb4e4a70..307d4fca0 100644 --- a/vendor/kubedb.dev/apimachinery/crds/kubedb.com_clickhouses.yaml +++ b/vendor/kubedb.dev/apimachinery/crds/kubedb.com_clickhouses.yaml @@ -8,6 +8,11 @@ metadata: spec: group: kubedb.com names: + categories: + - datastore + - kubedb + - appscode + - all kind: ClickHouse listKind: ClickHouseList plural: clickhouses @@ -6339,6 +6344,7 @@ spec: - primary - standby - stats + - dashboard type: string metadata: properties: diff --git a/vendor/kubedb.dev/apimachinery/crds/kubedb.com_druids.yaml b/vendor/kubedb.dev/apimachinery/crds/kubedb.com_druids.yaml index 1fb8522c6..6473f6d1c 100644 --- a/vendor/kubedb.dev/apimachinery/crds/kubedb.com_druids.yaml +++ b/vendor/kubedb.dev/apimachinery/crds/kubedb.com_druids.yaml @@ -8,6 +8,11 @@ metadata: spec: group: kubedb.com names: + categories: + - datastore + - kubedb + - appscode + - all kind: Druid listKind: DruidList plural: druids @@ -337,6 +342,7 @@ spec: - primary - standby - stats + - dashboard type: string metadata: properties: diff --git a/vendor/kubedb.dev/apimachinery/crds/kubedb.com_elasticsearches.yaml b/vendor/kubedb.dev/apimachinery/crds/kubedb.com_elasticsearches.yaml index 
5f562fdbf..3cc06a820 100644 --- a/vendor/kubedb.dev/apimachinery/crds/kubedb.com_elasticsearches.yaml +++ b/vendor/kubedb.dev/apimachinery/crds/kubedb.com_elasticsearches.yaml @@ -43674,6 +43674,7 @@ spec: - primary - standby - stats + - dashboard type: string metadata: properties: diff --git a/vendor/kubedb.dev/apimachinery/crds/kubedb.com_etcds.yaml b/vendor/kubedb.dev/apimachinery/crds/kubedb.com_etcds.yaml index 55f5741d0..a4a449436 100644 --- a/vendor/kubedb.dev/apimachinery/crds/kubedb.com_etcds.yaml +++ b/vendor/kubedb.dev/apimachinery/crds/kubedb.com_etcds.yaml @@ -3916,6 +3916,7 @@ spec: - primary - standby - stats + - dashboard type: string metadata: properties: diff --git a/vendor/kubedb.dev/apimachinery/crds/kubedb.com_ferretdbs.yaml b/vendor/kubedb.dev/apimachinery/crds/kubedb.com_ferretdbs.yaml index c0e3f38d7..67a34ccfa 100644 --- a/vendor/kubedb.dev/apimachinery/crds/kubedb.com_ferretdbs.yaml +++ b/vendor/kubedb.dev/apimachinery/crds/kubedb.com_ferretdbs.yaml @@ -8,6 +8,11 @@ metadata: spec: group: kubedb.com names: + categories: + - datastore + - kubedb + - appscode + - all kind: FerretDB listKind: FerretDBList plural: ferretdbs @@ -3376,6 +3381,7 @@ spec: - primary - standby - stats + - dashboard type: string metadata: properties: diff --git a/vendor/kubedb.dev/apimachinery/crds/kubedb.com_kafkas.yaml b/vendor/kubedb.dev/apimachinery/crds/kubedb.com_kafkas.yaml index 494d0e48f..db418404a 100644 --- a/vendor/kubedb.dev/apimachinery/crds/kubedb.com_kafkas.yaml +++ b/vendor/kubedb.dev/apimachinery/crds/kubedb.com_kafkas.yaml @@ -8,6 +8,11 @@ metadata: spec: group: kubedb.com names: + categories: + - datastore + - kubedb + - appscode + - all kind: Kafka listKind: KafkaList plural: kafkas @@ -3187,6 +3192,8 @@ spec: type: boolean enableSSL: type: boolean + halted: + type: boolean healthChecker: default: failureThreshold: 3 @@ -16294,6 +16301,8 @@ spec: type: boolean enableSSL: type: boolean + halted: + type: boolean healthChecker: default: failureThreshold: 3 @@ -19600,6 +19609,7 @@ spec: - primary - standby - stats + - dashboard type: string metadata: properties: diff --git a/vendor/kubedb.dev/apimachinery/crds/kubedb.com_mariadbs.yaml b/vendor/kubedb.dev/apimachinery/crds/kubedb.com_mariadbs.yaml index ede8ee10b..8b1028f32 100644 --- a/vendor/kubedb.dev/apimachinery/crds/kubedb.com_mariadbs.yaml +++ b/vendor/kubedb.dev/apimachinery/crds/kubedb.com_mariadbs.yaml @@ -8898,6 +8898,7 @@ spec: - primary - standby - stats + - dashboard type: string metadata: properties: diff --git a/vendor/kubedb.dev/apimachinery/crds/kubedb.com_memcacheds.yaml b/vendor/kubedb.dev/apimachinery/crds/kubedb.com_memcacheds.yaml index 42ab2baa3..37a924c46 100644 --- a/vendor/kubedb.dev/apimachinery/crds/kubedb.com_memcacheds.yaml +++ b/vendor/kubedb.dev/apimachinery/crds/kubedb.com_memcacheds.yaml @@ -8025,6 +8025,7 @@ spec: - primary - standby - stats + - dashboard type: string metadata: properties: diff --git a/vendor/kubedb.dev/apimachinery/crds/kubedb.com_mongodbs.yaml b/vendor/kubedb.dev/apimachinery/crds/kubedb.com_mongodbs.yaml index 3aa4f33ee..027fc74ae 100644 --- a/vendor/kubedb.dev/apimachinery/crds/kubedb.com_mongodbs.yaml +++ b/vendor/kubedb.dev/apimachinery/crds/kubedb.com_mongodbs.yaml @@ -30037,6 +30037,7 @@ spec: - primary - standby - stats + - dashboard type: string metadata: properties: diff --git a/vendor/kubedb.dev/apimachinery/crds/kubedb.com_mssqlservers.yaml b/vendor/kubedb.dev/apimachinery/crds/kubedb.com_mssqlservers.yaml index 50123b441..00f9c4b7a 100644 --- 
a/vendor/kubedb.dev/apimachinery/crds/kubedb.com_mssqlservers.yaml +++ b/vendor/kubedb.dev/apimachinery/crds/kubedb.com_mssqlservers.yaml @@ -4613,6 +4613,7 @@ spec: - primary - standby - stats + - dashboard type: string metadata: properties: diff --git a/vendor/kubedb.dev/apimachinery/crds/kubedb.com_mysqls.yaml b/vendor/kubedb.dev/apimachinery/crds/kubedb.com_mysqls.yaml index 9b609a859..97279dc2a 100644 --- a/vendor/kubedb.dev/apimachinery/crds/kubedb.com_mysqls.yaml +++ b/vendor/kubedb.dev/apimachinery/crds/kubedb.com_mysqls.yaml @@ -12181,6 +12181,7 @@ spec: - primary - standby - stats + - dashboard type: string metadata: properties: diff --git a/vendor/kubedb.dev/apimachinery/crds/kubedb.com_perconaxtradbs.yaml b/vendor/kubedb.dev/apimachinery/crds/kubedb.com_perconaxtradbs.yaml index 68e4c55bf..5854c5b92 100644 --- a/vendor/kubedb.dev/apimachinery/crds/kubedb.com_perconaxtradbs.yaml +++ b/vendor/kubedb.dev/apimachinery/crds/kubedb.com_perconaxtradbs.yaml @@ -8881,6 +8881,7 @@ spec: - primary - standby - stats + - dashboard type: string metadata: properties: diff --git a/vendor/kubedb.dev/apimachinery/crds/kubedb.com_pgbouncers.yaml b/vendor/kubedb.dev/apimachinery/crds/kubedb.com_pgbouncers.yaml index 5bd115521..5bee45471 100644 --- a/vendor/kubedb.dev/apimachinery/crds/kubedb.com_pgbouncers.yaml +++ b/vendor/kubedb.dev/apimachinery/crds/kubedb.com_pgbouncers.yaml @@ -9,7 +9,7 @@ spec: group: kubedb.com names: categories: - - proxy + - datastore - kubedb - appscode - all @@ -6655,6 +6655,7 @@ spec: - primary - standby - stats + - dashboard type: string metadata: properties: diff --git a/vendor/kubedb.dev/apimachinery/crds/kubedb.com_pgpools.yaml b/vendor/kubedb.dev/apimachinery/crds/kubedb.com_pgpools.yaml index 4ebdd9ea8..92a975ccf 100644 --- a/vendor/kubedb.dev/apimachinery/crds/kubedb.com_pgpools.yaml +++ b/vendor/kubedb.dev/apimachinery/crds/kubedb.com_pgpools.yaml @@ -3388,6 +3388,7 @@ spec: - primary - standby - stats + - dashboard type: string metadata: properties: diff --git a/vendor/kubedb.dev/apimachinery/crds/kubedb.com_postgreses.yaml b/vendor/kubedb.dev/apimachinery/crds/kubedb.com_postgreses.yaml index 6a128ce3f..16fe0db8d 100644 --- a/vendor/kubedb.dev/apimachinery/crds/kubedb.com_postgreses.yaml +++ b/vendor/kubedb.dev/apimachinery/crds/kubedb.com_postgreses.yaml @@ -9181,6 +9181,7 @@ spec: - primary - standby - stats + - dashboard type: string metadata: properties: diff --git a/vendor/kubedb.dev/apimachinery/crds/kubedb.com_proxysqls.yaml b/vendor/kubedb.dev/apimachinery/crds/kubedb.com_proxysqls.yaml index 5ed4c42c8..13cf813a4 100644 --- a/vendor/kubedb.dev/apimachinery/crds/kubedb.com_proxysqls.yaml +++ b/vendor/kubedb.dev/apimachinery/crds/kubedb.com_proxysqls.yaml @@ -6615,6 +6615,7 @@ spec: - primary - standby - stats + - dashboard type: string metadata: properties: diff --git a/vendor/kubedb.dev/apimachinery/crds/kubedb.com_rabbitmqs.yaml b/vendor/kubedb.dev/apimachinery/crds/kubedb.com_rabbitmqs.yaml index 4882a717a..120ae96fc 100644 --- a/vendor/kubedb.dev/apimachinery/crds/kubedb.com_rabbitmqs.yaml +++ b/vendor/kubedb.dev/apimachinery/crds/kubedb.com_rabbitmqs.yaml @@ -8,6 +8,11 @@ metadata: spec: group: kubedb.com names: + categories: + - datastore + - kubedb + - appscode + - all kind: RabbitMQ listKind: RabbitMQList plural: rabbitmqs @@ -66,6 +71,17 @@ spec: type: string disableSecurity: type: boolean + disabledProtocols: + items: + enum: + - http + - amqp + - mqtt + - stomp + - web_mqtt + - web_stomp + type: string + type: array enableSSL: type: boolean 
halted: @@ -3367,6 +3383,7 @@ spec: - primary - standby - stats + - dashboard type: string metadata: properties: diff --git a/vendor/kubedb.dev/apimachinery/crds/kubedb.com_redises.yaml b/vendor/kubedb.dev/apimachinery/crds/kubedb.com_redises.yaml index 2dd2e6e41..29920709c 100644 --- a/vendor/kubedb.dev/apimachinery/crds/kubedb.com_redises.yaml +++ b/vendor/kubedb.dev/apimachinery/crds/kubedb.com_redises.yaml @@ -8904,6 +8904,7 @@ spec: - primary - standby - stats + - dashboard type: string metadata: properties: diff --git a/vendor/kubedb.dev/apimachinery/crds/kubedb.com_redissentinels.yaml b/vendor/kubedb.dev/apimachinery/crds/kubedb.com_redissentinels.yaml index a966370f3..6c1139e82 100644 --- a/vendor/kubedb.dev/apimachinery/crds/kubedb.com_redissentinels.yaml +++ b/vendor/kubedb.dev/apimachinery/crds/kubedb.com_redissentinels.yaml @@ -6581,6 +6581,7 @@ spec: - primary - standby - stats + - dashboard type: string metadata: properties: diff --git a/vendor/kubedb.dev/apimachinery/crds/kubedb.com_singlestores.yaml b/vendor/kubedb.dev/apimachinery/crds/kubedb.com_singlestores.yaml index 817a2b064..4b1b0d833 100644 --- a/vendor/kubedb.dev/apimachinery/crds/kubedb.com_singlestores.yaml +++ b/vendor/kubedb.dev/apimachinery/crds/kubedb.com_singlestores.yaml @@ -8,6 +8,11 @@ metadata: spec: group: kubedb.com names: + categories: + - datastore + - kubedb + - appscode + - all kind: Singlestore listKind: SinglestoreList plural: singlestores @@ -4375,6 +4380,7 @@ spec: - primary - standby - stats + - dashboard type: string metadata: properties: diff --git a/vendor/kubedb.dev/apimachinery/crds/kubedb.com_solrs.yaml b/vendor/kubedb.dev/apimachinery/crds/kubedb.com_solrs.yaml index a83ece53e..bc510b157 100644 --- a/vendor/kubedb.dev/apimachinery/crds/kubedb.com_solrs.yaml +++ b/vendor/kubedb.dev/apimachinery/crds/kubedb.com_solrs.yaml @@ -8,6 +8,11 @@ metadata: spec: group: kubedb.com names: + categories: + - datastore + - kubedb + - appscode + - all kind: Solr listKind: SolrList plural: solrs @@ -3370,6 +3375,7 @@ spec: - primary - standby - stats + - dashboard type: string metadata: properties: diff --git a/vendor/kubedb.dev/apimachinery/crds/kubedb.com_zookeepers.yaml b/vendor/kubedb.dev/apimachinery/crds/kubedb.com_zookeepers.yaml index 63f64f9fd..b8de8e77f 100644 --- a/vendor/kubedb.dev/apimachinery/crds/kubedb.com_zookeepers.yaml +++ b/vendor/kubedb.dev/apimachinery/crds/kubedb.com_zookeepers.yaml @@ -8,6 +8,11 @@ metadata: spec: group: kubedb.com names: + categories: + - datastore + - kubedb + - appscode + - all kind: ZooKeeper listKind: ZooKeeperList plural: zookeepers @@ -3369,6 +3374,7 @@ spec: - primary - standby - stats + - dashboard type: string metadata: properties: diff --git a/vendor/kubedb.dev/apimachinery/crds/ops.kubedb.com_clickhouseopsrequests.yaml b/vendor/kubedb.dev/apimachinery/crds/ops.kubedb.com_clickhouseopsrequests.yaml new file mode 100644 index 000000000..e56b50ac4 --- /dev/null +++ b/vendor/kubedb.dev/apimachinery/crds/ops.kubedb.com_clickhouseopsrequests.yaml @@ -0,0 +1,131 @@ +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + creationTimestamp: null + labels: + app.kubernetes.io/name: kubedb + name: clickhouseopsrequests.ops.kubedb.com +spec: + group: ops.kubedb.com + names: + categories: + - ops + - kubedb + - appscode + kind: ClickHouseOpsRequest + listKind: ClickHouseOpsRequestList + plural: clickhouseopsrequests + shortNames: + - chops + singular: clickhouseopsrequest + scope: Namespaced + versions: + - additionalPrinterColumns: + - 
jsonPath: .spec.type + name: Type + type: string + - jsonPath: .status.phase + name: Status + type: string + - jsonPath: .metadata.creationTimestamp + name: Age + type: date + name: v1alpha1 + schema: + openAPIV3Schema: + properties: + apiVersion: + type: string + kind: + type: string + metadata: + type: object + spec: + properties: + apply: + default: IfReady + enum: + - IfReady + - Always + type: string + databaseRef: + properties: + name: + default: "" + type: string + type: object + x-kubernetes-map-type: atomic + restart: + type: object + timeout: + type: string + type: + enum: + - Restart + type: string + required: + - databaseRef + - type + type: object + status: + properties: + conditions: + items: + properties: + lastTransitionTime: + format: date-time + type: string + message: + type: string + observedGeneration: + format: int64 + type: integer + reason: + type: string + severity: + type: string + status: + type: string + type: + type: string + required: + - lastTransitionTime + - status + - type + type: object + type: array + observedGeneration: + format: int64 + type: integer + pausedBackups: + items: + properties: + apiGroup: + type: string + kind: + type: string + name: + type: string + namespace: + type: string + required: + - name + type: object + type: array + phase: + enum: + - Pending + - Progressing + - Successful + - WaitingForApproval + - Failed + - Approved + - Denied + - Skipped + type: string + type: object + type: object + served: true + storage: true + subresources: + status: {} diff --git a/vendor/kubedb.dev/apimachinery/crds/ops.kubedb.com_druidopsrequests.yaml b/vendor/kubedb.dev/apimachinery/crds/ops.kubedb.com_druidopsrequests.yaml index 0cff6f1dd..e76cea9f6 100644 --- a/vendor/kubedb.dev/apimachinery/crds/ops.kubedb.com_druidopsrequests.yaml +++ b/vendor/kubedb.dev/apimachinery/crds/ops.kubedb.com_druidopsrequests.yaml @@ -9,7 +9,7 @@ spec: group: ops.kubedb.com names: categories: - - datastore + - ops - kubedb - appscode kind: DruidOpsRequest diff --git a/vendor/kubedb.dev/apimachinery/crds/ops.kubedb.com_elasticsearchopsrequests.yaml b/vendor/kubedb.dev/apimachinery/crds/ops.kubedb.com_elasticsearchopsrequests.yaml index 21cef5a54..3362d5ad9 100644 --- a/vendor/kubedb.dev/apimachinery/crds/ops.kubedb.com_elasticsearchopsrequests.yaml +++ b/vendor/kubedb.dev/apimachinery/crds/ops.kubedb.com_elasticsearchopsrequests.yaml @@ -9,7 +9,7 @@ spec: group: ops.kubedb.com names: categories: - - datastore + - ops - kubedb - appscode kind: ElasticsearchOpsRequest diff --git a/vendor/kubedb.dev/apimachinery/crds/ops.kubedb.com_etcdopsrequests.yaml b/vendor/kubedb.dev/apimachinery/crds/ops.kubedb.com_etcdopsrequests.yaml index 11a55d8f6..bc3674330 100644 --- a/vendor/kubedb.dev/apimachinery/crds/ops.kubedb.com_etcdopsrequests.yaml +++ b/vendor/kubedb.dev/apimachinery/crds/ops.kubedb.com_etcdopsrequests.yaml @@ -9,7 +9,7 @@ spec: group: ops.kubedb.com names: categories: - - datastore + - ops - kubedb - appscode kind: EtcdOpsRequest diff --git a/vendor/kubedb.dev/apimachinery/crds/ops.kubedb.com_ferretdbopsrequests.yaml b/vendor/kubedb.dev/apimachinery/crds/ops.kubedb.com_ferretdbopsrequests.yaml index 9da67451d..9700da76f 100644 --- a/vendor/kubedb.dev/apimachinery/crds/ops.kubedb.com_ferretdbopsrequests.yaml +++ b/vendor/kubedb.dev/apimachinery/crds/ops.kubedb.com_ferretdbopsrequests.yaml @@ -9,7 +9,7 @@ spec: group: ops.kubedb.com names: categories: - - datastore + - ops - kubedb - appscode kind: FerretDBOpsRequest diff --git 
a/vendor/kubedb.dev/apimachinery/crds/ops.kubedb.com_kafkaopsrequests.yaml b/vendor/kubedb.dev/apimachinery/crds/ops.kubedb.com_kafkaopsrequests.yaml index 9f0472cad..7e87bf6bd 100644 --- a/vendor/kubedb.dev/apimachinery/crds/ops.kubedb.com_kafkaopsrequests.yaml +++ b/vendor/kubedb.dev/apimachinery/crds/ops.kubedb.com_kafkaopsrequests.yaml @@ -9,7 +9,7 @@ spec: group: ops.kubedb.com names: categories: - - datastore + - ops - kubedb - appscode kind: KafkaOpsRequest diff --git a/vendor/kubedb.dev/apimachinery/crds/ops.kubedb.com_mariadbopsrequests.yaml b/vendor/kubedb.dev/apimachinery/crds/ops.kubedb.com_mariadbopsrequests.yaml index 61ae12d24..1e5fe988d 100644 --- a/vendor/kubedb.dev/apimachinery/crds/ops.kubedb.com_mariadbopsrequests.yaml +++ b/vendor/kubedb.dev/apimachinery/crds/ops.kubedb.com_mariadbopsrequests.yaml @@ -9,7 +9,7 @@ spec: group: ops.kubedb.com names: categories: - - datastore + - ops - kubedb - appscode kind: MariaDBOpsRequest diff --git a/vendor/kubedb.dev/apimachinery/crds/ops.kubedb.com_memcachedopsrequests.yaml b/vendor/kubedb.dev/apimachinery/crds/ops.kubedb.com_memcachedopsrequests.yaml index 6464037d7..05d12550a 100644 --- a/vendor/kubedb.dev/apimachinery/crds/ops.kubedb.com_memcachedopsrequests.yaml +++ b/vendor/kubedb.dev/apimachinery/crds/ops.kubedb.com_memcachedopsrequests.yaml @@ -9,7 +9,7 @@ spec: group: ops.kubedb.com names: categories: - - datastore + - ops - kubedb - appscode kind: MemcachedOpsRequest diff --git a/vendor/kubedb.dev/apimachinery/crds/ops.kubedb.com_mongodbopsrequests.yaml b/vendor/kubedb.dev/apimachinery/crds/ops.kubedb.com_mongodbopsrequests.yaml index 6bc73778e..7ea91bca3 100644 --- a/vendor/kubedb.dev/apimachinery/crds/ops.kubedb.com_mongodbopsrequests.yaml +++ b/vendor/kubedb.dev/apimachinery/crds/ops.kubedb.com_mongodbopsrequests.yaml @@ -9,7 +9,7 @@ spec: group: ops.kubedb.com names: categories: - - datastore + - ops - kubedb - appscode kind: MongoDBOpsRequest diff --git a/vendor/kubedb.dev/apimachinery/crds/ops.kubedb.com_mssqlserveropsrequests.yaml b/vendor/kubedb.dev/apimachinery/crds/ops.kubedb.com_mssqlserveropsrequests.yaml new file mode 100644 index 000000000..f7557c266 --- /dev/null +++ b/vendor/kubedb.dev/apimachinery/crds/ops.kubedb.com_mssqlserveropsrequests.yaml @@ -0,0 +1,131 @@ +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + creationTimestamp: null + labels: + app.kubernetes.io/name: kubedb + name: mssqlserveropsrequests.ops.kubedb.com +spec: + group: ops.kubedb.com + names: + categories: + - ops + - kubedb + - appscode + kind: MSSQLServerOpsRequest + listKind: MSSQLServerOpsRequestList + plural: mssqlserveropsrequests + shortNames: + - msops + singular: mssqlserveropsrequest + scope: Namespaced + versions: + - additionalPrinterColumns: + - jsonPath: .spec.type + name: Type + type: string + - jsonPath: .status.phase + name: Status + type: string + - jsonPath: .metadata.creationTimestamp + name: Age + type: date + name: v1alpha1 + schema: + openAPIV3Schema: + properties: + apiVersion: + type: string + kind: + type: string + metadata: + type: object + spec: + properties: + apply: + default: IfReady + enum: + - IfReady + - Always + type: string + databaseRef: + properties: + name: + default: "" + type: string + type: object + x-kubernetes-map-type: atomic + restart: + type: object + timeout: + type: string + type: + enum: + - Restart + type: string + required: + - databaseRef + - type + type: object + status: + properties: + conditions: + items: + properties: + lastTransitionTime: + format: 
date-time + type: string + message: + type: string + observedGeneration: + format: int64 + type: integer + reason: + type: string + severity: + type: string + status: + type: string + type: + type: string + required: + - lastTransitionTime + - status + - type + type: object + type: array + observedGeneration: + format: int64 + type: integer + pausedBackups: + items: + properties: + apiGroup: + type: string + kind: + type: string + name: + type: string + namespace: + type: string + required: + - name + type: object + type: array + phase: + enum: + - Pending + - Progressing + - Successful + - WaitingForApproval + - Failed + - Approved + - Denied + - Skipped + type: string + type: object + type: object + served: true + storage: true + subresources: + status: {} diff --git a/vendor/kubedb.dev/apimachinery/crds/ops.kubedb.com_mysqlopsrequests.yaml b/vendor/kubedb.dev/apimachinery/crds/ops.kubedb.com_mysqlopsrequests.yaml index 52dac4a88..7c60d23ce 100644 --- a/vendor/kubedb.dev/apimachinery/crds/ops.kubedb.com_mysqlopsrequests.yaml +++ b/vendor/kubedb.dev/apimachinery/crds/ops.kubedb.com_mysqlopsrequests.yaml @@ -9,7 +9,7 @@ spec: group: ops.kubedb.com names: categories: - - datastore + - ops - kubedb - appscode kind: MySQLOpsRequest diff --git a/vendor/kubedb.dev/apimachinery/crds/ops.kubedb.com_perconaxtradbopsrequests.yaml b/vendor/kubedb.dev/apimachinery/crds/ops.kubedb.com_perconaxtradbopsrequests.yaml index 19df2ea26..d2d78cec7 100644 --- a/vendor/kubedb.dev/apimachinery/crds/ops.kubedb.com_perconaxtradbopsrequests.yaml +++ b/vendor/kubedb.dev/apimachinery/crds/ops.kubedb.com_perconaxtradbopsrequests.yaml @@ -9,7 +9,7 @@ spec: group: ops.kubedb.com names: categories: - - datastore + - ops - kubedb - appscode kind: PerconaXtraDBOpsRequest diff --git a/vendor/kubedb.dev/apimachinery/crds/ops.kubedb.com_pgbounceropsrequests.yaml b/vendor/kubedb.dev/apimachinery/crds/ops.kubedb.com_pgbounceropsrequests.yaml index 58173bda7..818e86fbe 100644 --- a/vendor/kubedb.dev/apimachinery/crds/ops.kubedb.com_pgbounceropsrequests.yaml +++ b/vendor/kubedb.dev/apimachinery/crds/ops.kubedb.com_pgbounceropsrequests.yaml @@ -9,7 +9,7 @@ spec: group: ops.kubedb.com names: categories: - - datastore + - ops - kubedb - appscode kind: PgBouncerOpsRequest diff --git a/vendor/kubedb.dev/apimachinery/crds/ops.kubedb.com_pgpoolopsrequests.yaml b/vendor/kubedb.dev/apimachinery/crds/ops.kubedb.com_pgpoolopsrequests.yaml index b45bd7125..66995b94b 100644 --- a/vendor/kubedb.dev/apimachinery/crds/ops.kubedb.com_pgpoolopsrequests.yaml +++ b/vendor/kubedb.dev/apimachinery/crds/ops.kubedb.com_pgpoolopsrequests.yaml @@ -9,7 +9,7 @@ spec: group: ops.kubedb.com names: categories: - - datastore + - ops - kubedb - appscode kind: PgpoolOpsRequest diff --git a/vendor/kubedb.dev/apimachinery/crds/ops.kubedb.com_postgresopsrequests.yaml b/vendor/kubedb.dev/apimachinery/crds/ops.kubedb.com_postgresopsrequests.yaml index 672be275d..6530c7814 100644 --- a/vendor/kubedb.dev/apimachinery/crds/ops.kubedb.com_postgresopsrequests.yaml +++ b/vendor/kubedb.dev/apimachinery/crds/ops.kubedb.com_postgresopsrequests.yaml @@ -9,7 +9,7 @@ spec: group: ops.kubedb.com names: categories: - - datastore + - ops - kubedb - appscode kind: PostgresOpsRequest diff --git a/vendor/kubedb.dev/apimachinery/crds/ops.kubedb.com_proxysqlopsrequests.yaml b/vendor/kubedb.dev/apimachinery/crds/ops.kubedb.com_proxysqlopsrequests.yaml index 399c1f7eb..ad2d959bd 100644 --- a/vendor/kubedb.dev/apimachinery/crds/ops.kubedb.com_proxysqlopsrequests.yaml +++ 
b/vendor/kubedb.dev/apimachinery/crds/ops.kubedb.com_proxysqlopsrequests.yaml @@ -9,7 +9,7 @@ spec: group: ops.kubedb.com names: categories: - - datastore + - ops - kubedb - appscode kind: ProxySQLOpsRequest diff --git a/vendor/kubedb.dev/apimachinery/crds/ops.kubedb.com_rabbitmqopsrequests.yaml b/vendor/kubedb.dev/apimachinery/crds/ops.kubedb.com_rabbitmqopsrequests.yaml index b5be8c886..041c5c0ef 100644 --- a/vendor/kubedb.dev/apimachinery/crds/ops.kubedb.com_rabbitmqopsrequests.yaml +++ b/vendor/kubedb.dev/apimachinery/crds/ops.kubedb.com_rabbitmqopsrequests.yaml @@ -9,7 +9,7 @@ spec: group: ops.kubedb.com names: categories: - - datastore + - ops - kubedb - appscode kind: RabbitMQOpsRequest diff --git a/vendor/kubedb.dev/apimachinery/crds/ops.kubedb.com_redisopsrequests.yaml b/vendor/kubedb.dev/apimachinery/crds/ops.kubedb.com_redisopsrequests.yaml index 8a5451d60..4e9f853e9 100644 --- a/vendor/kubedb.dev/apimachinery/crds/ops.kubedb.com_redisopsrequests.yaml +++ b/vendor/kubedb.dev/apimachinery/crds/ops.kubedb.com_redisopsrequests.yaml @@ -9,7 +9,7 @@ spec: group: ops.kubedb.com names: categories: - - datastore + - ops - kubedb - appscode kind: RedisOpsRequest diff --git a/vendor/kubedb.dev/apimachinery/crds/ops.kubedb.com_redissentinelopsrequests.yaml b/vendor/kubedb.dev/apimachinery/crds/ops.kubedb.com_redissentinelopsrequests.yaml index 0b92921f6..982a0aace 100644 --- a/vendor/kubedb.dev/apimachinery/crds/ops.kubedb.com_redissentinelopsrequests.yaml +++ b/vendor/kubedb.dev/apimachinery/crds/ops.kubedb.com_redissentinelopsrequests.yaml @@ -9,7 +9,7 @@ spec: group: ops.kubedb.com names: categories: - - datastore + - ops - kubedb - appscode kind: RedisSentinelOpsRequest diff --git a/vendor/kubedb.dev/apimachinery/crds/ops.kubedb.com_singlestoreopsrequests.yaml b/vendor/kubedb.dev/apimachinery/crds/ops.kubedb.com_singlestoreopsrequests.yaml index 197abc6c0..58964896f 100644 --- a/vendor/kubedb.dev/apimachinery/crds/ops.kubedb.com_singlestoreopsrequests.yaml +++ b/vendor/kubedb.dev/apimachinery/crds/ops.kubedb.com_singlestoreopsrequests.yaml @@ -9,7 +9,7 @@ spec: group: ops.kubedb.com names: categories: - - datastore + - ops - kubedb - appscode kind: SinglestoreOpsRequest diff --git a/vendor/kubedb.dev/apimachinery/crds/ops.kubedb.com_solropsrequests.yaml b/vendor/kubedb.dev/apimachinery/crds/ops.kubedb.com_solropsrequests.yaml index daa371cc5..aa82131f8 100644 --- a/vendor/kubedb.dev/apimachinery/crds/ops.kubedb.com_solropsrequests.yaml +++ b/vendor/kubedb.dev/apimachinery/crds/ops.kubedb.com_solropsrequests.yaml @@ -9,7 +9,7 @@ spec: group: ops.kubedb.com names: categories: - - datastore + - ops - kubedb - appscode kind: SolrOpsRequest diff --git a/vendor/kubedb.dev/apimachinery/crds/ops.kubedb.com_zookeeperopsrequests.yaml b/vendor/kubedb.dev/apimachinery/crds/ops.kubedb.com_zookeeperopsrequests.yaml new file mode 100644 index 000000000..2bfe9e6b5 --- /dev/null +++ b/vendor/kubedb.dev/apimachinery/crds/ops.kubedb.com_zookeeperopsrequests.yaml @@ -0,0 +1,131 @@ +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + creationTimestamp: null + labels: + app.kubernetes.io/name: kubedb + name: zookeeperopsrequests.ops.kubedb.com +spec: + group: ops.kubedb.com + names: + categories: + - ops + - kubedb + - appscode + kind: ZooKeeperOpsRequest + listKind: ZooKeeperOpsRequestList + plural: zookeeperopsrequests + shortNames: + - zkops + singular: zookeeperopsrequest + scope: Namespaced + versions: + - additionalPrinterColumns: + - jsonPath: .spec.type + name: Type + 
type: string + - jsonPath: .status.phase + name: Status + type: string + - jsonPath: .metadata.creationTimestamp + name: Age + type: date + name: v1alpha1 + schema: + openAPIV3Schema: + properties: + apiVersion: + type: string + kind: + type: string + metadata: + type: object + spec: + properties: + apply: + default: IfReady + enum: + - IfReady + - Always + type: string + databaseRef: + properties: + name: + default: "" + type: string + type: object + x-kubernetes-map-type: atomic + restart: + type: object + timeout: + type: string + type: + enum: + - Restart + type: string + required: + - databaseRef + - type + type: object + status: + properties: + conditions: + items: + properties: + lastTransitionTime: + format: date-time + type: string + message: + type: string + observedGeneration: + format: int64 + type: integer + reason: + type: string + severity: + type: string + status: + type: string + type: + type: string + required: + - lastTransitionTime + - status + - type + type: object + type: array + observedGeneration: + format: int64 + type: integer + pausedBackups: + items: + properties: + apiGroup: + type: string + kind: + type: string + name: + type: string + namespace: + type: string + required: + - name + type: object + type: array + phase: + enum: + - Pending + - Progressing + - Successful + - WaitingForApproval + - Failed + - Approved + - Denied + - Skipped + type: string + type: object + type: object + served: true + storage: true + subresources: + status: {} diff --git a/vendor/kubedb.dev/apimachinery/crds/postgres.kubedb.com_publishers.yaml b/vendor/kubedb.dev/apimachinery/crds/postgres.kubedb.com_publishers.yaml index 2884fb921..772c36114 100644 --- a/vendor/kubedb.dev/apimachinery/crds/postgres.kubedb.com_publishers.yaml +++ b/vendor/kubedb.dev/apimachinery/crds/postgres.kubedb.com_publishers.yaml @@ -9,10 +9,9 @@ spec: group: postgres.kubedb.com names: categories: - - datastore + - pgstore - kubedb - appscode - - all kind: Publisher listKind: PublisherList plural: publishers diff --git a/vendor/kubedb.dev/apimachinery/crds/postgres.kubedb.com_subscribers.yaml b/vendor/kubedb.dev/apimachinery/crds/postgres.kubedb.com_subscribers.yaml index 8c6251737..9f6295d27 100644 --- a/vendor/kubedb.dev/apimachinery/crds/postgres.kubedb.com_subscribers.yaml +++ b/vendor/kubedb.dev/apimachinery/crds/postgres.kubedb.com_subscribers.yaml @@ -9,10 +9,9 @@ spec: group: postgres.kubedb.com names: categories: - - datastore + - pgstore - kubedb - appscode - - all kind: Subscriber listKind: SubscriberList plural: subscribers diff --git a/vendor/kubedb.dev/apimachinery/crds/schema.kubedb.com_mariadbdatabases.yaml b/vendor/kubedb.dev/apimachinery/crds/schema.kubedb.com_mariadbdatabases.yaml index 817b0f576..87f49f83e 100644 --- a/vendor/kubedb.dev/apimachinery/crds/schema.kubedb.com_mariadbdatabases.yaml +++ b/vendor/kubedb.dev/apimachinery/crds/schema.kubedb.com_mariadbdatabases.yaml @@ -9,10 +9,9 @@ spec: group: schema.kubedb.com names: categories: - - datastore + - schema - kubedb - appscode - - all kind: MariaDBDatabase listKind: MariaDBDatabaseList plural: mariadbdatabases diff --git a/vendor/kubedb.dev/apimachinery/crds/schema.kubedb.com_mongodbdatabases.yaml b/vendor/kubedb.dev/apimachinery/crds/schema.kubedb.com_mongodbdatabases.yaml index 47e44d149..740f9c1ab 100644 --- a/vendor/kubedb.dev/apimachinery/crds/schema.kubedb.com_mongodbdatabases.yaml +++ b/vendor/kubedb.dev/apimachinery/crds/schema.kubedb.com_mongodbdatabases.yaml @@ -9,10 +9,9 @@ spec: group: schema.kubedb.com names: 
     categories:
-    - datastore
+    - schema
     - kubedb
     - appscode
-    - all
     kind: MongoDBDatabase
     listKind: MongoDBDatabaseList
     plural: mongodbdatabases
diff --git a/vendor/kubedb.dev/apimachinery/crds/schema.kubedb.com_mysqldatabases.yaml b/vendor/kubedb.dev/apimachinery/crds/schema.kubedb.com_mysqldatabases.yaml
index 7e17ec37c..b96e38171 100644
--- a/vendor/kubedb.dev/apimachinery/crds/schema.kubedb.com_mysqldatabases.yaml
+++ b/vendor/kubedb.dev/apimachinery/crds/schema.kubedb.com_mysqldatabases.yaml
@@ -9,10 +9,9 @@ spec:
   group: schema.kubedb.com
   names:
     categories:
-    - datastore
+    - schema
     - kubedb
     - appscode
-    - all
     kind: MySQLDatabase
     listKind: MySQLDatabaseList
     plural: mysqldatabases
diff --git a/vendor/kubedb.dev/apimachinery/crds/schema.kubedb.com_postgresdatabases.yaml b/vendor/kubedb.dev/apimachinery/crds/schema.kubedb.com_postgresdatabases.yaml
index 2d00ccd5c..227b9bdc5 100644
--- a/vendor/kubedb.dev/apimachinery/crds/schema.kubedb.com_postgresdatabases.yaml
+++ b/vendor/kubedb.dev/apimachinery/crds/schema.kubedb.com_postgresdatabases.yaml
@@ -9,10 +9,9 @@ spec:
   group: schema.kubedb.com
   names:
     categories:
-    - datastore
+    - schema
     - kubedb
     - appscode
-    - all
     kind: PostgresDatabase
     listKind: PostgresDatabaseList
     plural: postgresdatabases
diff --git a/vendor/modules.txt b/vendor/modules.txt
index 2d4618fd5..8a89eeef5 100644
--- a/vendor/modules.txt
+++ b/vendor/modules.txt
@@ -729,6 +729,12 @@ github.com/goccy/go-json/internal/encoder/vm_color_indent
 github.com/goccy/go-json/internal/encoder/vm_indent
 github.com/goccy/go-json/internal/errors
 github.com/goccy/go-json/internal/runtime
+# github.com/gocql/gocql v1.6.0
+## explicit; go 1.13
+github.com/gocql/gocql
+github.com/gocql/gocql/internal/lru
+github.com/gocql/gocql/internal/murmur
+github.com/gocql/gocql/internal/streams
 # github.com/gogo/protobuf v1.3.2
 ## explicit; go 1.15
 github.com/gogo/protobuf/proto
@@ -798,6 +804,9 @@ github.com/grafadruid/go-druid/builder/toinclude
 github.com/grafadruid/go-druid/builder/topnmetric
 github.com/grafadruid/go-druid/builder/types
 github.com/grafadruid/go-druid/builder/virtualcolumn
+# github.com/hailocab/go-hostpool v0.0.0-20160125115350-e80d13ce29ed
+## explicit
+github.com/hailocab/go-hostpool
 # github.com/hashicorp/errwrap v1.1.0
 ## explicit
 github.com/hashicorp/errwrap
@@ -1552,7 +1561,7 @@ kmodules.xyz/offshoot-api/api/v1
 kmodules.xyz/offshoot-api/api/v1/conversion
 kmodules.xyz/offshoot-api/api/v2
 kmodules.xyz/offshoot-api/util
-# kubedb.dev/apimachinery v0.47.0-rc.2
+# kubedb.dev/apimachinery v0.47.0-rc.2.0.20240814122107-b1472e3500d3
 ## explicit; go 1.22.1
 kubedb.dev/apimachinery/apis
 kubedb.dev/apimachinery/apis/catalog