diff --git a/CHANGELOG.md b/CHANGELOG.md index ac07120891..5608f011f9 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -2,6 +2,8 @@ ## master / unreleased +* [CHANGE] Cortex chunks storage has been deprecated and it's now in maintenance mode: all Cortex users are encouraged to migrate to the blocks storage. No new features will be added to the chunks storage. The default Cortex configuration still runs the chunks engine; please check out the [blocks storage doc](https://cortexmetrics.io/docs/blocks-storage/) on how to configure Cortex to run with the blocks storage. #4268 +* [CHANGE] The example Kubernetes manifests (stored at `k8s/`) have been removed due to a lack of proper support and maintenance. #4268 * [CHANGE] Querier / ruler: deprecated `-store.query-chunk-limit` CLI flag (and its respective YAML config option `max_chunks_per_query`) in favour of `-querier.max-fetched-chunks-per-query` (and its respective YAML config option `max_fetched_chunks_per_query`). The new limit specifies the maximum number of chunks that can be fetched in a single query from ingesters and long-term storage: the total number of actual fetched chunks could be 2x the limit, being independently applied when querying ingesters and long-term storage. #4125 * [CHANGE] Alertmanager: allowed to configure the experimental receivers firewall on a per-tenant basis. The following CLI flags (and their respective YAML config options) have been changed and moved to the limits config section: #4143 - `-alertmanager.receivers-firewall.block.cidr-networks` renamed to `-alertmanager.receivers-firewall-block-cidr-networks` diff --git a/Makefile b/Makefile index e84b27c856..8ece245d60 100644 --- a/Makefile +++ b/Makefile @@ -213,10 +213,6 @@ lint: ./pkg/querier/... \ ./pkg/ruler/... - # Validate Kubernetes spec files. Requires: - # https://kubeval.instrumenta.dev - kubeval ./k8s/* - test: go test -tags netgo -timeout 30m -race -count 1 ./... diff --git a/README.md b/README.md index 3a95820126..151a8218cf 100644 --- a/README.md +++ b/README.md @@ -14,7 +14,7 @@ Cortex provides horizontally scalable, highly available, multi-tenant, long term - **Highly available:** When run in a cluster, Cortex can replicate data between machines. This allows you to survive machine failure without gaps in your graphs. - **Multi-tenant:** Cortex can isolate data and queries from multiple different independent Prometheus sources in a single cluster, allowing untrusted parties to share the same cluster. -- **Long term storage:** Cortex supports Amazon DynamoDB, Google Bigtable, Cassandra, S3, GCS and Microsoft Azure for long term storage of metric data. This allows you to durably store data for longer than the lifetime of any single machine, and use this data for long term capacity planning. +- **Long term storage:** Cortex supports S3, GCS, Swift and Microsoft Azure for long term storage of metric data. This allows you to durably store data for longer than the lifetime of any single machine, and use this data for long term capacity planning. Cortex is a [CNCF](https://cncf.io) incubation project used in several production systems including [Weave Cloud](https://cloud.weave.works) and [Grafana Cloud](https://grafana.com/cloud). Cortex is primarily used as a [remote write](https://prometheus.io/docs/operating/configuration/#remote_write) destination for Prometheus, with a Prometheus-compatible query API. @@ -25,9 +25,8 @@ Read the [getting started guide](https://cortexmetrics.io/docs/getting-started) project. 
Before deploying Cortex with a permanent storage backend you should read: 1. [An overview of Cortex's architecture](https://cortexmetrics.io/docs/architecture/) -1. [A guide to running Cortex](https://cortexmetrics.io/docs/chunks-storage/running-chunks-storage-in-production/) -1. [Information regarding configuring Cortex](https://cortexmetrics.io/docs/configuration/arguments/) -1. [Steps to run Cortex with Cassandra](https://cortexmetrics.io/docs/chunks-storage/running-chunks-storage-with-cassandra/) +1. [Getting started with Cortex](https://cortexmetrics.io/docs/getting-started/) +1. [Information regarding configuring Cortex](https://cortexmetrics.io/docs/configuration/) For a guide to contributing to Cortex, see the [contributor guidelines](https://cortexmetrics.io/docs/contributing/). diff --git a/build-image/Dockerfile b/build-image/Dockerfile index 7fe3822d4b..263996ff60 100644 --- a/build-image/Dockerfile +++ b/build-image/Dockerfile @@ -40,11 +40,6 @@ RUN GO111MODULE=on go get \ github.com/campoy/embedmd@v1.0.0 \ && rm -rf /go/pkg /go/src /root/.cache -# Cannot get it to run together in above go get. -RUN GO111MODULE=on go get \ - github.com/instrumenta/kubeval@v0.16.1 \ - && rm -rf /go/pkg /go/src /root/.cache - ENV NODE_PATH=/usr/lib/node_modules COPY build.sh / ENV GOCACHE=/go/cache diff --git a/docs/_index.md b/docs/_index.md index 368b2500a0..b97ef3a463 100644 --- a/docs/_index.md +++ b/docs/_index.md @@ -21,7 +21,7 @@ Cortex provides horizontally scalable, highly available, multi-tenant, long term - **Highly available:** When run in a cluster, Cortex can replicate data between machines. This allows you to survive machine failure without gaps in your graphs. - **Multi-tenant:** Cortex can isolate data and queries from multiple different independent Prometheus sources in a single cluster, allowing untrusted parties to share the same cluster. -- **Long term storage:** Cortex supports Amazon DynamoDB, Google Bigtable, Cassandra, S3 and GCS for long term storage of metric data. This allows you to durably store data for longer than the lifetime of any single machine, and use this data for long term capacity planning. +- **Long term storage:** Cortex supports S3, GCS, Swift and Microsoft Azure for long term storage of metric data. This allows you to durably store data for longer than the lifetime of any single machine, and use this data for long term capacity planning. Cortex is a [CNCF](https://cncf.io) incubation project used in several production systems including [Weave Cloud](https://cloud.weave.works) and [Grafana Cloud](https://grafana.com/cloud). Cortex is primarily used as a [remote write](https://prometheus.io/docs/operating/configuration/#remote_write) destination for Prometheus, exposing a Prometheus-compatible query API. @@ -33,7 +33,7 @@ project. Before deploying Cortex with a permanent storage backend you should read: 1. [An overview of Cortex's architecture](architecture.md) -1. [A guide to running Cortex chunks storage](chunks-storage/running-chunks-storage-in-production.md) +1. [Getting started with Cortex](getting-started/_index.md) 1. [Information regarding configuring Cortex](configuration/_index.md) For a guide to contributing to Cortex, see the [contributor guidelines](contributing/). 
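As a side note on the `-querier.max-fetched-chunks-per-query` change listed in the CHANGELOG above, here is a minimal sketch of how the renamed limit might be set in YAML (the value is purely illustrative):

```yaml
limits:
  # Caps the chunks fetched by a single query. Per the changelog entry, the
  # limit is applied independently to ingesters and long-term storage, so the
  # total number of fetched chunks can be up to 2x this value.
  max_fetched_chunks_per_query: 2000000
```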
diff --git a/docs/architecture.md b/docs/architecture.md index 0c68e89037..0bf04bb392 100644 --- a/docs/architecture.md +++ b/docs/architecture.md @@ -21,12 +21,12 @@ Incoming samples (writes from Prometheus) are handled by the [distributor](#dist Cortex currently supports two storage engines to store and query the time series: -- Chunks (default) +- Chunks (deprecated) - Blocks The two engines mostly share the same Cortex architecture, with a few differences outlined in the rest of the document. -### Chunks storage (default) +### Chunks storage (deprecated) The chunks storage stores each single time series into a separate object called _Chunk_. Each Chunk contains the samples for a given period (defaults to 12 hours). Chunks are then indexed by time range and labels, in order to provide a fast lookup across many millions of Chunks. diff --git a/docs/chunks-storage/_index.md b/docs/chunks-storage/_index.md index 4d2f8c0cc9..12b7fa965f 100644 --- a/docs/chunks-storage/_index.md +++ b/docs/chunks-storage/_index.md @@ -1,10 +1,12 @@ --- -title: "Chunks Storage" -linkTitle: "Chunks Storage" +title: "Chunks Storage (deprecated)" +linkTitle: "Chunks Storage (deprecated)" weight: 4 menu: --- +**Warning: the chunks storage is deprecated. You're encouraged to use the [blocks storage](../blocks-storage/_index.md).** + The chunks storage is a Cortex storage engine which stores each single time series into a separate object called _chunk_. Each chunk contains the samples for a given period (defaults to 12 hours). Chunks are then indexed by time range and labels, in order to provide a fast lookup across many millions of chunks. For this reason, the Cortex chunks storage requires two backend storages: a key-value store for the index and an object store for the chunks. The supported backends for the **index store** are: diff --git a/docs/chunks-storage/aws-tips.md b/docs/chunks-storage/aws-tips.md index cadaa0f16a..d9cf51f105 100644 --- a/docs/chunks-storage/aws-tips.md +++ b/docs/chunks-storage/aws-tips.md @@ -5,6 +5,8 @@ weight: 10 slug: aws-tips --- +**Warning: the chunks storage is deprecated. You're encouraged to use the [blocks storage](../blocks-storage/_index.md).** + This page shares some tips and things to take into consideration when running Cortex chunks storage on AWS. ## AWS Credentials diff --git a/docs/chunks-storage/caching.md b/docs/chunks-storage/caching.md index 9299b91efb..585769dfa8 100644 --- a/docs/chunks-storage/caching.md +++ b/docs/chunks-storage/caching.md @@ -5,6 +5,8 @@ weight: 4 slug: caching --- +**Warning: the chunks storage is deprecated. You're encouraged to use the [blocks storage](../blocks-storage/_index.md).** + Correctly configured caching is important for a production-ready Cortex cluster. Cortex has many opportunities for using caching to accelerate queries and reduce cost. Cortex can use a cache for: diff --git a/docs/chunks-storage/chunks-storage-getting-started.md b/docs/chunks-storage/chunks-storage-getting-started.md index a5a1bd5db2..3a5bf2ccc3 100644 --- a/docs/chunks-storage/chunks-storage-getting-started.md +++ b/docs/chunks-storage/chunks-storage-getting-started.md @@ -5,6 +5,8 @@ weight: 1 slug: getting-started-chunks-storage --- +**Warning: the chunks storage is deprecated. You're encouraged to use the [blocks storage](../blocks-storage/_index.md).** + Cortex can be run as a single binary or as multiple independent microservices. The single-binary mode is easier to deploy and is aimed mainly at users wanting to try out Cortex or develop on it.
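To illustrate the single-binary mode mentioned above, a hedged sketch of a minimal invocation (the config file name is hypothetical; `-target` defaults to `all`):

```sh
# Run every Cortex component in one process (single-binary mode).
./cortex -target=all -config.file=./single-process-config.yaml
```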
The microservices mode is intended for production usage, as it allows you to independently scale different services and isolate failures. diff --git a/docs/chunks-storage/ingesters-with-wal.md b/docs/chunks-storage/ingesters-with-wal.md index 975aa2c51b..fb964c87ef 100644 --- a/docs/chunks-storage/ingesters-with-wal.md +++ b/docs/chunks-storage/ingesters-with-wal.md @@ -5,6 +5,8 @@ weight: 5 slug: ingesters-with-wal --- +**Warning: the chunks storage is deprecated. You're encouraged to use the [blocks storage](../blocks-storage/_index.md).** + By default, ingesters running with the chunks storage store all their data in memory. If there is a crash, data could be lost. The Write-Ahead Log (WAL) helps fill this gap in reliability. To use the WAL, there are some changes that need to be made in the deployment. diff --git a/docs/chunks-storage/running-chunks-storage-in-production.md b/docs/chunks-storage/running-chunks-storage-in-production.md index 78e4c0d205..d67f963a0d 100644 --- a/docs/chunks-storage/running-chunks-storage-in-production.md +++ b/docs/chunks-storage/running-chunks-storage-in-production.md @@ -5,6 +5,8 @@ weight: 1 slug: running-chunks-storage-in-production --- +**Warning: the chunks storage is deprecated. You're encouraged to use the [blocks storage](../blocks-storage/_index.md).** + This document builds on the [getting started guide](../getting-started/_index.md) and specifies the steps needed to get Cortex [**chunks storage**](../chunks-storage/_index.md) into production. Ensure you have completed all the steps in the [getting started guide](../getting-started/_index.md) and read about [the Cortex architecture](../architecture.md) before you start this one. diff --git a/docs/chunks-storage/running-chunks-storage-with-cassandra.md b/docs/chunks-storage/running-chunks-storage-with-cassandra.md index 16164ce8cc..fc17c6f661 100644 --- a/docs/chunks-storage/running-chunks-storage-with-cassandra.md +++ b/docs/chunks-storage/running-chunks-storage-with-cassandra.md @@ -5,6 +5,8 @@ weight: 2 slug: running-chunks-storage-with-cassandra --- +**Warning: the chunks storage is deprecated. You're encouraged to use the [blocks storage](../blocks-storage/_index.md).** + This guide covers how to run a single local Cortex instance - with the [**chunks storage**](../chunks-storage/_index.md) engine - storing time series chunks and index in Cassandra. In this guide we're going to: diff --git a/docs/chunks-storage/schema-config.md b/docs/chunks-storage/schema-config.md index 69d144c393..d6389c72fc 100644 --- a/docs/chunks-storage/schema-config.md +++ b/docs/chunks-storage/schema-config.md @@ -5,6 +5,8 @@ weight: 2 slug: schema-configuration --- +**Warning: the chunks storage is deprecated. You're encouraged to use the [blocks storage](../blocks-storage/_index.md).** + Cortex chunks storage stores indexes and chunks in table-based data storages. When such a storage type is used, multiple tables are created over time: each table - also called a periodic table - contains the data for a specific time range. The table-based storage layout is configured through a configuration file called **schema config**.
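For illustration, a minimal schema config, mirroring the example shipped in the `k8s/schema-config.yaml` manifest removed by this change:

```yaml
configs:
  - from: "2020-01-01"       # date this schema takes effect
    schema: v9               # index schema version
    index:
      period: 168h           # create a new index table every 7 days
      prefix: cortex_weekly_ # table name prefix
    store: aws-dynamo        # index store backend
    object_store: s3         # chunks object store backend
```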
_The schema config is used only by the chunks storage, while it's **not** used by the [blocks storage](../blocks-storage/_index.md) engine._ diff --git a/docs/chunks-storage/table-manager.md b/docs/chunks-storage/table-manager.md index 407245a72b..93d0873e62 100644 --- a/docs/chunks-storage/table-manager.md +++ b/docs/chunks-storage/table-manager.md @@ -5,6 +5,8 @@ weight: 3 slug: table-manager --- +**Warning: the chunks storage is deprecated. You're encouraged to use the [blocks storage](../blocks-storage/_index.md).** + The table-manager is the Cortex service responsible for creating the [periodic tables](./schema-config.md) used to store index and chunks, and deleting them once their data time range exceeds the retention period (if retention is enabled). _For more information about the schema config and periodic tables, please refer to the [Schema config](./schema-config.md) documentation._ diff --git a/docs/configuration/arguments.md b/docs/configuration/arguments.md index ec2f882b36..6ea8f1baa9 100644 --- a/docs/configuration/arguments.md +++ b/docs/configuration/arguments.md @@ -20,7 +20,7 @@ Duration arguments should be specified with a unit like `5s` or `3h`. Valid time - `-querier.query-parallelism` - This refers to database queries against the store (e.g. Bigtable or DynamoDB). This is the max subqueries run in parallel per higher-level query. + This refers to database queries against the store when running the deprecated Cortex chunks storage (e.g. Bigtable or DynamoDB). This is the maximum number of subqueries run in parallel per higher-level query. - `-querier.timeout` diff --git a/docs/guides/deleting-series.md b/docs/guides/deleting-series.md index aabae998cd..cfb619d1b8 100644 --- a/docs/guides/deleting-series.md +++ b/docs/guides/deleting-series.md @@ -5,7 +5,7 @@ weight: 10 slug: deleting-series --- -_This feature is currently experimental and is only supported for Chunks storage._ +_This feature is currently experimental and is only supported for Chunks storage (deprecated)._ Cortex supports deletion of series using a [Prometheus-compatible API](https://prometheus.io/docs/prometheus/latest/querying/api/#delete-series). However, it does not support the [Prometheus Clean Tombstones](https://prometheus.io/docs/prometheus/latest/querying/api/#clean-tombstones) API because Cortex uses a different mechanism to manage deletions. diff --git a/docs/guides/encryption-at-rest.md b/docs/guides/encryption-at-rest.md index d75edaa27b..4c3613282f 100644 --- a/docs/guides/encryption-at-rest.md +++ b/docs/guides/encryption-at-rest.md @@ -38,7 +38,7 @@ sse: [kms_encryption_context: | default = ""] ``` -### Chunks storage +### Chunks storage (deprecated) The [chunks storage](../chunks-storage/_index.md) S3 server-side encryption can be configured similarly to the blocks storage, but **per-tenant overrides are not supported**. diff --git a/docs/guides/encryption-at-rest.template b/docs/guides/encryption-at-rest.template index 14f74fa44c..e67a31c26b 100644 --- a/docs/guides/encryption-at-rest.template +++ b/docs/guides/encryption-at-rest.template @@ -20,7 +20,7 @@ The [blocks storage](../blocks-storage/_index.md) S3 server-side encryption can {{ .S3SSEConfigBlock }} -### Chunks storage +### Chunks storage (deprecated) The [chunks storage](../chunks-storage/_index.md) S3 server-side encryption can be configured similarly to the blocks storage, but **per-tenant overrides are not supported**.
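As a sketch of the `sse` block referenced in the encryption-at-rest docs above (field names follow the snippet shown there; the key ARN is a placeholder and the exact nesting depends on the storage engine in use):

```yaml
sse:
  # Server-side encryption type; SSE-KMS delegates key management to AWS KMS.
  type: SSE-KMS
  kms_key_id: "arn:aws:kms:us-east-1:111122223333:key/example"  # placeholder
  kms_encryption_context: ""
```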
diff --git a/docs/guides/glossary.md b/docs/guides/glossary.md index a7062db2dd..b0aac0961c 100644 --- a/docs/guides/glossary.md +++ b/docs/guides/glossary.md @@ -11,10 +11,12 @@ The blocks storage is a Cortex storage engine based on Prometheus TSDB, which on For more information, please refer to the [Cortex blocks storage](../blocks-storage/_index.md) documentation. -### Chunks storage +### Chunks storage (deprecated) The chunks storage is a Cortex storage engine which requires both an index store (e.g. AWS DynamoDB, Google BigTable, Cassandra, ...) and an object store (e.g. AWS S3, Google GCS, ...) as backend storage. +The chunks storage is deprecated. You're encouraged to use the [blocks storage](#blocks-storage) instead. + ### Chunk A chunk is an object containing compressed timestamp-value pairs. diff --git a/docs/guides/grpc-storage-plugin.md b/docs/guides/grpc-storage-plugin.md index 9b2db909c9..e9e3d83a0c 100644 --- a/docs/guides/grpc-storage-plugin.md +++ b/docs/guides/grpc-storage-plugin.md @@ -5,7 +5,7 @@ weight: 10 slug: grpc-based-plugin --- -_This feature is currently experimental and is only supported for Chunks storage._ +_This feature is currently experimental and is only supported for Chunks storage (deprecated)._ Cortex chunks storage supports a **gRPC-based plugin system** to use alternative backends for the index and chunks store. A store plugin is a gRPC-based server which implements the methods required by the index and chunks store. The Cortex chunks storage schema is then configured to use the plugin as the backend system, and gRPC is used to communicate between Cortex and the plugin. diff --git a/docs/guides/ingesters-rolling-updates.md b/docs/guides/ingesters-rolling-updates.md index 5a981828fc..626c7b8c0f 100644 --- a/docs/guides/ingesters-rolling-updates.md +++ b/docs/guides/ingesters-rolling-updates.md @@ -26,7 +26,7 @@ The new ingester, which is expected to reuse the same disk of the leaving one, w _The blocks storage doesn't support the series [hand-over](#chunks-storage-with-wal-disabled-hand-over)._ -## Chunks storage +## Chunks storage (deprecated) The Cortex chunks storage optionally supports a write-ahead log (WAL). The rolling update procedure for a Cortex cluster running the chunks storage depends on whether the WAL is enabled or not.
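To make the WAL-enabled case above concrete, a hedged sketch of the ingester flags involved (see the ingesters-with-wal guide for the authoritative list; `/wal_data` is an example path backed by a persistent volume):

```sh
# Enable the chunks-storage WAL and periodic checkpoints on the ingester.
-ingester.wal-enabled=true
-ingester.wal-dir=/wal_data
-ingester.checkpoint-enabled=true
```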
diff --git a/k8s/alertmanager-dep.yaml b/k8s/alertmanager-dep.yaml deleted file mode 100644 index ff1e7578e9..0000000000 --- a/k8s/alertmanager-dep.yaml +++ /dev/null @@ -1,27 +0,0 @@ ---- -apiVersion: apps/v1 -kind: Deployment -metadata: - name: alertmanager -spec: - replicas: 1 - selector: - matchLabels: - name: alertmanager - template: - metadata: - labels: - name: alertmanager - spec: - containers: - - name: alertmanager - image: quay.io/cortexproject/cortex:v1.9.0 - imagePullPolicy: IfNotPresent - args: - - -target=alertmanager - - -log.level=debug - - -server.http-listen-port=80 - - -alertmanager.configs.url=http://configs.default.svc.cluster.local:80 - - -alertmanager.web.external-url=/alertmanager - ports: - - containerPort: 80 diff --git a/k8s/alertmanager-svc.yaml b/k8s/alertmanager-svc.yaml deleted file mode 100644 index 989feb218e..0000000000 --- a/k8s/alertmanager-svc.yaml +++ /dev/null @@ -1,10 +0,0 @@ ---- -apiVersion: v1 -kind: Service -metadata: - name: alertmanager -spec: - ports: - - port: 80 - selector: - name: alertmanager diff --git a/k8s/configs-db-dep.yaml b/k8s/configs-db-dep.yaml deleted file mode 100644 index 2ad52335cf..0000000000 --- a/k8s/configs-db-dep.yaml +++ /dev/null @@ -1,29 +0,0 @@ ---- -apiVersion: apps/v1 -kind: Deployment -metadata: - name: configs-db - namespace: default -spec: - replicas: 1 - selector: - matchLabels: - name: configs-db - template: - metadata: - labels: - name: configs-db - annotations: - prometheus.io.scrape: "false" - spec: - containers: - - name: configs-db - image: postgres:9.6 - imagePullPolicy: IfNotPresent - env: - - name: POSTGRES_DB - value: configs - - name: POSTGRES_HOST_AUTH_METHOD - value: trust - ports: - - containerPort: 5432 diff --git a/k8s/configs-db-svc.yaml b/k8s/configs-db-svc.yaml deleted file mode 100644 index d9c8f72a2c..0000000000 --- a/k8s/configs-db-svc.yaml +++ /dev/null @@ -1,11 +0,0 @@ ---- -apiVersion: v1 -kind: Service -metadata: - name: configs-db - namespace: default -spec: - ports: - - port: 5432 - selector: - name: configs-db diff --git a/k8s/configs-dep.yaml b/k8s/configs-dep.yaml deleted file mode 100644 index 0d2d73f40c..0000000000 --- a/k8s/configs-dep.yaml +++ /dev/null @@ -1,26 +0,0 @@ ---- -apiVersion: apps/v1 -kind: Deployment -metadata: - name: configs -spec: - replicas: 1 - selector: - matchLabels: - name: configs - template: - metadata: - labels: - name: configs - spec: - containers: - - name: configs - image: quay.io/cortexproject/cortex:v1.9.0 - imagePullPolicy: IfNotPresent - args: - - -target=configs - - -server.http-listen-port=80 - - -configs.database.uri=postgres://postgres@configs-db.default.svc.cluster.local/configs?sslmode=disable - - -configs.database.migrations-dir=/migrations - ports: - - containerPort: 80 diff --git a/k8s/configs-svc.yaml b/k8s/configs-svc.yaml deleted file mode 100644 index 554351bfaf..0000000000 --- a/k8s/configs-svc.yaml +++ /dev/null @@ -1,10 +0,0 @@ ---- -apiVersion: v1 -kind: Service -metadata: - name: configs -spec: - ports: - - port: 80 - selector: - name: configs diff --git a/k8s/consul-dep.yaml b/k8s/consul-dep.yaml deleted file mode 100644 index 4f3cea6d61..0000000000 --- a/k8s/consul-dep.yaml +++ /dev/null @@ -1,37 +0,0 @@ ---- -apiVersion: apps/v1 -kind: Deployment -metadata: - name: consul -spec: - replicas: 1 - selector: - matchLabels: - name: consul - template: - metadata: - labels: - name: consul - spec: - containers: - - name: consul - image: consul:0.7.1 - imagePullPolicy: IfNotPresent - args: - - agent - - -ui - - -server - - 
-client=0.0.0.0 - - -bootstrap - env: - - name: CHECKPOINT_DISABLE - value: "1" - ports: - - name: server-noscrape - containerPort: 8300 - - name: serf-noscrape - containerPort: 8301 - - name: client-noscrape - containerPort: 8400 - - name: http-noscrape - containerPort: 8500 diff --git a/k8s/consul-svc.yaml b/k8s/consul-svc.yaml deleted file mode 100644 index e94666fd13..0000000000 --- a/k8s/consul-svc.yaml +++ /dev/null @@ -1,11 +0,0 @@ ---- -apiVersion: v1 -kind: Service -metadata: - name: consul -spec: - ports: - - name: http - port: 8500 - selector: - name: consul diff --git a/k8s/distributor-dep.yaml b/k8s/distributor-dep.yaml deleted file mode 100644 index aa1103c0ee..0000000000 --- a/k8s/distributor-dep.yaml +++ /dev/null @@ -1,27 +0,0 @@ ---- -apiVersion: apps/v1 -kind: Deployment -metadata: - name: distributor -spec: - replicas: 1 - selector: - matchLabels: - name: distributor - template: - metadata: - labels: - name: distributor - spec: - containers: - - name: distributor - image: quay.io/cortexproject/cortex:v1.9.0 - imagePullPolicy: IfNotPresent - args: - - -target=distributor - - -log.level=debug - - -server.http-listen-port=80 - - -consul.hostname=consul.default.svc.cluster.local:8500 - - -distributor.replication-factor=1 - ports: - - containerPort: 80 diff --git a/k8s/distributor-svc.yaml b/k8s/distributor-svc.yaml deleted file mode 100644 index 73e26d8442..0000000000 --- a/k8s/distributor-svc.yaml +++ /dev/null @@ -1,10 +0,0 @@ ---- -apiVersion: v1 -kind: Service -metadata: - name: distributor -spec: - ports: - - port: 80 - selector: - name: distributor diff --git a/k8s/dynamodb-dep.yaml b/k8s/dynamodb-dep.yaml deleted file mode 100644 index 7d85f0e6a8..0000000000 --- a/k8s/dynamodb-dep.yaml +++ /dev/null @@ -1,24 +0,0 @@ ---- -apiVersion: apps/v1 -kind: Deployment -metadata: - name: dynamodb -spec: - replicas: 1 - selector: - matchLabels: - name: dynamodb - template: - metadata: - labels: - name: dynamodb - annotations: - prometheus.io.scrape: "false" - spec: - containers: - - name: dynamodb - image: amazon/dynamodb-local:1.11.477 - imagePullPolicy: IfNotPresent - args: ['-jar', 'DynamoDBLocal.jar', '-inMemory', '-sharedDb'] - ports: - - containerPort: 8000 diff --git a/k8s/dynamodb-svc.yaml b/k8s/dynamodb-svc.yaml deleted file mode 100644 index 0f20e14001..0000000000 --- a/k8s/dynamodb-svc.yaml +++ /dev/null @@ -1,10 +0,0 @@ ---- -apiVersion: v1 -kind: Service -metadata: - name: dynamodb -spec: - ports: - - port: 8000 - selector: - name: dynamodb diff --git a/k8s/ingester-dep.yaml b/k8s/ingester-dep.yaml deleted file mode 100644 index da4ec77670..0000000000 --- a/k8s/ingester-dep.yaml +++ /dev/null @@ -1,68 +0,0 @@ ---- -apiVersion: apps/v1 -kind: Deployment -metadata: - name: ingester -spec: - replicas: 1 - selector: - matchLabels: - name: ingester - - # Ingesters are not ready for at least 1 min - # after creation. This has to be in sync with - # the ring timeout value, as this will stop a - # stampede of new ingesters if we should loose - # some. - minReadySeconds: 60 - - # Having maxSurge 0 and maxUnavailable 1 means - # the deployment will update one ingester at a time - # as it will have to stop one (making one unavailable) - # before it can start one (surge of zero) - strategy: - rollingUpdate: - maxSurge: 0 - maxUnavailable: 1 - - template: - metadata: - labels: - name: ingester - spec: - # Give ingesters 40 minutes grace to flush chunks and exit cleanly. - # Service is available during this time, as long as we don't stop - # too many ingesters at once. 
- terminationGracePeriodSeconds: 2400 - - containers: - - name: ingester - image: quay.io/cortexproject/cortex:v1.9.0 - imagePullPolicy: IfNotPresent - args: - - -target=ingester - - -ingester.join-after=0s - - -ingester.min-ready-duration=0s - - -consul.hostname=consul.default.svc.cluster.local:8500 - - -s3.url=s3://cortex:supersecret@s3.default.svc.cluster.local:9000/cortex - - -s3.force-path-style=true - - -dynamodb.url=dynamodb://user:pass@dynamodb.default.svc.cluster.local:8000 - - -schema-config-file=/etc/cortex/schema.yaml - - -store.chunks-cache.memcached.hostname=memcached.default.svc.cluster.local - - -store.chunks-cache.memcached.timeout=100ms - - -store.chunks-cache.memcached.service=memcached - ports: - - containerPort: 80 - readinessProbe: - httpGet: - path: /ready - port: 80 - initialDelaySeconds: 15 - timeoutSeconds: 1 - volumeMounts: - - name: config-volume - mountPath: /etc/cortex - volumes: - - name: config-volume - configMap: - name: schema-config diff --git a/k8s/ingester-svc.yaml b/k8s/ingester-svc.yaml deleted file mode 100644 index 122e97d70b..0000000000 --- a/k8s/ingester-svc.yaml +++ /dev/null @@ -1,10 +0,0 @@ ---- -apiVersion: v1 -kind: Service -metadata: - name: ingester -spec: - ports: - - port: 80 - selector: - name: ingester diff --git a/k8s/memcached-dep.yaml b/k8s/memcached-dep.yaml deleted file mode 100644 index ce66ee1d86..0000000000 --- a/k8s/memcached-dep.yaml +++ /dev/null @@ -1,27 +0,0 @@ ---- -apiVersion: apps/v1 -kind: Deployment -metadata: - name: memcached -spec: - replicas: 1 - selector: - matchLabels: - name: memcached - template: - metadata: - labels: - name: memcached - annotations: - prometheus.io.scrape: "false" - spec: - containers: - - name: memcached - image: memcached:1.4.25 - imagePullPolicy: IfNotPresent - args: - - -m 64 # Maximum memory to use, in megabytes. 64MB is default. - - -p 11211 # Default port, but being explicit is nice. - ports: - - name: clients - containerPort: 11211 diff --git a/k8s/memcached-svc.yaml b/k8s/memcached-svc.yaml deleted file mode 100644 index 6146f8ba03..0000000000 --- a/k8s/memcached-svc.yaml +++ /dev/null @@ -1,16 +0,0 @@ ---- -apiVersion: v1 -kind: Service -metadata: - name: memcached -spec: - # The memcache client uses DNS to get a list of memcached servers and then - # uses a consistent hash of the key to determine which server to pick. 
- clusterIP: None - ports: - - name: memcached - port: 11211 - - name: prom - port: 9150 - selector: - name: memcached diff --git a/k8s/nginx-config.yaml b/k8s/nginx-config.yaml deleted file mode 100644 index 726c187529..0000000000 --- a/k8s/nginx-config.yaml +++ /dev/null @@ -1,53 +0,0 @@ -kind: ConfigMap -apiVersion: v1 -metadata: - name: nginx -data: - nginx.conf: |- - worker_processes 5; ## Default: 1 - error_log /dev/stderr; - pid /tmp/nginx.pid; - worker_rlimit_nofile 8192; - - events { - worker_connections 4096; ## Default: 1024 - } - - http { - default_type application/octet-stream; - log_format main '$remote_addr - $remote_user [$time_local] $status ' - '"$request" $body_bytes_sent "$http_referer" ' - '"$http_user_agent" "$http_x_forwarded_for"'; - access_log /dev/stderr main; - sendfile on; - tcp_nopush on; - resolver kube-dns.kube-system.svc.cluster.local; - - server { # simple reverse-proxy - listen 80; - proxy_set_header X-Scope-OrgID 0; - - location = /api/v1/push { - proxy_pass http://distributor.default.svc.cluster.local$request_uri; - } - - location = /api/prom/push { - proxy_pass http://distributor.default.svc.cluster.local$request_uri; - } - - location = /ring { - proxy_pass http://distributor.default.svc.cluster.local$request_uri; - } - location = /all_user_stats { - proxy_pass http://distributor.default.svc.cluster.local$request_uri; - } - - location ~ /prometheus/.* { - proxy_pass http://query-frontend.default.svc.cluster.local$request_uri; - } - - location ~ /api/prom/.* { - proxy_pass http://query-frontend.default.svc.cluster.local$request_uri; - } - } - } diff --git a/k8s/nginx-dep.yaml b/k8s/nginx-dep.yaml deleted file mode 100644 index f1bcfea661..0000000000 --- a/k8s/nginx-dep.yaml +++ /dev/null @@ -1,31 +0,0 @@ ---- -apiVersion: apps/v1 -kind: Deployment -metadata: - name: nginx -spec: - replicas: 1 - selector: - matchLabels: - name: nginx - template: - metadata: - labels: - name: nginx - annotations: - prometheus.io.scrape: "false" - spec: - containers: - - name: nginx - image: nginx - imagePullPolicy: IfNotPresent - ports: - - name: http - containerPort: 80 - volumeMounts: - - name: config-volume - mountPath: /etc/nginx - volumes: - - name: config-volume - configMap: - name: nginx diff --git a/k8s/nginx-svc.yaml b/k8s/nginx-svc.yaml deleted file mode 100644 index acd105cb2e..0000000000 --- a/k8s/nginx-svc.yaml +++ /dev/null @@ -1,13 +0,0 @@ ---- -apiVersion: v1 -kind: Service -metadata: - name: nginx -spec: - type: NodePort - ports: - - name: http - port: 80 - nodePort: 30080 - selector: - name: nginx diff --git a/k8s/querier-dep.yaml b/k8s/querier-dep.yaml deleted file mode 100644 index 9db34dd186..0000000000 --- a/k8s/querier-dep.yaml +++ /dev/null @@ -1,41 +0,0 @@ ---- -apiVersion: apps/v1 -kind: Deployment -metadata: - name: querier -spec: - replicas: 1 - selector: - matchLabels: - name: querier - template: - metadata: - labels: - name: querier - spec: - containers: - - name: querier - image: quay.io/cortexproject/cortex:v1.9.0 - imagePullPolicy: IfNotPresent - args: - - -target=querier - - -server.http-listen-port=80 - - -consul.hostname=consul.default.svc.cluster.local:8500 - - -s3.url=s3://cortex:supersecret@s3.default.svc.cluster.local:9000/cortex - - -s3.force-path-style=true - - -dynamodb.url=dynamodb://user:pass@dynamodb.default.svc.cluster.local:8000 - - -schema-config-file=/etc/cortex/schema.yaml - - -querier.frontend-address=query-frontend.default.svc.cluster.local:9095 - - 
-store.chunks-cache.memcached.hostname=memcached.default.svc.cluster.local - - -store.chunks-cache.memcached.timeout=100ms - - -store.chunks-cache.memcached.service=memcached - - -distributor.replication-factor=1 - ports: - - containerPort: 80 - volumeMounts: - - name: config-volume - mountPath: /etc/cortex - volumes: - - name: config-volume - configMap: - name: schema-config diff --git a/k8s/querier-svc.yaml b/k8s/querier-svc.yaml deleted file mode 100644 index 081185074a..0000000000 --- a/k8s/querier-svc.yaml +++ /dev/null @@ -1,10 +0,0 @@ ---- -apiVersion: v1 -kind: Service -metadata: - name: querier -spec: - ports: - - port: 80 - selector: - name: querier diff --git a/k8s/query-frontend-dep.yaml b/k8s/query-frontend-dep.yaml deleted file mode 100644 index b01adbbbfc..0000000000 --- a/k8s/query-frontend-dep.yaml +++ /dev/null @@ -1,29 +0,0 @@ ---- -apiVersion: apps/v1 -kind: Deployment -metadata: - name: query-frontend -spec: - replicas: 1 - selector: - matchLabels: - name: query-frontend - template: - metadata: - labels: - name: query-frontend - spec: - containers: - - name: query-frontend - image: quay.io/cortexproject/cortex:v1.9.0 - imagePullPolicy: IfNotPresent - args: - - -target=query-frontend - - -log.level=debug - - -server.http-listen-port=80 - - -server.grpc-listen-port=9095 - ports: - - containerPort: 9095 - name: grpc - - containerPort: 80 - name: http diff --git a/k8s/query-frontend-svc.yaml b/k8s/query-frontend-svc.yaml deleted file mode 100644 index caa2a2db12..0000000000 --- a/k8s/query-frontend-svc.yaml +++ /dev/null @@ -1,15 +0,0 @@ ---- -apiVersion: v1 -kind: Service -metadata: - name: query-frontend -spec: - # clusterIP: None gives a "headless" service so DNS returns all endpoints. - clusterIP: None - ports: - - port: 9095 - name: grpc - - port: 80 - name: http - selector: - name: query-frontend diff --git a/k8s/retrieval-config.yaml b/k8s/retrieval-config.yaml deleted file mode 100644 index e806d423f5..0000000000 --- a/k8s/retrieval-config.yaml +++ /dev/null @@ -1,124 +0,0 @@ ---- -kind: ConfigMap -apiVersion: v1 -metadata: - name: retrieval-config -data: - prometheus.yml: |- - global: - scrape_interval: 30s # By default, scrape targets every 15 seconds. 
- - remote_write: - - url: http://nginx.default.svc.cluster.local:80/api/v1/push - - scrape_configs: - - job_name: 'kubernetes-pods' - kubernetes_sd_configs: - - role: pod - - tls_config: - ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt - bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token - - # You can specify the following annotations (on pods): - # prometheus.io.scrape: false - don't scrape this pod - # prometheus.io.scheme: https - use https for scraping - # prometheus.io.port - scrape this port - # prometheus.io.path - scrape this path - relabel_configs: - - # Always use HTTPS for the api server - - source_labels: [__meta_kubernetes_service_label_component] - regex: apiserver - action: replace - target_label: __scheme__ - replacement: https - - # Drop anything annotated with prometheus.io.scrape=false - - source_labels: [__meta_kubernetes_pod_annotation_prometheus_io_scrape] - action: drop - regex: false - - # Drop any endpoint who's pod port name ends with -noscrape - - source_labels: [__meta_kubernetes_pod_container_port_name] - action: drop - regex: .*-noscrape - - # Allow pods to override the scrape scheme with prometheus.io.scheme=https - - source_labels: [__meta_kubernetes_pod_annotation_prometheus_io_scheme] - action: replace - target_label: __scheme__ - regex: ^(https?)$ - replacement: $1 - - # Allow service to override the scrape path with prometheus.io.path=/other_metrics_path - - source_labels: [__meta_kubernetes_pod_annotation_prometheus_io_path] - action: replace - target_label: __metrics_path__ - regex: ^(.+)$ - replacement: $1 - - # Allow services to override the scrape port with prometheus.io.port=1234 - - source_labels: [__address__, __meta_kubernetes_pod_annotation_prometheus_io_port] - action: replace - target_label: __address__ - regex: (.+?)(\:\d+)?;(\d+) - replacement: $1:$3 - - # Drop pods without a name label - - source_labels: [__meta_kubernetes_pod_label_name] - action: drop - regex: ^$ - - # Rename jobs to be / - - source_labels: [__meta_kubernetes_namespace, __meta_kubernetes_pod_label_name] - action: replace - separator: / - target_label: job - replacement: $1 - - # Rename instances to be the pod name - - source_labels: [__meta_kubernetes_pod_name] - action: replace - target_label: instance - - # Include node name as a extra field - - source_labels: [__meta_kubernetes_pod_node_name] - target_label: node - - # This scrape config gather all nodes - - job_name: 'kubernetes-nodes' - kubernetes_sd_configs: - - role: node - - # couldn't get prometheus to validate the kublet cert for scraping, so don't bother for now - tls_config: - insecure_skip_verify: true - bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token - - relabel_configs: - - target_label: __scheme__ - replacement: https - - source_labels: [__meta_kubernetes_node_label_kubernetes_io_hostname] - target_label: instance - - # This scrape config just pulls in the default/kubernetes service - - job_name: 'kubernetes-service' - kubernetes_sd_configs: - - role: endpoints - - tls_config: - insecure_skip_verify: true - bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token - - relabel_configs: - - source_labels: [__meta_kubernetes_service_label_component] - regex: apiserver - action: keep - - - target_label: __scheme__ - replacement: https - - - source_labels: [] - target_label: job - replacement: default/kubernetes diff --git a/k8s/retrieval-dep.yaml b/k8s/retrieval-dep.yaml deleted file mode 100644 index dbe0e17b5f..0000000000 --- 
a/k8s/retrieval-dep.yaml +++ /dev/null @@ -1,65 +0,0 @@ -apiVersion: rbac.authorization.k8s.io/v1beta1 -kind: ClusterRole -metadata: - name: retrieval -rules: -- apiGroups: [""] - resources: - - nodes - - nodes/proxy - - services - - endpoints - - pods - verbs: ["get", "list", "watch"] -- nonResourceURLs: ["/metrics"] - verbs: ["get"] ---- -apiVersion: v1 -kind: ServiceAccount -metadata: - name: retrieval - namespace: default ---- -apiVersion: rbac.authorization.k8s.io/v1beta1 -kind: ClusterRoleBinding -metadata: - name: retrieval -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: retrieval -subjects: -- kind: ServiceAccount - name: retrieval - namespace: default ---- -apiVersion: apps/v1 -kind: Deployment -metadata: - name: retrieval -spec: - replicas: 1 - selector: - matchLabels: - name: retrieval - template: - metadata: - labels: - name: retrieval - spec: - serviceAccountName: retrieval - containers: - - name: retrieval - image: prom/prometheus:v2.8.0 - imagePullPolicy: IfNotPresent - args: - - --config.file=/etc/prometheus/prometheus.yml - ports: - - containerPort: 9090 - volumeMounts: - - name: config-volume - mountPath: /etc/prometheus - volumes: - - name: config-volume - configMap: - name: retrieval-config diff --git a/k8s/retrieval-svc.yaml b/k8s/retrieval-svc.yaml deleted file mode 100644 index 2653855e60..0000000000 --- a/k8s/retrieval-svc.yaml +++ /dev/null @@ -1,10 +0,0 @@ ---- -apiVersion: v1 -kind: Service -metadata: - name: retrieval -spec: - ports: - - port: 9090 - selector: - name: retrieval diff --git a/k8s/ruler-dep.yaml b/k8s/ruler-dep.yaml deleted file mode 100644 index f8b939514d..0000000000 --- a/k8s/ruler-dep.yaml +++ /dev/null @@ -1,43 +0,0 @@ ---- -apiVersion: apps/v1 -kind: Deployment -metadata: - name: ruler -spec: - replicas: 1 - selector: - matchLabels: - name: ruler - template: - metadata: - labels: - name: ruler - spec: - containers: - - name: ruler - image: quay.io/cortexproject/cortex:v1.9.0 - imagePullPolicy: IfNotPresent - args: - - -target=ruler - - -log.level=debug - - -server.http-listen-port=80 - - -ruler.configs.url=http://configs.default.svc.cluster.local:80 - - -ruler.alertmanager-url=http://alertmanager.default.svc.cluster.local/alertmanager/ - - -consul.hostname=consul.default.svc.cluster.local:8500 - - -s3.url=s3://cortex:supersecret@default.svc.cluster.local:9000/cortex - - -s3.force-path-style=true - - -dynamodb.url=dynamodb://user:pass@dynamodb.default.svc.cluster.local:8000 - - -schema-config-file=/etc/cortex/schema.yaml - - -store.chunks-cache.memcached.hostname=memcached.default.svc.cluster.local - - -store.chunks-cache.memcached.timeout=100ms - - -store.chunks-cache.memcached.service=memcached - - -distributor.replication-factor=1 - ports: - - containerPort: 80 - volumeMounts: - - name: config-volume - mountPath: /etc/cortex - volumes: - - name: config-volume - configMap: - name: schema-config diff --git a/k8s/ruler-svc.yaml b/k8s/ruler-svc.yaml deleted file mode 100644 index 6477bb499b..0000000000 --- a/k8s/ruler-svc.yaml +++ /dev/null @@ -1,10 +0,0 @@ ---- -apiVersion: v1 -kind: Service -metadata: - name: ruler -spec: - ports: - - port: 80 - selector: - name: ruler diff --git a/k8s/s3-dep.yaml b/k8s/s3-dep.yaml deleted file mode 100644 index 2e1b6d224c..0000000000 --- a/k8s/s3-dep.yaml +++ /dev/null @@ -1,36 +0,0 @@ ---- -apiVersion: apps/v1 -kind: Deployment -metadata: - name: s3 -spec: - replicas: 1 - selector: - matchLabels: - name: s3 - template: - metadata: - labels: - name: s3 - annotations: - 
prometheus.io.scrape: "false" - spec: - containers: - - name: minio - image: minio/minio - # Create the "cortex" bucket before starting minio server. - command: [ "sh", "-c", "mkdir /storage/cortex && minio server /storage"] - env: - - name: MINIO_ACCESS_KEY - value: "cortex" - - name: MINIO_SECRET_KEY - value: "supersecret" - imagePullPolicy: IfNotPresent - ports: - - containerPort: 9000 - volumeMounts: - - name: storage - mountPath: "/storage" - volumes: - - name: storage - emptyDir: {} diff --git a/k8s/s3-svc.yaml b/k8s/s3-svc.yaml deleted file mode 100644 index e45dcba353..0000000000 --- a/k8s/s3-svc.yaml +++ /dev/null @@ -1,10 +0,0 @@ ---- -apiVersion: v1 -kind: Service -metadata: - name: s3 -spec: - ports: - - port: 9000 - selector: - name: s3 diff --git a/k8s/schema-config.yaml b/k8s/schema-config.yaml deleted file mode 100644 index f017d47d9a..0000000000 --- a/k8s/schema-config.yaml +++ /dev/null @@ -1,15 +0,0 @@ ---- -kind: ConfigMap -apiVersion: v1 -metadata: - name: schema-config -data: - schema.yaml: | - configs: - - from: "2020-01-01" - schema: v9 - index: - period: 168h - prefix: cortex_weekly_ - store: aws-dynamo - object_store: s3 diff --git a/k8s/table-manager-dep.yaml b/k8s/table-manager-dep.yaml deleted file mode 100644 index 31bc91ed83..0000000000 --- a/k8s/table-manager-dep.yaml +++ /dev/null @@ -1,33 +0,0 @@ ---- -apiVersion: apps/v1 -kind: Deployment -metadata: - name: table-manager -spec: - replicas: 1 - selector: - matchLabels: - name: table-manager - template: - metadata: - labels: - name: table-manager - spec: - containers: - - name: table-manager - image: quay.io/cortexproject/cortex:v1.9.0 - imagePullPolicy: IfNotPresent - args: - - -target=table-manager - - -server.http-listen-port=80 - - -dynamodb.url=dynamodb://user:pass@dynamodb.default.svc.cluster.local:8000 - - -schema-config-file=/etc/cortex/schema.yaml - ports: - - containerPort: 80 - volumeMounts: - - name: config-volume - mountPath: /etc/cortex - volumes: - - name: config-volume - configMap: - name: schema-config diff --git a/k8s/table-manager-svc.yaml b/k8s/table-manager-svc.yaml deleted file mode 100644 index 744aa42569..0000000000 --- a/k8s/table-manager-svc.yaml +++ /dev/null @@ -1,10 +0,0 @@ ---- -apiVersion: v1 -kind: Service -metadata: - name: table-manager -spec: - ports: - - port: 80 - selector: - name: table-manager
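Finally, since the CHANGELOG entry above points users at the blocks storage, here is a minimal sketch of what switching engines might look like (bucket and endpoint values are placeholders; see the linked blocks storage doc for the authoritative reference):

```yaml
storage:
  engine: blocks            # switch from the deprecated chunks engine
blocks_storage:
  backend: s3
  s3:
    endpoint: s3.dualstack.us-east-1.amazonaws.com  # placeholder endpoint
    bucket_name: example-cortex-blocks              # placeholder bucket
```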