From e11ca8be6e76fffd2cd2f21b2f52311c82b622b6 Mon Sep 17 00:00:00 2001 From: busu Date: Fri, 17 Mar 2023 16:25:41 +0800 Subject: [PATCH] release v1.4.0-beta --- .github/workflows/images-ci.yaml | 36 + .gitignore | 6 +- CHANGELOG/README.md | 50 +- README.md | 2 +- api/v1/common/zz_generated.deepcopy.go | 2 +- .../backup_storage.go} | 33 +- api/v1/polardbx/config.go | 16 +- api/v1/polardbx/restore.go | 33 +- api/v1/polardbx/status.go | 7 +- api/v1/polardbx/topology.go | 2 +- api/v1/polardbx/zz_generated.deepcopy.go | 109 +- api/v1/polardbxbackup_types.go | 14 +- api/v1/polardbxbackupbinlog_types.go | 80 + api/v1/polardbxbackupschedule_types.go | 79 + api/v1/polardbxcluster_types.go | 3 + api/v1/system_task_types.go | 61 + api/v1/systemtask/balance_resource.go | 7 + api/v1/systemtask/consts.go | 33 + api/v1/systemtask/generate.go | 19 + api/v1/systemtask/zz_generated.deepcopy.go | 39 + api/v1/xstore/config.go | 5 +- api/v1/xstore/follower.go | 4 + api/v1/xstore/phase.go | 8 + api/v1/xstore/topology.go | 2 +- api/v1/xstore/zz_generated.deepcopy.go | 10 +- api/v1/xstore_types.go | 2 + api/v1/xstorebackup_types.go | 27 +- api/v1/zz_generated.deepcopy.go | 329 +- build/images/polardbx-hpfs/Dockerfile | 2 +- build/images/polardbx-job/Dockerfile | 49 + build/images/polardbx-operator/Dockerfile | 2 +- build/root/Makefile | 2 +- charts/polardbx-logcollector/Chart.yaml | 4 +- charts/polardbx-monitor/Chart.yaml | 4 +- .../dashboard/polardbx-overview.json | 72 +- charts/polardbx-operator/Chart.yaml | 4 +- ...rdbx.aliyun.com_polardbxbackupbinlogs.yaml | 106 + .../polardbx.aliyun.com_polardbxbackups.yaml | 88 +- ...bx.aliyun.com_polardbxbackupschedules.yaml | 136 + .../polardbx.aliyun.com_polardbxclusters.yaml | 106 +- .../crds/polardbx.aliyun.com_systemtasks.yaml | 117 + .../polardbx.aliyun.com_xstorebackups.yaml | 14 + .../crds/polardbx.aliyun.com_xstores.yaml | 56 +- .../controller-config-configmap.yaml | 5 +- .../templates/host-path-file-configmap.yaml | 4 + .../host-path-file-service-service.yaml | 5 +- .../templates/parameter-template-product.yaml | 5306 +++++++++-------- .../admission-webhook-configuration.yaml | 19 + charts/polardbx-operator/values.yaml | 11 +- cmd/polardbx-filestream-cli/main.go | 5 + cmd/polardbx-hpfs/main.go | 24 +- cmd/polardbx-job/main.go | 76 + docs/en/index.md | 1 - go.mod | 6 +- go.sum | 3 + hack/make-rules/lib/build_env.py | 1 + hack/manifest.sh | 2 +- pkg/binlogtool/algo/locate_heartbeat.go | 2 - pkg/binlogtool/algo/locate_heartbeat_test.go | 3 +- pkg/binlogtool/algo/seek_consistent_point.go | 37 +- pkg/binlogtool/binlog/write.go | 5 +- pkg/binlogtool/cmd/seekcp.go | 33 +- pkg/binlogtool/system/system.go | 2 - pkg/binlogtool/tx/binary.go | 2 - pkg/binlogtool/tx/event.go | 2 - pkg/binlogtool/tx/parser.go | 6 +- pkg/binlogtool/tx/parser_test.go | 3 +- pkg/binlogtool/tx/xa.go | 2 - pkg/binlogtool/utils/binary.go | 6 +- pkg/binlogtool/utils/binary_little.go | 2 +- pkg/binlogtool/utils/binary_others.go | 4 +- pkg/featuregate/featuregates.go | 4 +- pkg/hpfs/backupbinlog/action.go | 256 + pkg/hpfs/backupbinlog/dao.go | 84 + pkg/hpfs/backupbinlog/heartbeat.go | 171 + pkg/hpfs/backupbinlog/meta.go | 59 + pkg/hpfs/backupbinlog/purge.go | 96 + pkg/hpfs/backupbinlog/purge_test.go | 13 + pkg/hpfs/backupbinlog/start.go | 76 + pkg/hpfs/backupbinlog/watcher.go | 524 ++ pkg/hpfs/backupbinlog/watcher_test.go | 11 + pkg/hpfs/common/common.go | 61 + .../{filestream => common}/common_test.go | 2 +- pkg/hpfs/{filestream => config}/config.go | 55 +- .../{filestream => config}/config_test.go | 
2 +- pkg/hpfs/filestream/client.go | 44 + pkg/hpfs/filestream/common.go | 63 +- pkg/hpfs/filestream/request.go | 28 +- pkg/hpfs/filestream/server.go | 210 +- pkg/hpfs/filestream/xbstream_chunk.go | 1 + pkg/hpfs/hpfs_grpc.go | 196 + pkg/hpfs/hpfs_proxy.go | 107 + pkg/hpfs/proto/hpfs.pb.go | 2133 ++++++- pkg/hpfs/proto/hpfs.proto | 189 +- pkg/hpfs/remote/aliyun_oss.go | 182 +- pkg/hpfs/remote/aliyun_oss_test.go | 47 + pkg/hpfs/remote/ftp.go | 12 + pkg/hpfs/remote/hdfs.go | 12 + pkg/hpfs/remote/remote.go | 3 + pkg/hpfs/remote/sftp.go | 140 +- pkg/k8s/control/context.go | 2 +- pkg/k8s/helper/pod.go | 9 +- pkg/k8s/helper/service.go | 8 + pkg/k8s/prometheus/TBD | 0 pkg/meta/core/gms/manager.go | 9 +- pkg/meta/core/group/group_manager.go | 54 +- pkg/operator/v1/config/config.go | 51 + pkg/operator/v1/config/interface.go | 13 + pkg/operator/v1/operator.go | 45 +- .../controllers/polardbxbackup_controller.go | 20 +- .../polardbxbackupbinlog_controller.go | 117 + .../polardbxbackupschedule_controller.go | 109 + .../controllers/polardbxcluster_controller.go | 23 +- .../polardbxclusterknobs_controller.go | 4 +- .../controllers/polardbxmonitor_controller.go | 4 +- .../polardbxparameter_controller.go | 4 +- pkg/operator/v1/polardbx/factory/backup.go | 249 + .../v1/polardbx/factory/deployment.go | 63 +- .../v1/polardbx/factory/env_factory.go | 12 +- .../v1/polardbx/factory/object_factory.go | 9 +- .../v1/polardbx/factory/ports_factory.go | 22 + .../v1/polardbx/factory/probe_configure.go | 38 +- pkg/operator/v1/polardbx/factory/secret.go | 27 +- pkg/operator/v1/polardbx/factory/storage.go | 43 +- pkg/operator/v1/polardbx/meta/annotation.go | 14 + pkg/operator/v1/polardbx/meta/label.go | 49 +- pkg/operator/v1/polardbx/reconcile/context.go | 283 +- .../v1/polardbx/steps/backup/common/object.go | 215 +- .../common/{seekcpjob.go => seekcp_job.go} | 13 +- .../steps/backup/schedule/schedule.go | 127 + .../xstorejobbuilder/xstore_backup_builder.go | 58 - .../steps/backupbinlog/expire_file.go | 63 + .../polardbx/steps/backupbinlog/finalizer.go | 29 + .../polardbx/steps/backupbinlog/heartbeat.go | 152 + .../v1/polardbx/steps/backupbinlog/pxc.go | 65 + .../v1/polardbx/steps/backupbinlog/status.go | 75 + .../polardbx/steps/backupbinlog/sync_info.go | 248 + .../v1/polardbx/steps/instance/annotation.go | 43 + .../polardbx/steps/instance/common/object.go | 169 +- .../v1/polardbx/steps/instance/gms/gms.go | 57 +- .../v1/polardbx/steps/instance/object.go | 31 +- .../v1/polardbx/steps/instance/pitr/job.go | 301 + .../v1/polardbx/steps/instance/pitr/pitr.go | 138 + pkg/operator/v1/systemtask/common/adaptor.go | 35 + .../v1/systemtask/common/base_reconciler.go | 10 + pkg/operator/v1/systemtask/common/common.go | 21 + pkg/operator/v1/systemtask/common/context.go | 198 + pkg/operator/v1/systemtask/common/label.go | 5 + .../controllers/systemtask_controller.go | 64 + .../reconcile/resource_balance_reconciler.go | 40 + .../v1/systemtask/steps/balance_resource.go | 437 ++ pkg/operator/v1/systemtask/steps/status.go | 33 + .../change/driver/exec/update_executor.go | 2 +- .../xstore/change/driver/planner/planner.go | 4 +- pkg/operator/v1/xstore/command/commands.go | 42 +- .../controllers/xstore_backup_controller.go | 13 +- .../xstore/controllers/xstore_controller.go | 4 +- .../controllers/xstore_follower_controller.go | 4 +- .../v1/xstore/convention/convention.go | 30 +- pkg/operator/v1/xstore/factory/pod_extra.go | 12 + pkg/operator/v1/xstore/factory/secret.go | 38 +- pkg/operator/v1/xstore/meta/annotations.go | 21 + 
pkg/operator/v1/xstore/meta/labels.go | 32 +- .../v1/xstore/plugin/common/steps/common.go | 5 +- .../reconcilers/galaxy_backup_reconciler.go | 2 +- .../galaxy/reconcilers/galaxy_reconciler.go | 47 +- .../plugin/galaxy/steps/instance/log.go | 11 + .../v1/xstore/reconcile/backup_context.go | 55 +- pkg/operator/v1/xstore/reconcile/context.go | 22 +- .../backup/{backupjob.go => backup_job.go} | 4 +- ...binlogbackupJob.go => binlogbackup_job.go} | 0 .../backup/{collectjob.go => collect_job.go} | 0 pkg/operator/v1/xstore/steps/backup/status.go | 63 +- .../v1/xstore/steps/follower/check.go | 8 +- pkg/operator/v1/xstore/steps/follower/job.go | 14 +- pkg/operator/v1/xstore/steps/follower/pod.go | 7 +- .../v1/xstore/steps/instance/common.go | 12 + .../v1/xstore/steps/instance/consensus.go | 84 +- .../v1/xstore/steps/instance/objects.go | 12 +- .../v1/xstore/steps/instance/rebuild.go | 183 + .../v1/xstore/steps/instance/recoverjob.go | 4 +- .../v1/xstore/steps/instance/restore.go | 80 +- .../v1/xstore/steps/instance/restorejob.go | 6 +- .../xstore/steps/instance/support_legacy.go | 192 + .../v1/xstore/steps/instance/volumes.go | 15 +- pkg/pitr/context.go | 23 + pkg/pitr/driver.go | 94 + pkg/pitr/dto.go | 36 + pkg/pitr/restore_binlog.go | 489 ++ pkg/pitr/restore_binlog_test.go | 92 + pkg/pitr/workflow.go | 563 ++ pkg/pitr/workflow_test.go | 158 + pkg/probe/prober.go | 26 +- pkg/probe/xstore_ext/plugin/xstore_galaxy.go | 22 +- pkg/util/{ => name}/name.go | 74 +- pkg/util/path/path.go | 34 + pkg/webhook/polardbxbackup/validator.go | 124 + pkg/webhook/polardbxbackup/webhook.go | 41 + pkg/webhook/webhooks.go | 8 +- test/framework/polardbxcluster/expect.go | 20 +- test/framework/polardbxparameter/wait.go | 16 +- tools/xstore/cli/binlogbackup.py | 5 +- tools/xstore/cli/consensus.py | 76 +- tools/xstore/cli/engine.py | 11 +- tools/xstore/cli/myconfig.py | 14 +- tools/xstore/cli/process.py | 12 + tools/xstore/cli/recover.py | 10 +- tools/xstore/cli/restore.py | 41 +- tools/xstore/cli/utils/__init__.py | 15 + tools/xstore/cli/utils/timer.py | 41 + tools/xstore/core/config/mysql.py | 47 +- tools/xstore/core/consensus/manager.py | 33 +- tools/xstore/core/consensus/manager_impl.py | 101 +- tools/xstore/core/context/context.py | 1 + tools/xstore/core/engine/engine.py | 36 +- tools/xstore/core/engine/galaxy/engine.py | 43 +- tools/xstore/core/engine/util/__init__.py | 0 tools/xstore/core/engine/util/config_util.py | 48 + tools/xstore/entrypoint.py | 2 + tools/xstore/requirements.txt | 3 +- 220 files changed, 15589 insertions(+), 3844 deletions(-) create mode 100644 .github/workflows/images-ci.yaml rename api/v1/{backup_storage_provider.go => polardbx/backup_storage.go} (56%) create mode 100644 api/v1/polardbxbackupbinlog_types.go create mode 100644 api/v1/polardbxbackupschedule_types.go create mode 100644 api/v1/system_task_types.go create mode 100644 api/v1/systemtask/balance_resource.go create mode 100644 api/v1/systemtask/consts.go create mode 100644 api/v1/systemtask/generate.go create mode 100644 api/v1/systemtask/zz_generated.deepcopy.go create mode 100644 build/images/polardbx-job/Dockerfile create mode 100644 charts/polardbx-operator/crds/polardbx.aliyun.com_polardbxbackupbinlogs.yaml create mode 100644 charts/polardbx-operator/crds/polardbx.aliyun.com_polardbxbackupschedules.yaml create mode 100644 charts/polardbx-operator/crds/polardbx.aliyun.com_systemtasks.yaml create mode 100644 cmd/polardbx-job/main.go delete mode 100644 docs/en/index.md create mode 100644 pkg/hpfs/backupbinlog/action.go create mode 
100644 pkg/hpfs/backupbinlog/dao.go create mode 100644 pkg/hpfs/backupbinlog/heartbeat.go create mode 100644 pkg/hpfs/backupbinlog/meta.go create mode 100644 pkg/hpfs/backupbinlog/purge.go create mode 100644 pkg/hpfs/backupbinlog/purge_test.go create mode 100644 pkg/hpfs/backupbinlog/start.go create mode 100644 pkg/hpfs/backupbinlog/watcher.go create mode 100644 pkg/hpfs/backupbinlog/watcher_test.go create mode 100644 pkg/hpfs/common/common.go rename pkg/hpfs/{filestream => common}/common_test.go (99%) rename pkg/hpfs/{filestream => config}/config.go (56%) rename pkg/hpfs/{filestream => config}/config_test.go (99%) create mode 100644 pkg/hpfs/remote/aliyun_oss_test.go create mode 100644 pkg/k8s/prometheus/TBD create mode 100644 pkg/operator/v1/polardbx/controllers/polardbxbackupbinlog_controller.go create mode 100644 pkg/operator/v1/polardbx/controllers/polardbxbackupschedule_controller.go create mode 100644 pkg/operator/v1/polardbx/factory/backup.go rename pkg/operator/v1/polardbx/steps/backup/common/{seekcpjob.go => seekcp_job.go} (89%) create mode 100644 pkg/operator/v1/polardbx/steps/backup/schedule/schedule.go delete mode 100644 pkg/operator/v1/polardbx/steps/backup/xstorejobbuilder/xstore_backup_builder.go create mode 100644 pkg/operator/v1/polardbx/steps/backupbinlog/expire_file.go create mode 100644 pkg/operator/v1/polardbx/steps/backupbinlog/finalizer.go create mode 100644 pkg/operator/v1/polardbx/steps/backupbinlog/heartbeat.go create mode 100644 pkg/operator/v1/polardbx/steps/backupbinlog/pxc.go create mode 100644 pkg/operator/v1/polardbx/steps/backupbinlog/status.go create mode 100644 pkg/operator/v1/polardbx/steps/backupbinlog/sync_info.go create mode 100644 pkg/operator/v1/polardbx/steps/instance/pitr/job.go create mode 100644 pkg/operator/v1/polardbx/steps/instance/pitr/pitr.go create mode 100644 pkg/operator/v1/systemtask/common/adaptor.go create mode 100644 pkg/operator/v1/systemtask/common/base_reconciler.go create mode 100644 pkg/operator/v1/systemtask/common/common.go create mode 100644 pkg/operator/v1/systemtask/common/context.go create mode 100644 pkg/operator/v1/systemtask/common/label.go create mode 100644 pkg/operator/v1/systemtask/controllers/systemtask_controller.go create mode 100644 pkg/operator/v1/systemtask/reconcile/resource_balance_reconciler.go create mode 100644 pkg/operator/v1/systemtask/steps/balance_resource.go create mode 100644 pkg/operator/v1/systemtask/steps/status.go rename pkg/operator/v1/xstore/steps/backup/{backupjob.go => backup_job.go} (97%) rename pkg/operator/v1/xstore/steps/backup/{binlogbackupJob.go => binlogbackup_job.go} (100%) rename pkg/operator/v1/xstore/steps/backup/{collectjob.go => collect_job.go} (100%) create mode 100644 pkg/operator/v1/xstore/steps/instance/support_legacy.go create mode 100644 pkg/pitr/context.go create mode 100644 pkg/pitr/driver.go create mode 100644 pkg/pitr/dto.go create mode 100644 pkg/pitr/restore_binlog.go create mode 100644 pkg/pitr/restore_binlog_test.go create mode 100644 pkg/pitr/workflow.go create mode 100644 pkg/pitr/workflow_test.go rename pkg/util/{ => name}/name.go (55%) create mode 100644 pkg/util/path/path.go create mode 100644 pkg/webhook/polardbxbackup/validator.go create mode 100644 pkg/webhook/polardbxbackup/webhook.go create mode 100644 tools/xstore/cli/utils/__init__.py create mode 100644 tools/xstore/cli/utils/timer.py create mode 100644 tools/xstore/core/engine/util/__init__.py create mode 100644 tools/xstore/core/engine/util/config_util.py diff --git a/.github/workflows/images-ci.yaml 
b/.github/workflows/images-ci.yaml
new file mode 100644
index 0000000..e31f723
--- /dev/null
+++ b/.github/workflows/images-ci.yaml
@@ -0,0 +1,36 @@
+name: CI for build images
+
+on:
+  pull_request:
+    branches:
+      - main
+
+# Environment variables available to all jobs and steps in this workflow.
+env:
+  REGISTRY: registry.cn-zhangjiakou.aliyuncs.com
+  NAMESPACE: polardbx-ci
+  TAG: ${{ github.sha }}
+
+jobs:
+  build:
+    runs-on: ubuntu-latest
+    steps:
+      - name: Checkout
+        uses: actions/checkout@v3
+
+      # 1 Setup go environment
+      - name: Setup go environment
+        uses: actions/setup-go@v3
+        with:
+          go-version: '1.18'
+
+      # 2 Build images
+      - name: Build images
+        run: |-
+          make build REPO=$REGISTRY/$NAMESPACE TAG=$TAG
+
+      # 3 Generate helm chart
+      - name: Generate helm chart
+        run: |-
+          make helm-package
+
diff --git a/.gitignore b/.gitignore
index 95c3b77..a406c27 100644
--- a/.gitignore
+++ b/.gitignore
@@ -554,4 +554,8 @@ vendor
 !LICENSES/vendor
 !test/testcases/inner
 !pkg/debug
-tools/xstore/venv
\ No newline at end of file
+tools/xstore/venv
+
+docs/**
+/hack/make-rules/venv/
+/hack/make-rules/.idea/
diff --git a/CHANGELOG/README.md b/CHANGELOG/README.md
index cb5d388..2a17164 100644
--- a/CHANGELOG/README.md
+++ b/CHANGELOG/README.md
@@ -1,5 +1,25 @@
 # Changelog
 
+## 2023-03-17
+
+Release v1.4.0-beta
+
++ Enhancement & New Features
+  + Support consensus log backup
+  + Support point-in-time recovery (PITR)
+  + Support backup schedule
+  + Support restore without a backup object
+  + Support automatic rebuild of a DN follower when `Slave_SQL_Running` becomes `No` due to `Last_SQL_Error`
+  + Support debug run mode for PolarDB-X clusters
+  + Support a default PrometheusRule for PolarDB-X clusters
+  + Use *Cluster IP* as the default communication address among DN pods
+  + Use container network as the default network type
++ Bug Fix
+  + Fix backup failure on the follower when using a non-host network
+  + Fix Grafana CPU and memory usage issue for kube-state-metrics 2.3.0
+  + Fix restore issue when the binlog checksum is none
+  + Fix CDC crash during backup caused by sending heartbeats using XA transactions
+
 ## 2022-11-17
 
 Release v1.3.0
@@ -61,6 +81,7 @@ Fix overriding my.cnf passing to the script without section.
 
 Release v1.2.1. This is a bugfix release.
+
 + Fix the CDC problem by setting the 'consensuslog_revise' variable of galaxyengine to ON.
 + Update docker registry mirror used in China mainland from ustc to sjtug.
 + Fix the scale in/out progress: use "schedule rebalance" command.
@@ -70,11 +91,19 @@ This is a bugfix release.
 
 Release v1.2.0.
 
 + Enhancement & New Features
-  + Provide a new CR `PolarDBXMonitor` for declaring the monitoring of some `PolarDBXCluster`. The controller will create `ServiceMonitors` to make prometheus scraping the metrics.
-  + Provide a new chart `polardbx-monitor` which packages a customized [kube-prometheus](https://github.com/prometheus-operator/kube-prometheus) release with predefined dashboards for monitoring PolarDB-X clusters.
-  + Provide support for xpaxos version of [galaxyengine](https://github.com/ApsaraDB/galaxyengine). Now the `PolarDBXCluster` controller will create a typical paxos cluster (leader + follower + logger) for each GMS and DN by default.
-    + **Note** this is a breaking change. After the upgrade, the old `XStores` will be in an unmaintainable state. Update of the GMS/DN is not possible due to incompatible data/log formats. You may have to delete all the `PolarDBXCluster` in your Kubernetes before/after the upgrade.
-    + If you want to keep the compatibility, you can disable this feature by declaring feature gate with negative symbol `EnableGalaxyCluster-`. After that, no multi-node `XStore` with galaxy engine can be created.
+  + Provide a new CR `PolarDBXMonitor` for declaring the monitoring of some `PolarDBXCluster`. The controller will
+    create `ServiceMonitors` to make prometheus scraping the metrics.
+  + Provide a new chart `polardbx-monitor` which packages a
+    customized [kube-prometheus](https://github.com/prometheus-operator/kube-prometheus) release with predefined
+    dashboards for monitoring PolarDB-X clusters.
+  + Provide support for xpaxos version of [galaxyengine](https://github.com/ApsaraDB/galaxyengine). Now
+    the `PolarDBXCluster` controller will create a typical paxos cluster (leader + follower + logger) for each GMS and
+    DN by default.
+    + **Note** this is a breaking change. After the upgrade, the old `XStores` will be in an unmaintainable state.
+      Update of the GMS/DN is not possible due to incompatible data/log formats. You may have to delete all
+      the `PolarDBXCluster` in your Kubernetes before/after the upgrade.
+    + If you want to keep the compatibility, you can disable this feature by declaring feature gate with negative
+      symbol `EnableGalaxyCluster-`. After that, no multi-node `XStore` with galaxy engine can be created.
   + Support scaling up/down and self-healing of the `XStores`.
 
 + Bug Fix
@@ -85,7 +114,7 @@ Release v1.2.0.
 
 Release v1.1.0.
 
 + Enhancement & New Features
-  + Support scaling in/out the PolarDBXCluster.
+  + Support scaling in/out the PolarDBXCluster.
   + Support configure SSL on PolarDBXCluster.
   + Add a new CRD PolarDBXCluster for setting and reviewing the configs of PolarDB-X cluster.
   + `config.dynamic.CN` is not going to sync with cluster while phase is running.
@@ -94,21 +123,22 @@ Release v1.1.0.
   + Add label "polardbx/name" to Services、Secrets and ConfigMaps owned by PolarDBXCluster.
   + Support webhooks for PolarDBXCluster and PolarDBXClusterKnobs.
   + Support the complete spec of node selectors in `spec.topology.rules` of PolarDBXCluster.
-  + Create headless services for pods of xstore. Record DNS domains instead of raw IP for records of DN in GMS and among galaxyengine xstores.
+  + Create headless services for pods of xstore. Record DNS domains instead of raw IP for records of DN in GMS and
+    among galaxyengine xstores.
   + Support overwrite image tag in values.yaml (helm).
   + Support collect metrics for hotspot JVM 11.
   + Add e2e test tests.
 
 + Bug Fix
-  + Fix the wrong call stack when logging with `flow.Error` in some cases.
+  + Fix the wrong call stack when logging with `flow.Error` in some cases.
   + Fix the wrong timeout in polardbx-init.
   + Fix configuring host path of data volumes in values.yaml (helm).
   + Fix removing ini keys in galaxyengine's config.
   + Fix a `removeNull` in hsperfdata.
 
-## 2021-10-15
+## 2021-10-15
 
-Release v1.0.0.
+Release v1.0.0.
 
 + Provide v1 APIs:
   + PolarDBXCluster
diff --git a/README.md b/README.md
index a4dc7ab..4a62472 100644
--- a/README.md
+++ b/README.md
@@ -11,7 +11,7 @@ Follow the [快速开始](https://doc.polardbx.com/quickstart/topics/quickstart-
 
 ## Documentation
 
-Please refer to the [CHANGELOG](./CHANGELOG.md) for the notable changes for each version.**
+Please refer to the [CHANGELOG](./CHANGELOG/README.md) for the notable changes for each version.
 
 Refer to the documentations for more details, such as CRD definitions and operation guides.
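As a quick illustration of the "Support backup schedule" feature listed above, here is a minimal, hedged Go sketch of how a `PolarDBXBackupSchedule` object could be constructed from the new `api/v1` types introduced later in this patch (`api/v1/polardbxbackupschedule_types.go`). The name and namespace are placeholders, only fields visible in this patch are set, and the cluster-reference field of `PolarDBXBackupSpec` (not shown in this diff) is omitted.

```go
// Illustrative sketch only — not part of this patch. It builds a
// PolarDBXBackupSchedule value using the new types from
// api/v1/polardbxbackupschedule_types.go. Names are placeholders, and the
// cluster reference inside PolarDBXBackupSpec is elided because it does not
// appear in this diff.
package main

import (
	"fmt"

	polardbxv1 "github.com/alibaba/polardbx-operator/api/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	schedule := polardbxv1.PolarDBXBackupSchedule{
		ObjectMeta: metav1.ObjectMeta{
			Name:      "demo-backup-schedule", // placeholder
			Namespace: "default",              // placeholder
		},
		Spec: polardbxv1.PolarDBXBackupScheduleSpec{
			Schedule:       "0 2 * * *", // cron expression: every day at 02:00
			Suspend:        false,       // set to true to pause the schedule
			MaxBackupCount: 7,           // oldest backup sets are purged beyond 7; 0 means no limit
			BackupSpec: polardbxv1.PolarDBXBackupSpec{
				// New in v1.4.0-beta: choose the node role to back up from
				// (kubebuilder enum: leader;follower, default follower).
				PreferredBackupRole: "follower",
			},
		},
	}
	fmt.Printf("backup schedule %s: %q\n", schedule.Name, schedule.Spec.Schedule)
}
```

If this sketch matches the intended usage, the schedule controller added in `pkg/operator/v1/polardbx/controllers/polardbxbackupschedule_controller.go` would presumably create one `PolarDBXBackup` per cron tick and enforce `MaxBackupCount` by purging the oldest backup sets.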
diff --git a/api/v1/common/zz_generated.deepcopy.go b/api/v1/common/zz_generated.deepcopy.go
index 918c384..74422df 100644
--- a/api/v1/common/zz_generated.deepcopy.go
+++ b/api/v1/common/zz_generated.deepcopy.go
@@ -2,7 +2,7 @@
 // +build !ignore_autogenerated
 
 /*
-Copyright 2022 Alibaba Group Holding Limited.
+Copyright 2023 Alibaba Group Holding Limited.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/api/v1/backup_storage_provider.go b/api/v1/polardbx/backup_storage.go
similarity index 56%
rename from api/v1/backup_storage_provider.go
rename to api/v1/polardbx/backup_storage.go
index 51d5b27..eb34734 100644
--- a/api/v1/backup_storage_provider.go
+++ b/api/v1/polardbx/backup_storage.go
@@ -14,7 +14,12 @@ See the License for the specific language governing permissions and
 limitations under the License.
 */
 
-package v1
+package polardbx
+
+import (
+    "errors"
+    "github.com/alibaba/polardbx-operator/pkg/hpfs/filestream"
+)
 
 // BackupStorageProvider defines the configuration of storage for storing backup files.
 type BackupStorageProvider struct {
@@ -33,3 +38,29 @@ const (
     OSS  BackupStorage = "oss"
     SFTP BackupStorage = "sftp"
 )
+
+// BackupStorageFilestreamAction records the filestream actions related to the specified backup storage.
+type BackupStorageFilestreamAction struct {
+    Download filestream.Action
+    Upload   filestream.Action
+    List     filestream.Action
+}
+
+func NewBackupStorageFilestreamAction(storage BackupStorage) (*BackupStorageFilestreamAction, error) {
+    switch storage {
+    case OSS:
+        return &BackupStorageFilestreamAction{
+            Download: filestream.DownloadOss,
+            Upload:   filestream.UploadOss,
+            List:     filestream.ListOss,
+        }, nil
+    case SFTP:
+        return &BackupStorageFilestreamAction{
+            Download: filestream.DownloadSsh,
+            Upload:   filestream.UploadSsh,
+            List:     filestream.ListSsh,
+        }, nil
+    default:
+        return nil, errors.New("invalid storage: " + string(storage))
+    }
+}
diff --git a/api/v1/polardbx/config.go b/api/v1/polardbx/config.go
index e552675..1e96d67 100644
--- a/api/v1/polardbx/config.go
+++ b/api/v1/polardbx/config.go
@@ -47,10 +47,15 @@ type CNConfig struct {
 }
 
 type DNConfig struct {
-    MycnfOverwrite    string          `json:"mycnfOverwrite,omitempty"`
-    LogPurgeInterval  metav1.Duration `json:"logPurgeInterval,omitempty"`
-    EnableAuditLog    bool            `json:"enableAuditLog,omitempty"`
-    LogDataSeparation bool            `json:"logDataSeparation,omitempty"`
+    MycnfOverwrite    string                        `json:"mycnfOverwrite,omitempty"`
+    LogPurgeInterval  metav1.Duration               `json:"logPurgeInterval,omitempty"`
+    EnableAuditLog    bool                          `json:"enableAuditLog,omitempty"`
+    LogDataSeparation bool                          `json:"logDataSeparation,omitempty"`
+    Envs              map[string]intstr.IntOrString `json:"envs,omitempty"`
+}
+
+type CDCConfig struct {
+    Envs map[string]intstr.IntOrString `json:"envs,omitempty"`
 }
 
 type Config struct {
@@ -59,4 +64,7 @@ type Config struct {
 
     // DN config.
     DN DNConfig `json:"dn,omitempty"`
+
+    // CDC config.
+    CDC CDCConfig `json:"cdc,omitempty"`
 }
diff --git a/api/v1/polardbx/restore.go b/api/v1/polardbx/restore.go
index 85640f4..3464840 100644
--- a/api/v1/polardbx/restore.go
+++ b/api/v1/polardbx/restore.go
@@ -17,13 +17,21 @@ limitations under the License.
 
 package polardbx
 
 type RestoreSpec struct {
-    // BackupSet defines the source of backup set
+    // BackupSet defines the source of the backup set.
+    // It works only when the PolarDBXBackup object of this BackupSet still exists.
     BackupSet string `json:"backupset,omitempty"`
 
-    // From defines the source information, either backup sets, snapshot or an running cluster.
+    // StorageProvider defines the storage used to perform the backup.
+    // +optional
+    StorageProvider *BackupStorageProvider `json:"storageProvider,omitempty"`
+
+    // From defines the source information, either a running cluster, a backup set path, or a backup selector.
+    //
+    // If PolarDBXRestoreFrom.BackupSetPath is provided, the restore will be performed using the metadata backup in remote storage.
+    // It works only when BackupSet is empty and StorageProvider is provided.
     From PolarDBXRestoreFrom `json:"from,omitempty"`
 
-    // Time defines the specified time of the restored data, in the format of 'yyyy-MM-dd HH:mm:ss'. Required.
+    // Time defines the specified time of the restored data, in the format of 'yyyy-MM-ddTHH:mm:ssZ'. Required.
     Time string `json:"time,omitempty"`
 
     // TimeZone defines the specified time zone of the restore time. Default is the location of current cluster.
@@ -37,15 +45,32 @@ type RestoreSpec struct {
     // use default spec, but replicas of dn will be forced to sync with original cluster now. Default is false
     // +optional
     SyncSpecWithOriginalCluster bool `json:"syncSpecWithOriginalCluster,omitempty"`
+
+    // BinlogSource defines the binlog datasource.
+    // +optional
+    BinlogSource *RestoreBinlogSource `json:"binlogSource,omitempty"`
 }
 
 // PolarDBXRestoreFrom defines the source information of the restored cluster.
 type PolarDBXRestoreFrom struct {
-    // PolarBDXName defines the the polardbx name that this polardbx is restored from. Optional.
+    // PolarBDXName defines the polardbx name that this polardbx is restored from. Optional.
     // +optional
     PolarBDXName string `json:"clusterName,omitempty"`
 
+    // BackupSetPath defines the location of the backup set in remote storage.
+    BackupSetPath string `json:"backupSetPath,omitempty"`
+
     // BackupSelector defines the selector for the backups to be selected. Optional.
     // +optional
     BackupSelector map[string]string `json:"backupSelector,omitempty"`
 }
+
+// RestoreBinlogSource defines the binlog datasource.
+type RestoreBinlogSource struct {
+    // Namespace defines the source binlog namespace.
+    Namespace string `json:"namespace,omitempty"`
+    // Checksum defines the binlog file checksum.
+    Checksum string `json:"checksum,omitempty"`
+    // StorageProvider defines the source binlog sink.
+    StorageProvider *BackupStorageProvider `json:"storageProvider,omitempty"`
+}
diff --git a/api/v1/polardbx/status.go b/api/v1/polardbx/status.go
index e28c3a1..d351ce5 100644
--- a/api/v1/polardbx/status.go
+++ b/api/v1/polardbx/status.go
@@ -18,7 +18,6 @@ package polardbx
 
 import (
     "fmt"
-    corev1 "k8s.io/api/core/v1"
     metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 )
 
@@ -154,6 +153,12 @@ type ClusterReplicasStatus struct {
     CDC *ReplicasStatus `json:"cdc,omitempty"`
 }
 
+// PitrStatus represents the status of the PITR restore.
+type PitrStatus struct {
+    PrepareJobEndpoint string `json:"prepareJobEndpoint,omitempty"`
+    Job                string `json:"job,omitempty"`
+}
+
 type MonitorStatus string
 
 const (
diff --git a/api/v1/polardbx/topology.go b/api/v1/polardbx/topology.go
index 41b97f8..ccbc415 100644
--- a/api/v1/polardbx/topology.go
+++ b/api/v1/polardbx/topology.go
@@ -98,7 +98,7 @@ type XStoreTemplate struct {
     // +optional
     ServiceType corev1.ServiceType `json:"serviceType,omitempty"`
 
-    // +kubebuilder:default=true
+    // +kubebuilder:default=false
 
     // HostNetwork mode.
// +optional diff --git a/api/v1/polardbx/zz_generated.deepcopy.go b/api/v1/polardbx/zz_generated.deepcopy.go index 038d9ba..29a562f 100644 --- a/api/v1/polardbx/zz_generated.deepcopy.go +++ b/api/v1/polardbx/zz_generated.deepcopy.go @@ -2,7 +2,7 @@ // +build !ignore_autogenerated /* -Copyright 2022 Alibaba Group Holding Limited. +Copyright 2023 Alibaba Group Holding Limited. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -26,6 +26,58 @@ import ( "k8s.io/apimachinery/pkg/util/intstr" ) +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BackupStorageFilestreamAction) DeepCopyInto(out *BackupStorageFilestreamAction) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BackupStorageFilestreamAction. +func (in *BackupStorageFilestreamAction) DeepCopy() *BackupStorageFilestreamAction { + if in == nil { + return nil + } + out := new(BackupStorageFilestreamAction) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BackupStorageProvider) DeepCopyInto(out *BackupStorageProvider) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BackupStorageProvider. +func (in *BackupStorageProvider) DeepCopy() *BackupStorageProvider { + if in == nil { + return nil + } + out := new(BackupStorageProvider) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CDCConfig) DeepCopyInto(out *CDCConfig) { + *out = *in + if in.Envs != nil { + in, out := &in.Envs, &out.Envs + *out = make(map[string]intstr.IntOrString, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CDCConfig. +func (in *CDCConfig) DeepCopy() *CDCConfig { + if in == nil { + return nil + } + out := new(CDCConfig) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *CDCTemplate) DeepCopyInto(out *CDCTemplate) { *out = *in @@ -174,7 +226,8 @@ func (in *Condition) DeepCopy() *Condition { func (in *Config) DeepCopyInto(out *Config) { *out = *in in.CN.DeepCopyInto(&out.CN) - out.DN = in.DN + in.DN.DeepCopyInto(&out.DN) + in.CDC.DeepCopyInto(&out.CDC) } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Config. @@ -191,6 +244,13 @@ func (in *Config) DeepCopy() *Config { func (in *DNConfig) DeepCopyInto(out *DNConfig) { *out = *in out.LogPurgeInterval = in.LogPurgeInterval + if in.Envs != nil { + in, out := &in.Envs, &out.Envs + *out = make(map[string]intstr.IntOrString, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DNConfig. @@ -269,6 +329,21 @@ func (in *ParameterTemplate) DeepCopy() *ParameterTemplate { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *PitrStatus) DeepCopyInto(out *PitrStatus) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PitrStatus. +func (in *PitrStatus) DeepCopy() *PitrStatus { + if in == nil { + return nil + } + out := new(PitrStatus) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *PolarDBXRestoreFrom) DeepCopyInto(out *PolarDBXRestoreFrom) { *out = *in @@ -378,10 +453,40 @@ func (in *RestartingPods) DeepCopy() *RestartingPods { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RestoreBinlogSource) DeepCopyInto(out *RestoreBinlogSource) { + *out = *in + if in.StorageProvider != nil { + in, out := &in.StorageProvider, &out.StorageProvider + *out = new(BackupStorageProvider) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RestoreBinlogSource. +func (in *RestoreBinlogSource) DeepCopy() *RestoreBinlogSource { + if in == nil { + return nil + } + out := new(RestoreBinlogSource) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *RestoreSpec) DeepCopyInto(out *RestoreSpec) { *out = *in + if in.StorageProvider != nil { + in, out := &in.StorageProvider, &out.StorageProvider + *out = new(BackupStorageProvider) + **out = **in + } in.From.DeepCopyInto(&out.From) + if in.BinlogSource != nil { + in, out := &in.BinlogSource, &out.BinlogSource + *out = new(RestoreBinlogSource) + (*in).DeepCopyInto(*out) + } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RestoreSpec. diff --git a/api/v1/polardbxbackup_types.go b/api/v1/polardbxbackup_types.go index 7e29a47..df89204 100644 --- a/api/v1/polardbxbackup_types.go +++ b/api/v1/polardbxbackup_types.go @@ -17,6 +17,7 @@ limitations under the License. package v1 import ( + "github.com/alibaba/polardbx-operator/api/v1/polardbx" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/types" ) @@ -56,7 +57,14 @@ type PolarDBXBackupSpec struct { CleanPolicy CleanPolicyType `json:"cleanPolicy,omitempty"` // StorageProvider defines the backend storage to store the backup files. 
-    StorageProvider BackupStorageProvider `json:"storageProvider,omitempty"`
+    StorageProvider polardbx.BackupStorageProvider `json:"storageProvider,omitempty"`
+
+    // +kubebuilder:default=follower
+    // +kubebuilder:validation:Enum=leader;follower
+
+    // PreferredBackupRole defines the role of the node on which the backup will happen.
+    // +optional
+    PreferredBackupRole string `json:"preferredBackupRole,omitempty"`
 }
 
 // PolarDBXBackupPhase defines the phase of backup
@@ -68,8 +76,10 @@ const (
     BackupCollecting  PolarDBXBackupPhase = "Collecting"
     BackupCalculating PolarDBXBackupPhase = "Calculating"
     BinlogBackuping   PolarDBXBackupPhase = "BinlogBackuping"
+    MetadataBackuping PolarDBXBackupPhase = "MetadataBackuping"
     BackupFinished    PolarDBXBackupPhase = "Finished"
     BackupFailed      PolarDBXBackupPhase = "Failed"
+    BackupDummy       PolarDBXBackupPhase = "Dummy"
 )
 
 // PolarDBXBackupStatus defines the observed state of PolarDBXBackup
@@ -107,7 +117,7 @@ type PolarDBXBackupStatus struct {
     HeartBeatName string `json:"heartbeat,omitempty"`
 
     // StorageName represents the kind of Storage
-    StorageName BackupStorage `json:"storageName,omitempty"`
+    StorageName polardbx.BackupStorage `json:"storageName,omitempty"`
 
     // BackupRootPath stores the root path of backup set
     BackupRootPath string `json:"backupRootPath,omitempty"`
diff --git a/api/v1/polardbxbackupbinlog_types.go b/api/v1/polardbxbackupbinlog_types.go
new file mode 100644
index 0000000..9118281
--- /dev/null
+++ b/api/v1/polardbxbackupbinlog_types.go
@@ -0,0 +1,80 @@
+package v1
+
+import (
+    "github.com/alibaba/polardbx-operator/api/v1/polardbx"
+    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+    "k8s.io/apimachinery/pkg/util/intstr"
+)
+
+type PolarDBXBackupBinlogSpec struct {
+    // +kubebuilder:validation:Required
+    PxcName string `json:"pxcName,omitempty"`
+    // +kubebuilder:validation:Required
+    PxcUid string `json:"pxcUid,omitempty"`
+    // +kubebuilder:default=168
+    RemoteExpireLogHours intstr.IntOrString `json:"remoteExpireLogHours,omitempty"`
+    // +kubebuilder:default=7
+    LocalExpireLogHours intstr.IntOrString `json:"localExpireLogHours,omitempty"`
+    // +kubebuilder:default=60
+    MaxLocalBinlogCount uint64 `json:"maxLocalBinlogCount,omitempty"`
+    // +kubebuilder:default=true
+    PointInTimeRecover bool `json:"pointInTimeRecover,omitempty"`
+    // StorageProvider defines the backend storage to store the backup files.
+    StorageProvider polardbx.BackupStorageProvider `json:"storageProvider,omitempty"`
+    // +kubebuilder:default=CRC32
+    BinlogChecksum string `json:"binlogChecksum,omitempty"`
+}
+
+type PolarDbXBinlogPodStatus struct {
+    PodName string `json:"podName,omitempty"`
+    Version int64  `json:"version,omitempty"`
+    Host    string `json:"host,omitempty"`
+    WorkDir string `json:"workDir,omitempty"`
+}
+
+type BackupBinlogPhase string
+
+const (
+    BackupBinlogPhaseNew              BackupBinlogPhase = ""
+    BackupBinlogPhaseRunning          BackupBinlogPhase = "running"
+    BackupBinlogPhaseCheckExpiredFile BackupBinlogPhase = "checkExpiredFile"
+    BackupBinlogPhaseDeleting         BackupBinlogPhase = "deleting"
+)
+
+type PolarDBXBackupBinlogStatus struct {
+    // ObservedGeneration represents the observed generation of PolarDBXBackupBinlogSpec.
+    ObservedGeneration int64 `json:"observedGeneration,omitempty"`
+
+    // Phase represents the executing phase in the controller.
+    Phase BackupBinlogPhase `json:"phase,omitempty"`
+
+    // CheckExpireFileLastTime represents a timestamp of checking expired files.
+    CheckExpireFileLastTime uint64 `json:"checkExpireFileLastTime,omitempty"`
+
+    // LastDeletedFiles represents the files deleted recently.
+    LastDeletedFiles []string `json:"lastDeletedFiles,omitempty"`
+}
+
+// +kubebuilder:object:root=true
+// +kubebuilder:resource:shortName=pxcblog
+// +kubebuilder:printcolumn:name="PHASE",type=string,JSONPath=`.status.phase`
+
+type PolarDBXBackupBinlog struct {
+    metav1.TypeMeta   `json:",inline"`
+    metav1.ObjectMeta `json:"metadata,omitempty"`
+
+    Spec   PolarDBXBackupBinlogSpec   `json:"spec,omitempty"`
+    Status PolarDBXBackupBinlogStatus `json:"status,omitempty"`
+}
+
+// +kubebuilder:object:root=true
+
+type PolarDBXBackupBinlogList struct {
+    metav1.TypeMeta `json:",inline"`
+    metav1.ListMeta `json:"metadata,omitempty"`
+    Items           []PolarDBXBackupBinlog `json:"items"`
+}
+
+func init() {
+    SchemeBuilder.Register(&PolarDBXBackupBinlog{}, &PolarDBXBackupBinlogList{})
+}
diff --git a/api/v1/polardbxbackupschedule_types.go b/api/v1/polardbxbackupschedule_types.go
new file mode 100644
index 0000000..a68e95d
--- /dev/null
+++ b/api/v1/polardbxbackupschedule_types.go
@@ -0,0 +1,79 @@
+/*
+Copyright 2021 Alibaba Group Holding Limited.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1
+
+import (
+    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+)
+
+type PolarDBXBackupScheduleSpec struct {
+    // Schedule represents the backup schedule in the format of a cron expression.
+    Schedule string `json:"schedule,omitempty"`
+
+    // Suspend denotes whether the current schedule is paused.
+    Suspend bool `json:"suspend,omitempty"`
+
+    // +kubebuilder:default=0
+
+    // MaxBackupCount defines the limit of reserved backups.
+    // If the number of backups exceeds the limit, the oldest backup sets will be purged. Default is zero, which means no limit.
+    // +optional
+    MaxBackupCount int `json:"maxBackupCount,omitempty"`
+
+    // BackupSpec defines the spec of each backup.
+    BackupSpec PolarDBXBackupSpec `json:"backupSpec,omitempty"`
+}
+
+type PolarDBXBackupScheduleStatus struct {
+    // LastBackupTime records the time of the last backup.
+    LastBackupTime *metav1.Time `json:"lastBackupTime,omitempty"`
+
+    // NextBackupTime records the scheduled time of the next backup.
+    NextBackupTime *metav1.Time `json:"nextBackupTime,omitempty"`
+
+    // LastBackup records the name of the last backup.
+    LastBackup string `json:"lastBackup,omitempty"`
+}
+
+// +kubebuilder:object:root=true
+// +kubebuilder:resource:shortName=pxcbackupschedule;pbs
+// +kubebuilder:subresource:status
+// +kubebuilder:printcolumn:name="SCHEDULE",type=string,JSONPath=`.spec.schedule`
+// +kubebuilder:printcolumn:name="LAST_BACKUP_TIME",type=string,JSONPath=`.status.lastBackupTime`
+// +kubebuilder:printcolumn:name="NEXT_BACKUP_TIME",type=string,JSONPath=`.status.nextBackupTime`
+// +kubebuilder:printcolumn:name="LAST_BACKUP",type=string,JSONPath=`.status.lastBackup`
+
+type PolarDBXBackupSchedule struct {
+    metav1.TypeMeta   `json:",inline"`
+    metav1.ObjectMeta `json:"metadata,omitempty"`
+
+    Spec   PolarDBXBackupScheduleSpec   `json:"spec,omitempty"`
+    Status PolarDBXBackupScheduleStatus `json:"status,omitempty"`
+}
+
+// +kubebuilder:object:root=true
+
+// PolarDBXBackupScheduleList contains a list of PolarDBXBackupSchedule.
+type PolarDBXBackupScheduleList struct {
+    metav1.TypeMeta `json:",inline"`
+    metav1.ListMeta `json:"metadata,omitempty"`
+    Items           []PolarDBXBackupSchedule `json:"items"`
+}
+
+func init() {
+    SchemeBuilder.Register(&PolarDBXBackupSchedule{}, &PolarDBXBackupScheduleList{})
+}
diff --git a/api/v1/polardbxcluster_types.go b/api/v1/polardbxcluster_types.go
index a8e64c6..9e5dda4 100644
--- a/api/v1/polardbxcluster_types.go
+++ b/api/v1/polardbxcluster_types.go
@@ -128,6 +128,9 @@ type PolarDBXClusterStatus struct {
 
     // RestartingPods represents pods need to restart
     RestartingPods polardbx.RestartingPods `json:"restartingPods,omitempty"`
+
+    // PitrStatus represents the status of the PITR restore.
+    PitrStatus *polardbx.PitrStatus `json:"pitrStatus,omitempty"`
 }
 
 // +kubebuilder:object:root=true
diff --git a/api/v1/system_task_types.go b/api/v1/system_task_types.go
new file mode 100644
index 0000000..af43476
--- /dev/null
+++ b/api/v1/system_task_types.go
@@ -0,0 +1,61 @@
+/*
+Copyright 2021 Alibaba Group Holding Limited.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1
+
+import (
+    "github.com/alibaba/polardbx-operator/api/v1/systemtask"
+    corev1 "k8s.io/api/core/v1"
+    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+)
+
+type SystemTaskSpec struct {
+    TaskType    systemtask.Type             `json:"taskType,omitempty"`
+    CnReplicas  int                         `json:"cnReplicas,omitempty"`
+    CnResources corev1.ResourceRequirements `json:"cnResources,omitempty"`
+    DnResources corev1.ResourceRequirements `json:"dnResources,omitempty"`
+}
+
+type SystemTaskStatus struct {
+    Phase                   systemtask.Phase                    `json:"phase,omitempty"`
+    StBalanceResourceStatus *systemtask.StBalanceResourceStatus `json:"stBalanceResourceStatus,omitempty"`
+}
+
+// +kubebuilder:object:root=true
+// +kubebuilder:resource:shortName=st
+// +kubebuilder:printcolumn:name="PHASE",type=string,JSONPath=`.status.phase`
+
+// SystemTask is the schema for the systemtasks API.
+type SystemTask struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + + Spec SystemTaskSpec `json:"spec,omitempty"` + Status SystemTaskStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// SystemTaskList contains a list of SystemTask. +type SystemTaskList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []SystemTask `json:"items"` +} + +func init() { + SchemeBuilder.Register(&SystemTaskList{}, &SystemTask{}) +} diff --git a/api/v1/systemtask/balance_resource.go b/api/v1/systemtask/balance_resource.go new file mode 100644 index 0000000..c2d2c75 --- /dev/null +++ b/api/v1/systemtask/balance_resource.go @@ -0,0 +1,7 @@ +package systemtask + +type StBalanceResourceStatus struct { + RebuildTaskName string `json:"rebuildTaskName,omitempty"` + RebuildFinish bool `json:"rebuildFinish,omitempty"` + BalanceLeaderFinish bool `json:"balanceLeaderFinish,omitempty"` +} diff --git a/api/v1/systemtask/consts.go b/api/v1/systemtask/consts.go new file mode 100644 index 0000000..e2c2cd0 --- /dev/null +++ b/api/v1/systemtask/consts.go @@ -0,0 +1,33 @@ +/* +Copyright 2021 Alibaba Group Holding Limited. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package systemtask + +type Type string + +const ( + BalanceResource Type = "BalanceResource" +) + +type Phase string + +// balance +const ( + InitPhase Phase = "" + RebuildTaskPhase Phase = "RebuildTaskPhase" + BalanceRolePhase Phase = "BalanceRolePhase" + SuccessPhase Phase = "Success" +) diff --git a/api/v1/systemtask/generate.go b/api/v1/systemtask/generate.go new file mode 100644 index 0000000..9883c5e --- /dev/null +++ b/api/v1/systemtask/generate.go @@ -0,0 +1,19 @@ +/* +Copyright 2021 Alibaba Group Holding Limited. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// +kubebuilder:object:generate=true + +package systemtask diff --git a/api/v1/systemtask/zz_generated.deepcopy.go b/api/v1/systemtask/zz_generated.deepcopy.go new file mode 100644 index 0000000..fff152e --- /dev/null +++ b/api/v1/systemtask/zz_generated.deepcopy.go @@ -0,0 +1,39 @@ +//go:build !ignore_autogenerated +// +build !ignore_autogenerated + +/* +Copyright 2023 Alibaba Group Holding Limited. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by controller-gen. DO NOT EDIT. + +package systemtask + +import () + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *StBalanceResourceStatus) DeepCopyInto(out *StBalanceResourceStatus) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StBalanceResourceStatus. +func (in *StBalanceResourceStatus) DeepCopy() *StBalanceResourceStatus { + if in == nil { + return nil + } + out := new(StBalanceResourceStatus) + in.DeepCopyInto(out) + return out +} diff --git a/api/v1/xstore/config.go b/api/v1/xstore/config.go index 5cf67cd..d2fb0b8 100644 --- a/api/v1/xstore/config.go +++ b/api/v1/xstore/config.go @@ -43,6 +43,7 @@ type ControllerConfig struct { } type Config struct { - Dynamic ControllerConfig `json:"controller,omitempty"` - Engine EngineConfig `json:"engine,omitempty"` + Dynamic ControllerConfig `json:"controller,omitempty"` + Engine EngineConfig `json:"engine,omitempty"` + Envs map[string]intstr.IntOrString `json:"envs,omitempty"` } diff --git a/api/v1/xstore/follower.go b/api/v1/xstore/follower.go index cb95640..a156e68 100644 --- a/api/v1/xstore/follower.go +++ b/api/v1/xstore/follower.go @@ -21,6 +21,10 @@ const ( FollowerPhaseDeleting FollowerPhase = "FollowerPhaseDeleting" ) +func IsEndPhase(phase FollowerPhase) bool { + return phase == FollowerPhaseSuccess || phase == FollowerPhaseFailed || phase == FollowerPhaseDeleting +} + type FollowerRole string const ( diff --git a/api/v1/xstore/phase.go b/api/v1/xstore/phase.go index a3daced..61edb36 100644 --- a/api/v1/xstore/phase.go +++ b/api/v1/xstore/phase.go @@ -32,6 +32,7 @@ const ( PhaseFailed Phase = "Failed" PhaseRestarting Phase = "Restarting" PhaseUnknown Phase = "Unknown" + PhaseAdapting Phase = "Adapting" ) type Stage string @@ -43,3 +44,10 @@ const ( StageClean Stage = "Clean" StageUpdate Stage = "Update" ) + +// valid stage of xstore adapting +const ( + StageAdapting Stage = "StageAdapting" + StageFlushMetadata Stage = "StageFlushMetadata" + StageBeforeSuccess Stage = "StageBeforeSuccess" +) diff --git a/api/v1/xstore/topology.go b/api/v1/xstore/topology.go index 0a8e501..79ae382 100644 --- a/api/v1/xstore/topology.go +++ b/api/v1/xstore/topology.go @@ -35,7 +35,7 @@ type NodeSpec struct { // ImagePullSecrets represents the secrets for pulling private images. ImagePullSecrets []v1.LocalObjectReference `json:"imagePullSecrets,omitempty"` - // +kubebuilder:default=true + // +kubebuilder:default=false // HostNetwork defines whether the node uses the host network. Default is true. HostNetwork *bool `json:"hostNetwork,omitempty"` diff --git a/api/v1/xstore/zz_generated.deepcopy.go b/api/v1/xstore/zz_generated.deepcopy.go index 2e6f3e5..543479f 100644 --- a/api/v1/xstore/zz_generated.deepcopy.go +++ b/api/v1/xstore/zz_generated.deepcopy.go @@ -2,7 +2,7 @@ // +build !ignore_autogenerated /* -Copyright 2022 Alibaba Group Holding Limited. +Copyright 2023 Alibaba Group Holding Limited. 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
@@ -25,6 +25,7 @@ import (
     "github.com/alibaba/polardbx-operator/api/v1/common"
     corev1 "k8s.io/api/core/v1"
     "k8s.io/apimachinery/pkg/apis/meta/v1"
+    "k8s.io/apimachinery/pkg/util/intstr"
 )
 
 // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
@@ -52,6 +53,13 @@ func (in *Config) DeepCopyInto(out *Config) {
     *out = *in
     in.Dynamic.DeepCopyInto(&out.Dynamic)
     in.Engine.DeepCopyInto(&out.Engine)
+    if in.Envs != nil {
+        in, out := &in.Envs, &out.Envs
+        *out = make(map[string]intstr.IntOrString, len(*in))
+        for key, val := range *in {
+            (*out)[key] = val
+        }
+    }
 }
 
 // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Config.
diff --git a/api/v1/xstore_types.go b/api/v1/xstore_types.go
index ccfb509..05d1e96 100644
--- a/api/v1/xstore_types.go
+++ b/api/v1/xstore_types.go
@@ -49,6 +49,8 @@ type XStoreRestoreSpec struct {
     // TimeZone defines the specified time zone of the restore time. Default is the location of current cluster.
     // +optional
     TimeZone string `json:"timezone,omitempty"`
+
+    PitrEndpoint string `json:"pitrEndpoint,omitempty"`
 }
 
 type XStoreSpec struct {
diff --git a/api/v1/xstorebackup_types.go b/api/v1/xstorebackup_types.go
index c8e5bad..296ac41 100644
--- a/api/v1/xstorebackup_types.go
+++ b/api/v1/xstorebackup_types.go
@@ -17,14 +17,17 @@ limitations under the License.
 package v1
 
 import (
+    "github.com/alibaba/polardbx-operator/api/v1/polardbx"
     metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+    "k8s.io/apimachinery/pkg/types"
 )
 
 // EDIT THIS FILE! THIS IS SCAFFOLDING FOR YOU TO OWN!
 // NOTE: json tags are required. Any new fields you add must have json tags for the fields to be serialized.
 
 type XStoreReference struct {
-    Name string `json:"name,omitempty"`
+    Name string    `json:"name,omitempty"`
+    UID  types.UID `json:"uid,omitempty"`
 }
 
 // XStoreBackupSpec defines the desired state of XStoreBackup
@@ -33,13 +36,24 @@ type XStoreBackupSpec struct {
 
     // Engine is the engine used by xstore. Default is "galaxy".
     // +optional
-    Engine string `json:"engine,omitempty"`
-    XStore XStoreReference `json:"xstore,omitempty"`
-    Timezone string `json:"timezone,omitempty"`
+    Engine string `json:"engine,omitempty"`
+
+    XStore XStoreReference `json:"xstore,omitempty"`
+
+    Timezone string `json:"timezone,omitempty"`
+
     // RetentionTime defines how long will this backup set be kept
     RetentionTime metav1.Duration `json:"retentionTime,omitempty"`
+
     // StorageProvider defines backup storage configuration
-    StorageProvider BackupStorageProvider `json:"storageProvider,omitempty"`
+    StorageProvider polardbx.BackupStorageProvider `json:"storageProvider,omitempty"`
+
+    // +kubebuilder:default=follower
+    // +kubebuilder:validation:Enum=leader;follower
+
+    // PreferredBackupRole defines the role of the node on which the backup will happen.
+    // +optional
+    PreferredBackupRole string `json:"preferredBackupRole,omitempty"`
 }
 
 // XStoreBackupStatus defines the observed state of XStoreBackup
@@ -50,7 +64,7 @@ type XStoreBackupStatus struct {
     TargetPod   string `json:"targetPod,omitempty"`
     CommitIndex int64  `json:"commitIndex,omitempty"`
     // StorageName represents the kind of Storage
-    StorageName BackupStorage `json:"storageName,omitempty"`
+    StorageName polardbx.BackupStorage `json:"storageName,omitempty"`
     // BackupRootPath stores the root path of backup set
     BackupRootPath string `json:"backupRootPath,omitempty"`
     // BackupSetTimestamp records timestamp of last event included in tailored binlog
@@ -66,6 +80,7 @@ const (
     XStoreBinlogBackuping XStoreBackupPhase = "Binloging"
     XStoreBinlogWaiting   XStoreBackupPhase = "Waiting"
     XStoreBackupFinished  XStoreBackupPhase = "Finished"
+    XStoreBackupDummy     XStoreBackupPhase = "Dummy"
 )
 
 // +kubebuilder:object:root=true
diff --git a/api/v1/zz_generated.deepcopy.go b/api/v1/zz_generated.deepcopy.go
index c220972..b5d7ee4 100644
--- a/api/v1/zz_generated.deepcopy.go
+++ b/api/v1/zz_generated.deepcopy.go
@@ -2,7 +2,7 @@
 // +build !ignore_autogenerated
 
 /*
-Copyright 2022 Alibaba Group Holding Limited.
+Copyright 2023 Alibaba Group Holding Limited.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
@@ -23,27 +23,13 @@ package v1
 
 import (
     "github.com/alibaba/polardbx-operator/api/v1/polardbx"
+    "github.com/alibaba/polardbx-operator/api/v1/systemtask"
     "github.com/alibaba/polardbx-operator/api/v1/xstore"
     metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
     runtime "k8s.io/apimachinery/pkg/runtime"
     "k8s.io/apimachinery/pkg/util/intstr"
 )
 
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *BackupStorageProvider) DeepCopyInto(out *BackupStorageProvider) {
-    *out = *in
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BackupStorageProvider.
-func (in *BackupStorageProvider) DeepCopy() *BackupStorageProvider {
-    if in == nil {
-        return nil
-    }
-    out := new(BackupStorageProvider)
-    in.DeepCopyInto(out)
-    return out
-}
-
 // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
 func (in *FlowFlagType) DeepCopyInto(out *FlowFlagType) {
     *out = *in
@@ -198,6 +184,103 @@ func (in *PolarDBXBackup) DeepCopyObject() runtime.Object {
     return nil
 }
 
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *PolarDBXBackupBinlog) DeepCopyInto(out *PolarDBXBackupBinlog) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + out.Spec = in.Spec + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PolarDBXBackupBinlog. +func (in *PolarDBXBackupBinlog) DeepCopy() *PolarDBXBackupBinlog { + if in == nil { + return nil + } + out := new(PolarDBXBackupBinlog) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *PolarDBXBackupBinlog) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PolarDBXBackupBinlogList) DeepCopyInto(out *PolarDBXBackupBinlogList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]PolarDBXBackupBinlog, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PolarDBXBackupBinlogList. +func (in *PolarDBXBackupBinlogList) DeepCopy() *PolarDBXBackupBinlogList { + if in == nil { + return nil + } + out := new(PolarDBXBackupBinlogList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *PolarDBXBackupBinlogList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PolarDBXBackupBinlogSpec) DeepCopyInto(out *PolarDBXBackupBinlogSpec) { + *out = *in + out.RemoteExpireLogHours = in.RemoteExpireLogHours + out.LocalExpireLogHours = in.LocalExpireLogHours + out.StorageProvider = in.StorageProvider +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PolarDBXBackupBinlogSpec. +func (in *PolarDBXBackupBinlogSpec) DeepCopy() *PolarDBXBackupBinlogSpec { + if in == nil { + return nil + } + out := new(PolarDBXBackupBinlogSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PolarDBXBackupBinlogStatus) DeepCopyInto(out *PolarDBXBackupBinlogStatus) { + *out = *in + if in.LastDeletedFiles != nil { + in, out := &in.LastDeletedFiles, &out.LastDeletedFiles + *out = make([]string, len(*in)) + copy(*out, *in) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PolarDBXBackupBinlogStatus. +func (in *PolarDBXBackupBinlogStatus) DeepCopy() *PolarDBXBackupBinlogStatus { + if in == nil { + return nil + } + out := new(PolarDBXBackupBinlogStatus) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *PolarDBXBackupList) DeepCopyInto(out *PolarDBXBackupList) { *out = *in @@ -230,6 +313,104 @@ func (in *PolarDBXBackupList) DeepCopyObject() runtime.Object { return nil } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *PolarDBXBackupSchedule) DeepCopyInto(out *PolarDBXBackupSchedule) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + out.Spec = in.Spec + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PolarDBXBackupSchedule. +func (in *PolarDBXBackupSchedule) DeepCopy() *PolarDBXBackupSchedule { + if in == nil { + return nil + } + out := new(PolarDBXBackupSchedule) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *PolarDBXBackupSchedule) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PolarDBXBackupScheduleList) DeepCopyInto(out *PolarDBXBackupScheduleList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]PolarDBXBackupSchedule, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PolarDBXBackupScheduleList. +func (in *PolarDBXBackupScheduleList) DeepCopy() *PolarDBXBackupScheduleList { + if in == nil { + return nil + } + out := new(PolarDBXBackupScheduleList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *PolarDBXBackupScheduleList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PolarDBXBackupScheduleSpec) DeepCopyInto(out *PolarDBXBackupScheduleSpec) { + *out = *in + out.BackupSpec = in.BackupSpec +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PolarDBXBackupScheduleSpec. +func (in *PolarDBXBackupScheduleSpec) DeepCopy() *PolarDBXBackupScheduleSpec { + if in == nil { + return nil + } + out := new(PolarDBXBackupScheduleSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PolarDBXBackupScheduleStatus) DeepCopyInto(out *PolarDBXBackupScheduleStatus) { + *out = *in + if in.LastBackupTime != nil { + in, out := &in.LastBackupTime, &out.LastBackupTime + *out = (*in).DeepCopy() + } + if in.NextBackupTime != nil { + in, out := &in.NextBackupTime, &out.NextBackupTime + *out = (*in).DeepCopy() + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PolarDBXBackupScheduleStatus. +func (in *PolarDBXBackupScheduleStatus) DeepCopy() *PolarDBXBackupScheduleStatus { + if in == nil { + return nil + } + out := new(PolarDBXBackupScheduleStatus) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
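Similarly, the new PolarDBXBackupSchedule resource drives periodic backups of a PolarDBXCluster. A sketch based on the schedule CRD added in this patch; the names, the cron expression and the sink are hypothetical:

apiVersion: polardbx.aliyun.com/v1
kind: PolarDBXBackupSchedule
metadata:
  name: demo-backup-schedule      # hypothetical name
spec:
  schedule: "0 2 * * *"           # cron expression: every day at 02:00
  maxBackupCount: 7               # purge the oldest sets beyond seven; 0 means no limit
  suspend: false
  backupSpec:
    cluster:
      name: demo-cluster          # hypothetical target PolarDBXCluster
    cleanPolicy: Retain           # CRD default
    preferredBackupRole: follower # CRD default
    retentionTime: 720h           # metav1.Duration format
    storageProvider:
      storageName: sftp           # assumes an sftp storage is configured for hpfs
      sink: default               # hypothetical sink name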
func (in *PolarDBXBackupSpec) DeepCopyInto(out *PolarDBXBackupSpec) { *out = *in @@ -533,6 +714,11 @@ func (in *PolarDBXClusterStatus) DeepCopyInto(out *PolarDBXClusterStatus) { (*in).DeepCopyInto(*out) } in.RestartingPods.DeepCopyInto(&out.RestartingPods) + if in.PitrStatus != nil { + in, out := &in.PitrStatus, &out.PitrStatus + *out = new(polardbx.PitrStatus) + **out = **in + } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PolarDBXClusterStatus. @@ -890,6 +1076,117 @@ func (in *PolarDBXParameterTemplateStatus) DeepCopy() *PolarDBXParameterTemplate return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PolarDbXBinlogPodStatus) DeepCopyInto(out *PolarDbXBinlogPodStatus) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PolarDbXBinlogPodStatus. +func (in *PolarDbXBinlogPodStatus) DeepCopy() *PolarDbXBinlogPodStatus { + if in == nil { + return nil + } + out := new(PolarDbXBinlogPodStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SystemTask) DeepCopyInto(out *SystemTask) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SystemTask. +func (in *SystemTask) DeepCopy() *SystemTask { + if in == nil { + return nil + } + out := new(SystemTask) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *SystemTask) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SystemTaskList) DeepCopyInto(out *SystemTaskList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]SystemTask, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SystemTaskList. +func (in *SystemTaskList) DeepCopy() *SystemTaskList { + if in == nil { + return nil + } + out := new(SystemTaskList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *SystemTaskList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SystemTaskSpec) DeepCopyInto(out *SystemTaskSpec) { + *out = *in + in.CnResources.DeepCopyInto(&out.CnResources) + in.DnResources.DeepCopyInto(&out.DnResources) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SystemTaskSpec. +func (in *SystemTaskSpec) DeepCopy() *SystemTaskSpec { + if in == nil { + return nil + } + out := new(SystemTaskSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. 
in must be non-nil. +func (in *SystemTaskStatus) DeepCopyInto(out *SystemTaskStatus) { + *out = *in + if in.StBalanceResourceStatus != nil { + in, out := &in.StBalanceResourceStatus, &out.StBalanceResourceStatus + *out = new(systemtask.StBalanceResourceStatus) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SystemTaskStatus. +func (in *SystemTaskStatus) DeepCopy() *SystemTaskStatus { + if in == nil { + return nil + } + out := new(SystemTaskStatus) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *TemplateNode) DeepCopyInto(out *TemplateNode) { *out = *in diff --git a/build/images/polardbx-hpfs/Dockerfile b/build/images/polardbx-hpfs/Dockerfile index 4988567..418723e 100644 --- a/build/images/polardbx-hpfs/Dockerfile +++ b/build/images/polardbx-hpfs/Dockerfile @@ -13,7 +13,7 @@ # limitations under the License. # Build the init command -FROM golang:1.17 as builder +FROM golang:1.18 as builder WORKDIR /workspace diff --git a/build/images/polardbx-job/Dockerfile b/build/images/polardbx-job/Dockerfile new file mode 100644 index 0000000..f49c883 --- /dev/null +++ b/build/images/polardbx-job/Dockerfile @@ -0,0 +1,49 @@ +# Copyright 2021 Alibaba Group Holding Limited. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# Build the polardbx-job command +FROM golang:1.18 as builder + +WORKDIR /workspace + +# Copy the Go modules manifests +COPY go.mod go.mod +COPY go.sum go.sum + +ENV GOPROXY=https://goproxy.cn +# Cache deps before building and copying source +RUN go mod download + +# Copy the Go sources +ADD api api +ADD cmd cmd +ADD pkg pkg +ADD third-party third-party + +# Build +RUN mkdir -p target +RUN CGO_ENABLED=0 GOOS=linux GO111MODULE=on go build -tags polardbx -o target/polardbx-job cmd/polardbx-job/main.go + +# Use busybox as the base image +FROM busybox:latest + +WORKDIR / +COPY --from=builder /workspace/target/polardbx-job . + +USER root:root + +ARG VERSION=test +RUN echo "$VERSION" > version + +ENTRYPOINT ["/polardbx-job"] diff --git a/build/images/polardbx-operator/Dockerfile b/build/images/polardbx-operator/Dockerfile index 3946a02..7348f4d 100644 --- a/build/images/polardbx-operator/Dockerfile +++ b/build/images/polardbx-operator/Dockerfile @@ -13,7 +13,7 @@ # Build the manager binary -FROM golang:1.17 as builder +FROM golang:1.18 as builder WORKDIR /workspace diff --git a/build/root/Makefile b/build/root/Makefile index 232cf84..8fe1505 100644 --- a/build/root/Makefile +++ b/build/root/Makefile @@ -172,7 +172,7 @@ endif .PHONY: manifests manifests: - @controller-gen crd:crdVersions=v1 rbac:roleName=manager-role webhook paths="./..." output:crd:artifacts:config=charts/polardbx-operator/crds + @controller-gen crd:crdVersions=v1 rbac:roleName=manager-role webhook paths="./api/..."
output:crd:artifacts:config=charts/polardbx-operator/crds .PHONY: generate-notice generate-notice: diff --git a/charts/polardbx-logcollector/Chart.yaml b/charts/polardbx-logcollector/Chart.yaml index 50b116b..bde7c4e 100644 --- a/charts/polardbx-logcollector/Chart.yaml +++ b/charts/polardbx-logcollector/Chart.yaml @@ -2,8 +2,8 @@ apiVersion: v2 name: polardbx-logcollector description: Helm chart of polardbx-operator logcollector plugin type: application -version: 1.3.0 -appVersion: v1.3.0 +version: 1.4.0-beta +appVersion: v1.4.0-beta keywords: - polardb-x - operator diff --git a/charts/polardbx-monitor/Chart.yaml b/charts/polardbx-monitor/Chart.yaml index 2125834..4a2c202 100644 --- a/charts/polardbx-monitor/Chart.yaml +++ b/charts/polardbx-monitor/Chart.yaml @@ -2,8 +2,8 @@ apiVersion: v2 name: polardbx-monitor description: Helm chart of polardbx-operator monitor plugin type: application -version: 1.3.0 -appVersion: v1.3.0 +version: 1.4.0-beta +appVersion: v1.4.0-beta keywords: - polardb-x - operator diff --git a/charts/polardbx-monitor/dashboard/polardbx-overview.json b/charts/polardbx-monitor/dashboard/polardbx-overview.json index 5865bc7..7cde23f 100644 --- a/charts/polardbx-monitor/dashboard/polardbx-overview.json +++ b/charts/polardbx-monitor/dashboard/polardbx-overview.json @@ -86,7 +86,7 @@ "targets": [ { "exemplar": true, - "expr": "sum(\n polardbx_container_cpu_usage_seconds_total:sum_rate{namespace=\"$namespace\", container=\"engine\"}\n* on(namespace,pod)\n group_left(workload, workload_type) polardbx_up{polardbx_name=\"$polardbx\", namespace=\"$namespace\"}\n)\n/sum(\n kube_pod_container_resource_limits_cpu_cores{namespace=\"$namespace\", container=\"engine\"}\n* on(namespace,pod)\n group_left(workload, workload_type) polardbx_up{polardbx_name=\"$polardbx\", namespace=\"$namespace\", polardbx_role=\"cn\"}\n)", + "expr": "sum(\n polardbx_container_cpu_usage_seconds_total:sum_rate{namespace=\"$namespace\", container=\"engine\"}\n* on(namespace,pod)\n group_left(workload, workload_type) polardbx_up{polardbx_name=\"$polardbx\", namespace=\"$namespace\"}\n)\n/sum(\n kube_pod_container_resource_limits{namespace=\"$namespace\", container=\"engine\", resource=\"cpu\", unit=\"core\"}\n* on(namespace,pod)\n group_left(workload, workload_type) polardbx_up{polardbx_name=\"$polardbx\", namespace=\"$namespace\", polardbx_role=\"cn\"}\n)", "instant": true, "interval": "", "legendFormat": "CPU (CN)", @@ -95,7 +95,7 @@ }, { "exemplar": true, - "expr": "sum(\n container_memory_working_set_bytes{namespace=\"$namespace\", container=\"engine\", id=~\"^/kubepods.*\"}\n * on(namespace,pod)\n group_left() polardbx_up{polardbx_name=\"$polardbx\", namespace=\"$namespace\"}\n)\n/sum(\n kube_pod_container_resource_limits_memory_bytes{namespace=\"$namespace\", container=\"engine\"}\n* on(namespace,pod)\n group_left() polardbx_up{polardbx_name=\"$polardbx\", namespace=\"$namespace\", polardbx_role=\"cn\"}\n)\n", + "expr": "sum(\n container_memory_working_set_bytes{namespace=\"$namespace\", container=\"engine\", id=~\"^/kubepods.*\"}\n * on(namespace,pod)\n group_left() polardbx_up{polardbx_name=\"$polardbx\", namespace=\"$namespace\"}\n)\n/sum(\n kube_pod_container_resource_limits{namespace=\"$namespace\", container=\"engine\", resource=\"memory\", unit=\"byte\"}\n* on(namespace,pod)\n group_left() polardbx_up{polardbx_name=\"$polardbx\", namespace=\"$namespace\", polardbx_role=\"cn\"}\n)\n", "hide": false, "instant": true, "interval": "", @@ -367,7 +367,7 @@ "targets": [ { "exemplar": true, - "expr": 
"sum(rate(polardbx_stats_best_effort_transaction_count_total{polardbx_name=\"$polardbx\", namespace=\"$namespace\"}[30s])) +\n\nsum(rate(polardbx_stats_xa_transaction_count_total{polardbx_name=\"$polardbx\", namespace=\"$namespace\"}[30s])) +\n\nsum(rate(polardbx_stats_tso_transaction_count_total{polardbx_name=\"$polardbx\", namespace=\"$namespace\"}[30s])) ", + "expr": "sum(rate(polardbx_stats_best_effort_transaction_count_total{polardbx_name=\"$polardbx\", namespace=\"$namespace\"}[1m])) +\n\nsum(rate(polardbx_stats_xa_transaction_count_total{polardbx_name=\"$polardbx\", namespace=\"$namespace\"}[1m])) +\n\nsum(rate(polardbx_stats_tso_transaction_count_total{polardbx_name=\"$polardbx\", namespace=\"$namespace\"}[1m])) ", "instant": true, "interval": "", "intervalFactor": 1, @@ -436,7 +436,7 @@ "targets": [ { "exemplar": true, - "expr": "sum(rate(polardbx_stats_error_count_total{polardbx_name=\"$polardbx\", namespace=\"$namespace\"}[30s]))", + "expr": "sum(rate(polardbx_stats_error_count_total{polardbx_name=\"$polardbx\", namespace=\"$namespace\"}[1m]))", "instant": true, "interval": "", "intervalFactor": 1, @@ -509,7 +509,7 @@ "targets": [ { "exemplar": true, - "expr": "sum(delta(polardbx_stats_request_time_cost_total{polardbx_name=\"$polardbx\", namespace=\"$namespace\"}[30s])) / (sum(delta (polardbx_stats_request_count_total{polardbx_name=\"$polardbx\", namespace=\"$namespace\"}[30s])) + 1) / 1000", + "expr": "sum(delta(polardbx_stats_request_time_cost_total{polardbx_name=\"$polardbx\", namespace=\"$namespace\"}[1m])) / (sum(delta (polardbx_stats_request_count_total{polardbx_name=\"$polardbx\", namespace=\"$namespace\"}[1m])) + 1) / 1000", "instant": true, "interval": "", "legendFormat": "Logical", @@ -579,7 +579,7 @@ "targets": [ { "exemplar": true, - "expr": "sum(delta(polardbx_stats_request_time_cost_total{polardbx_name=\"$polardbx\", namespace=\"$namespace\"}[30s])) / (sum(delta (polardbx_stats_request_count_total{polardbx_name=\"$polardbx\", namespace=\"$namespace\"}[30s])) + 1) / 1000", + "expr": "sum(delta(polardbx_stats_request_time_cost_total{polardbx_name=\"$polardbx\", namespace=\"$namespace\"}[1m])) / (sum(delta (polardbx_stats_request_count_total{polardbx_name=\"$polardbx\", namespace=\"$namespace\"}[1m])) + 1) / 1000", "instant": true, "interval": "", "legendFormat": "RT (Logical)", @@ -588,7 +588,7 @@ }, { "exemplar": true, - "expr": "sum(delta(polardbx_stats_physical_request_time_cost_total{polardbx_name=\"$polardbx\", namespace=\"$namespace\"}[30s])) / (sum(delta (polardbx_stats_physical_request_count_total{polardbx_name=\"$polardbx\", namespace=\"$namespace\"}[30s])) + 1) / 1000", + "expr": "sum(delta(polardbx_stats_physical_request_time_cost_total{polardbx_name=\"$polardbx\", namespace=\"$namespace\"}[1m])) / (sum(delta (polardbx_stats_physical_request_count_total{polardbx_name=\"$polardbx\", namespace=\"$namespace\"}[1m])) + 1) / 1000", "hide": false, "instant": true, "interval": "", @@ -951,7 +951,7 @@ }, { "exemplar": true, - "expr": "sum by (pod) (\n kube_pod_container_resource_limits_cpu_cores{namespace=\"$namespace\", container=\"engine\"}\n* on(namespace,pod)\n group_left() mysql_up{polardbx_name=\"$polardbx\", polardbx_role=\"gms\", namespace=\"$namespace\", xstore_role=\"leader\"}\n)", + "expr": "sum by (pod) (\n kube_pod_container_resource_limits{namespace=\"$namespace\", container=\"engine\", resource=\"cpu\", unit=\"core\"}\n* on(namespace,pod)\n group_left() mysql_up{polardbx_name=\"$polardbx\", polardbx_role=\"gms\", namespace=\"$namespace\", 
xstore_role=\"leader\"}\n)", "format": "table", "hide": false, "instant": true, @@ -961,7 +961,7 @@ }, { "exemplar": true, - "expr": "sum by (pod) (polardbx_container_cpu_usage_seconds_total:sum_rate{namespace=\"$namespace\", container=\"engine\"}\n * on(namespace,pod)\n group_left() mysql_up{polardbx_name=\"$polardbx\", polardbx_role=\"gms\", namespace=\"$namespace\", xstore_role=\"leader\"})\n/ sum by (pod) (\n kube_pod_container_resource_limits_cpu_cores{namespace=\"$namespace\", container=\"engine\"}\n* on(namespace,pod)\n group_left() mysql_up{polardbx_name=\"$polardbx\", polardbx_role=\"gms\", namespace=\"$namespace\", xstore_role=\"leader\"}\n)", + "expr": "sum by (pod) (polardbx_container_cpu_usage_seconds_total:sum_rate{namespace=\"$namespace\", container=\"engine\"}\n * on(namespace,pod)\n group_left() mysql_up{polardbx_name=\"$polardbx\", polardbx_role=\"gms\", namespace=\"$namespace\", xstore_role=\"leader\"})\n/ sum by (pod) (\n kube_pod_container_resource_limits{namespace=\"$namespace\", container=\"engine\", resource=\"cpu\", unit=\"core\"}\n* on(namespace,pod)\n group_left() mysql_up{polardbx_name=\"$polardbx\", polardbx_role=\"gms\", namespace=\"$namespace\", xstore_role=\"leader\"}\n)", "format": "table", "hide": false, "instant": true, @@ -981,7 +981,7 @@ }, { "exemplar": true, - "expr": "sum by (pod) (\n kube_pod_container_resource_limits_memory_bytes{namespace=\"$namespace\", container=\"engine\"}\n* on(namespace,pod)\n group_left() mysql_up{polardbx_name=\"$polardbx\", polardbx_role=\"gms\", namespace=\"$namespace\", xstore_role=\"leader\"}\n)", + "expr": "sum by (pod) (\n kube_pod_container_resource_limits{namespace=\"$namespace\", container=\"engine\", resource=\"memory\", unit=\"byte\"}\n* on(namespace,pod)\n group_left() mysql_up{polardbx_name=\"$polardbx\", polardbx_role=\"gms\", namespace=\"$namespace\", xstore_role=\"leader\"}\n)", "format": "table", "hide": false, "instant": true, @@ -991,7 +991,7 @@ }, { "exemplar": true, - "expr": "sum by (pod) (\n container_memory_working_set_bytes{namespace=\"$namespace\", container=\"engine\", id=~\"^/kubepods.*\"}\n * on(namespace,pod)\n group_left() mysql_up{polardbx_name=\"$polardbx\", polardbx_role=\"gms\", namespace=\"$namespace\", xstore_role=\"leader\"}\n)\n/\nsum by (pod) (\n kube_pod_container_resource_limits_memory_bytes{namespace=\"$namespace\", container=\"engine\"}\n* on(namespace,pod)\n group_left() mysql_up{polardbx_name=\"$polardbx\", polardbx_role=\"gms\", namespace=\"$namespace\", xstore_role=\"leader\"}\n)", + "expr": "sum by (pod) (\n container_memory_working_set_bytes{namespace=\"$namespace\", container=\"engine\", id=~\"^/kubepods.*\"}\n * on(namespace,pod)\n group_left() mysql_up{polardbx_name=\"$polardbx\", polardbx_role=\"gms\", namespace=\"$namespace\", xstore_role=\"leader\"}\n)\n/\nsum by (pod) (\n kube_pod_container_resource_limits{namespace=\"$namespace\", container=\"engine\", resource=\"memory\", unit=\"byte\"}\n* on(namespace,pod)\n group_left() mysql_up{polardbx_name=\"$polardbx\", polardbx_role=\"gms\", namespace=\"$namespace\", xstore_role=\"leader\"}\n)", "format": "table", "hide": false, "instant": true, @@ -1331,7 +1331,7 @@ }, { "exemplar": true, - "expr": "sum by (pod) (\n kube_pod_container_resource_limits_cpu_cores{namespace=\"$namespace\", container=\"engine\"}\n* on(namespace,pod)\n group_left() polardbx_up{polardbx_name=\"$polardbx\", namespace=\"$namespace\", polardbx_role=\"cn\"}\n)", + "expr": "sum by (pod) (\n kube_pod_container_resource_limits{namespace=\"$namespace\", 
container=\"engine\", resource=\"cpu\", unit=\"core\"}\n* on(namespace,pod)\n group_left() polardbx_up{polardbx_name=\"$polardbx\", namespace=\"$namespace\", polardbx_role=\"cn\"}\n)", "format": "table", "hide": false, "instant": true, @@ -1341,7 +1341,7 @@ }, { "exemplar": true, - "expr": "sum by (pod) (polardbx_container_cpu_usage_seconds_total:sum_rate{namespace=\"$namespace\", container=\"engine\"}\n * on(namespace,pod)\n group_left() polardbx_up{polardbx_name=\"$polardbx\", namespace=\"$namespace\", polardbx_role=\"cn\"})\n/ sum by (pod) (\n kube_pod_container_resource_limits_cpu_cores{namespace=\"$namespace\", container=\"engine\"}\n* on(namespace,pod)\n group_left() polardbx_up{polardbx_name=\"$polardbx\", namespace=\"$namespace\", polardbx_role=\"cn\"}\n)", + "expr": "sum by (pod) (polardbx_container_cpu_usage_seconds_total:sum_rate{namespace=\"$namespace\", container=\"engine\"}\n * on(namespace,pod)\n group_left() polardbx_up{polardbx_name=\"$polardbx\", namespace=\"$namespace\", polardbx_role=\"cn\"})\n/ sum by (pod) (\n kube_pod_container_resource_limits{namespace=\"$namespace\", container=\"engine\", resource=\"cpu\", unit=\"core\"}\n* on(namespace,pod)\n group_left() polardbx_up{polardbx_name=\"$polardbx\", namespace=\"$namespace\", polardbx_role=\"cn\"}\n)", "format": "table", "hide": false, "instant": true, @@ -1361,7 +1361,7 @@ }, { "exemplar": true, - "expr": "sum by (pod) (\n kube_pod_container_resource_limits_memory_bytes{namespace=\"$namespace\", container=\"engine\"}\n* on(namespace,pod)\n group_left() polardbx_up{polardbx_name=\"$polardbx\", namespace=\"$namespace\", polardbx_role=\"cn\"}\n)", + "expr": "sum by (pod) (\n kube_pod_container_resource_limits{namespace=\"$namespace\", container=\"engine\", resource=\"memory\", unit=\"byte\"}\n* on(namespace,pod)\n group_left() polardbx_up{polardbx_name=\"$polardbx\", namespace=\"$namespace\", polardbx_role=\"cn\"}\n)", "format": "table", "hide": false, "instant": true, @@ -1371,7 +1371,7 @@ }, { "exemplar": true, - "expr": "sum by (pod) (\n container_memory_working_set_bytes{namespace=\"$namespace\", container=\"engine\", id=~\"^/kubepods.*\"}\n * on(namespace,pod)\n group_left() polardbx_up{polardbx_name=\"$polardbx\", namespace=\"$namespace\", polardbx_role=\"cn\"}\n)\n/\nsum by (pod) (\n kube_pod_container_resource_limits_memory_bytes{namespace=\"$namespace\", container=\"engine\"}\n* on(namespace,pod)\n group_left() polardbx_up{polardbx_name=\"$polardbx\", namespace=\"$namespace\", polardbx_role=\"cn\"}\n)", + "expr": "sum by (pod) (\n container_memory_working_set_bytes{namespace=\"$namespace\", container=\"engine\", id=~\"^/kubepods.*\"}\n * on(namespace,pod)\n group_left() polardbx_up{polardbx_name=\"$polardbx\", namespace=\"$namespace\", polardbx_role=\"cn\"}\n)\n/\nsum by (pod) (\n kube_pod_container_resource_limits{namespace=\"$namespace\", container=\"engine\", resource=\"memory\", unit=\"byte\"}\n* on(namespace,pod)\n group_left() polardbx_up{polardbx_name=\"$polardbx\", namespace=\"$namespace\", polardbx_role=\"cn\"}\n)", "format": "table", "hide": false, "instant": true, @@ -1603,7 +1603,7 @@ "targets": [ { "exemplar": true, - "expr": "sum(\n polardbx_container_cpu_usage_seconds_total:sum_rate{namespace=\"$namespace\", container=\"engine\"}\n* on(namespace,pod)\n group_left() polardbx_up{polardbx_name=\"$polardbx\", namespace=\"$namespace\", polardbx_role=\"cn\"}\n)\n/sum(\n kube_pod_container_resource_limits_cpu_cores{namespace=\"$namespace\", container=\"engine\"}\n* on(namespace,pod)\n group_left() 
polardbx_up{polardbx_name=\"$polardbx\", namespace=\"$namespace\", polardbx_role=\"cn\"}\n)", + "expr": "sum(\n polardbx_container_cpu_usage_seconds_total:sum_rate{namespace=\"$namespace\", container=\"engine\"}\n* on(namespace,pod)\n group_left() polardbx_up{polardbx_name=\"$polardbx\", namespace=\"$namespace\", polardbx_role=\"cn\"}\n)\n/sum(\n kube_pod_container_resource_limits{namespace=\"$namespace\", container=\"engine\", resource=\"cpu\", unit=\"core\"}\n* on(namespace,pod)\n group_left() polardbx_up{polardbx_name=\"$polardbx\", namespace=\"$namespace\", polardbx_role=\"cn\"}\n)", "interval": "", "legendFormat": "avg cpu %", "queryType": "randomWalk", @@ -1763,7 +1763,7 @@ "targets": [ { "exemplar": true, - "expr": "sum(\n container_memory_working_set_bytes{namespace=\"$namespace\", container=\"engine\", id=~\"^/kubepods.*\"}\n * on(namespace,pod)\n group_left() polardbx_up{polardbx_name=\"$polardbx\", namespace=\"$namespace\", polardbx_role=\"cn\"}\n)\n/sum(\n kube_pod_container_resource_limits_memory_bytes{namespace=\"$namespace\", container=\"engine\"}\n* on(namespace,pod)\n group_left() polardbx_up{polardbx_name=\"$polardbx\", namespace=\"$namespace\", polardbx_role=\"cn\"}\n)\n", + "expr": "sum(\n container_memory_working_set_bytes{namespace=\"$namespace\", container=\"engine\", id=~\"^/kubepods.*\"}\n * on(namespace,pod)\n group_left() polardbx_up{polardbx_name=\"$polardbx\", namespace=\"$namespace\", polardbx_role=\"cn\"}\n)\n/sum(\n kube_pod_container_resource_limits{namespace=\"$namespace\", container=\"engine\", resource=\"memory\", unit=\"byte\"}\n* on(namespace,pod)\n group_left() polardbx_up{polardbx_name=\"$polardbx\", namespace=\"$namespace\", polardbx_role=\"cn\"}\n)\n", "interval": "", "legendFormat": "avg mem %", "queryType": "randomWalk", @@ -1926,7 +1926,7 @@ "targets": [ { "exemplar": true, - "expr": "sum(\n polardbx_container_cpu_usage_seconds_total:sum_rate{namespace=\"$namespace\", container=\"engine\"}\n* on(namespace,pod)\n group_left() polardbx_up{polardbx_name=\"$polardbx\", namespace=\"$namespace\", polardbx_role=\"cn\"}\n) by (pod)\n/sum(\n kube_pod_container_resource_limits_cpu_cores{namespace=\"$namespace\", container=\"engine\"}\n* on(namespace,pod)\n group_left() polardbx_up{polardbx_name=\"$polardbx\", namespace=\"$namespace\", polardbx_role=\"cn\"}\n) by (pod)", + "expr": "sum(\n polardbx_container_cpu_usage_seconds_total:sum_rate{namespace=\"$namespace\", container=\"engine\"}\n* on(namespace,pod)\n group_left() polardbx_up{polardbx_name=\"$polardbx\", namespace=\"$namespace\", polardbx_role=\"cn\"}\n) by (pod)\n/sum(\n kube_pod_container_resource_limits{namespace=\"$namespace\", container=\"engine\", resource=\"cpu\", unit=\"core\"}\n* on(namespace,pod)\n group_left() polardbx_up{polardbx_name=\"$polardbx\", namespace=\"$namespace\", polardbx_role=\"cn\"}\n) by (pod)", "interval": "", "legendFormat": "{{ pod }}", "queryType": "randomWalk", @@ -2086,7 +2086,7 @@ "targets": [ { "exemplar": true, - "expr": "sum(\n container_memory_working_set_bytes{namespace=\"$namespace\", container=\"engine\", id=~\"^/kubepods.*\"}\n * on(namespace,pod)\n group_left() polardbx_up{polardbx_name=\"$polardbx\", namespace=\"$namespace\", polardbx_role=\"cn\"}\n) by (pod)\n/sum(\n kube_pod_container_resource_limits_memory_bytes{namespace=\"$namespace\", container=\"engine\"}\n* on(namespace,pod)\n group_left() polardbx_up{polardbx_name=\"$polardbx\", namespace=\"$namespace\", polardbx_role=\"cn\"}\n) by (pod)\n", + "expr": "sum(\n 
container_memory_working_set_bytes{namespace=\"$namespace\", container=\"engine\", id=~\"^/kubepods.*\"}\n * on(namespace,pod)\n group_left() polardbx_up{polardbx_name=\"$polardbx\", namespace=\"$namespace\", polardbx_role=\"cn\"}\n) by (pod)\n/sum(\n kube_pod_container_resource_limits{namespace=\"$namespace\", container=\"engine\", resource=\"memory\", unit=\"byte\"}\n* on(namespace,pod)\n group_left() polardbx_up{polardbx_name=\"$polardbx\", namespace=\"$namespace\", polardbx_role=\"cn\"}\n) by (pod)\n", "interval": "", "legendFormat": "{{ pod }}", "queryType": "randomWalk", @@ -2253,7 +2253,7 @@ "targets": [ { "exemplar": true, - "expr": "sum(delta(polardbx_stats_request_time_cost_total{polardbx_name=\"$polardbx\", namespace=\"$namespace\"}[30s])) / (sum(delta (polardbx_stats_request_count_total{polardbx_name=\"$polardbx\", namespace=\"$namespace\"}[30s])) + 1) / 1000", + "expr": "sum(delta(polardbx_stats_request_time_cost_total{polardbx_name=\"$polardbx\", namespace=\"$namespace\"}[1m])) / (sum(delta (polardbx_stats_request_count_total{polardbx_name=\"$polardbx\", namespace=\"$namespace\"}[1m])) + 1) / 1000", "interval": "", "legendFormat": "rt (logical)", "queryType": "randomWalk", @@ -3131,7 +3131,7 @@ }, { "exemplar": true, - "expr": "sum by (pod) (\n kube_pod_container_resource_limits_cpu_cores{namespace=\"$namespace\", container=\"engine\"}\n* on(namespace,pod)\n group_left() mysql_up{polardbx_name=\"$polardbx\", polardbx_role=\"dn\", namespace=\"$namespace\", xstore_role=\"leader\"}\n)", + "expr": "sum by (pod) (\n kube_pod_container_resource_limits{namespace=\"$namespace\", container=\"engine\", resource=\"cpu\", unit=\"core\"}\n* on(namespace,pod)\n group_left() mysql_up{polardbx_name=\"$polardbx\", polardbx_role=\"dn\", namespace=\"$namespace\", xstore_role=\"leader\"}\n)", "format": "table", "hide": false, "instant": true, @@ -3141,7 +3141,7 @@ }, { "exemplar": true, - "expr": "sum by (pod) (polardbx_container_cpu_usage_seconds_total:sum_rate{namespace=\"$namespace\", container=\"engine\"}\n * on(namespace,pod)\n group_left() mysql_up{polardbx_name=\"$polardbx\", polardbx_role=\"dn\", namespace=\"$namespace\", xstore_role=\"leader\"})\n/ sum by (pod) (\n kube_pod_container_resource_limits_cpu_cores{namespace=\"$namespace\", container=\"engine\"}\n* on(namespace,pod)\n group_left() mysql_up{polardbx_name=\"$polardbx\", polardbx_role=\"dn\", namespace=\"$namespace\", xstore_role=\"leader\"}\n)", + "expr": "sum by (pod) (polardbx_container_cpu_usage_seconds_total:sum_rate{namespace=\"$namespace\", container=\"engine\"}\n * on(namespace,pod)\n group_left() mysql_up{polardbx_name=\"$polardbx\", polardbx_role=\"dn\", namespace=\"$namespace\", xstore_role=\"leader\"})\n/ sum by (pod) (\n kube_pod_container_resource_limits{namespace=\"$namespace\", container=\"engine\", resource=\"cpu\", unit=\"core\"}\n* on(namespace,pod)\n group_left() mysql_up{polardbx_name=\"$polardbx\", polardbx_role=\"dn\", namespace=\"$namespace\", xstore_role=\"leader\"}\n)", "format": "table", "hide": false, "instant": true, @@ -3161,7 +3161,7 @@ }, { "exemplar": true, - "expr": "sum by (pod) (\n kube_pod_container_resource_limits_memory_bytes{namespace=\"$namespace\", container=\"engine\"}\n* on(namespace,pod)\n group_left() mysql_up{polardbx_name=\"$polardbx\", polardbx_role=\"dn\", namespace=\"$namespace\", xstore_role=\"leader\"}\n)", + "expr": "sum by (pod) (\n kube_pod_container_resource_limits{namespace=\"$namespace\", container=\"engine\", resource=\"memory\", unit=\"byte\"}\n* on(namespace,pod)\n 
group_left() mysql_up{polardbx_name=\"$polardbx\", polardbx_role=\"dn\", namespace=\"$namespace\", xstore_role=\"leader\"}\n)", "format": "table", "hide": false, "instant": true, @@ -3171,7 +3171,7 @@ }, { "exemplar": true, - "expr": "sum by (pod) (\n container_memory_working_set_bytes{namespace=\"$namespace\", container=\"engine\", id=~\"^/kubepods.*\"}\n * on(namespace,pod)\n group_left() mysql_up{polardbx_name=\"$polardbx\", polardbx_role=\"dn\", namespace=\"$namespace\", xstore_role=\"leader\"}\n)\n/\nsum by (pod) (\n kube_pod_container_resource_limits_memory_bytes{namespace=\"$namespace\", container=\"engine\"}\n* on(namespace,pod)\n group_left() mysql_up{polardbx_name=\"$polardbx\", polardbx_role=\"dn\", namespace=\"$namespace\", xstore_role=\"leader\"}\n)", + "expr": "sum by (pod) (\n container_memory_working_set_bytes{namespace=\"$namespace\", container=\"engine\", id=~\"^/kubepods.*\"}\n * on(namespace,pod)\n group_left() mysql_up{polardbx_name=\"$polardbx\", polardbx_role=\"dn\", namespace=\"$namespace\", xstore_role=\"leader\"}\n)\n/\nsum by (pod) (\n kube_pod_container_resource_limits{namespace=\"$namespace\", container=\"engine\", resource=\"memory\", unit=\"byte\"}\n* on(namespace,pod)\n group_left() mysql_up{polardbx_name=\"$polardbx\", polardbx_role=\"dn\", namespace=\"$namespace\", xstore_role=\"leader\"}\n)", "format": "table", "hide": false, "instant": true, @@ -3401,7 +3401,7 @@ "targets": [ { "exemplar": true, - "expr": "sum(polardbx_container_cpu_usage_seconds_total:sum_rate{namespace=\"$namespace\", container=\"engine\"}\n* on(namespace,pod)\n group_left() mysql_up{polardbx_name=\"$polardbx\", namespace=\"$namespace\", polardbx_role=\"dn\", xstore_role=\"leader\"})\n/\nsum(kube_pod_container_resource_limits_cpu_cores{namespace=\"$namespace\", container=\"engine\"}\n* on(namespace,pod)\n group_left() mysql_up{polardbx_name=\"$polardbx\", namespace=\"$namespace\", polardbx_role=\"dn\", xstore_role=\"leader\"})", + "expr": "sum(polardbx_container_cpu_usage_seconds_total:sum_rate{namespace=\"$namespace\", container=\"engine\"}\n* on(namespace,pod)\n group_left() mysql_up{polardbx_name=\"$polardbx\", namespace=\"$namespace\", polardbx_role=\"dn\", xstore_role=\"leader\"})\n/\nsum(kube_pod_container_resource_limits{namespace=\"$namespace\", container=\"engine\", resource=\"cpu\", unit=\"core\"}\n* on(namespace,pod)\n group_left() mysql_up{polardbx_name=\"$polardbx\", namespace=\"$namespace\", polardbx_role=\"dn\", xstore_role=\"leader\"})", "interval": "", "legendFormat": "avg cpu %", "queryType": "randomWalk", @@ -3559,7 +3559,7 @@ "targets": [ { "exemplar": true, - "expr": "sum(container_memory_working_set_bytes{namespace=\"$namespace\", container=\"engine\", id=~\"^/kubepods.*\"}\n* on(namespace,pod)\n group_left() mysql_up{polardbx_name=\"$polardbx\", namespace=\"$namespace\", polardbx_role=\"dn\", xstore_role=\"leader\"})\n/\nsum(kube_pod_container_resource_limits_memory_bytes{namespace=\"$namespace\", container=\"engine\"}\n* on(namespace,pod)\n group_left() mysql_up{polardbx_name=\"$polardbx\", namespace=\"$namespace\", polardbx_role=\"dn\", xstore_role=\"leader\"})", + "expr": "sum(container_memory_working_set_bytes{namespace=\"$namespace\", container=\"engine\", id=~\"^/kubepods.*\"}\n* on(namespace,pod)\n group_left() mysql_up{polardbx_name=\"$polardbx\", namespace=\"$namespace\", polardbx_role=\"dn\", xstore_role=\"leader\"})\n/\nsum(kube_pod_container_resource_limits{namespace=\"$namespace\", container=\"engine\", resource=\"memory\", unit=\"byte\"}\n* 
on(namespace,pod)\n group_left() mysql_up{polardbx_name=\"$polardbx\", namespace=\"$namespace\", polardbx_role=\"dn\", xstore_role=\"leader\"})", "interval": "", "legendFormat": "avg mem %", "queryType": "randomWalk", @@ -3718,7 +3718,7 @@ "targets": [ { "exemplar": true, - "expr": "sum(polardbx_container_cpu_usage_seconds_total:sum_rate{namespace=\"$namespace\", container=\"engine\"}\n* on(namespace,pod)\n group_left(xstore_name) mysql_up{polardbx_name=\"$polardbx\", namespace=\"$namespace\", polardbx_role=\"dn\", xstore_role=\"leader\"}) by (xstore_name)\n/\nsum(kube_pod_container_resource_limits_cpu_cores{namespace=\"$namespace\", container=\"engine\"}\n* on(namespace,pod)\n group_left(xstore_name) mysql_up{polardbx_name=\"$polardbx\", namespace=\"$namespace\", polardbx_role=\"dn\", xstore_role=\"leader\"}) by (xstore_name)", + "expr": "sum(polardbx_container_cpu_usage_seconds_total:sum_rate{namespace=\"$namespace\", container=\"engine\"}\n* on(namespace,pod)\n group_left(xstore_name) mysql_up{polardbx_name=\"$polardbx\", namespace=\"$namespace\", polardbx_role=\"dn\", xstore_role=\"leader\"}) by (xstore_name)\n/\nsum(kube_pod_container_resource_limits{namespace=\"$namespace\", container=\"engine\", resource=\"cpu\", unit=\"core\"}\n* on(namespace,pod)\n group_left(xstore_name) mysql_up{polardbx_name=\"$polardbx\", namespace=\"$namespace\", polardbx_role=\"dn\", xstore_role=\"leader\"}) by (xstore_name)", "interval": "", "legendFormat": "{{ xstore_name }}", "queryType": "randomWalk", @@ -3876,7 +3876,7 @@ "targets": [ { "exemplar": true, - "expr": "sum(container_memory_working_set_bytes{namespace=\"$namespace\", container=\"engine\", id=~\"^/kubepods.*\"}\n* on(namespace,pod)\n group_left(xstore_name) mysql_up{polardbx_name=\"$polardbx\", namespace=\"$namespace\", polardbx_role=\"dn\", xstore_role=\"leader\"}) by (xstore_name)\n/\nsum(kube_pod_container_resource_limits_memory_bytes{namespace=\"$namespace\", container=\"engine\"}\n* on(namespace,pod)\n group_left(xstore_name) mysql_up{polardbx_name=\"$polardbx\", namespace=\"$namespace\", polardbx_role=\"dn\", xstore_role=\"leader\"}) by (xstore_name)", + "expr": "sum(container_memory_working_set_bytes{namespace=\"$namespace\", container=\"engine\", id=~\"^/kubepods.*\"}\n* on(namespace,pod)\n group_left(xstore_name) mysql_up{polardbx_name=\"$polardbx\", namespace=\"$namespace\", polardbx_role=\"dn\", xstore_role=\"leader\"}) by (xstore_name)\n/\nsum(kube_pod_container_resource_limits{namespace=\"$namespace\", container=\"engine\", resource=\"memory\", unit=\"byte\"}\n* on(namespace,pod)\n group_left(xstore_name) mysql_up{polardbx_name=\"$polardbx\", namespace=\"$namespace\", polardbx_role=\"dn\", xstore_role=\"leader\"}) by (xstore_name)", "interval": "", "legendFormat": "{{ xstore_name }}", "queryType": "randomWalk", @@ -5175,7 +5175,7 @@ }, { "exemplar": true, - "expr": "sum by (pod) (\n kube_pod_container_resource_limits_cpu_cores{namespace=\"$namespace\", container=\"engine\"}\n* on(namespace,pod)\n group_left() polardbx_cdc_up{polardbx_name=\"$polardbx\", namespace=\"$namespace\", polardbx_role=\"cdc\"}\n)", + "expr": "sum by (pod) (\n kube_pod_container_resource_limits{namespace=\"$namespace\", container=\"engine\", resource=\"cpu\", unit=\"core\"}\n* on(namespace,pod)\n group_left() polardbx_cdc_up{polardbx_name=\"$polardbx\", namespace=\"$namespace\", polardbx_role=\"cdc\"}\n)", "format": "table", "hide": false, "instant": true, @@ -5185,7 +5185,7 @@ }, { "exemplar": true, - "expr": "sum by (pod) 
(polardbx_container_cpu_usage_seconds_total:sum_rate{namespace=\"$namespace\", container=\"engine\"}\n * on(namespace,pod)\n group_left() polardbx_cdc_up{polardbx_name=\"$polardbx\", namespace=\"$namespace\", polardbx_role=\"cdc\"})\n/ sum by (pod) (\n kube_pod_container_resource_limits_cpu_cores{namespace=\"$namespace\", container=\"engine\"}\n* on(namespace,pod)\n group_left() polardbx_cdc_up{polardbx_name=\"$polardbx\", namespace=\"$namespace\", polardbx_role=\"cdc\"}\n)", + "expr": "sum by (pod) (polardbx_container_cpu_usage_seconds_total:sum_rate{namespace=\"$namespace\", container=\"engine\"}\n * on(namespace,pod)\n group_left() polardbx_cdc_up{polardbx_name=\"$polardbx\", namespace=\"$namespace\", polardbx_role=\"cdc\"})\n/ sum by (pod) (\n kube_pod_container_resource_limits{namespace=\"$namespace\", container=\"engine\", resource=\"cpu\", unit=\"core\"}\n* on(namespace,pod)\n group_left() polardbx_cdc_up{polardbx_name=\"$polardbx\", namespace=\"$namespace\", polardbx_role=\"cdc\"}\n)", "format": "table", "hide": false, "instant": true, @@ -5205,7 +5205,7 @@ }, { "exemplar": true, - "expr": "sum by (pod) (\n kube_pod_container_resource_limits_memory_bytes{namespace=\"$namespace\", container=\"engine\"}\n* on(namespace,pod)\n group_left() polardbx_cdc_up{polardbx_name=\"$polardbx\", namespace=\"$namespace\", polardbx_role=\"cdc\"}\n)", + "expr": "sum by (pod) (\n kube_pod_container_resource_limits{namespace=\"$namespace\", container=\"engine\", resource=\"memory\", unit=\"byte\"}\n* on(namespace,pod)\n group_left() polardbx_cdc_up{polardbx_name=\"$polardbx\", namespace=\"$namespace\", polardbx_role=\"cdc\"}\n)", "format": "table", "hide": false, "instant": true, @@ -5215,7 +5215,7 @@ }, { "exemplar": true, - "expr": "sum by (pod) (\n container_memory_working_set_bytes{namespace=\"$namespace\", container=\"engine\", id=~\"^/kubepods.*\"}\n * on(namespace,pod)\n group_left() polardbx_cdc_up{polardbx_name=\"$polardbx\", namespace=\"$namespace\", polardbx_role=\"cdc\"}\n)\n/\nsum by (pod) (\n kube_pod_container_resource_limits_memory_bytes{namespace=\"$namespace\", container=\"engine\"}\n* on(namespace,pod)\n group_left() polardbx_cdc_up{polardbx_name=\"$polardbx\", namespace=\"$namespace\", polardbx_role=\"cdc\"}\n)", + "expr": "sum by (pod) (\n container_memory_working_set_bytes{namespace=\"$namespace\", container=\"engine\", id=~\"^/kubepods.*\"}\n * on(namespace,pod)\n group_left() polardbx_cdc_up{polardbx_name=\"$polardbx\", namespace=\"$namespace\", polardbx_role=\"cdc\"}\n)\n/\nsum by (pod) (\n kube_pod_container_resource_limits{namespace=\"$namespace\", container=\"engine\", resource=\"memory\", unit=\"byte\"}\n* on(namespace,pod)\n group_left() polardbx_cdc_up{polardbx_name=\"$polardbx\", namespace=\"$namespace\", polardbx_role=\"cdc\"}\n)", "format": "table", "hide": false, "instant": true, @@ -5447,7 +5447,7 @@ "targets": [ { "exemplar": true, - "expr": "sum(\n polardbx_container_cpu_usage_seconds_total:sum_rate{namespace=\"$namespace\", container=\"engine\"}\n* on(namespace,pod)\n group_left() polardbx_cdc_up{polardbx_name=\"$polardbx\", namespace=\"$namespace\", polardbx_role=\"cdc\"}\n)\n/sum(\n kube_pod_container_resource_limits_cpu_cores{namespace=\"$namespace\", container=\"engine\"}\n* on(namespace,pod)\n group_left() polardbx_cdc_up{polardbx_name=\"$polardbx\", namespace=\"$namespace\", polardbx_role=\"cdc\"}\n)", + "expr": "sum(\n polardbx_container_cpu_usage_seconds_total:sum_rate{namespace=\"$namespace\", container=\"engine\"}\n* on(namespace,pod)\n group_left() 
polardbx_cdc_up{polardbx_name=\"$polardbx\", namespace=\"$namespace\", polardbx_role=\"cdc\"}\n)\n/sum(\n kube_pod_container_resource_limits{namespace=\"$namespace\", container=\"engine\", resource=\"cpu\", unit=\"core\"}\n* on(namespace,pod)\n group_left() polardbx_cdc_up{polardbx_name=\"$polardbx\", namespace=\"$namespace\", polardbx_role=\"cdc\"}\n)", "interval": "", "legendFormat": "avg cpu %", "queryType": "randomWalk", @@ -5607,7 +5607,7 @@ "targets": [ { "exemplar": true, - "expr": "sum(\n container_memory_working_set_bytes{namespace=\"$namespace\", container=\"engine\", id=~\"^/kubepods.*\"}\n * on(namespace,pod)\n group_left() polardbx_cdc_up{polardbx_name=\"$polardbx\", namespace=\"$namespace\", polardbx_role=\"cdc\"}\n)\n/sum(\n kube_pod_container_resource_limits_memory_bytes{namespace=\"$namespace\", container=\"engine\"}\n* on(namespace,pod)\n group_left() polardbx_cdc_up{polardbx_name=\"$polardbx\", namespace=\"$namespace\", polardbx_role=\"cdc\"}\n)\n", + "expr": "sum(\n container_memory_working_set_bytes{namespace=\"$namespace\", container=\"engine\", id=~\"^/kubepods.*\"}\n * on(namespace,pod)\n group_left() polardbx_cdc_up{polardbx_name=\"$polardbx\", namespace=\"$namespace\", polardbx_role=\"cdc\"}\n)\n/sum(\n kube_pod_container_resource_limits{namespace=\"$namespace\", container=\"engine\", resource=\"memory\", unit=\"byte\"}\n* on(namespace,pod)\n group_left() polardbx_cdc_up{polardbx_name=\"$polardbx\", namespace=\"$namespace\", polardbx_role=\"cdc\"}\n)\n", "interval": "", "legendFormat": "avg mem %", "queryType": "randomWalk", @@ -5770,7 +5770,7 @@ "targets": [ { "exemplar": true, - "expr": "sum(\n polardbx_container_cpu_usage_seconds_total:sum_rate{namespace=\"$namespace\", container=\"engine\"}\n* on(namespace,pod)\n group_left() polardbx_cdc_up{polardbx_name=\"$polardbx\", namespace=\"$namespace\", polardbx_role=\"cdc\"}\n) by (pod)\n/sum(\n kube_pod_container_resource_limits_cpu_cores{namespace=\"$namespace\", container=\"engine\"}\n* on(namespace,pod)\n group_left() polardbx_cdc_up{polardbx_name=\"$polardbx\", namespace=\"$namespace\", polardbx_role=\"cdc\"}\n) by (pod)", + "expr": "sum(\n polardbx_container_cpu_usage_seconds_total:sum_rate{namespace=\"$namespace\", container=\"engine\"}\n* on(namespace,pod)\n group_left() polardbx_cdc_up{polardbx_name=\"$polardbx\", namespace=\"$namespace\", polardbx_role=\"cdc\"}\n) by (pod)\n/sum(\n kube_pod_container_resource_limits{namespace=\"$namespace\", container=\"engine\", resource=\"cpu\", unit=\"core\"}\n* on(namespace,pod)\n group_left() polardbx_cdc_up{polardbx_name=\"$polardbx\", namespace=\"$namespace\", polardbx_role=\"cdc\"}\n) by (pod)", "interval": "", "legendFormat": "{{ pod }}", "queryType": "randomWalk", @@ -5930,7 +5930,7 @@ "targets": [ { "exemplar": true, - "expr": "sum(\n container_memory_working_set_bytes{namespace=\"$namespace\", container=\"engine\", id=~\"^/kubepods.*\"}\n * on(namespace,pod)\n group_left() polardbx_cdc_up{polardbx_name=\"$polardbx\", namespace=\"$namespace\", polardbx_role=\"cdc\"}\n) by (pod)\n/sum(\n kube_pod_container_resource_limits_memory_bytes{namespace=\"$namespace\", container=\"engine\"}\n* on(namespace,pod)\n group_left() polardbx_cdc_up{polardbx_name=\"$polardbx\", namespace=\"$namespace\", polardbx_role=\"cdc\"}\n) by (pod)\n", + "expr": "sum(\n container_memory_working_set_bytes{namespace=\"$namespace\", container=\"engine\", id=~\"^/kubepods.*\"}\n * on(namespace,pod)\n group_left() polardbx_cdc_up{polardbx_name=\"$polardbx\", namespace=\"$namespace\", 
polardbx_role=\"cdc\"}\n) by (pod)\n/sum(\n kube_pod_container_resource_limits{namespace=\"$namespace\", container=\"engine\", resource=\"memory\", unit=\"byte\"}\n* on(namespace,pod)\n group_left() polardbx_cdc_up{polardbx_name=\"$polardbx\", namespace=\"$namespace\", polardbx_role=\"cdc\"}\n) by (pod)\n", "interval": "", "legendFormat": "{{ pod }}", "queryType": "randomWalk", diff --git a/charts/polardbx-operator/Chart.yaml b/charts/polardbx-operator/Chart.yaml index 1b637f5..7a47a8a 100644 --- a/charts/polardbx-operator/Chart.yaml +++ b/charts/polardbx-operator/Chart.yaml @@ -2,8 +2,8 @@ apiVersion: v2 name: polardbx-operator description: Helm chart of polardbx-operator type: application -version: 1.3.0 -appVersion: v1.3.0 +version: 1.4.0-beta +appVersion: v1.4.0-beta keywords: - polardb-x - operator diff --git a/charts/polardbx-operator/crds/polardbx.aliyun.com_polardbxbackupbinlogs.yaml b/charts/polardbx-operator/crds/polardbx.aliyun.com_polardbxbackupbinlogs.yaml new file mode 100644 index 0000000..59f70b1 --- /dev/null +++ b/charts/polardbx-operator/crds/polardbx.aliyun.com_polardbxbackupbinlogs.yaml @@ -0,0 +1,106 @@ +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.9.0 + creationTimestamp: null + name: polardbxbackupbinlogs.polardbx.aliyun.com +spec: + group: polardbx.aliyun.com + names: + kind: PolarDBXBackupBinlog + listKind: PolarDBXBackupBinlogList + plural: polardbxbackupbinlogs + shortNames: + - pxcblog + singular: polardbxbackupbinlog + scope: Namespaced + versions: + - additionalPrinterColumns: + - jsonPath: .status.phase + name: PHASE + type: string + name: v1 + schema: + openAPIV3Schema: + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + properties: + binlogChecksum: + default: CRC32 + type: string + localExpireLogHours: + anyOf: + - type: integer + - type: string + default: 7 + x-kubernetes-int-or-string: true + maxLocalBinlogCount: + default: 60 + format: int64 + type: integer + pointInTimeRecover: + default: true + type: boolean + pxcName: + type: string + pxcUid: + type: string + remoteExpireLogHours: + anyOf: + - type: integer + - type: string + default: 168 + x-kubernetes-int-or-string: true + storageProvider: + description: StorageProvider defines the backend storage to store + the backup files. 
+ properties: + sink: + description: Sink defines the storage configuration chosen to + perform the backup + type: string + storageName: + description: StorageName defines the storage medium used to perform + the backup + type: string + type: object + type: object + status: + properties: + checkExpireFileLastTime: + description: CheckExpireFileLastTime represents the timestamp of the last + check for expired files + format: int64 + type: integer + lastDeletedFiles: + description: LastDeletedFiles represents the recently deleted files + items: + type: string + type: array + observedGeneration: + description: ObservedGeneration represents the observed generation + of PolarDBXBackupBinlogSpec. + format: int64 + type: integer + phase: + description: Phase represents the executing phase in the controller + type: string + type: object + type: object + served: true + storage: true + subresources: {} diff --git a/charts/polardbx-operator/crds/polardbx.aliyun.com_polardbxbackups.yaml b/charts/polardbx-operator/crds/polardbx.aliyun.com_polardbxbackups.yaml index fb29b63..9c6a4bf 100644 --- a/charts/polardbx-operator/crds/polardbx.aliyun.com_polardbxbackups.yaml +++ b/charts/polardbx-operator/crds/polardbx.aliyun.com_polardbxbackups.yaml @@ -84,6 +84,14 @@ spec: UIDs and names do not get conflated. type: string type: object + preferredBackupRole: + default: follower + description: PreferredBackupRole defines the role of the node on which + the backup will happen + enum: + - leader + - follower + type: string retentionTime: description: RetentionTime defines the retention time of the backup. The format is the same with metav1.Duration. Must be provided. @@ -129,6 +137,17 @@ spec: description: Config defines the configuration of the current cluster. Both dynamic and static configs of CN and DN are included. properties: + cdc: + description: CDC config + properties: + envs: + additionalProperties: + anyOf: + - type: integer + - type: string + x-kubernetes-int-or-string: true + type: object + type: object cn: description: CN config. properties: @@ -191,6 +210,13 @@ spec: properties: enableAuditLog: type: boolean + envs: + additionalProperties: + anyOf: + - type: integer + - type: string + x-kubernetes-int-or-string: true + type: object logDataSeparation: type: boolean logPurgeInterval: @@ -284,11 +310,40 @@ spec: might fail due to lack of backups silently. properties: backupset: - description: BackupSet defines the source of backup set + description: BackupSet defines the source backup set. It + works only when the PolarDBXBackup object of this BackupSet + still exists. type: string + binlogSource: + description: BinlogSource defines the binlog data source + properties: + checksum: + description: Checksum defines the binlog file checksum. + type: string + namespace: + description: Namespace defines the source binlog namespace + type: string + storageProvider: + description: StorageProvider defines the source binlog + sink + properties: + sink: + description: Sink defines the storage configuration + chosen to perform the backup + type: string + storageName: + description: StorageName defines the storage medium + used to perform the backup + type: string + type: object + type: object from: - description: From defines the source information, either backup - sets, snapshot or an running cluster. + description: "From defines the source information, either + a running cluster, a backup set path, or a backup selector. \n + If PolarDBXRestoreFrom.BackupSetPath is provided, the restore will + be performed using the metadata backup in remote storage.
It + works only when BackupSet is empty and StorageProvider is + provided." properties: backupSelector: additionalProperties: @@ -296,9 +351,26 @@ type: string description: BackupSelector defines the selector for the backups to be selected. Optional. type: object + backupSetPath: + description: BackupSetPath defines the location of the backup + set in remote storage + type: string clusterName: - description: PolarBDXName defines the the polardbx name - that this polardbx is restored from. Optional. + description: PolarDBXName defines the polardbx name that + this polardbx is restored from. Optional. + type: string + type: object + storageProvider: + description: StorageProvider defines the storage used to perform + the backup + properties: + sink: + description: Sink defines the storage configuration chosen + to perform the backup + type: string + storageName: + description: StorageName defines the storage medium used + to perform the backup type: string type: object syncSpecWithOriginalCluster: @@ -312,7 +384,7 @@ type: boolean time: description: Time defines the specified time of the restored - data, in the format of 'yyyy-MM-dd HH:mm:ss'. Required. + data, in the format of 'yyyy-MM-ddTHH:mm:ssZ'. Required. type: string timezone: description: TimeZone defines the specified time zone of the @@ -596,7 +668,7 @@ spec: dependent. type: string hostNetwork: - default: true + default: false description: HostNetwork mode. type: boolean image: @@ -695,7 +767,7 @@ spec: dependent. type: string hostNetwork: - default: true + default: false description: HostNetwork mode. type: boolean image: diff --git a/charts/polardbx-operator/crds/polardbx.aliyun.com_polardbxbackupschedules.yaml b/charts/polardbx-operator/crds/polardbx.aliyun.com_polardbxbackupschedules.yaml new file mode 100644 index 0000000..8c9ad15 --- /dev/null +++ b/charts/polardbx-operator/crds/polardbx.aliyun.com_polardbxbackupschedules.yaml @@ -0,0 +1,136 @@ +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.9.0 + creationTimestamp: null + name: polardbxbackupschedules.polardbx.aliyun.com +spec: + group: polardbx.aliyun.com + names: + kind: PolarDBXBackupSchedule + listKind: PolarDBXBackupScheduleList + plural: polardbxbackupschedules + shortNames: + - pxcbackupschedule + - pbs + singular: polardbxbackupschedule + scope: Namespaced + versions: + - additionalPrinterColumns: + - jsonPath: .spec.schedule + name: SCHEDULE + type: string + - jsonPath: .status.lastBackupTime + name: LAST_BACKUP_TIME + type: string + - jsonPath: .status.nextBackupTime + name: NEXT_BACKUP_TIME + type: string + - jsonPath: .status.lastBackup + name: LAST_BACKUP + type: string + name: v1 + schema: + openAPIV3Schema: + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase.
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + properties: + backupSpec: + description: BackupSpec defines the spec of each backup. + properties: + cleanPolicy: + default: Retain + description: CleanPolicy defines the clean policy when the cluster + is deleted. Default is Retain. + enum: + - Retain + - Delete + - OnFailure + type: string + cluster: + description: Cluster represents the reference of the target polardbx + cluster to perform the backup action. + properties: + name: + type: string + uid: + description: UID is a type that holds unique ID values, including + UUIDs. Because we don't ONLY use UUIDs, this is an alias + to string. Being a type captures intent and helps make + sure that UIDs and names do not get conflated. + type: string + type: object + preferredBackupRole: + default: follower + description: PreferredBackupRole defines the role of the node on which + the backup will happen + enum: + - leader + - follower + type: string + retentionTime: + description: RetentionTime defines the retention time of the backup. + The format is the same as metav1.Duration. Must be provided. + type: string + storageProvider: + description: StorageProvider defines the backend storage to store + the backup files. + properties: + sink: + description: Sink defines the storage configuration chosen + to perform the backup + type: string + storageName: + description: StorageName defines the storage medium used to + perform the backup + type: string + type: object + type: object + maxBackupCount: + default: 0 + description: MaxBackupCount defines the limit of reserved backups. If the + number of backups exceeds the limit, the oldest backup sets will be purged. + Default is zero, which means no limit. + type: integer + schedule: + description: Schedule represents the backup schedule in cron + expression format. + type: string + suspend: + description: Suspend denotes whether the current schedule is paused. + type: boolean + type: object + status: + properties: + lastBackup: + description: LastBackup records the name of the last backup. + type: string + lastBackupTime: + description: LastBackupTime records the time of the last backup. + format: date-time + type: string + nextBackupTime: + description: NextBackupTime records the scheduled time of the next + backup. + format: date-time + type: string + type: object + type: object + served: true + storage: true + subresources: + status: {} diff --git a/charts/polardbx-operator/crds/polardbx.aliyun.com_polardbxclusters.yaml b/charts/polardbx-operator/crds/polardbx.aliyun.com_polardbxclusters.yaml index d144ebd..2d2a760 100644 --- a/charts/polardbx-operator/crds/polardbx.aliyun.com_polardbxclusters.yaml +++ b/charts/polardbx-operator/crds/polardbx.aliyun.com_polardbxclusters.yaml @@ -96,6 +96,17 @@ spec: description: Config defines the configuration of the current cluster. Both dynamic and static configs of CN and DN are included. properties: + cdc: + description: CDC config + properties: + envs: + additionalProperties: + anyOf: + - type: integer + - type: string + x-kubernetes-int-or-string: true + type: object + type: object cn: description: CN config. properties: @@ -158,6 +169,13 @@ properties: enableAuditLog: type: boolean + envs: + additionalProperties: + anyOf: + - type: integer + - type: string + x-kubernetes-int-or-string: true + type: object logDataSeparation: type: boolean logPurgeInterval: @@ -250,11 +268,37 @@ fail due to lack of backups silently.
properties: backupset: - description: BackupSet defines the source of backup set + description: BackupSet defines the source of the backup set. It works + only when the PolarDBXBackup object of this BackupSet still exists. type: string + binlogSource: + description: BinlogSource defines the binlog data source. + properties: + checksum: + description: Checksum defines the binlog file checksum. + type: string + namespace: + description: Namespace defines the source binlog namespace. + type: string + storageProvider: + description: StorageProvider defines the source binlog sink. + properties: + sink: + description: Sink defines the storage configuration chosen + to perform the backup. + type: string + storageName: + description: StorageName defines the storage medium used + to perform the backup. + type: string + type: object + type: object from: - description: From defines the source information, either backup - sets, snapshot or an running cluster. + description: "From defines the source information: either a running + cluster, a backup set path, or a backup selector. \n If PolarDBXRestoreFrom.BackupSetPath + is provided, the restore will be performed using the metadata backup in + remote storage. It works only when BackupSet is empty and StorageProvider + is provided." properties: backupSelector: additionalProperties: @@ -262,9 +306,25 @@ spec: description: BackupSelector defines the selector for the backups to be selected. Optional. type: object + backupSetPath: + description: BackupSetPath defines the location of the backup + set in remote storage. + type: string clusterName: - description: PolarBDXName defines the the polardbx name that - this polardbx is restored from. Optional. + description: PolarDBXName defines the polardbx name that this + polardbx is restored from. Optional. + type: string + type: object + storageProvider: + description: StorageProvider defines the storage used to perform the backup. + properties: + sink: + description: Sink defines the storage configuration chosen + to perform the backup. + type: string + storageName: + description: StorageName defines the storage medium used to + perform the backup. + type: string type: object syncSpecWithOriginalCluster: @@ -278,7 +338,7 @@ spec: type: boolean time: description: Time defines the specified time of the restored data, - in the format of 'yyyy-MM-dd HH:mm:ss'. Required. + in the format of 'yyyy-MM-ddTHH:mm:ssZ'. Required. type: string timezone: description: TimeZone defines the specified time zone of the restore @@ -559,7 +619,7 @@ spec: dependent. type: string hostNetwork: - default: true + default: false description: HostNetwork mode. type: boolean image: @@ -656,7 +716,7 @@ spec: dependent. type: string hostNetwork: - default: true + default: false description: HostNetwork mode. type: boolean image: @@ -1665,6 +1725,14 @@ spec: phase: description: Phase is the current phase of the cluster. type: string + pitrStatus: + description: PitrStatus represents the status of the PITR restore. + properties: + job: + type: string + preapreJobEndpoint: + type: string + type: object randHash: description: Rand represents a random string value to avoid collision. type: string @@ -1747,6 +1815,17 @@ spec: properties: config: properties: + cdc: + description: CDC config + properties: + envs: + additionalProperties: + anyOf: + - type: integer + - type: string + x-kubernetes-int-or-string: true + type: object + type: object cn: description: CN config.
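The new remote-metadata restore path can be sketched with a manifest like the following. Assumptions: the fields shown sit under spec.restore of a PolarDBXCluster (only the inner fields appear in the hunks above), and the backupSetPath, storage and sink names are placeholders. Per the schema, from.backupset is left empty so that backupSetPath plus storageProvider drive the restore.

    apiVersion: polardbx.aliyun.com/v1
    kind: PolarDBXCluster
    metadata:
      name: pxc-restored-demo                       # hypothetical name
    spec:
      restore:
        from:
          backupSetPath: /polardbx/backup/demo-set  # hypothetical remote location
        storageProvider:
          storageName: sftp                         # hypothetical
          sink: default                             # hypothetical
        time: "2023-03-17T08:00:00Z"                # note the new 'yyyy-MM-ddTHH:mm:ssZ' format
        syncSpecWithOriginalCluster: false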
properties: @@ -1809,6 +1888,13 @@ spec: properties: enableAuditLog: type: boolean + envs: + additionalProperties: + anyOf: + - type: integer + - type: string + x-kubernetes-int-or-string: true + type: object logDataSeparation: type: boolean logPurgeInterval: @@ -2041,7 +2127,7 @@ spec: dependent. type: string hostNetwork: - default: true + default: false description: HostNetwork mode. type: boolean image: @@ -2140,7 +2226,7 @@ spec: dependent. type: string hostNetwork: - default: true + default: false description: HostNetwork mode. type: boolean image: diff --git a/charts/polardbx-operator/crds/polardbx.aliyun.com_systemtasks.yaml b/charts/polardbx-operator/crds/polardbx.aliyun.com_systemtasks.yaml new file mode 100644 index 0000000..0b46447 --- /dev/null +++ b/charts/polardbx-operator/crds/polardbx.aliyun.com_systemtasks.yaml @@ -0,0 +1,117 @@ +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.9.0 + creationTimestamp: null + name: systemtasks.polardbx.aliyun.com +spec: + group: polardbx.aliyun.com + names: + kind: SystemTask + listKind: SystemTaskList + plural: systemtasks + shortNames: + - st + singular: systemtask + scope: Namespaced + versions: + - additionalPrinterColumns: + - jsonPath: .status.phase + name: PHASE + type: string + name: v1 + schema: + openAPIV3Schema: + description: SystemTask is the schema for the systemtask. + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + properties: + cnReplicas: + type: integer + cnResources: + description: ResourceRequirements describes the compute resource requirements. + properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Limits describes the maximum amount of compute resources + allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Requests describes the minimum amount of compute + resources required. If Requests is omitted for a container, + it defaults to Limits if that is explicitly specified, otherwise + to an implementation-defined value. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + type: object + type: object + dnResources: + description: ResourceRequirements describes the compute resource requirements. 
+ properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Limits describes the maximum amount of compute resources + allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Requests describes the minimum amount of compute + resources required. If Requests is omitted for a container, + it defaults to Limits if that is explicitly specified, otherwise + to an implementation-defined value. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + type: object + type: object + taskType: + type: string + type: object + status: + properties: + phase: + type: string + stBalanceResourceStatus: + properties: + balanceLeaderFinish: + type: boolean + rebuildFinish: + type: boolean + rebuildTaskName: + type: string + type: object + type: object + type: object + served: true + storage: true + subresources: {} diff --git a/charts/polardbx-operator/crds/polardbx.aliyun.com_xstorebackups.yaml b/charts/polardbx-operator/crds/polardbx.aliyun.com_xstorebackups.yaml index eac5bcd..aa595cb 100644 --- a/charts/polardbx-operator/crds/polardbx.aliyun.com_xstorebackups.yaml +++ b/charts/polardbx-operator/crds/polardbx.aliyun.com_xstorebackups.yaml @@ -63,6 +63,14 @@ spec: default: galaxy description: Engine is the engine used by xstore. Default is "galaxy". type: string + preferredBackupRole: + default: follower + description: PreferredBackupRole defines the role of node on which + backup will happen + enum: + - leader + - follower + type: string retentionTime: description: RetentionTime defines how long will this backup set be kept @@ -85,6 +93,12 @@ spec: properties: name: type: string + uid: + description: UID is a type that holds unique ID values, including + UUIDs. Because we don't ONLY use UUIDs, this is an alias to + string. Being a type captures intent and helps make sure that + UIDs and names do not get conflated. + type: string type: object type: object status: diff --git a/charts/polardbx-operator/crds/polardbx.aliyun.com_xstores.yaml b/charts/polardbx-operator/crds/polardbx.aliyun.com_xstores.yaml index 5f65ccf..5fee796 100644 --- a/charts/polardbx-operator/crds/polardbx.aliyun.com_xstores.yaml +++ b/charts/polardbx-operator/crds/polardbx.aliyun.com_xstores.yaml @@ -146,6 +146,13 @@ spec: type: object type: object type: object + envs: + additionalProperties: + anyOf: + - type: integer + - type: string + x-kubernetes-int-or-string: true + type: object type: object engine: default: galaxy @@ -212,6 +219,8 @@ spec: xstore is restored from. Optional. type: string type: object + pitrEndpoiint: + type: string time: description: Time defines the specified time of the restored data, in the format of 'yyyy-MM-dd HH:mm:ss'. Required. @@ -660,6 +669,7 @@ spec: are ANDed. type: object type: object + x-kubernetes-map-type: atomic namespaceSelector: description: A label query over the set of namespaces that the @@ -741,6 +751,7 @@ spec: are ANDed. 
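A SystemTask object built from the schema above could be sketched as follows. The taskType value is an assumption inferred from the stBalanceResourceStatus status field, not a value confirmed by this patch, and the name is hypothetical.

    apiVersion: polardbx.aliyun.com/v1
    kind: SystemTask
    metadata:
      name: balance-resource-demo    # hypothetical name
    spec:
      taskType: BalanceResource      # assumed value; see stBalanceResourceStatus in status
      cnReplicas: 2
      cnResources:
        limits:
          cpu: "4"
          memory: 8Gi
      dnResources:
        limits:
          cpu: "4"
          memory: 8Gi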
type: object type: object + x-kubernetes-map-type: atomic namespaces: description: namespaces specifies a static list of namespace names @@ -870,6 +881,7 @@ spec: are ANDed. type: object type: object + x-kubernetes-map-type: atomic namespaceSelector: description: A label query over the set of namespaces that the term @@ -941,6 +953,7 @@ spec: are ANDed. type: object type: object + x-kubernetes-map-type: atomic namespaces: description: namespaces specifies a static list of namespace names @@ -1074,6 +1087,7 @@ spec: are ANDed. type: object type: object + x-kubernetes-map-type: atomic namespaceSelector: description: A label query over the set of namespaces that the @@ -1155,6 +1169,7 @@ spec: are ANDed. type: object type: object + x-kubernetes-map-type: atomic namespaces: description: namespaces specifies a static list of namespace names @@ -1284,6 +1299,7 @@ spec: are ANDed. type: object type: object + x-kubernetes-map-type: atomic namespaceSelector: description: A label query over the set of namespaces that the term @@ -1355,6 +1371,7 @@ spec: are ANDed. type: object type: object + x-kubernetes-map-type: atomic namespaces: description: namespaces specifies a static list of namespace names @@ -1387,7 +1404,7 @@ spec: type: object type: object hostNetwork: - default: true + default: false description: HostNetwork defines whether the node uses the host network. Default is true. type: boolean @@ -1810,6 +1827,7 @@ spec: are ANDed. type: object type: object + x-kubernetes-map-type: atomic namespaceSelector: description: A label query over the set of namespaces that the term applies @@ -1880,6 +1898,7 @@ spec: are ANDed. type: object type: object + x-kubernetes-map-type: atomic namespaces: description: namespaces specifies a static list of namespace names that @@ -1995,6 +2014,7 @@ spec: ANDed. type: object type: object + x-kubernetes-map-type: atomic namespaceSelector: description: A label query over the set of namespaces that the term applies to. @@ -2059,6 +2079,7 @@ spec: ANDed. type: object type: object + x-kubernetes-map-type: atomic namespaces: description: namespaces specifies a static list of namespace names that the term @@ -2175,6 +2196,7 @@ spec: are ANDed. type: object type: object + x-kubernetes-map-type: atomic namespaceSelector: description: A label query over the set of namespaces that the term applies @@ -2245,6 +2267,7 @@ spec: are ANDed. type: object type: object + x-kubernetes-map-type: atomic namespaces: description: namespaces specifies a static list of namespace names that @@ -2360,6 +2383,7 @@ spec: ANDed. type: object type: object + x-kubernetes-map-type: atomic namespaceSelector: description: A label query over the set of namespaces that the term applies to. @@ -2424,6 +2448,7 @@ spec: ANDed. type: object type: object + x-kubernetes-map-type: atomic namespaces: description: namespaces specifies a static list of namespace names that the term @@ -2453,7 +2478,7 @@ spec: type: object type: object hostNetwork: - default: true + default: false description: HostNetwork defines whether the node uses the host network. Default is true. type: boolean @@ -2698,6 +2723,13 @@ spec: type: object type: object type: object + envs: + additionalProperties: + anyOf: + - type: integer + - type: string + x-kubernetes-int-or-string: true + type: object type: object observedGeneration: description: ObservedGeneration is the observed generation of the @@ -3128,6 +3160,7 @@ spec: are ANDed. 
type: object type: object + x-kubernetes-map-type: atomic namespaceSelector: description: A label query over the set of namespaces that the @@ -3209,6 +3242,7 @@ spec: are ANDed. type: object type: object + x-kubernetes-map-type: atomic namespaces: description: namespaces specifies a static list of namespace names @@ -3338,6 +3372,7 @@ spec: are ANDed. type: object type: object + x-kubernetes-map-type: atomic namespaceSelector: description: A label query over the set of namespaces that the term @@ -3409,6 +3444,7 @@ spec: are ANDed. type: object type: object + x-kubernetes-map-type: atomic namespaces: description: namespaces specifies a static list of namespace names @@ -3542,6 +3578,7 @@ spec: are ANDed. type: object type: object + x-kubernetes-map-type: atomic namespaceSelector: description: A label query over the set of namespaces that the @@ -3623,6 +3660,7 @@ spec: are ANDed. type: object type: object + x-kubernetes-map-type: atomic namespaces: description: namespaces specifies a static list of namespace names @@ -3752,6 +3790,7 @@ spec: are ANDed. type: object type: object + x-kubernetes-map-type: atomic namespaceSelector: description: A label query over the set of namespaces that the term @@ -3823,6 +3862,7 @@ spec: are ANDed. type: object type: object + x-kubernetes-map-type: atomic namespaces: description: namespaces specifies a static list of namespace names @@ -3855,7 +3895,7 @@ spec: type: object type: object hostNetwork: - default: true + default: false description: HostNetwork defines whether the node uses the host network. Default is true. type: boolean @@ -4278,6 +4318,7 @@ spec: are ANDed. type: object type: object + x-kubernetes-map-type: atomic namespaceSelector: description: A label query over the set of namespaces that the term applies @@ -4348,6 +4389,7 @@ spec: are ANDed. type: object type: object + x-kubernetes-map-type: atomic namespaces: description: namespaces specifies a static list of namespace names that @@ -4463,6 +4505,7 @@ spec: ANDed. type: object type: object + x-kubernetes-map-type: atomic namespaceSelector: description: A label query over the set of namespaces that the term applies to. @@ -4527,6 +4570,7 @@ spec: ANDed. type: object type: object + x-kubernetes-map-type: atomic namespaces: description: namespaces specifies a static list of namespace names that the term @@ -4643,6 +4687,7 @@ spec: are ANDed. type: object type: object + x-kubernetes-map-type: atomic namespaceSelector: description: A label query over the set of namespaces that the term applies @@ -4713,6 +4758,7 @@ spec: are ANDed. type: object type: object + x-kubernetes-map-type: atomic namespaces: description: namespaces specifies a static list of namespace names that @@ -4828,6 +4874,7 @@ spec: ANDed. type: object type: object + x-kubernetes-map-type: atomic namespaceSelector: description: A label query over the set of namespaces that the term applies to. @@ -4892,6 +4939,7 @@ spec: ANDed. type: object type: object + x-kubernetes-map-type: atomic namespaces: description: namespaces specifies a static list of namespace names that the term @@ -4921,7 +4969,7 @@ spec: type: object type: object hostNetwork: - default: true + default: false description: HostNetwork defines whether the node uses the host network. Default is true. 
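The hostNetwork defaults across these CRDs flip from true to false in this release, so a deployment that relied on host networking would need to opt back in explicitly, roughly like this. The field path is an assumption following the CN node template of the polardbxclusters schema; only the hostNetwork leaf itself appears in the hunks above.

    apiVersion: polardbx.aliyun.com/v1
    kind: PolarDBXCluster
    metadata:
      name: pxc-hostnetwork-demo    # hypothetical name
    spec:
      topology:
        nodes:
          cn:
            template:
              hostNetwork: true     # the default is now false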
type: boolean diff --git a/charts/polardbx-operator/templates/controller-config-configmap.yaml b/charts/polardbx-operator/templates/controller-config-configmap.yaml index 513f107..9ce1585 100644 --- a/charts/polardbx-operator/templates/controller-config-configmap.yaml +++ b/charts/polardbx-operator/templates/controller-config-configmap.yaml @@ -19,9 +19,11 @@ data: {{- if .Values.useLatestImage }} prober: {{ .Values.images.probeProxy }}:latest exporter: {{ .Values.images.polardbxExporter }}:latest + job: {{ .Values.images.polardbxJob }}:latest {{- else }} prober: {{ .Values.images.probeProxy }}:{{ .Values.imageTag | default .Chart.AppVersion }} exporter: {{ .Values.images.polardbxExporter }}:{{ .Values.imageTag | default .Chart.AppVersion }} + job: {{ .Values.images.polardbxJob }}:{{ .Values.imageTag | default .Chart.AppVersion }} {{- end }} compute: {{- if .Values.useLatestImage }} @@ -56,7 +58,8 @@ data: volume_data: {{ .Values.node.volumes.data }}/xstore volume_log: {{ .Values.node.volumes.log }}/xstore volume_filestream: {{ .Values.node.volumes.filestream }} - hpfs_endpoint: {{.Values.hostPathFileService.name}}:{{ .Values.hostPathFileService.port }} + hpfs_endpoint: {{.Values.hostPathFileService.name}}.{{ .Release.Namespace }}:{{ .Values.hostPathFileService.port }} + fs_endpoint: {{.Values.hostPathFileService.name}}.{{ .Release.Namespace }}:{{ .Values.hostPathFileService.fsPort }} {{- if .Values.extension.config.security }} security: {{ toYaml .Values.extension.config.security | indent 6 }} diff --git a/charts/polardbx-operator/templates/host-path-file-configmap.yaml b/charts/polardbx-operator/templates/host-path-file-configmap.yaml index 37ee7ac..b05e94f 100644 --- a/charts/polardbx-operator/templates/host-path-file-configmap.yaml +++ b/charts/polardbx-operator/templates/host-path-file-configmap.yaml @@ -7,4 +7,8 @@ data: config.yaml: |- sinks: {{ toYaml .Values.hostPathFileService.sinks | indent 7 }} + backupBinlogConfig: + rootDirectories: + - {{ .Values.node.volumes.data }}/xstore + - {{ .Values.node.volumes.log }}/xstore diff --git a/charts/polardbx-operator/templates/host-path-file-service-service.yaml b/charts/polardbx-operator/templates/host-path-file-service-service.yaml index a772654..62c251b 100644 --- a/charts/polardbx-operator/templates/host-path-file-service-service.yaml +++ b/charts/polardbx-operator/templates/host-path-file-service-service.yaml @@ -16,4 +16,7 @@ spec: ports: - name: hpfs port: {{ .Values.hostPathFileService.port }} - targetPort: hpfs \ No newline at end of file + targetPort: hpfs + - name: filestream + port: {{ .Values.hostPathFileService.fsPort }} + targetPort: filestream \ No newline at end of file diff --git a/charts/polardbx-operator/templates/parameter-template-product.yaml b/charts/polardbx-operator/templates/parameter-template-product.yaml index 688215d..c8e30b8 100644 --- a/charts/polardbx-operator/templates/parameter-template-product.yaml +++ b/charts/polardbx-operator/templates/parameter-template-product.yaml @@ -7,2647 +7,2675 @@ spec: cn: name: cnTemplate paramList: - - defaultValue: 05:00 - divisibilityFactor: 0 - mode: readwrite - name: BACKGROUND_STATISTIC_COLLECTION_END_TIME - optional: '[00:00|01:00|02:00|03:00|04:00|05:00|06:00|07:00|08:00|09:00|10:00|11:00|12:00|13:00|14:00|15:00|16:00|17:00|18:00|19:00|20:00|21:00|22:00|23:00]' - restart: false - unit: STRING - - defaultValue: 02:00 - divisibilityFactor: 0 - mode: readwrite - name: BACKGROUND_STATISTIC_COLLECTION_START_TIME - optional: 
'[00:00|01:00|02:00|03:00|04:00|05:00|06:00|07:00|08:00|09:00|10:00|11:00|12:00|13:00|14:00|15:00|16:00|17:00|18:00|19:00|20:00|21:00|22:00|23:00]' - restart: false - unit: STRING - - defaultValue: '5000' - divisibilityFactor: 1 - mode: readwrite - name: CONN_POOL_BLOCK_TIMEOUT - optional: '[1000-60000]' - restart: false - unit: INT - - defaultValue: '30' - divisibilityFactor: 1 - mode: readwrite - name: CONN_POOL_IDLE_TIMEOUT - optional: '[1-60]' - restart: false - unit: INT - - defaultValue: '60' - divisibilityFactor: 1 - mode: readwrite - name: CONN_POOL_MAX_POOL_SIZE - optional: '[1-1600]' - restart: false - unit: INT - - defaultValue: '0' - divisibilityFactor: 1 - mode: readwrite - name: CONN_POOL_MAX_WAIT_THREAD_COUNT - optional: '[-1-8192]' - restart: false - unit: INT - - defaultValue: '20' - divisibilityFactor: 1 - mode: readwrite - name: CONN_POOL_MIN_POOL_SIZE - optional: '[0-60]' - restart: false - unit: INT - - defaultValue: '512' - divisibilityFactor: 1 - mode: readwrite - name: CONN_POOL_XPROTO_MAX_POOLED_SESSION_PER_INST - optional: '[1-8192]' - restart: false - unit: INT - - defaultValue: '0' - divisibilityFactor: 1 - mode: readwrite - name: CONN_POOL_XPROTO_STORAGE_DB_PORT - optional: '[-1-0]' - restart: false - unit: INT - - defaultValue: 'true' - divisibilityFactor: 0 - mode: readwrite - name: ENABLE_BACKGROUND_STATISTIC_COLLECTION - optional: '[true|false]' - restart: false - unit: STRING - - defaultValue: 'true' - divisibilityFactor: 1 - mode: readwrite - name: ENABLE_COMPLEX_DML_CROSS_DB - optional: '[true|false]' - restart: false - unit: STRING - - defaultValue: 'true' - divisibilityFactor: 0 - mode: readwrite - name: ENABLE_HLL - optional: '[true|false]' - restart: false - unit: STRING - - defaultValue: 'true' - divisibilityFactor: 0 - mode: readwrite - name: ENABLE_LOCAL_MODE - optional: '[true|false]' - restart: false - unit: STRING - - defaultValue: 'true' - divisibilityFactor: 0 - mode: readwrite - name: ENABLE_LOGICALVIEW_COST - optional: '[true|false]' - restart: false - unit: STRING - - defaultValue: 'false' - divisibilityFactor: 1 - mode: readwrite - name: ENABLE_RECYCLEBIN - optional: '[true|false]' - restart: false - unit: STRING - - defaultValue: 'true' - divisibilityFactor: 0 - mode: readwrite - name: ENABLE_SPM - optional: '[true|false]' - restart: false - unit: STRING - - defaultValue: 'OFF' - divisibilityFactor: 1 - mode: readwrite - name: ENABLE_SQL_FLASHBACK_EXACT_MATCH - optional: '[ON|OFF]' - restart: false - unit: STRING - - defaultValue: 'true' - divisibilityFactor: 0 - mode: readwrite - name: ENABLE_STATEMENTS_SUMMARY - optional: '[true|false]' - restart: false - unit: STRING - - defaultValue: 'true' - divisibilityFactor: 0 - mode: readwrite - name: ENABLE_STATISTIC_FEEDBACK - optional: '[true|false]' - restart: false - unit: STRING - - defaultValue: 'true' - divisibilityFactor: 1 - mode: readwrite - name: FORBID_EXECUTE_DML_ALL - optional: '[true|false]' - restart: false - unit: STRING - - defaultValue: '-1' - divisibilityFactor: 1 - mode: readwrite - name: GENERAL_DYNAMIC_SPEED_LIMITATION - optional: '[-1-10000000]' - restart: false - unit: INT - - defaultValue: 'false' - divisibilityFactor: 1 - mode: readwrite - name: INFO_SCHEMA_QUERY_WITH_STAT - optional: '[true|false]' - restart: false - unit: STRING - - defaultValue: '2' - divisibilityFactor: 0 - mode: readwrite - name: IN_SUB_QUERY_THRESHOLD - optional: '[1-65535]' - restart: false - unit: INT - - defaultValue: SYSTEM - divisibilityFactor: 1 - mode: readwrite - name: 
LOGICAL_DB_TIME_ZONE - optional: '[SYSTEM|±HH:mm]' - restart: false - unit: TZ - - defaultValue: '28800000' - divisibilityFactor: 1 - mode: readwrite - name: LOGIC_IDLE_TIMEOUT - optional: '[3600000-86400000]' - restart: false - unit: INT - - defaultValue: '16777216' - divisibilityFactor: 1 - mode: readwrite - name: MAX_ALLOWED_PACKET - optional: '[4194304-33554432]' - restart: false - unit: INT - - defaultValue: '0' - divisibilityFactor: 1 - mode: readwrite - name: PARALLELISM - optional: '[-1-8]' - restart: false - unit: INT - - defaultValue: '-1' - divisibilityFactor: 1 - mode: readwrite - name: PER_QUERY_MEMORY_LIMIT - optional: '[-1-9223372036854775807]' - restart: false - unit: INT - - defaultValue: 00:00-01:00 - divisibilityFactor: 1 - mode: readwrite - name: PURGE_TRANS_START_TIME - optional: 00:00~23:59 - restart: false - unit: HOUR_RANGE - - defaultValue: '1000' - divisibilityFactor: 1 - mode: readwrite - name: SLOW_SQL_TIME - optional: '[1000-900000]' - restart: false - unit: INT - - defaultValue: '900000' - divisibilityFactor: 1 - mode: readwrite - name: SOCKET_TIMEOUT - optional: '[0-3600000]' - restart: false - unit: INT - - defaultValue: '1' - divisibilityFactor: 1 - mode: readwrite - name: STATEMENTS_SUMMARY_PERCENT - optional: '[0-100]' - restart: false - unit: INT - - defaultValue: REPEATABLE-READ - divisibilityFactor: 0 - mode: readwrite - name: TRANSACTION_ISOLATION - optional: '[REPEATABLE-READ|READ-COMMITTED|READ-UNCOMMITTED|SERIALIZABLE]' - restart: false - unit: STRING - - defaultValue: '500' - divisibilityFactor: 1 - mode: readwrite - name: XPROTO_MAX_DN_CONCURRENT - optional: '[1-8192]' - restart: false - unit: INT - - defaultValue: '32' - divisibilityFactor: 1 - mode: readwrite - name: XPROTO_MAX_DN_WAIT_CONNECTION - optional: '[1-8192]' - restart: false - unit: INT - - defaultValue: 'false' - divisibilityFactor: 1 - mode: readwrite - name: ENABLE_COROUTINE - optional: '[true|false]' - restart: true - unit: STRING + - defaultValue: 05:00 + divisibilityFactor: 0 + mode: readwrite + name: BACKGROUND_STATISTIC_COLLECTION_END_TIME + optional: '[00:00|01:00|02:00|03:00|04:00|05:00|06:00|07:00|08:00|09:00|10:00|11:00|12:00|13:00|14:00|15:00|16:00|17:00|18:00|19:00|20:00|21:00|22:00|23:00]' + restart: false + unit: STRING + - defaultValue: 02:00 + divisibilityFactor: 0 + mode: readwrite + name: BACKGROUND_STATISTIC_COLLECTION_START_TIME + optional: '[00:00|01:00|02:00|03:00|04:00|05:00|06:00|07:00|08:00|09:00|10:00|11:00|12:00|13:00|14:00|15:00|16:00|17:00|18:00|19:00|20:00|21:00|22:00|23:00]' + restart: false + unit: STRING + - defaultValue: '5000' + divisibilityFactor: 1 + mode: readwrite + name: CONN_POOL_BLOCK_TIMEOUT + optional: '[1000-60000]' + restart: false + unit: INT + - defaultValue: '30' + divisibilityFactor: 1 + mode: readwrite + name: CONN_POOL_IDLE_TIMEOUT + optional: '[1-60]' + restart: false + unit: INT + - defaultValue: '60' + divisibilityFactor: 1 + mode: readwrite + name: CONN_POOL_MAX_POOL_SIZE + optional: '[1-1600]' + restart: false + unit: INT + - defaultValue: '0' + divisibilityFactor: 1 + mode: readwrite + name: CONN_POOL_MAX_WAIT_THREAD_COUNT + optional: '[-1-8192]' + restart: false + unit: INT + - defaultValue: '20' + divisibilityFactor: 1 + mode: readwrite + name: CONN_POOL_MIN_POOL_SIZE + optional: '[0-60]' + restart: false + unit: INT + - defaultValue: '512' + divisibilityFactor: 1 + mode: readwrite + name: CONN_POOL_XPROTO_MAX_POOLED_SESSION_PER_INST + optional: '[1-8192]' + restart: false + unit: INT + - defaultValue: '0' + 
divisibilityFactor: 1 + mode: readwrite + name: CONN_POOL_XPROTO_STORAGE_DB_PORT + optional: '[-1-0]' + restart: false + unit: INT + - defaultValue: 'true' + divisibilityFactor: 0 + mode: readwrite + name: ENABLE_BACKGROUND_STATISTIC_COLLECTION + optional: '[true|false]' + restart: false + unit: STRING + - defaultValue: 'true' + divisibilityFactor: 1 + mode: readwrite + name: ENABLE_COMPLEX_DML_CROSS_DB + optional: '[true|false]' + restart: false + unit: STRING + - defaultValue: 'true' + divisibilityFactor: 0 + mode: readwrite + name: ENABLE_HLL + optional: '[true|false]' + restart: false + unit: STRING + - defaultValue: 'true' + divisibilityFactor: 0 + mode: readwrite + name: ENABLE_LOCAL_MODE + optional: '[true|false]' + restart: false + unit: STRING + - defaultValue: 'true' + divisibilityFactor: 0 + mode: readwrite + name: ENABLE_LOGICALVIEW_COST + optional: '[true|false]' + restart: false + unit: STRING + - defaultValue: 'false' + divisibilityFactor: 1 + mode: readwrite + name: ENABLE_RECYCLEBIN + optional: '[true|false]' + restart: false + unit: STRING + - defaultValue: 'true' + divisibilityFactor: 0 + mode: readwrite + name: ENABLE_SPM + optional: '[true|false]' + restart: false + unit: STRING + - defaultValue: 'OFF' + divisibilityFactor: 1 + mode: readwrite + name: ENABLE_SQL_FLASHBACK_EXACT_MATCH + optional: '[ON|OFF]' + restart: false + unit: STRING + - defaultValue: 'true' + divisibilityFactor: 0 + mode: readwrite + name: ENABLE_STATEMENTS_SUMMARY + optional: '[true|false]' + restart: false + unit: STRING + - defaultValue: 'true' + divisibilityFactor: 0 + mode: readwrite + name: ENABLE_STATISTIC_FEEDBACK + optional: '[true|false]' + restart: false + unit: STRING + - defaultValue: 'true' + divisibilityFactor: 1 + mode: readwrite + name: FORBID_EXECUTE_DML_ALL + optional: '[true|false]' + restart: false + unit: STRING + - defaultValue: '-1' + divisibilityFactor: 1 + mode: readwrite + name: GENERAL_DYNAMIC_SPEED_LIMITATION + optional: '[-1-10000000]' + restart: false + unit: INT + - defaultValue: 'false' + divisibilityFactor: 1 + mode: readwrite + name: INFO_SCHEMA_QUERY_WITH_STAT + optional: '[true|false]' + restart: false + unit: STRING + - defaultValue: '2' + divisibilityFactor: 0 + mode: readwrite + name: IN_SUB_QUERY_THRESHOLD + optional: '[1-65535]' + restart: false + unit: INT + - defaultValue: SYSTEM + divisibilityFactor: 1 + mode: readwrite + name: LOGICAL_DB_TIME_ZONE + optional: '[SYSTEM|±HH:mm]' + restart: false + unit: TZ + - defaultValue: '28800000' + divisibilityFactor: 1 + mode: readwrite + name: LOGIC_IDLE_TIMEOUT + optional: '[3600000-86400000]' + restart: false + unit: INT + - defaultValue: '16777216' + divisibilityFactor: 1 + mode: readwrite + name: MAX_ALLOWED_PACKET + optional: '[4194304-33554432]' + restart: false + unit: INT + - defaultValue: '0' + divisibilityFactor: 1 + mode: readwrite + name: PARALLELISM + optional: '[-1-8]' + restart: false + unit: INT + - defaultValue: '-1' + divisibilityFactor: 1 + mode: readwrite + name: PER_QUERY_MEMORY_LIMIT + optional: '[-1-9223372036854775807]' + restart: false + unit: INT + - defaultValue: 00:00-01:00 + divisibilityFactor: 1 + mode: readwrite + name: PURGE_TRANS_START_TIME + optional: 00:00~23:59 + restart: false + unit: HOUR_RANGE + - defaultValue: '1000' + divisibilityFactor: 1 + mode: readwrite + name: SLOW_SQL_TIME + optional: '[1000-900000]' + restart: false + unit: INT + - defaultValue: '900000' + divisibilityFactor: 1 + mode: readwrite + name: SOCKET_TIMEOUT + optional: '[0-3600000]' + restart: false + 
unit: INT + - defaultValue: '1' + divisibilityFactor: 1 + mode: readwrite + name: STATEMENTS_SUMMARY_PERCENT + optional: '[0-100]' + restart: false + unit: INT + - defaultValue: REPEATABLE-READ + divisibilityFactor: 0 + mode: readwrite + name: TRANSACTION_ISOLATION + optional: '[REPEATABLE-READ|READ-COMMITTED|READ-UNCOMMITTED|SERIALIZABLE]' + restart: false + unit: STRING + - defaultValue: '500' + divisibilityFactor: 1 + mode: readwrite + name: XPROTO_MAX_DN_CONCURRENT + optional: '[1-8192]' + restart: false + unit: INT + - defaultValue: '32' + divisibilityFactor: 1 + mode: readwrite + name: XPROTO_MAX_DN_WAIT_CONNECTION + optional: '[1-8192]' + restart: false + unit: INT + - defaultValue: 'false' + divisibilityFactor: 1 + mode: readwrite + name: ENABLE_COROUTINE + optional: '[true|false]' + restart: true + unit: STRING dn: name: dnTemplate paramList: - - defaultValue: '1' - divisibilityFactor: 1 - mode: readwrite - name: auto_increment_increment - optional: '[1-65535]' - restart: false - unit: INT - - defaultValue: '1' - divisibilityFactor: 1 - mode: readwrite - name: auto_increment_offset - optional: '[1-65535]' - restart: false - unit: INT - - defaultValue: 'ON' - divisibilityFactor: 0 - mode: readwrite - name: autocommit - optional: '[ON|OFF]' - restart: false - unit: STRING - - defaultValue: 'ON' - divisibilityFactor: 0 - mode: readwrite - name: automatic_sp_privileges - optional: '[ON|OFF]' - restart: false - unit: STRING - - defaultValue: 'OFF' - divisibilityFactor: 0 - mode: readwrite - name: avoid_temporal_upgrade - optional: '[ON|OFF]' - restart: false - unit: STRING - - defaultValue: '3000' - divisibilityFactor: 1 - mode: readwrite - name: back_log - optional: '[0-65535]' - restart: true - unit: INT - - defaultValue: '1048576' - divisibilityFactor: 4096 - mode: readwrite - name: binlog_cache_size - optional: '[4096-16777216]' - restart: false - unit: INT - - defaultValue: CRC32 - divisibilityFactor: 0 - mode: readwrite - name: binlog_checksum - optional: '[CRC32|NONE]' - restart: true - unit: STRING - - defaultValue: 'OFF' - divisibilityFactor: 1 - mode: readwrite - name: binlog_order_commits - optional: '[ON|OFF]' - restart: false - unit: STRING - - defaultValue: full - divisibilityFactor: 0 - mode: readwrite - name: binlog_row_image - optional: '[full|minimal]' - restart: false - unit: STRING - - defaultValue: 'ON' - divisibilityFactor: 0 - mode: readwrite - name: binlog_rows_query_log_events - optional: '[ON|OFF]' - restart: false - unit: STRING - - defaultValue: '32768' - divisibilityFactor: 4096 - mode: readwrite - name: binlog_stmt_cache_size - optional: '[4096-16777216]' - restart: false - unit: INT - - defaultValue: WRITESET - divisibilityFactor: 1 - mode: readwrite - name: binlog_transaction_dependency_tracking - optional: '[WRITESET|WRITESET_SESSION|COMMIT_ORDER]' - restart: false - unit: STRING - - defaultValue: '"aes-128-ecb"' - divisibilityFactor: 1 - mode: readwrite - name: block_encryption_mode - optional: '["aes-128-ecb"|"aes-192-ecb"|"aes-256-ecb"|"aes-128-cbc"|"aes-192-cbc"|"aes-256-cbc"]' - restart: false - unit: STRING - - defaultValue: '4194304' - divisibilityFactor: 1 - mode: readwrite - name: bulk_insert_buffer_size - optional: '[0-4294967295]' - restart: false - unit: INT - - defaultValue: utf8 - divisibilityFactor: 0 - mode: readwrite - name: character_set_server - optional: '[utf8|latin1|gbk|gb18030|utf8mb4]' - restart: true - unit: STRING - - defaultValue: '2' - divisibilityFactor: 0 - mode: readwrite - name: concurrent_insert - optional: '[0|1|2]' - 
restart: false - unit: STRING - - defaultValue: '10' - divisibilityFactor: 1 - mode: readwrite - name: connect_timeout - optional: '[1-3600]' - restart: false - unit: INT - - defaultValue: mysql_native_password - divisibilityFactor: 0 - mode: readwrite - name: default_authentication_plugin - optional: '[mysql_native_password|sha256_password|caching_sha2_password]' - restart: true - unit: STRING - - defaultValue: InnoDB - divisibilityFactor: 0 - mode: readwrite - name: default_storage_engine - optional: '[InnoDB|innodb]' - restart: true - unit: STRING - - defaultValue: '+8:00' - divisibilityFactor: 0 - mode: readwrite - name: default_time_zone - optional: '[SYSTEM|-12:00|-11:00|-10:00|-9:00|-8:00|-7:00|-6:00|-5:00|-4:00|-3:00|-2:00|-1:00|\+0:00|\+1:00|\+2:00|\+3:00|\+4:00|\+5:00|\+5:30|\+5:45|\+6:00|\+6:30|\+7:00|\+8:00|\+9:00|\+10:00|\+11:00|\+12:00|\+13:00]' - restart: true - unit: STRING - - defaultValue: '0' - divisibilityFactor: 1 - mode: readwrite - name: default_week_format - optional: '[0-7]' - restart: false - unit: INT - - defaultValue: 'ON' - divisibilityFactor: 0 - mode: readwrite - name: delay_key_write - optional: '[ON|OFF|ALL]' - restart: false - unit: STRING - - defaultValue: '100' - divisibilityFactor: 1 - mode: readwrite - name: delayed_insert_limit - optional: '[1-4294967295]' - restart: false - unit: INT - - defaultValue: '300' - divisibilityFactor: 1 - mode: readwrite - name: delayed_insert_timeout - optional: '[1-3600]' - restart: false - unit: INT - - defaultValue: '1000' - divisibilityFactor: 1 - mode: readwrite - name: delayed_queue_size - optional: '[1-4294967295]' - restart: false - unit: INT - - defaultValue: 'ON' - divisibilityFactor: 0 - mode: readwrite - name: disconnect_on_expired_password - optional: '[ON|OFF]' - restart: true - unit: STRING - - defaultValue: '4' - divisibilityFactor: 1 - mode: readwrite - name: div_precision_increment - optional: '[0-30]' - restart: false - unit: INT - - defaultValue: 'OFF' - divisibilityFactor: 0 - mode: readwrite - name: end_markers_in_json - optional: '[ON|OFF]' - restart: false - unit: STRING - - defaultValue: 'ON' - divisibilityFactor: 0 - mode: readwrite - name: enforce_gtid_consistency - optional: .* - restart: true - unit: STRING - - defaultValue: '200' - divisibilityFactor: 1 - mode: readwrite - name: eq_range_index_dive_limit - optional: '[0-4294967295]' - restart: false - unit: INT - - defaultValue: 'OFF' - divisibilityFactor: 0 - mode: readwrite - name: event_scheduler - optional: '[ON|OFF]' - restart: false - unit: STRING - - defaultValue: '0' - divisibilityFactor: 1 - mode: readwrite - name: expire_logs_days - optional: .* - restart: true - unit: INT - - defaultValue: 'OFF' - divisibilityFactor: 0 - mode: readwrite - name: explicit_defaults_for_timestamp - optional: '[ON|OFF]' - restart: false - unit: STRING - - defaultValue: '0' - divisibilityFactor: 1 - mode: readwrite - name: flush_time - optional: '[0-31536000]' - restart: false - unit: INT - - defaultValue: '84' - divisibilityFactor: 1 - mode: readwrite - name: ft_max_word_len - optional: '[10-4294967295]' - restart: true - unit: INT - - defaultValue: '4' - divisibilityFactor: 1 - mode: readwrite - name: ft_min_word_len - optional: '[1-3600]' - restart: true - unit: INT - - defaultValue: '20' - divisibilityFactor: 1 - mode: readwrite - name: ft_query_expansion_limit - optional: '[0-1000]' - restart: true - unit: INT - - defaultValue: 'OFF' - divisibilityFactor: 0 - mode: readwrite - name: general_log - optional: 'OFF' - restart: true - unit: STRING - - 
defaultValue: '1024' - divisibilityFactor: 1 - mode: readwrite - name: group_concat_max_len - optional: '[4-1844674407370954752]' - restart: false - unit: INT - - defaultValue: 'ON' - divisibilityFactor: 0 - mode: readwrite - name: gtid_mode - optional: .* - restart: true - unit: STRING - - defaultValue: '644' - divisibilityFactor: 1 - mode: readwrite - name: host_cache_size - optional: '[0-65535]' - restart: false - unit: INT - - defaultValue: '''''' - divisibilityFactor: 0 - mode: readwrite - name: init_connect - optional: '[''''|''set names utf8mb4''|''set names utf8''|''set default_collation_for_utf8mb4=utf8mb4_general_ci''|''set + - defaultValue: '1' + divisibilityFactor: 1 + mode: readwrite + name: auto_increment_increment + optional: '[1-65535]' + restart: false + unit: INT + - defaultValue: '1' + divisibilityFactor: 1 + mode: readwrite + name: auto_increment_offset + optional: '[1-65535]' + restart: false + unit: INT + - defaultValue: 'ON' + divisibilityFactor: 0 + mode: readwrite + name: autocommit + optional: '[ON|OFF]' + restart: false + unit: STRING + - defaultValue: 'ON' + divisibilityFactor: 0 + mode: readwrite + name: automatic_sp_privileges + optional: '[ON|OFF]' + restart: false + unit: STRING + - defaultValue: 'OFF' + divisibilityFactor: 0 + mode: readwrite + name: avoid_temporal_upgrade + optional: '[ON|OFF]' + restart: false + unit: STRING + - defaultValue: '3000' + divisibilityFactor: 1 + mode: readwrite + name: back_log + optional: '[0-65535]' + restart: true + unit: INT + - defaultValue: '1048576' + divisibilityFactor: 4096 + mode: readwrite + name: binlog_cache_size + optional: '[4096-16777216]' + restart: false + unit: INT + - defaultValue: CRC32 + divisibilityFactor: 0 + mode: readwrite + name: binlog_checksum + optional: '[CRC32|NONE]' + restart: true + unit: STRING + - defaultValue: 'OFF' + divisibilityFactor: 1 + mode: readwrite + name: binlog_order_commits + optional: '[ON|OFF]' + restart: false + unit: STRING + - defaultValue: full + divisibilityFactor: 0 + mode: readwrite + name: binlog_row_image + optional: '[full|minimal]' + restart: false + unit: STRING + - defaultValue: 'ON' + divisibilityFactor: 0 + mode: readwrite + name: binlog_rows_query_log_events + optional: '[ON|OFF]' + restart: false + unit: STRING + - defaultValue: '32768' + divisibilityFactor: 4096 + mode: readwrite + name: binlog_stmt_cache_size + optional: '[4096-16777216]' + restart: false + unit: INT + - defaultValue: WRITESET + divisibilityFactor: 1 + mode: readwrite + name: binlog_transaction_dependency_tracking + optional: '[WRITESET|WRITESET_SESSION|COMMIT_ORDER]' + restart: false + unit: STRING + - defaultValue: '"aes-128-ecb"' + divisibilityFactor: 1 + mode: readwrite + name: block_encryption_mode + optional: '["aes-128-ecb"|"aes-192-ecb"|"aes-256-ecb"|"aes-128-cbc"|"aes-192-cbc"|"aes-256-cbc"]' + restart: false + unit: STRING + - defaultValue: '4194304' + divisibilityFactor: 1 + mode: readwrite + name: bulk_insert_buffer_size + optional: '[0-4294967295]' + restart: false + unit: INT + - defaultValue: utf8 + divisibilityFactor: 0 + mode: readwrite + name: character_set_server + optional: '[utf8|latin1|gbk|gb18030|utf8mb4]' + restart: true + unit: STRING + - defaultValue: '2' + divisibilityFactor: 0 + mode: readwrite + name: concurrent_insert + optional: '[0|1|2]' + restart: false + unit: STRING + - defaultValue: '10' + divisibilityFactor: 1 + mode: readwrite + name: connect_timeout + optional: '[1-3600]' + restart: false + unit: INT + - defaultValue: mysql_native_password + 
divisibilityFactor: 0 + mode: readwrite + name: default_authentication_plugin + optional: '[mysql_native_password|sha256_password|caching_sha2_password]' + restart: true + unit: STRING + - defaultValue: InnoDB + divisibilityFactor: 0 + mode: readwrite + name: default_storage_engine + optional: '[InnoDB|innodb]' + restart: true + unit: STRING + - defaultValue: '+8:00' + divisibilityFactor: 0 + mode: readwrite + name: default_time_zone + optional: '[SYSTEM|-12:00|-11:00|-10:00|-9:00|-8:00|-7:00|-6:00|-5:00|-4:00|-3:00|-2:00|-1:00|\+0:00|\+1:00|\+2:00|\+3:00|\+4:00|\+5:00|\+5:30|\+5:45|\+6:00|\+6:30|\+7:00|\+8:00|\+9:00|\+10:00|\+11:00|\+12:00|\+13:00]' + restart: true + unit: STRING + - defaultValue: '0' + divisibilityFactor: 1 + mode: readwrite + name: default_week_format + optional: '[0-7]' + restart: false + unit: INT + - defaultValue: 'ON' + divisibilityFactor: 0 + mode: readwrite + name: delay_key_write + optional: '[ON|OFF|ALL]' + restart: false + unit: STRING + - defaultValue: '100' + divisibilityFactor: 1 + mode: readwrite + name: delayed_insert_limit + optional: '[1-4294967295]' + restart: false + unit: INT + - defaultValue: '300' + divisibilityFactor: 1 + mode: readwrite + name: delayed_insert_timeout + optional: '[1-3600]' + restart: false + unit: INT + - defaultValue: '1000' + divisibilityFactor: 1 + mode: readwrite + name: delayed_queue_size + optional: '[1-4294967295]' + restart: false + unit: INT + - defaultValue: 'ON' + divisibilityFactor: 0 + mode: readwrite + name: disconnect_on_expired_password + optional: '[ON|OFF]' + restart: true + unit: STRING + - defaultValue: '4' + divisibilityFactor: 1 + mode: readwrite + name: div_precision_increment + optional: '[0-30]' + restart: false + unit: INT + - defaultValue: 'OFF' + divisibilityFactor: 0 + mode: readwrite + name: end_markers_in_json + optional: '[ON|OFF]' + restart: false + unit: STRING + - defaultValue: 'ON' + divisibilityFactor: 0 + mode: readwrite + name: enforce_gtid_consistency + optional: .* + restart: true + unit: STRING + - defaultValue: '200' + divisibilityFactor: 1 + mode: readwrite + name: eq_range_index_dive_limit + optional: '[0-4294967295]' + restart: false + unit: INT + - defaultValue: 'OFF' + divisibilityFactor: 0 + mode: readwrite + name: event_scheduler + optional: '[ON|OFF]' + restart: false + unit: STRING + - defaultValue: '0' + divisibilityFactor: 1 + mode: readwrite + name: expire_logs_days + optional: .* + restart: true + unit: INT + - defaultValue: 'OFF' + divisibilityFactor: 0 + mode: readwrite + name: explicit_defaults_for_timestamp + optional: '[ON|OFF]' + restart: false + unit: STRING + - defaultValue: '0' + divisibilityFactor: 1 + mode: readwrite + name: flush_time + optional: '[0-31536000]' + restart: false + unit: INT + - defaultValue: '84' + divisibilityFactor: 1 + mode: readwrite + name: ft_max_word_len + optional: '[10-4294967295]' + restart: true + unit: INT + - defaultValue: '4' + divisibilityFactor: 1 + mode: readwrite + name: ft_min_word_len + optional: '[1-3600]' + restart: true + unit: INT + - defaultValue: '20' + divisibilityFactor: 1 + mode: readwrite + name: ft_query_expansion_limit + optional: '[0-1000]' + restart: true + unit: INT + - defaultValue: 'OFF' + divisibilityFactor: 0 + mode: readwrite + name: general_log + optional: 'OFF' + restart: true + unit: STRING + - defaultValue: '1024' + divisibilityFactor: 1 + mode: readwrite + name: group_concat_max_len + optional: '[4-1844674407370954752]' + restart: false + unit: INT + - defaultValue: 'ON' + divisibilityFactor: 0 + mode: 
readwrite + name: gtid_mode + optional: .* + restart: true + unit: STRING + - defaultValue: '644' + divisibilityFactor: 1 + mode: readwrite + name: host_cache_size + optional: '[0-65535]' + restart: false + unit: INT + - defaultValue: '''''' + divisibilityFactor: 0 + mode: readwrite + name: init_connect + optional: '[''''|''set names utf8mb4''|''set names utf8''|''set default_collation_for_utf8mb4=utf8mb4_general_ci''|''set default_collation_for_utf8mb4=utf8mb4_general_ci;set names utf8mb4''|''set names utf8mb4 collate utf8mb4_general_ci'']' - restart: false - unit: STRING - - defaultValue: 'ON' - divisibilityFactor: 0 - mode: readwrite - name: innodb_adaptive_flushing - optional: '[ON|OFF]' - restart: false - unit: STRING - - defaultValue: '10' - divisibilityFactor: 1 - mode: readwrite - name: innodb_adaptive_flushing_lwm - optional: '[0-70]' - restart: false - unit: INT - - defaultValue: 'OFF' - divisibilityFactor: 0 - mode: readwrite - name: innodb_adaptive_hash_index - optional: '[ON|OFF]' - restart: false - unit: STRING - - defaultValue: '150000' - divisibilityFactor: 1 - mode: readwrite - name: innodb_adaptive_max_sleep_delay - optional: '[1-1000000]' - restart: false - unit: INT - - defaultValue: '64' - divisibilityFactor: 1 - mode: readwrite - name: innodb_autoextend_increment - optional: '[1-1000]' - restart: false - unit: INT - - defaultValue: '2' - divisibilityFactor: 0 - mode: readwrite - name: innodb_autoinc_lock_mode - optional: '[0|1|2]' - restart: true - unit: STRING - - defaultValue: '33554432' - divisibilityFactor: 1048576 - mode: readwrite - name: innodb_buffer_pool_chunk_size - optional: '[1048576-9223372036854775807]' - restart: true - unit: INT - - defaultValue: 'ON' - divisibilityFactor: 0 - mode: readwrite - name: innodb_buffer_pool_dump_at_shutdown - optional: '[ON|OFF]' - restart: false - unit: STRING - - defaultValue: '25' - divisibilityFactor: 1 - mode: readwrite - name: innodb_buffer_pool_dump_pct - optional: '[1-100]' - restart: false - unit: INT - - defaultValue: '8' - divisibilityFactor: 1 - mode: readwrite - name: innodb_buffer_pool_instances - optional: '[1-64]' - restart: true - unit: INT - - defaultValue: 'ON' - divisibilityFactor: 0 - mode: readwrite - name: innodb_buffer_pool_load_at_startup - optional: '[ON|OFF]' - restart: true - unit: STRING - - defaultValue: '{DBInstanceClassMemory*3/4}' - divisibilityFactor: 1 - mode: readwrite - name: innodb_buffer_pool_size - optional: '[134217728-18446744073709551615]' - restart: true - unit: INT - - defaultValue: '25' - divisibilityFactor: 1 - mode: readwrite - name: innodb_change_buffer_max_size - optional: '[0-50]' - restart: false - unit: INT - - defaultValue: none - divisibilityFactor: 0 - mode: readwrite - name: innodb_change_buffering - optional: '[none|inserts|deletes|changes|purges|all]' - restart: false - unit: STRING - - defaultValue: crc32 - divisibilityFactor: 0 - mode: readwrite - name: innodb_checksum_algorithm - optional: '[innodb|crc32|none|strict_innodb|strict_crc32|strict_none]' - restart: false - unit: STRING - - defaultValue: 'OFF' - divisibilityFactor: 0 - mode: readwrite - name: innodb_cmp_per_index_enabled - optional: '[ON|OFF]' - restart: false - unit: STRING - - defaultValue: '9999999999' - divisibilityFactor: 1 - mode: readwrite - name: loose_innodb_commit_cleanout_max_rows - optional: '[0-9223372036854775807]' - restart: true - unit: INT - - defaultValue: '0' - divisibilityFactor: 1 - mode: readwrite - name: innodb_commit_concurrency - optional: '[0-1000]' - restart: true - unit: INT 
- - defaultValue: '5' - divisibilityFactor: 1 - mode: readwrite - name: innodb_compression_failure_threshold_pct - optional: '[0-100]' - restart: false - unit: INT - - defaultValue: '6' - divisibilityFactor: 1 - mode: readwrite - name: innodb_compression_level - optional: '[0-9]' - restart: false - unit: INT - - defaultValue: '50' - divisibilityFactor: 1 - mode: readwrite - name: innodb_compression_pad_pct_max - optional: '[0-70]' - restart: false - unit: INT - - defaultValue: '5000' - divisibilityFactor: 1 - mode: readwrite - name: innodb_concurrency_tickets - optional: '[1-4294967295]' - restart: false - unit: INT - - defaultValue: 'ON' - divisibilityFactor: 0 - mode: readwrite - name: innodb_data_file_purge - optional: '[ON|OFF]' - restart: false - unit: STRING - - defaultValue: '100' - divisibilityFactor: 1 - mode: readwrite - name: innodb_data_file_purge_interval - optional: '[0-10000]' - restart: false - unit: INT - - defaultValue: '128' - divisibilityFactor: 1 - mode: readwrite - name: innodb_data_file_purge_max_size - optional: '[16-1073741824]' - restart: false - unit: INT - - defaultValue: 'ON' - divisibilityFactor: 0 - mode: readwrite - name: innodb_deadlock_detect - optional: '[ON|OFF]' - restart: false - unit: STRING - - defaultValue: 'ON' - divisibilityFactor: 0 - mode: readwrite - name: innodb_disable_sort_file_cache - optional: '[ON|OFF]' - restart: false - unit: STRING - - defaultValue: '0' - divisibilityFactor: 1 - mode: readwrite - name: innodb_equal_gcn_visible - optional: .* - restart: true - unit: INT - - defaultValue: '1' - divisibilityFactor: 0 - mode: readwrite - name: innodb_flush_log_at_trx_commit - optional: '[0|1|2]' - restart: false - unit: STRING - - defaultValue: O_DIRECT - divisibilityFactor: 0 - mode: readwrite - name: innodb_flush_method - optional: '[fsync|O_DSYNC|littlesync|nosync|O_DIRECT|O_DIRECT_NO_FSYNC]' - restart: true - unit: STRING - - defaultValue: '0' - divisibilityFactor: 0 - mode: readwrite - name: innodb_flush_neighbors - optional: '[0|1|2]' - restart: false - unit: STRING - - defaultValue: 'ON' - divisibilityFactor: 0 - mode: readwrite - name: innodb_flush_sync - optional: '[ON|OFF]' - restart: false - unit: STRING - - defaultValue: '8000000' - divisibilityFactor: 1 - mode: readwrite - name: innodb_ft_cache_size - optional: '[1600000-80000000]' - restart: true - unit: INT - - defaultValue: 'OFF' - divisibilityFactor: 0 - mode: readwrite - name: innodb_ft_enable_diag_print - optional: '[ON|OFF]' - restart: false - unit: STRING - - defaultValue: 'ON' - divisibilityFactor: 0 - mode: readwrite - name: innodb_ft_enable_stopword - optional: '[ON|OFF]' - restart: false - unit: STRING - - defaultValue: '84' - divisibilityFactor: 1 - mode: readwrite - name: innodb_ft_max_token_size - optional: '[10-84]' - restart: true - unit: INT - - defaultValue: '3' - divisibilityFactor: 1 - mode: readwrite - name: innodb_ft_min_token_size - optional: '[0-16]' - restart: true - unit: INT - - defaultValue: '2000' - divisibilityFactor: 1 - mode: readwrite - name: innodb_ft_num_word_optimize - optional: '[0-10000]' - restart: false - unit: INT - - defaultValue: '2000000000' - divisibilityFactor: 1 - mode: readwrite - name: innodb_ft_result_cache_limit - optional: '[1000000-4294967295]' - restart: false - unit: INT - - defaultValue: '2' - divisibilityFactor: 1 - mode: readwrite - name: innodb_ft_sort_pll_degree - optional: '[1-16]' - restart: true - unit: INT - - defaultValue: '640000000' - divisibilityFactor: 1 - mode: readwrite - name: innodb_ft_total_cache_size - 
optional: '[32000000-1600000000]' - restart: true - unit: INT - - defaultValue: '20000' - divisibilityFactor: 1 - mode: readwrite - name: innodb_io_capacity - optional: '[0-18446744073709551615]' - restart: false - unit: INT - - defaultValue: '40000' - divisibilityFactor: 1 - mode: readwrite - name: innodb_io_capacity_max - optional: '[0-18446744073709551615]' - restart: false - unit: INT - - defaultValue: '50' - divisibilityFactor: 1 - mode: readwrite - name: innodb_lock_wait_timeout - optional: '[1-1073741824]' - restart: false - unit: INT - - defaultValue: '209715200' - divisibilityFactor: 1 - mode: readwrite - name: innodb_log_buffer_size - optional: .* - restart: true - unit: INT - - defaultValue: 'ON' - divisibilityFactor: 0 - mode: readwrite - name: innodb_log_checksums - optional: '[ON|OFF]' - restart: false - unit: STRING - - defaultValue: 'OFF' - divisibilityFactor: 0 - mode: readwrite - name: innodb_log_compressed_pages - optional: '[ON|OFF]' - restart: false - unit: STRING - - defaultValue: '2147483648' - divisibilityFactor: 1024 - mode: readwrite - name: innodb_log_file_size - optional: '[4194304-107374182400]' - restart: true - unit: INT - - defaultValue: '8192' - divisibilityFactor: 1 - mode: readwrite - name: innodb_lru_scan_depth - optional: '[100-18446744073709551615]' - restart: false - unit: INT - - defaultValue: '75' - divisibilityFactor: 1 - mode: readwrite - name: innodb_max_dirty_pages_pct - optional: '[0-99]' - restart: false - unit: INT - - defaultValue: '0' - divisibilityFactor: 1 - mode: readwrite - name: innodb_max_dirty_pages_pct_lwm - optional: '[0-99]' - restart: false - unit: INT - - defaultValue: '0' - divisibilityFactor: 1 - mode: readwrite - name: innodb_max_purge_lag - optional: '[0-4294967295]' - restart: false - unit: INT - - defaultValue: '0' - divisibilityFactor: 1 - mode: readwrite - name: innodb_max_purge_lag_delay - optional: '[0-10000000]' - restart: false - unit: INT - - defaultValue: '1073741824' - divisibilityFactor: 1 - mode: readwrite - name: innodb_max_undo_log_size - optional: '[10485760-18446744073709551615]' - restart: false - unit: INT - - defaultValue: '' - divisibilityFactor: 0 - mode: readwrite - name: innodb_monitor_disable - optional: all - restart: false - unit: STRING - - defaultValue: '' - divisibilityFactor: 0 - mode: readwrite - name: innodb_monitor_enable - optional: all - restart: false - unit: STRING - - defaultValue: '37' - divisibilityFactor: 1 - mode: readwrite - name: innodb_old_blocks_pct - optional: '[5-95]' - restart: false - unit: INT - - defaultValue: '1000' - divisibilityFactor: 1 - mode: readwrite - name: innodb_old_blocks_time - optional: '[0-1024]' - restart: false - unit: INT - - defaultValue: '134217728' - divisibilityFactor: 1 - mode: readwrite - name: innodb_online_alter_log_max_size - optional: '[134217728-2147483647]' - restart: false - unit: INT - - defaultValue: '20000' - divisibilityFactor: 1 - mode: readwrite - name: innodb_open_files - optional: '[10-2147483647]' - restart: true - unit: INT - - defaultValue: 'OFF' - divisibilityFactor: 0 - mode: readwrite - name: innodb_optimize_fulltext_only - optional: '[ON|OFF]' - restart: false - unit: STRING - - defaultValue: '4' - divisibilityFactor: 1 - mode: readwrite - name: innodb_page_cleaners - optional: '[1-64]' - restart: true - unit: INT - - defaultValue: 'OFF' - divisibilityFactor: 0 - mode: readwrite - name: innodb_print_all_deadlocks - optional: '[OFF|ON]' - restart: false - unit: STRING - - defaultValue: '300' - divisibilityFactor: 1 - mode: 
readwrite - name: innodb_purge_batch_size - optional: '[1-5000]' - restart: true - unit: INT - - defaultValue: '128' - divisibilityFactor: 1 - mode: readwrite - name: innodb_purge_rseg_truncate_frequency - optional: '[1-128]' - restart: false - unit: INT - - defaultValue: '2' - divisibilityFactor: 1 - mode: readwrite - name: innodb_purge_threads - optional: '[1-32]' - restart: true - unit: INT - - defaultValue: 'OFF' - divisibilityFactor: 0 - mode: readwrite - name: innodb_random_read_ahead - optional: '[ON|OFF]' - restart: false - unit: STRING - - defaultValue: '0' - divisibilityFactor: 1 - mode: readwrite - name: innodb_read_ahead_threshold - optional: '[0-1024]' - restart: false - unit: INT - - defaultValue: '4' - divisibilityFactor: 1 - mode: readwrite - name: innodb_read_io_threads - optional: '[1-64]' - restart: true - unit: INT - - defaultValue: 'OFF' - divisibilityFactor: 0 - mode: readwrite - name: innodb_rollback_on_timeout - optional: '[OFF|ON]' - restart: true - unit: STRING - - defaultValue: '128' - divisibilityFactor: 1 - mode: readwrite - name: innodb_rollback_segments - optional: '[1-128]' - restart: false - unit: INT - - defaultValue: '1' - divisibilityFactor: 1 - mode: readwrite - name: innodb_snapshot_update_gcn - optional: .* - restart: true - unit: INT - - defaultValue: '1048576' - divisibilityFactor: 512 - mode: readwrite - name: innodb_sort_buffer_size - optional: '[65536-67108864]' - restart: true - unit: INT - - defaultValue: '6' - divisibilityFactor: 1 - mode: readwrite - name: innodb_spin_wait_delay - optional: '[0-4294967295]' - restart: false - unit: INT - - defaultValue: 'ON' - divisibilityFactor: 0 - mode: readwrite - name: innodb_stats_auto_recalc - optional: '[ON|OFF]' - restart: false - unit: STRING - - defaultValue: nulls_equal - divisibilityFactor: 0 - mode: readwrite - name: innodb_stats_method - optional: '[nulls_equal|nulls_unequal|nulls_ignored]' - restart: false - unit: STRING - - defaultValue: 'OFF' - divisibilityFactor: 0 - mode: readwrite - name: innodb_stats_on_metadata - optional: '[ON|OFF]' - restart: false - unit: STRING - - defaultValue: 'ON' - divisibilityFactor: 0 - mode: readwrite - name: innodb_stats_persistent - optional: '[ON|OFF]' - restart: false - unit: STRING - - defaultValue: '20' - divisibilityFactor: 1 - mode: readwrite - name: innodb_stats_persistent_sample_pages - optional: '[0-4294967295]' - restart: false - unit: INT - - defaultValue: '8' - divisibilityFactor: 1 - mode: readwrite - name: innodb_stats_transient_sample_pages - optional: '[1-4294967295]' - restart: false - unit: INT - - defaultValue: 'OFF' - divisibilityFactor: 0 - mode: readwrite - name: innodb_status_output - optional: '[ON|OFF]' - restart: false - unit: STRING - - defaultValue: 'OFF' - divisibilityFactor: 0 - mode: readwrite - name: innodb_status_output_locks - optional: '[ON|OFF]' - restart: false - unit: STRING - - defaultValue: 'OFF' - divisibilityFactor: 0 - mode: readwrite - name: innodb_strict_mode - optional: '[ON|OFF]' - restart: false - unit: STRING - - defaultValue: '16' - divisibilityFactor: 1 - mode: readwrite - name: innodb_sync_array_size - optional: '[1-64]' - restart: true - unit: INT - - defaultValue: '30' - divisibilityFactor: 1 - mode: readwrite - name: innodb_sync_spin_loops - optional: '[0-4294967295]' - restart: false - unit: INT - - defaultValue: 'ON' - divisibilityFactor: 0 - mode: readwrite - name: innodb_table_locks - optional: '[ON|OFF]' - restart: false - unit: STRING - - defaultValue: block - divisibilityFactor: 0 - mode: 
-   - defaultValue: '0'
-     divisibilityFactor: 1
-     mode: readwrite
-     name: innodb_thread_concurrency
-     optional: '[0-1000]'
-     restart: false
-     unit: INT
-   - defaultValue: '0'
-     divisibilityFactor: 1
-     mode: readwrite
-     name: innodb_thread_sleep_delay
-     optional: '[0-1000000]'
-     restart: false
-     unit: INT
-   - defaultValue: '4'
-     divisibilityFactor: 1
-     mode: readwrite
-     name: innodb_write_io_threads
-     optional: '[1-64]'
-     restart: true
-     unit: INT
-   - defaultValue: '7200'
-     divisibilityFactor: 1
-     mode: readwrite
-     name: interactive_timeout
-     optional: '[10-86400]'
-     restart: false
-     unit: INT
-   - defaultValue: '{LEAST(DBInstanceClassMemory/1048576*128, 262144)}'
-     divisibilityFactor: 1
-     mode: readwrite
-     name: join_buffer_size
-     optional: '[128-4294967295]'
-     restart: false
-     unit: INT
-   - defaultValue: '16777216'
-     divisibilityFactor: 1
-     mode: readwrite
-     name: key_buffer_size
-     optional: .*
-     restart: true
-     unit: INT
-   - defaultValue: '300'
-     divisibilityFactor: 100
-     mode: readwrite
-     name: key_cache_age_threshold
-     optional: '[100-4294967295]'
-     restart: false
-     unit: INT
-   - defaultValue: '1024'
-     divisibilityFactor: 512
-     mode: readwrite
-     name: key_cache_block_size
-     optional: '[512-16384]'
-     restart: false
-     unit: B
-   - defaultValue: '100'
-     divisibilityFactor: 1
-     mode: readwrite
-     name: key_cache_division_limit
-     optional: '[1-100]'
-     restart: false
-     unit: INT
-   - defaultValue: en_US
-     divisibilityFactor: 0
-     mode: readwrite
-     name: lc_time_names
-     optional: '[ja_JP|pt_BR|en_US]'
-     restart: false
-     unit: STRING
-   - defaultValue: 'OFF'
-     divisibilityFactor: 0
-     mode: readwrite
-     name: local_infile
-     optional: '[ON|OFF]'
-     restart: false
-     unit: STRING
-   - defaultValue: '31536000'
-     divisibilityFactor: 1
-     mode: readwrite
-     name: lock_wait_timeout
-     optional: '[1-1073741824]'
-     restart: false
-     unit: INT
-   - defaultValue: '1'
-     divisibilityFactor: 0
-     mode: readwrite
-     name: log_bin_use_v1_row_events
-     optional: '[0|1]'
-     restart: false
-     unit: STRING
-   - defaultValue: '2'
-     divisibilityFactor: 1
-     mode: readwrite
-     name: log_error_verbosity
-     optional: '[1-3]'
-     restart: false
-     unit: INT
-   - defaultValue: 'OFF'
-     divisibilityFactor: 0
-     mode: readwrite
-     name: log_queries_not_using_indexes
-     optional: '[ON|OFF]'
-     restart: false
-     unit: STRING
-   - defaultValue: '0'
-     divisibilityFactor: 0
-     mode: readwrite
-     name: log_slave_updates
-     optional: .*
-     restart: true
-     unit: INT
-   - defaultValue: 'ON'
-     divisibilityFactor: 0
-     mode: readwrite
-     name: log_slow_admin_statements
-     optional: '[ON|OFF]'
-     restart: false
-     unit: STRING
-   - defaultValue: '0'
-     divisibilityFactor: 1
-     mode: readwrite
-     name: log_throttle_queries_not_using_indexes
-     optional: '[0-4294967295]'
-     restart: false
-     unit: INT
-   - defaultValue: '1'
-     divisibilityFactor: 6
-     mode: readwrite
-     name: long_query_time
-     optional: '[0.1-31536000]'
-     restart: false
-     unit: DOUBLE
-   - defaultValue: '0'
-     divisibilityFactor: 1
-     mode: readwrite
-     name: loose_ccl_max_waiting_count
-     optional: '[0-9223372036854775807]'
-     restart: false
-     unit: INT
-   - defaultValue: '4'
-     divisibilityFactor: 1
-     mode: readwrite
-     name: loose_ccl_queue_bucket_count
-     optional: '[1-64]'
-     restart: false
-     unit: INT
-   - defaultValue: '64'
-     divisibilityFactor: 1
-     mode: readwrite
-     name: loose_ccl_queue_bucket_size
-     optional: '[1-4096]'
-     restart: false
-     unit: INT
-   - defaultValue: '86400'
-     divisibilityFactor: 1
-     mode: readwrite
-     name: loose_ccl_wait_timeout
-     optional: '[1-31536000]'
-     restart: false
-     unit: INT
-   - defaultValue: 'ON'
-     divisibilityFactor: 0
-     mode: readwrite
-     name: loose_consensus_auto_leader_transfer
-     optional: .*
-     restart: true
-     unit: STRING
-   - defaultValue: 'ON'
-     divisibilityFactor: 0
-     mode: readwrite
-     name: loose_consensus_auto_reset_match_index
-     optional: .*
-     restart: true
-     unit: STRING
-   - defaultValue: '10000'
-     divisibilityFactor: 1
-     mode: readwrite
-     name: loose_consensus_election_timeout
-     optional: .*
-     restart: true
-     unit: INT
-   - defaultValue: '8'
-     divisibilityFactor: 1
-     mode: readwrite
-     name: loose_consensus_io_thread_cnt
-     optional: .*
-     restart: true
-     unit: INT
-   - defaultValue: 'ON'
-     divisibilityFactor: 0
-     mode: readwrite
-     name: loose_consensus_large_trx
-     optional: .*
-     restart: true
-     unit: STRING
-   - defaultValue: '536870912'
-     divisibilityFactor: 1
-     mode: readwrite
-     name: loose_consensus_log_cache_size
-     optional: .*
-     restart: true
-     unit: INT
-   - defaultValue: '10000'
-     divisibilityFactor: 1
-     mode: readwrite
-     name: loose_consensus_max_delay_index
-     optional: .*
-     restart: true
-     unit: INT
-   - defaultValue: '20971520'
-     divisibilityFactor: 1
-     mode: readwrite
-     name: loose_consensus_max_log_size
-     optional: .*
-     restart: true
-     unit: INT
-   - defaultValue: '131072'
-     divisibilityFactor: 1
-     mode: readwrite
-     name: loose_consensus_max_packet_size
-     optional: .*
-     restart: true
-     unit: INT
-   - defaultValue: '268435456'
-     divisibilityFactor: 1
-     mode: readwrite
-     name: loose_consensus_prefetch_cache_size
-     optional: .*
-     restart: true
-     unit: INT
-   - defaultValue: '8'
-     divisibilityFactor: 1
-     mode: readwrite
-     name: loose_consensus_worker_thread_cnt
-     optional: .*
-     restart: true
-     unit: INT
-   - defaultValue: '1'
-     divisibilityFactor: 1
-     mode: readwrite
-     name: loose_implicit_primary_key
-     optional: '[0-1]'
-     restart: false
-     unit: INT
-   - defaultValue: '86400'
-     divisibilityFactor: 1
-     mode: readwrite
-     name: loose_information_schema_stats_expiry
-     optional: '[0-31536000]'
-     restart: false
-     unit: INT
-   - defaultValue: 'OFF'
-     divisibilityFactor: 0
-     mode: readwrite
-     name: loose_innodb_buffer_pool_in_core_file
-     optional: '[OFF|ON]'
-     restart: false
-     unit: STRING
-   - defaultValue: '64'
-     divisibilityFactor: 1
-     mode: readwrite
-     name: loose_innodb_doublewrite_pages
-     optional: '[0-512]'
-     restart: true
-     unit: INT
-   - defaultValue: 'OFF'
-     divisibilityFactor: 0
-     mode: readwrite
-     name: loose_innodb_lizard_stat_enabled
-     optional: '[ON|OFF]'
-     restart: true
-     unit: STRING
-   - defaultValue: 'OFF'
-     divisibilityFactor: 0
-     mode: readwrite
-     name: loose_innodb_log_compressed_pages
-     optional: '[ON|OFF]'
-     restart: false
-     unit: STRING
-   - defaultValue: 'OFF'
-     divisibilityFactor: 0
-     mode: readwrite
-     name: loose_innodb_log_optimize_ddl
-     optional: '[ON|OFF]'
-     restart: false
-     unit: STRING
-   - defaultValue: '4096'
-     divisibilityFactor: 1
-     mode: readwrite
-     name: loose_innodb_log_write_ahead_size
-     optional: '[512-16384]'
-     restart: false
-     unit: INT
-   - defaultValue: 'ON'
-     divisibilityFactor: 0
-     mode: readwrite
-     name: loose_innodb_multi_blocks_enabled
-     optional: '[ON|OFF]'
-     restart: false
-     unit: STRING
-   - defaultValue: 'OFF'
-     divisibilityFactor: 0
-     mode: readwrite
-     name: loose_innodb_numa_interleave
-     optional: '[ON|OFF]'
-     restart: true
-     unit: STRING
-   - defaultValue: '1'
-     divisibilityFactor: 1
-     mode: readwrite
-     name: loose_innodb_parallel_read_threads
-     optional: '[0-256]'
-     restart: false
-     unit: INT
-   - defaultValue: '1800'
-     divisibilityFactor: 1
-     mode: readwrite
-     name: loose_innodb_undo_retention
-     optional: '[0-172800]'
-     restart: false
-     unit: INT
-   - defaultValue: '1024'
-     divisibilityFactor: 1
-     mode: readwrite
-     name: loose_innodb_undo_space_reserved_size
-     optional: '[0-20480]'
-     restart: false
-     unit: INT
-   - defaultValue: '102400'
-     divisibilityFactor: 1
-     mode: readwrite
-     name: loose_innodb_undo_space_supremum_size
-     optional: '[0-524288]'
-     restart: false
-     unit: INT
-   - defaultValue: TempTable
-     divisibilityFactor: 0
-     mode: readwrite
-     name: loose_internal_tmp_mem_storage_engine
-     optional: '[TempTable|MEMORY]'
-     restart: false
-     unit: STRING
-   - defaultValue: index_merge=on,index_merge_union=on,index_merge_sort_union=on,index_merge_intersection=on,engine_condition_pushdown=on,index_condition_pushdown=on,mrr=on,mrr_cost_based=on,block_nested_loop=on,batched_key_access=off,materialization=on,semijoin=on,loosescan=on,firstmatch=on,subquery_materialization_cost_based=on,use_index_extensions=on
-     divisibilityFactor: 0
-     mode: readwrite
-     name: loose_optimizer_switch
-     optional: .*
-     restart: false
-     unit: STRING
-   - defaultValue: enabled=off,one_line=off
-     divisibilityFactor: 0
-     mode: readwrite
-     name: loose_optimizer_trace
-     optional: .*
-     restart: false
-     unit: STRING
-   - defaultValue: greedy_search=on,range_optimizer=on,dynamic_range=on,repeated_subselect=on
-     divisibilityFactor: 0
-     mode: readwrite
-     name: loose_optimizer_trace_features
-     optional: .*
-     restart: false
-     unit: STRING
-   - defaultValue: 'ON'
-     divisibilityFactor: 0
-     mode: readwrite
-     name: loose_performance_point_lock_rwlock_enabled
-     optional: '[ON|OFF]'
-     restart: false
-     unit: STRING
-   - defaultValue: '10000'
-     divisibilityFactor: 1
-     mode: readwrite
-     name: loose_performance_schema_accounts_size
-     optional: '[-1-1048576]'
-     restart: true
-     unit: INT
-   - defaultValue: 'OFF'
-     divisibilityFactor: 0
-     mode: readwrite
-     name: loose_performance_schema_consumer_events_stages_current
-     optional: '[OFF|ON]'
-     restart: false
-     unit: STRING
-   - defaultValue: 'OFF'
-     divisibilityFactor: 0
-     mode: readwrite
-     name: loose_performance_schema_consumer_events_stages_history
-     optional: '[OFF|ON]'
-     restart: false
-     unit: STRING
-   - defaultValue: 'OFF'
-     divisibilityFactor: 0
-     mode: readwrite
-     name: loose_performance_schema_consumer_events_stages_history_long
-     optional: '[OFF|ON]'
-     restart: false
-     unit: STRING
-   - defaultValue: 'OFF'
-     divisibilityFactor: 0
-     mode: readwrite
-     name: loose_performance_schema_consumer_events_statements_current
-     optional: '[OFF|ON]'
-     restart: false
-     unit: STRING
-   - defaultValue: 'OFF'
-     divisibilityFactor: 0
-     mode: readwrite
-     name: loose_performance_schema_consumer_events_statements_history
-     optional: '[OFF|ON]'
-     restart: false
-     unit: STRING
-   - defaultValue: 'OFF'
-     divisibilityFactor: 0
-     mode: readwrite
-     name: loose_performance_schema_consumer_events_statements_history_long
-     optional: '[OFF|ON]'
-     restart: false
-     unit: STRING
-   - defaultValue: 'OFF'
-     divisibilityFactor: 0
-     mode: readwrite
-     name: loose_performance_schema_consumer_events_transactions_current
-     optional: '[OFF|ON]'
-     restart: false
-     unit: STRING
-   - defaultValue: 'OFF'
-     divisibilityFactor: 0
-     mode: readwrite
-     name: loose_performance_schema_consumer_events_transactions_history
-     optional: '[OFF|ON]'
-     restart: false
-     unit: STRING
-   - defaultValue: 'OFF'
-     divisibilityFactor: 0
-     mode: readwrite
-     name: loose_performance_schema_consumer_events_transactions_history_long
-     optional: '[OFF|ON]'
-     restart: false
-     unit: STRING
-   - defaultValue: 'OFF'
-     divisibilityFactor: 0
-     mode: readwrite
-     name: loose_performance_schema_consumer_events_waits_current
-     optional: '[OFF|ON]'
-     restart: false
-     unit: STRING
-   - defaultValue: 'OFF'
-     divisibilityFactor: 0
-     mode: readwrite
-     name: loose_performance_schema_consumer_events_waits_history
-     optional: '[OFF|ON]'
-     restart: false
-     unit: STRING
-   - defaultValue: 'OFF'
-     divisibilityFactor: 0
-     mode: readwrite
-     name: loose_performance_schema_consumer_events_waits_history_long
-     optional: '[OFF|ON]'
-     restart: false
-     unit: STRING
-   - defaultValue: 'OFF'
-     divisibilityFactor: 0
-     mode: readwrite
-     name: loose_performance_schema_consumer_global_instrumentation
-     optional: '[OFF|ON]'
-     restart: false
-     unit: STRING
-   - defaultValue: 'OFF'
-     divisibilityFactor: 0
-     mode: readwrite
-     name: loose_performance_schema_consumer_statements_digest
-     optional: '[OFF|ON]'
-     restart: false
-     unit: STRING
-   - defaultValue: 'OFF'
-     divisibilityFactor: 0
-     mode: readwrite
-     name: loose_performance_schema_consumer_thread_instrumentation
-     optional: '[OFF|ON]'
-     restart: false
-     unit: STRING
-   - defaultValue: '10000'
-     divisibilityFactor: 1
-     mode: readwrite
-     name: loose_performance_schema_digests_size
-     optional: '[-1-1048576]'
-     restart: true
-     unit: INT
-   - defaultValue: '0'
-     divisibilityFactor: 1
-     mode: readwrite
-     name: loose_performance_schema_error_size
-     optional: '[0-1048576]'
-     restart: true
-     unit: INT
-   - defaultValue: '0'
-     divisibilityFactor: 1
-     mode: readwrite
-     name: loose_performance_schema_events_stages_history_long_size
-     optional: '[-1-1048576]'
-     restart: true
-     unit: INT
-   - defaultValue: '0'
-     divisibilityFactor: 1
-     mode: readwrite
-     name: loose_performance_schema_events_stages_history_size
-     optional: '[-1-1024]'
-     restart: true
-     unit: INT
-   - defaultValue: '0'
-     divisibilityFactor: 1
-     mode: readwrite
-     name: loose_performance_schema_events_statements_history_long_size
-     optional: '[-1-1048576]'
-     restart: true
-     unit: INT
-   - defaultValue: '0'
-     divisibilityFactor: 1
-     mode: readwrite
-     name: loose_performance_schema_events_statements_history_size
-     optional: '[-1-1024]'
-     restart: true
-     unit: INT
-   - defaultValue: '0'
-     divisibilityFactor: 1
-     mode: readwrite
-     name: loose_performance_schema_events_transactions_history_long_size
-     optional: '[-1-1048576]'
-     restart: true
-     unit: INT
-   - defaultValue: '0'
-     divisibilityFactor: 1
-     mode: readwrite
-     name: loose_performance_schema_events_transactions_history_size
-     optional: '[-1-1024]'
-     restart: true
-     unit: INT
-   - defaultValue: '0'
-     divisibilityFactor: 1
-     mode: readwrite
-     name: loose_performance_schema_events_waits_history_long_size
-     optional: '[-1-1048576]'
-     restart: true
-     unit: INT
-   - defaultValue: '0'
-     divisibilityFactor: 1
-     mode: readwrite
-     name: loose_performance_schema_events_waits_history_size
-     optional: '[-1-1024]'
-     restart: true
-     unit: INT
-   - defaultValue: '10000'
-     divisibilityFactor: 1
-     mode: readwrite
-     name: loose_performance_schema_hosts_size
-     optional: '[-1-1048576]'
-     restart: true
-     unit: INT
-   - defaultValue: "'%%%%=OFF'"
-     divisibilityFactor: 0
-     mode: readwrite
-     name: performance_schema_instrument
-     optional: .*
-     restart: true
-     unit: STRING
-   - defaultValue: '''wait/lock/metadata/sql/mdl=ON'''
-     divisibilityFactor: 0
-     mode: readwrite
-     name: loose_performance_schema_instrument
-     optional: .*
-     restart: true
-     unit: STRING
-   - defaultValue: '0'
-     divisibilityFactor: 1
-     mode: readwrite
-     name: loose_performance_schema_max_cond_classes
-     optional: '[0-256]'
-     restart: true
-     unit: INT
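One quoting detail in the instrument entries just above: a value written '''wait/lock/metadata/sql/mdl=ON''' uses YAML single-quote escaping, where a doubled '' inside a single-quoted scalar denotes one literal quote, so the string handed to MySQL keeps its surrounding quotes. A small Go sketch of that unescaping rule (the helper name is hypothetical; this is not the chart's renderer):

    package main

    import (
        "fmt"
        "strings"
    )

    // unquoteYAMLSingle undoes YAML single-quote escaping: drop the outer
    // quotes, then collapse each doubled '' into a literal '.
    func unquoteYAMLSingle(s string) string {
        s = strings.TrimPrefix(s, "'")
        s = strings.TrimSuffix(s, "'")
        return strings.ReplaceAll(s, "''", "'")
    }

    func main() {
        raw := "'''wait/lock/metadata/sql/mdl=ON'''" // as written in the template
        fmt.Println(unquoteYAMLSingle(raw))          // 'wait/lock/metadata/sql/mdl=ON'
    }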
-   - defaultValue: '10000'
-     divisibilityFactor: 1
-     mode: readwrite
-     name: loose_performance_schema_max_cond_instances
-     optional: '[-1-1048576]'
-     restart: true
-     unit: INT
-   - defaultValue: '0'
-     divisibilityFactor: 1
-     mode: readwrite
-     name: loose_performance_schema_max_digest_length
-     optional: '[0-1048576]'
-     restart: true
-     unit: INT
-   - defaultValue: '0'
-     divisibilityFactor: 1
-     mode: readwrite
-     name: loose_performance_schema_max_digest_sample_age
-     optional: '[0-1048576]'
-     restart: true
-     unit: INT
-   - defaultValue: '0'
-     divisibilityFactor: 1
-     mode: readwrite
-     name: loose_performance_schema_max_file_classes
-     optional: '[0-256]'
-     restart: true
-     unit: INT
-   - defaultValue: '0'
-     divisibilityFactor: 1
-     mode: readwrite
-     name: loose_performance_schema_max_file_handles
-     optional: '[-1-32768]'
-     restart: true
-     unit: INT
-   - defaultValue: '1000'
-     divisibilityFactor: 1
-     mode: readwrite
-     name: loose_performance_schema_max_file_instances
-     optional: '[-1-1048576]'
-     restart: true
-     unit: INT
-   - defaultValue: '10000'
-     divisibilityFactor: 1
-     mode: readwrite
-     name: loose_performance_schema_max_index_stat
-     optional: '[-1-1048576]'
-     restart: true
-     unit: INT
-   - defaultValue: '0'
-     divisibilityFactor: 1
-     mode: readwrite
-     name: loose_performance_schema_max_memory_classes
-     optional: '[0-1024]'
-     restart: true
-     unit: INT
-   - defaultValue: '10000'
-     divisibilityFactor: 1
-     mode: readwrite
-     name: loose_performance_schema_max_metadata_locks
-     optional: '[-1-1048576]'
-     restart: true
-     unit: INT
-   - defaultValue: '0'
-     divisibilityFactor: 1
-     mode: readwrite
-     name: loose_performance_schema_max_mutex_classes
-     optional: '[0-256]'
-     restart: true
-     unit: INT
-   - defaultValue: '10000'
-     divisibilityFactor: 1
-     mode: readwrite
-     name: loose_performance_schema_max_mutex_instances
-     optional: '[-1-104857600]'
-     restart: true
-     unit: INT
-   - defaultValue: '1000'
-     divisibilityFactor: 1
-     mode: readwrite
-     name: loose_performance_schema_max_prepared_statements_instances
-     optional: '[-1-4194304]'
-     restart: true
-     unit: INT
-   - defaultValue: '10000'
-     divisibilityFactor: 1
-     mode: readwrite
-     name: loose_performance_schema_max_program_instances
-     optional: '[-1-1048576]'
-     restart: true
-     unit: INT
-   - defaultValue: '0'
-     divisibilityFactor: 1
-     mode: readwrite
-     name: loose_performance_schema_max_rwlock_classes
-     optional: '[0-256]'
-     restart: true
-     unit: INT
-   - defaultValue: '10000'
-     divisibilityFactor: 1
-     mode: readwrite
-     name: loose_performance_schema_max_rwlock_instances
-     optional: '[-1-104857600]'
-     restart: true
-     unit: INT
-   - defaultValue: '0'
-     divisibilityFactor: 1
-     mode: readwrite
-     name: loose_performance_schema_max_socket_classes
-     optional: '[0-256]'
-     restart: true
-     unit: INT
-   - defaultValue: '1000'
-     divisibilityFactor: 1
-     mode: readwrite
-     name: loose_performance_schema_max_socket_instances
-     optional: '[-1-1048576]'
-     restart: true
-     unit: INT
-   - defaultValue: '0'
-     divisibilityFactor: 1
-     mode: readwrite
-     name: loose_performance_schema_max_sql_text_length
-     optional: '[0-1048576]'
-     restart: true
-     unit: INT
-   - defaultValue: '0'
-     divisibilityFactor: 1
-     mode: readwrite
-     name: loose_performance_schema_max_stage_classes
-     optional: '[0-256]'
-     restart: true
-     unit: INT
-   - defaultValue: '0'
-     divisibilityFactor: 1
-     mode: readwrite
-     name: loose_performance_schema_max_statement_classes
-     optional: '[0-256]'
-     restart: true
-     unit: INT
-   - defaultValue: '1'
-     divisibilityFactor: 1
-     mode: readwrite
-     name: loose_performance_schema_max_statement_stack
-     optional: '[0-256]'
-     restart: true
-     unit: INT
-   - defaultValue: '10000'
-     divisibilityFactor: 1
-     mode: readwrite
-     name: loose_performance_schema_max_table_handles
-     optional: '[-1-1048576]'
-     restart: true
-     unit: INT
-   - defaultValue: '1000'
-     divisibilityFactor: 1
-     mode: readwrite
-     name: loose_performance_schema_max_table_instances
-     optional: '[-1-1048576]'
-     restart: true
-     unit: INT
-   - defaultValue: '10000'
-     divisibilityFactor: 1
-     mode: readwrite
-     name: loose_performance_schema_max_table_lock_stat
-     optional: '[-1-1048576]'
-     restart: true
-     unit: INT
-   - defaultValue: '0'
-     divisibilityFactor: 1
-     mode: readwrite
-     name: loose_performance_schema_max_thread_classes
-     optional: '[0-256]'
-     restart: true
-     unit: INT
-   - defaultValue: '10000'
-     divisibilityFactor: 1
-     mode: readwrite
-     name: loose_performance_schema_max_thread_instances
-     optional: '[-1-1048576]'
-     restart: true
-     unit: INT
-   - defaultValue: '0'
-     divisibilityFactor: 1
-     mode: readwrite
-     name: loose_performance_schema_session_connect_attrs_size
-     optional: '[-1-1048576]'
-     restart: true
-     unit: INT
-   - defaultValue: '10000'
-     divisibilityFactor: 1
-     mode: readwrite
-     name: loose_performance_schema_setup_actors_size
-     optional: '[-1-1048576]'
-     restart: true
-     unit: INT
-   - defaultValue: '10000'
-     divisibilityFactor: 1
-     mode: readwrite
-     name: loose_performance_schema_setup_objects_size
-     optional: '[-1-1048576]'
-     restart: true
-     unit: INT
-   - defaultValue: '10000'
-     divisibilityFactor: 1
-     mode: readwrite
-     name: loose_performance_schema_users_size
-     optional: '[-1-1048576]'
-     restart: true
-     unit: INT
-   - defaultValue: 'OFF'
-     divisibilityFactor: 1
-     mode: readwrite
-     name: loose_persist_binlog_to_redo
-     optional: '[ON|OFF]'
-     restart: false
-     unit: STRING
-   - defaultValue: '1048576'
-     divisibilityFactor: 1
-     mode: readwrite
-     name: loose_persist_binlog_to_redo_size_limit
-     optional: '[0-10485760]'
-     restart: false
-     unit: STRING
-   - defaultValue: '16777216'
-     divisibilityFactor: 1
-     mode: readwrite
-     name: loose_rds_audit_log_buffer_size
-     optional: '[16777216-104857600]'
-     restart: false
-     unit: INT
-   - defaultValue: 'OFF'
-     divisibilityFactor: 0
-     mode: readwrite
-     name: loose_rds_audit_log_enabled
-     optional: '[ON|OFF]'
-     restart: false
-     unit: STRING
-   - defaultValue: '8192'
-     divisibilityFactor: 1
-     mode: readwrite
-     name: loose_rds_audit_log_event_buffer_size
-     optional: '[0-32768]'
-     restart: false
-     unit: INT
-   - defaultValue: '100000'
-     divisibilityFactor: 1
-     mode: readwrite
-     name: loose_rds_audit_log_row_limit
-     optional: '[0-100000000]'
-     restart: false
-     unit: INT
-   - defaultValue: MYSQL_V1
-     divisibilityFactor: 0
-     mode: readwrite
-     name: loose_rds_audit_log_version
-     optional: '[MYSQL_V1|MYSQL_V3]'
-     restart: false
-     unit: STRING
-   - defaultValue: 'OFF'
-     divisibilityFactor: 0
-     mode: readwrite
-     name: loose_recovery_apply_binlog
-     optional: '[ON|OFF]'
-     restart: true
-     unit: STRING
-   - defaultValue: '3000'
-     divisibilityFactor: 1
-     mode: readwrite
-     name: loose_replica_read_timeout
-     optional: '[0-2147483647]'
-     restart: true
-     unit: INT
-   - defaultValue: '"*"'
-     divisibilityFactor: 0
-     mode: readwrite
-     name: loose_session_track_system_variables
-     optional: .*
-     restart: false
-     unit: STRING
-   - defaultValue: 'OFF'
-     divisibilityFactor: 0
-     mode: readwrite
-     name: loose_session_track_transaction_info
-     optional: '[STATE|CHARACTERISTICS|OFF]'
-     restart: false
-     unit: STRING
-   - defaultValue: '32'
-     divisibilityFactor: 1
-     mode: readwrite
-     name: loose_slave_parallel_workers
-     optional: '[0-1024]'
-     restart: false
-     unit: INT
-   - defaultValue: '0'
-     divisibilityFactor: 0
-     mode: readwrite
-     name: low_priority_updates
-     optional: '[0|1]'
-     restart: false
-     unit: STRING
-   - defaultValue: '1'
-     divisibilityFactor: 0
-     mode: readwrite
-     name: lower_case_table_names
-     optional: '[0|1]'
-     restart: true
-     unit: STRING
-   - defaultValue: TABLE
-     divisibilityFactor: 0
-     mode: readwrite
-     name: master_info_repository
-     optional: '[TABLE|FILE]'
-     restart: false
-     unit: STRING
-   - defaultValue: 'OFF'
-     divisibilityFactor: 0
-     mode: readwrite
-     name: master_verify_checksum
-     optional: '[ON|OFF]'
-     restart: false
-     unit: STRING
-   - defaultValue: '1073741824'
-     divisibilityFactor: 1
-     mode: readwrite
-     name: max_allowed_packet
-     optional: '[16384-1073741824]'
-     restart: false
-     unit: INT
-   - defaultValue: '18446744073709551615'
-     divisibilityFactor: 1
-     mode: readwrite
-     name: max_binlog_cache_size
-     optional: '[4096-18446744073709547520]'
-     restart: false
-     unit: INT
-   - defaultValue: '18446744073709551615'
-     divisibilityFactor: 4096
-     mode: readwrite
-     name: max_binlog_stmt_cache_size
-     optional: '[4096-18446744073709547520]'
-     restart: false
-     unit: INT
-   - defaultValue: '65536'
-     divisibilityFactor: 1
-     mode: readwrite
-     name: max_connect_errors
-     optional: '[0-4294967295]'
-     restart: false
-     unit: INT
-   - defaultValue: '5532'
-     divisibilityFactor: 1
-     mode: readwrite
-     name: max_connections
-     optional: .*
-     restart: true
-     unit: INT
-   - defaultValue: '1024'
-     divisibilityFactor: 1
-     mode: readwrite
-     name: max_error_count
-     optional: '[0-65535]'
-     restart: false
-     unit: INT
-   - defaultValue: '0'
-     divisibilityFactor: 1
-     mode: readwrite
-     name: max_execution_time
-     optional: '[0-4294967295]'
-     restart: false
-     unit: INT
-   - defaultValue: '67108864'
-     divisibilityFactor: 1024
-     mode: readwrite
-     name: max_heap_table_size
-     optional: '[16384-1844674407370954752]'
-     restart: false
-     unit: INT
-   - defaultValue: '18446744073709551615'
-     divisibilityFactor: 1
-     mode: readwrite
-     name: max_join_size
-     optional: '[1-18446744073709551615]'
-     restart: false
-     unit: INT
-   - defaultValue: '4096'
-     divisibilityFactor: 1
-     mode: readwrite
-     name: max_length_for_sort_data
-     optional: '[0-838860]'
-     restart: false
-     unit: INT
-   - defaultValue: '65536'
-     divisibilityFactor: 1
-     mode: readwrite
-     name: max_points_in_geometry
-     optional: '[3-1048576]'
-     restart: false
-     unit: INT
-   - defaultValue: '16382'
-     divisibilityFactor: 1
-     mode: readwrite
-     name: max_prepared_stmt_count
-     optional: '[0-1048576]'
-     restart: false
-     unit: INT
-   - defaultValue: '18446744073709551615'
-     divisibilityFactor: 1
-     mode: readwrite
-     name: max_seeks_for_key
-     optional: '[1-18446744073709551615]'
-     restart: false
-     unit: INT
-   - defaultValue: '1024'
-     divisibilityFactor: 1
-     mode: readwrite
-     name: max_sort_length
-     optional: '[4-8388608]'
-     restart: false
-     unit: INT
-   - defaultValue: '0'
-     divisibilityFactor: 1
-     mode: readwrite
-     name: max_sp_recursion_depth
-     optional: '[0-255]'
-     restart: false
-     unit: INT
-   - defaultValue: '5000'
-     divisibilityFactor: 1
-     mode: readwrite
-     name: max_user_connections
-     optional: .*
-     restart: true
-     unit: INT
-   - defaultValue: '102400'
-     divisibilityFactor: 1
-     mode: readwrite
-     name: max_write_lock_count
-     optional: '[1-102400]'
-     restart: false
-     unit: INT
-   - defaultValue: '0'
-     divisibilityFactor: 1
-     mode: readwrite
-     name: min_examined_row_limit
-     optional: '[0-4294967295]'
-     restart: false
-     unit: INT
-   - defaultValue: '262144'
-     divisibilityFactor: 1
-     mode: readwrite
-     name: myisam_sort_buffer_size
-     optional: '[262144-16777216]'
-     restart: false
-     unit: INT
-   - defaultValue: 'OFF'
-     divisibilityFactor: 0
-     mode: readwrite
-     name: mysql_native_password_proxy_users
-     optional: '[ON|OFF]'
-     restart: false
-     unit: STRING
-   - defaultValue: '16384'
-     divisibilityFactor: 1024
-     mode: readwrite
-     name: net_buffer_length
-     optional: '[1024-1048576]'
-     restart: false
-     unit: INT
-   - defaultValue: '30'
-     divisibilityFactor: 1
-     mode: readwrite
-     name: net_read_timeout
-     optional: '[1-18446744073709551615]'
-     restart: false
-     unit: INT
-   - defaultValue: '10'
-     divisibilityFactor: 1
-     mode: readwrite
-     name: net_retry_count
-     optional: '[1-4294967295]'
-     restart: false
-     unit: INT
-   - defaultValue: '60'
-     divisibilityFactor: 1
-     mode: readwrite
-     name: net_write_timeout
-     optional: '[1-18446744073709551615]'
-     restart: false
-     unit: INT
-   - defaultValue: '2'
-     divisibilityFactor: 1
-     mode: readwrite
-     name: ngram_token_size
-     optional: '[0-20]'
-     restart: true
-     unit: int
-   - defaultValue: '65535'
-     divisibilityFactor: 1
-     mode: readwrite
-     name: open_files_limit
-     optional: '[1-2147483647]'
-     restart: true
-     unit: INT
-   - defaultValue: 'ON'
-     divisibilityFactor: 0
-     mode: readwrite
-     name: opt_indexstat
-     optional: '[ON|OFF]'
-     restart: true
-     unit: STRING
-   - defaultValue: 'ON'
-     divisibilityFactor: 0
-     mode: readwrite
-     name: opt_tablestat
-     optional: '[ON|OFF]'
-     restart: true
-     unit: STRING
-   - defaultValue: '1'
-     divisibilityFactor: 0
-     mode: readwrite
-     name: optimizer_prune_level
-     optional: '[0|1]'
-     restart: false
-     unit: STRING
-   - defaultValue: '62'
-     divisibilityFactor: 1
-     mode: readwrite
-     name: optimizer_search_depth
-     optional: '[0-62]'
-     restart: false
-     unit: INT
-   - defaultValue: '1'
-     divisibilityFactor: 1
-     mode: readwrite
-     name: optimizer_trace_limit
-     optional: '[0-4294967295]'
-     restart: false
-     unit: INT
-   - defaultValue: '1048576'
-     divisibilityFactor: 1
-     mode: readwrite
-     name: optimizer_trace_max_mem_size
-     optional: '[0-4294967295]'
-     restart: false
-     unit: INT
-   - defaultValue: '-1'
-     divisibilityFactor: 1
-     mode: readwrite
-     name: optimizer_trace_offset
-     optional: '[-2147483648-2147483647]'
-     restart: false
-     unit: INT
-   - defaultValue: 'ON'
-     divisibilityFactor: 0
-     mode: readwrite
-     name: performance_schema
-     optional: '[ON|OFF]'
-     restart: true
-     unit: STRING
-   - defaultValue: '32768'
-     divisibilityFactor: 1
-     mode: readwrite
-     name: preload_buffer_size
-     optional: '[1024-1073741824]'
-     restart: false
-     unit: INT
-   - defaultValue: '8192'
-     divisibilityFactor: 1024
-     mode: readwrite
-     name: query_alloc_block_size
-     optional: '[1024-16384]'
-     restart: false
-     unit: INT
-   - defaultValue: '8192'
-     divisibilityFactor: 1024
-     mode: readwrite
-     name: query_prealloc_size
-     optional: '[8192-1048576]'
-     restart: false
-     unit: INT
-   - defaultValue: '4096'
-     divisibilityFactor: 1
-     mode: readwrite
-     name: range_alloc_block_size
-     optional: '[4096-18446744073709551615]'
-     restart: false
-     unit: INT
-   - defaultValue: '8388608'
-     divisibilityFactor: 1
-     mode: readwrite
-     name: range_optimizer_max_mem_size
-     optional: '[0-18446744073709551615]'
-     restart: false
-     unit: INT
-   - defaultValue: '{LEAST(DBInstanceClassMemory/1048576*128, 262144)}'
-     divisibilityFactor: 1
-     mode: readwrite
-     name: read_buffer_size
-     optional: '[8200-2147479552]'
-     restart: false
-     unit: INT
-   - defaultValue: '442368'
-     divisibilityFactor: 1
-     mode: readwrite
-     name: read_rnd_buffer_size
-     optional: .*
-     restart: true
-     unit: INT
-   - defaultValue: TABLE
-     divisibilityFactor: 0
-     mode: readwrite
-     name: relay_log_info_repository
-     optional: '[TABLE|FILE]'
-     restart: false
-     unit: STRING
-   - defaultValue: 'OFF'
-     divisibilityFactor: 0
-     mode: readwrite
-     name: relay_log_purge
-     optional: .*
-     restart: true
-     unit: STRING
-   - defaultValue: 'OFF'
-     divisibilityFactor: 0
-     mode: readwrite
-     name: relay_log_recovery
-     optional: '[ON|OFF]'
-     restart: true
-     unit: STRING
-   - defaultValue: 'OFF'
-     divisibilityFactor: 0
-     mode: readwrite
-     name: replicate_same_server_id
-     optional: .*
-     restart: true
-     unit: STRING
-   - defaultValue: ''
-     divisibilityFactor: 0
-     mode: readwrite
-     name: rotate_log_table_last_name
-     optional: .*
-     restart: true
-     unit: STRING
-   - defaultValue: 'OFF'
-     divisibilityFactor: 0
-     mode: readwrite
-     name: session_track_gtids
-     optional: '[OFF|OWN_GTID|ALL_GTIDS]'
-     restart: false
-     unit: STRING
-   - defaultValue: 'ON'
-     divisibilityFactor: 0
-     mode: readwrite
-     name: session_track_schema
-     optional: '[ON|OFF]'
-     restart: false
-     unit: STRING
-   - defaultValue: 'OFF'
-     divisibilityFactor: 0
-     mode: readwrite
-     name: session_track_state_change
-     optional: '[ON|OFF]'
-     restart: false
-     unit: STRING
-   - defaultValue: 'OFF'
-     divisibilityFactor: 0
-     mode: readwrite
-     name: sha256_password_proxy_users
-     optional: '[ON|OFF]'
-     restart: false
-     unit: STRING
-   - defaultValue: 'OFF'
-     divisibilityFactor: 0
-     mode: readwrite
-     name: show_old_temporals
-     optional: '[ON|OFF]'
-     restart: false
-     unit: STRING
-   - defaultValue: 'OFF'
-     divisibilityFactor: 0
-     mode: readwrite
-     name: skip_slave_start
-     optional: .*
-     restart: true
-     unit: STRING
-   - defaultValue: 'ON'
-     divisibilityFactor: 0
-     mode: readwrite
-     name: skip_ssl
-     optional: .*
-     restart: true
-     unit: STRING
-   - defaultValue: strict
-     divisibilityFactor: 0
-     mode: readwrite
-     name: slave_exec_mode
-     optional: strict
-     restart: false
-     unit: STRING
-   - defaultValue: '4'
-     divisibilityFactor: 1
-     mode: readwrite
-     name: slave_net_timeout
-     optional: '[15-300]'
-     restart: false
-     unit: INT
-   - defaultValue: LOGICAL_CLOCK
-     divisibilityFactor: 0
-     mode: readwrite
-     name: slave_parallel_type
-     optional: '[DATABASE|LOGICAL_CLOCK]'
-     restart: true
-     unit: STRING
-   - defaultValue: '1073741824'
-     divisibilityFactor: 1
-     mode: readwrite
-     name: slave_pending_jobs_size_max
-     optional: .*
-     restart: true
-     unit: INT
-   - defaultValue: 'OFF'
-     divisibilityFactor: 0
-     mode: readwrite
-     name: slave_sql_verify_checksum
-     optional: .*
-     restart: true
-     unit: STRING
-   - defaultValue: ''
-     divisibilityFactor: 0
-     mode: readwrite
-     name: slave_type_conversions
-     optional: '[s*|ALL_LOSSY|ALL_NON_LOSSY|ALL_SIGNED|ALL_UNSIGNED]'
-     restart: true
-     unit: STRING
-   - defaultValue: '2'
-     divisibilityFactor: 1
-     mode: readwrite
-     name: slow_launch_time
-     optional: '[1-1024]'
-     restart: false
-     unit: INT
-   - defaultValue: 'OFF'
-     divisibilityFactor: 0
-     mode: readwrite
-     name: slow_query_log
-     optional: '[ON|OFF]'
-     restart: false
-     unit: STRING
-   - defaultValue: '868352'
-     divisibilityFactor: 1
-     mode: readwrite
-     name: sort_buffer_size
-     optional: '[32768-4294967295]'
-     restart: false
-     unit: INT
-   - defaultValue: NO_ENGINE_SUBSTITUTION
-     divisibilityFactor: 0
-     mode: readwrite
-     name: sql_mode
-     optional: (s*|REAL_AS_FLOAT|PIPES_AS_CONCAT|ANSI_QUOTES|IGNORE_SPACE|ONLY_FULL_GROUP_BY|NO_UNSIGNED_SUBTRACTION|NO_DIR_IN_CREATE|ANSI|NO_AUTO_VALUE_ON_ZERO|NO_BACKSLASH_ESCAPES|STRICT_TRANS_TABLES|STRICT_ALL_TABLES|NO_ZERO_IN_DATE|NO_ZERO_DATE|ALLOW_INVALID_DATES|ERROR_FOR_DIVISION_BY_ZERO|TRADITIONAL|HIGH_NOT_PRECEDENCE|NO_ENGINE_SUBSTITUTION|PAD_CHAR_TO_FULL_LENGTH)(,REAL_AS_FLOAT|,PIPES_AS_CONCAT|,ANSI_QUOTES|,IGNORE_SPACE|,ONLY_FULL_GROUP_BY|,NO_UNSIGNED_SUBTRACTION|,NO_DIR_IN_CREATE|,ANSI|,NO_AUTO_VALUE_ON_ZERO|,NO_BACKSLASH_ESCAPES|,STRICT_TRANS_TABLES|,STRICT_ALL_TABLES|,NO_ZERO_IN_DATE|,NO_ZERO_DATE|,ALLOW_INVALID_DATES|,ERROR_FOR_DIVISION_BY_ZERO|,TRADITIONAL|,HIGH_NOT_PRECEDENCE|,NO_ENGINE_SUBSTITUTION|,PAD_CHAR_TO_FULL_LENGTH)*
-     restart: false
-     unit: STRING
-   - defaultValue: '256'
-     divisibilityFactor: 1
-     mode: readwrite
-     name: stored_program_cache
-     optional: '[16-524288]'
-     restart: false
-     unit: INT
-   - defaultValue: '1'
-     divisibilityFactor: 1
-     mode: readwrite
-     name: sync_binlog
-     optional: .*
-     restart: true
-     unit: INT
-   - defaultValue: '10000'
-     divisibilityFactor: 1
-     mode: readwrite
-     name: sync_master_info
-     optional: '[0-18446744073709551615]'
-     restart: false
-     unit: INT
-   - defaultValue: '1'
-     divisibilityFactor: 1
-     mode: readwrite
-     name: sync_relay_log
-     optional: .*
-     restart: true
-     unit: INT
-   - defaultValue: '10000'
-     divisibilityFactor: 1
-     mode: readwrite
-     name: sync_relay_log_info
-     optional: '[0-18446744073709551615]'
-     restart: false
-     unit: INT
-   - defaultValue: '{LEAST(DBInstanceClassMemory/1073741824*512, 2048)}'
-     divisibilityFactor: 1
-     mode: readwrite
-     name: table_definition_cache
-     optional: '[400-524288]'
-     restart: false
-     unit: INT
-   - defaultValue: '{LEAST(DBInstanceClassMemory/1073741824*512, 8192)}'
-     divisibilityFactor: 1
-     mode: readwrite
-     name: table_open_cache
-     optional: '[1-524288]'
-     restart: false
-     unit: INT
-   - defaultValue: '16'
-     divisibilityFactor: 1
-     mode: readwrite
-     name: table_open_cache_instances
-     optional: '[1-64]'
-     restart: true
-     unit: INT
-   - defaultValue: '1073741824'
-     divisibilityFactor: 1
-     mode: readwrite
-     name: temptable_max_ram
-     optional: '[2097152-107374182400]'
-     restart: false
-     unit: INT
-   - defaultValue: '100'
-     divisibilityFactor: 1
-     mode: readwrite
-     name: thread_cache_size
-     optional: '[0-16384]'
-     restart: false
-     unit: INT
-   - defaultValue: '262144'
-     divisibilityFactor: 1024
-     mode: readwrite
-     name: thread_stack
-     optional: '[131072-2147483647]'
-     restart: true
-     unit: INT
-   - defaultValue: TLSv1,TLSv1.1,TLSv1.2
-     divisibilityFactor: 0
-     mode: readwrite
-     name: tls_version
-     optional: '[TLSv1,TLSv1.1,TLSv1.2|TLSv1,TLSv1.1|TLSv1.2]'
-     restart: true
-     unit: STRING
-   - defaultValue: '2097152'
-     divisibilityFactor: 1
-     mode: readwrite
-     name: tmp_table_size
-     optional: '[262144-134217728]'
-     restart: false
-     unit: INT
-   - defaultValue: '8192'
-     divisibilityFactor: 1024
-     mode: readwrite
-     name: transaction_alloc_block_size
-     optional: '[1024-131072]'
-     restart: false
-     unit: INT
-   - defaultValue: REPEATABLE-READ
-     divisibilityFactor: 0
-     mode: readwrite
-     name: transaction_isolation
-     optional: '[READ-UNCOMMITTED|READ-COMMITTED|REPEATABLE-READ|SERIALIZABLE]'
-     restart: false
-     unit: STRING
-   - defaultValue: '4096'
-     divisibilityFactor: 1024
-     mode: readwrite
-     name: transaction_prealloc_size
-     optional: '[1024-131072]'
-     restart: false
-     unit: INT
-   - defaultValue: XXHASH64
-     divisibilityFactor: 1
-     mode: readwrite
-     name: transaction_write_set_extraction
-     optional: '[OFF|MURMUR32|XXHASH64]'
-     restart: false
-     unit: STRING
-   - defaultValue: 'YES'
-     divisibilityFactor: 0
-     mode: readwrite
-     name: updatable_views_with_limit
-     optional: '[YES|NO]'
-     restart: false
-     unit: STRING
-   - defaultValue: '28800'
-     divisibilityFactor: 1
-     mode: readwrite
-     name: wait_timeout
-     optional: '[1-31536000]'
-     restart: false
-     unit: INT
+     restart: false
+     unit: STRING
+   - defaultValue: 'ON'
+     divisibilityFactor: 0
+     mode: readwrite
+     name: innodb_adaptive_flushing
+     optional: '[ON|OFF]'
+     restart: false
+     unit: STRING
+   - defaultValue: '10'
+     divisibilityFactor: 1
+     mode: readwrite
+     name: innodb_adaptive_flushing_lwm
+     optional: '[0-70]'
+     restart: false
+     unit: INT
+   - defaultValue: 'OFF'
+     divisibilityFactor: 0
+     mode: readwrite
+     name: innodb_adaptive_hash_index
+     optional: '[ON|OFF]'
+     restart: false
+     unit: STRING
+   - defaultValue: '150000'
+     divisibilityFactor: 1
+     mode: readwrite
+     name: innodb_adaptive_max_sleep_delay
+     optional: '[1-1000000]'
+     restart: false
+     unit: INT
+   - defaultValue: '64'
+     divisibilityFactor: 1
+     mode: readwrite
+     name: innodb_autoextend_increment
+     optional: '[1-1000]'
+     restart: false
+     unit: INT
+   - defaultValue: '2'
+     divisibilityFactor: 0
+     mode: readwrite
+     name: innodb_autoinc_lock_mode
+     optional: '[0|1|2]'
+     restart: true
+     unit: STRING
+   - defaultValue: '33554432'
+     divisibilityFactor: 1048576
+     mode: readwrite
+     name: innodb_buffer_pool_chunk_size
+     optional: '[1048576-9223372036854775807]'
+     restart: true
+     unit: INT
+   - defaultValue: 'ON'
+     divisibilityFactor: 0
+     mode: readwrite
+     name: innodb_buffer_pool_dump_at_shutdown
+     optional: '[ON|OFF]'
+     restart: false
+     unit: STRING
+   - defaultValue: '25'
+     divisibilityFactor: 1
+     mode: readwrite
+     name: innodb_buffer_pool_dump_pct
+     optional: '[1-100]'
+     restart: false
+     unit: INT
+   - defaultValue: '8'
+     divisibilityFactor: 1
+     mode: readwrite
+     name: innodb_buffer_pool_instances
+     optional: '[1-64]'
+     restart: true
+     unit: INT
+   - defaultValue: 'ON'
+     divisibilityFactor: 0
+     mode: readwrite
+     name: innodb_buffer_pool_load_at_startup
+     optional: '[ON|OFF]'
+     restart: true
+     unit: STRING
+   - defaultValue: '25'
+     divisibilityFactor: 1
+     mode: readwrite
+     name: innodb_change_buffer_max_size
+     optional: '[0-50]'
+     restart: false
+     unit: INT
+   - defaultValue: all
+     divisibilityFactor: 0
+     mode: readwrite
+     name: innodb_change_buffering
+     optional: '[none|inserts|deletes|changes|purges|all]'
+     restart: false
+     unit: STRING
+   - defaultValue: crc32
+     divisibilityFactor: 0
+     mode: readwrite
+     name: innodb_checksum_algorithm
+     optional: '[innodb|crc32|none|strict_innodb|strict_crc32|strict_none]'
+     restart: false
+     unit: STRING
+   - defaultValue: 'OFF'
+     divisibilityFactor: 0
+     mode: readwrite
+     name: innodb_cmp_per_index_enabled
+     optional: '[ON|OFF]'
+     restart: false
+     unit: STRING
+   - defaultValue: '9999999999'
+     divisibilityFactor: 1
+     mode: readwrite
+     name: loose_innodb_commit_cleanout_max_rows
+     optional: '[0-9223372036854775807]'
+     restart: true
+     unit: INT
+   - defaultValue: '0'
+     divisibilityFactor: 1
+     mode: readwrite
+     name: innodb_commit_concurrency
+     optional: '[0-1000]'
+     restart: true
+     unit: INT
+   - defaultValue: '5'
+     divisibilityFactor: 1
+     mode: readwrite
+     name: innodb_compression_failure_threshold_pct
+     optional: '[0-100]'
+     restart: false
+     unit: INT
+   - defaultValue: '6'
+     divisibilityFactor: 1
+     mode: readwrite
+     name: innodb_compression_level
+     optional: '[0-9]'
+     restart: false
+     unit: INT
+   - defaultValue: '50'
+     divisibilityFactor: 1
+     mode: readwrite
+     name: innodb_compression_pad_pct_max
+     optional: '[0-70]'
+     restart: false
+     unit: INT
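The optional pattern attached to sql_mode in the removed block above is an anchored alternation: one leading mode followed by any number of comma-prefixed modes, so well-formed comma-separated lists pass while empty list elements fail. (Its first alternative, s*, also admits the empty string, so an empty sql_mode is accepted; that quirk is transcribed from the template, not introduced here.) A trimmed-down Go check of the same shape, with most of the mode list elided:

    package main

    import (
        "fmt"
        "regexp"
    )

    func main() {
        // Abbreviated form of the sql_mode constraint: one leading mode, then
        // any number of ",MODE" repetitions (full alternation elided here).
        p := regexp.MustCompile(`^(s*|ONLY_FULL_GROUP_BY|STRICT_TRANS_TABLES|NO_ZERO_DATE|NO_ENGINE_SUBSTITUTION)` +
            `(,ONLY_FULL_GROUP_BY|,STRICT_TRANS_TABLES|,NO_ZERO_DATE|,NO_ENGINE_SUBSTITUTION)*$`)
        fmt.Println(p.MatchString("STRICT_TRANS_TABLES,NO_ENGINE_SUBSTITUTION")) // true
        fmt.Println(p.MatchString("STRICT_TRANS_TABLES,,NO_ZERO_DATE"))          // false: empty element
    }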
+   - defaultValue: '5000'
+     divisibilityFactor: 1
+     mode: readwrite
+     name: innodb_concurrency_tickets
+     optional: '[1-4294967295]'
+     restart: false
+     unit: INT
+   - defaultValue: 'ON'
+     divisibilityFactor: 0
+     mode: readwrite
+     name: innodb_data_file_purge
+     optional: '[ON|OFF]'
+     restart: false
+     unit: STRING
+   - defaultValue: '100'
+     divisibilityFactor: 1
+     mode: readwrite
+     name: innodb_data_file_purge_interval
+     optional: '[0-10000]'
+     restart: false
+     unit: INT
+   - defaultValue: '128'
+     divisibilityFactor: 1
+     mode: readwrite
+     name: innodb_data_file_purge_max_size
+     optional: '[16-1073741824]'
+     restart: false
+     unit: INT
+   - defaultValue: 'ON'
+     divisibilityFactor: 0
+     mode: readwrite
+     name: innodb_deadlock_detect
+     optional: '[ON|OFF]'
+     restart: false
+     unit: STRING
+   - defaultValue: 'ON'
+     divisibilityFactor: 0
+     mode: readwrite
+     name: innodb_disable_sort_file_cache
+     optional: '[ON|OFF]'
+     restart: false
+     unit: STRING
+   - defaultValue: '0'
+     divisibilityFactor: 1
+     mode: readwrite
+     name: innodb_equal_gcn_visible
+     optional: .*
+     restart: true
+     unit: INT
+   - defaultValue: '1'
+     divisibilityFactor: 0
+     mode: readwrite
+     name: innodb_flush_log_at_trx_commit
+     optional: '[0|1|2]'
+     restart: false
+     unit: STRING
+   - defaultValue: O_DIRECT
+     divisibilityFactor: 0
+     mode: readwrite
+     name: innodb_flush_method
+     optional: '[fsync|O_DSYNC|littlesync|nosync|O_DIRECT|O_DIRECT_NO_FSYNC]'
+     restart: true
+     unit: STRING
+   - defaultValue: '0'
+     divisibilityFactor: 0
+     mode: readwrite
+     name: innodb_flush_neighbors
+     optional: '[0|1|2]'
+     restart: false
+     unit: STRING
+   - defaultValue: 'ON'
+     divisibilityFactor: 0
+     mode: readwrite
+     name: innodb_flush_sync
+     optional: '[ON|OFF]'
+     restart: false
+     unit: STRING
+   - defaultValue: '8000000'
+     divisibilityFactor: 1
+     mode: readwrite
+     name: innodb_ft_cache_size
+     optional: '[1600000-80000000]'
+     restart: true
+     unit: INT
+   - defaultValue: 'OFF'
+     divisibilityFactor: 0
+     mode: readwrite
+     name: innodb_ft_enable_diag_print
+     optional: '[ON|OFF]'
+     restart: false
+     unit: STRING
+   - defaultValue: 'ON'
+     divisibilityFactor: 0
+     mode: readwrite
+     name: innodb_ft_enable_stopword
+     optional: '[ON|OFF]'
+     restart: false
+     unit: STRING
+   - defaultValue: '84'
+     divisibilityFactor: 1
+     mode: readwrite
+     name: innodb_ft_max_token_size
+     optional: '[10-84]'
+     restart: true
+     unit: INT
+   - defaultValue: '3'
+     divisibilityFactor: 1
+     mode: readwrite
+     name: innodb_ft_min_token_size
+     optional: '[0-16]'
+     restart: true
+     unit: INT
+   - defaultValue: '2000'
+     divisibilityFactor: 1
+     mode: readwrite
+     name: innodb_ft_num_word_optimize
+     optional: '[0-10000]'
+     restart: false
+     unit: INT
+   - defaultValue: '2000000000'
+     divisibilityFactor: 1
+     mode: readwrite
+     name: innodb_ft_result_cache_limit
+     optional: '[1000000-4294967295]'
+     restart: false
+     unit: INT
+   - defaultValue: '2'
+     divisibilityFactor: 1
+     mode: readwrite
+     name: innodb_ft_sort_pll_degree
+     optional: '[1-16]'
+     restart: true
+     unit: INT
+   - defaultValue: '640000000'
+     divisibilityFactor: 1
+     mode: readwrite
+     name: innodb_ft_total_cache_size
+     optional: '[32000000-1600000000]'
+     restart: true
+     unit: INT
+   - defaultValue: '20000'
+     divisibilityFactor: 1
+     mode: readwrite
+     name: innodb_io_capacity
+     optional: '[0-18446744073709551615]'
+     restart: false
+     unit: INT
+   - defaultValue: '40000'
+     divisibilityFactor: 1
+     mode: readwrite
+     name: innodb_io_capacity_max
+     optional: '[0-18446744073709551615]'
+     restart: false
+     unit: INT
+   - defaultValue: '50'
+     divisibilityFactor: 1
+     mode: readwrite
+     name: innodb_lock_wait_timeout
+     optional: '[1-1073741824]'
+     restart: false
+     unit: INT
+   - defaultValue: '209715200'
+     divisibilityFactor: 1
+     mode: readwrite
+     name: innodb_log_buffer_size
+     optional: .*
+     restart: true
+     unit: INT
+   - defaultValue: 'ON'
+     divisibilityFactor: 0
+     mode: readwrite
+     name: innodb_log_checksums
+     optional: '[ON|OFF]'
+     restart: false
+     unit: STRING
+   - defaultValue: '2147483648'
+     divisibilityFactor: 1024
+     mode: readwrite
+     name: innodb_log_file_size
+     optional: '[4194304-107374182400]'
+     restart: true
+     unit: INT
+   - defaultValue: '8192'
+     divisibilityFactor: 1
+     mode: readwrite
+     name: innodb_lru_scan_depth
+     optional: '[100-18446744073709551615]'
+     restart: false
+     unit: INT
+   - defaultValue: '75'
+     divisibilityFactor: 1
+     mode: readwrite
+     name: innodb_max_dirty_pages_pct
+     optional: '[0-99]'
+     restart: false
+     unit: INT
+   - defaultValue: '0'
+     divisibilityFactor: 1
+     mode: readwrite
+     name: innodb_max_dirty_pages_pct_lwm
+     optional: '[0-99]'
+     restart: false
+     unit: INT
+   - defaultValue: '0'
+     divisibilityFactor: 1
+     mode: readwrite
+     name: innodb_max_purge_lag
+     optional: '[0-4294967295]'
+     restart: false
+     unit: INT
+   - defaultValue: '0'
+     divisibilityFactor: 1
+     mode: readwrite
+     name: innodb_max_purge_lag_delay
+     optional: '[0-10000000]'
+     restart: false
+     unit: INT
+   - defaultValue: '1073741824'
+     divisibilityFactor: 1
+     mode: readwrite
+     name: innodb_max_undo_log_size
+     optional: '[10485760-18446744073709551615]'
+     restart: false
+     unit: INT
+   - defaultValue: ''
+     divisibilityFactor: 0
+     mode: readwrite
+     name: innodb_monitor_disable
+     optional: all
+     restart: false
+     unit: STRING
+   - defaultValue: ''
+     divisibilityFactor: 0
+     mode: readwrite
+     name: innodb_monitor_enable
+     optional: all
+     restart: false
+     unit: STRING
+   - defaultValue: '37'
+     divisibilityFactor: 1
+     mode: readwrite
+     name: innodb_old_blocks_pct
+     optional: '[5-95]'
+     restart: false
+     unit: INT
+   - defaultValue: '1000'
+     divisibilityFactor: 1
+     mode: readwrite
+     name: innodb_old_blocks_time
+     optional: '[0-1024]'
+     restart: false
+     unit: INT
+   - defaultValue: '134217728'
+     divisibilityFactor: 1
+     mode: readwrite
+     name: innodb_online_alter_log_max_size
+     optional: '[134217728-2147483647]'
+     restart: false
+     unit: INT
+   - defaultValue: '20000'
+     divisibilityFactor: 1
+     mode: readwrite
+     name: innodb_open_files
+     optional: '[10-2147483647]'
+     restart: true
+     unit: INT
+   - defaultValue: 'OFF'
+     divisibilityFactor: 0
+     mode: readwrite
+     name: innodb_optimize_fulltext_only
+     optional: '[ON|OFF]'
+     restart: false
+     unit: STRING
+   - defaultValue: '4'
+     divisibilityFactor: 1
+     mode: readwrite
+     name: innodb_page_cleaners
+     optional: '[1-64]'
+     restart: true
+     unit: INT
+   - defaultValue: 'ON'
+     divisibilityFactor: 0
+     mode: readwrite
+     name: innodb_print_all_deadlocks
+     optional: '[OFF|ON]'
+     restart: false
+     unit: STRING
+   - defaultValue: '300'
+     divisibilityFactor: 1
+     mode: readwrite
+     name: innodb_purge_batch_size
+     optional: '[1-5000]'
+     restart: true
+     unit: INT
+   - defaultValue: '128'
+     divisibilityFactor: 1
+     mode: readwrite
+     name: innodb_purge_rseg_truncate_frequency
+     optional: '[1-128]'
+     restart: false
+     unit: INT
+   - defaultValue: '4'
+     divisibilityFactor: 1
+     mode: readwrite
+     name: innodb_purge_threads
+     optional: '[1-32]'
+     restart: true
+     unit: INT
+   - defaultValue: 'OFF'
+     divisibilityFactor: 0
+     mode: readwrite
+     name: innodb_random_read_ahead
+     optional: '[ON|OFF]'
+     restart: false
+     unit: STRING
+   - defaultValue: '0'
+     divisibilityFactor: 1
+     mode: readwrite
+     name: innodb_read_ahead_threshold
+     optional: '[0-1024]'
+     restart: false
+     unit: INT
+   - defaultValue: '4'
+     divisibilityFactor: 1
+     mode: readwrite
+     name: innodb_read_io_threads
+     optional: '[1-64]'
+     restart: true
+     unit: INT
+   - defaultValue: 'OFF'
+     divisibilityFactor: 0
+     mode: readwrite
+     name: innodb_rollback_on_timeout
+     optional: '[OFF|ON]'
+     restart: true
+     unit: STRING
+   - defaultValue: '128'
+     divisibilityFactor: 1
+     mode: readwrite
+     name: innodb_rollback_segments
+     optional: '[1-128]'
+     restart: false
+     unit: INT
+   - defaultValue: '1'
+     divisibilityFactor: 1
+     mode: readwrite
+     name: loose_innodb_snapshot_update_gcn
+     optional: .*
+     restart: true
+     unit: INT
+   - defaultValue: '1048576'
+     divisibilityFactor: 512
+     mode: readwrite
+     name: innodb_sort_buffer_size
+     optional: '[65536-67108864]'
+     restart: true
+     unit: INT
+   - defaultValue: '6'
+     divisibilityFactor: 1
+     mode: readwrite
+     name: innodb_spin_wait_delay
+     optional: '[0-4294967295]'
+     restart: false
+     unit: INT
+   - defaultValue: 'ON'
+     divisibilityFactor: 0
+     mode: readwrite
+     name: innodb_stats_auto_recalc
+     optional: '[ON|OFF]'
+     restart: false
+     unit: STRING
+   - defaultValue: nulls_equal
+     divisibilityFactor: 0
+     mode: readwrite
+     name: innodb_stats_method
+     optional: '[nulls_equal|nulls_unequal|nulls_ignored]'
+     restart: false
+     unit: STRING
+   - defaultValue: 'OFF'
+     divisibilityFactor: 0
+     mode: readwrite
+     name: innodb_stats_on_metadata
+     optional: '[ON|OFF]'
+     restart: false
+     unit: STRING
+   - defaultValue: 'ON'
+     divisibilityFactor: 0
+     mode: readwrite
+     name: innodb_stats_persistent
+     optional: '[ON|OFF]'
+     restart: false
+     unit: STRING
+   - defaultValue: '20'
+     divisibilityFactor: 1
+     mode: readwrite
+     name: innodb_stats_persistent_sample_pages
+     optional: '[0-4294967295]'
+     restart: false
+     unit: INT
+   - defaultValue: '8'
+     divisibilityFactor: 1
+     mode: readwrite
+     name: innodb_stats_transient_sample_pages
+     optional: '[1-4294967295]'
+     restart: false
+     unit: INT
+   - defaultValue: 'OFF'
+     divisibilityFactor: 0
+     mode: readwrite
+     name: innodb_status_output
+     optional: '[ON|OFF]'
+     restart: false
+     unit: STRING
+   - defaultValue: 'OFF'
+     divisibilityFactor: 0
+     mode: readwrite
+     name: innodb_status_output_locks
+     optional: '[ON|OFF]'
+     restart: false
+     unit: STRING
+   - defaultValue: 'ON'
+     divisibilityFactor: 0
+     mode: readwrite
+     name: innodb_strict_mode
+     optional: '[ON|OFF]'
+     restart: false
+     unit: STRING
+   - defaultValue: '16'
+     divisibilityFactor: 1
+     mode: readwrite
+     name: innodb_sync_array_size
+     optional: '[1-64]'
+     restart: true
+     unit: INT
+   - defaultValue: '30'
+     divisibilityFactor: 1
+     mode: readwrite
+     name: innodb_sync_spin_loops
+     optional: '[0-4294967295]'
+     restart: false
+     unit: INT
+   - defaultValue: 'ON'
+     divisibilityFactor: 0
+     mode: readwrite
+     name: innodb_table_locks
+     optional: '[ON|OFF]'
+     restart: false
+     unit: STRING
+   - defaultValue: block
+     divisibilityFactor: 0
+     mode: readwrite
+     name: loose_innodb_tcn_cache_level
+     optional: .*
+     restart: true
+     unit: STRING
+   - defaultValue: '0'
+     divisibilityFactor: 1
+     mode: readwrite
+     name: innodb_thread_concurrency
+     optional: '[0-1000]'
+     restart: false
+     unit: INT
+   - defaultValue: '0'
+     divisibilityFactor: 1
+     mode: readwrite
+     name: innodb_thread_sleep_delay
+     optional: '[0-1000000]'
+     restart: false
+     unit: INT
+   - defaultValue: '4'
+     divisibilityFactor: 1
+     mode: readwrite
+     name: innodb_write_io_threads
+     optional: '[1-64]'
+     restart: true
+     unit: INT
+   - defaultValue: '3'
+     divisibilityFactor: 1
+     mode: readwrite
+     name: loose_innodb_scn_history_interval
+     optional: '[1-10]'
+     restart: false
+     unit: INT
+   - defaultValue: '7'
+     divisibilityFactor: 1
+     mode: readwrite
+     name: loose_innodb_scn_history_keep_days
+     optional: '[1-30]'
+     restart: false
+     unit: INT
+   - defaultValue: 'ON'
+     divisibilityFactor: 0
+     mode: readwrite
+     name: loose_innodb_scn_history_task_enabled
+     optional: '[ON|OFF]'
+     restart: false
+     unit: STRING
+   - defaultValue: '7200'
+     divisibilityFactor: 1
+     mode: readwrite
+     name: interactive_timeout
+     optional: '[10-86400]'
+     restart: false
+     unit: INT
+   - defaultValue: '{LEAST(DBInstanceClassMemory/1048576*128, 262144)}'
+     divisibilityFactor: 1
+     mode: readwrite
+     name: join_buffer_size
+     optional: '[128-4294967295]'
+     restart: false
+     unit: INT
+   - defaultValue: '16777216'
+     divisibilityFactor: 1
+     mode: readwrite
+     name: key_buffer_size
+     optional: .*
+     restart: true
+     unit: INT
+   - defaultValue: '300'
+     divisibilityFactor: 100
+     mode: readwrite
+     name: key_cache_age_threshold
+     optional: '[100-4294967295]'
+     restart: false
+     unit: INT
+   - defaultValue: '1024'
+     divisibilityFactor: 512
+     mode: readwrite
+     name: key_cache_block_size
+     optional: '[512-16384]'
+     restart: false
+     unit: B
+   - defaultValue: '100'
+     divisibilityFactor: 1
+     mode: readwrite
+     name: key_cache_division_limit
+     optional: '[1-100]'
+     restart: false
+     unit: INT
+   - defaultValue: en_US
+     divisibilityFactor: 0
+     mode: readwrite
+     name: lc_time_names
+     optional: '[ja_JP|pt_BR|en_US]'
+     restart: false
+     unit: STRING
+   - defaultValue: 'OFF'
+     divisibilityFactor: 0
+     mode: readwrite
+     name: local_infile
+     optional: '[ON|OFF]'
+     restart: false
+     unit: STRING
+   - defaultValue: '1800'
+     divisibilityFactor: 1
+     mode: readwrite
+     name: lock_wait_timeout
+     optional: '[1-1073741824]'
+     restart: false
+     unit: INT
+   - defaultValue: '0'
+     divisibilityFactor: 0
+     mode: readwrite
+     name: log_bin_use_v1_row_events
+     optional: '[0|1]'
+     restart: false
+     unit: STRING
+   - defaultValue: 'ON'
+     divisibilityFactor: 0
+     mode: readwrite
+     name: log_bin_trust_function_creators
+     optional: '[ON|OFF]'
+     restart: false
+     unit: STRING
+   - defaultValue: '2'
+     divisibilityFactor: 1
+     mode: readwrite
+     name: log_error_verbosity
+     optional: '[1-3]'
+     restart: false
+     unit: INT
+   - defaultValue: 'OFF'
+     divisibilityFactor: 0
+     mode: readwrite
+     name: log_queries_not_using_indexes
+     optional: '[ON|OFF]'
+     restart: false
+     unit: STRING
+   - defaultValue: '0'
+     divisibilityFactor: 0
+     mode: readwrite
+     name: log_slave_updates
+     optional: .*
+     restart: true
+     unit: INT
+   - defaultValue: 'ON'
+     divisibilityFactor: 0
+     mode: readwrite
+     name: log_slow_admin_statements
+     optional: '[ON|OFF]'
+     restart: false
+     unit: STRING
+   - defaultValue: 'ON'
+     divisibilityFactor: 0
+     mode: readwrite
+     name: log_slow_slave_statements
+     optional: '[ON|OFF]'
+     restart: false
+     unit: STRING
+   - defaultValue: '0'
+     divisibilityFactor: 1
+     mode: readwrite
+     name: log_throttle_queries_not_using_indexes
+     optional: '[0-4294967295]'
+     restart: false
+     unit: INT
+   - defaultValue: '1'
+     divisibilityFactor: 6
+     mode: readwrite
+     name: long_query_time
+     optional: '[0.1-31536000]'
+     restart: false
+     unit: DOUBLE
+   - defaultValue: '0'
+     divisibilityFactor: 1
+     mode: readwrite
+     name: loose_ccl_max_waiting_count
+     optional: '[0-9223372036854775807]'
+     restart: false
+     unit: INT
+   - defaultValue: '4'
+     divisibilityFactor: 1
+     mode: readwrite
+     name: loose_ccl_queue_bucket_count
+     optional: '[1-64]'
+     restart: false
+     unit: INT
+   - defaultValue: '64'
+     divisibilityFactor: 1
+     mode: readwrite
+     name: loose_ccl_queue_bucket_size
+     optional: '[1-4096]'
+     restart: false
+     unit: INT
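Several defaults in this template are formulas rather than literals, e.g. {LEAST(DBInstanceClassMemory/1048576*128, 262144)} for join_buffer_size just above: the value scales with the instance's memory class and is capped. A Go sketch of the arithmetic for a hypothetical 16 GiB instance (the function and variable names are illustrative; the actual substitution happens when the template is rendered):

    package main

    import "fmt"

    // least mirrors the LEAST(a, b) used in formula defaults.
    func least(a, b int64) int64 {
        if a < b {
            return a
        }
        return b
    }

    func main() {
        mem := int64(16) << 30 // DBInstanceClassMemory in bytes for a 16 GiB class
        joinBuf := least(mem/1048576*128, 262144)   // 16384*128 = 2097152, capped to 262144
        tableDef := least(mem/1073741824*512, 2048) // 16*512 = 8192, capped to 2048
        fmt.Println(joinBuf, tableDef)              // 262144 2048
    }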
+   - defaultValue: '86400'
+     divisibilityFactor: 1
+     mode: readwrite
+     name: loose_ccl_wait_timeout
+     optional: '[1-31536000]'
+     restart: false
+     unit: INT
+   - defaultValue: 'ON'
+     divisibilityFactor: 0
+     mode: readwrite
+     name: loose_consensus_auto_leader_transfer
+     optional: .*
+     restart: true
+     unit: STRING
+   - defaultValue: 'ON'
+     divisibilityFactor: 0
+     mode: readwrite
+     name: loose_consensus_auto_reset_match_index
+     optional: .*
+     restart: true
+     unit: STRING
+   - defaultValue: '10000'
+     divisibilityFactor: 1
+     mode: readwrite
+     name: loose_consensus_election_timeout
+     optional: .*
+     restart: true
+     unit: INT
+   - defaultValue: '8'
+     divisibilityFactor: 1
+     mode: readwrite
+     name: loose_consensus_io_thread_cnt
+     optional: .*
+     restart: true
+     unit: INT
+   - defaultValue: 'ON'
+     divisibilityFactor: 0
+     mode: readwrite
+     name: loose_consensus_large_trx
+     optional: .*
+     restart: true
+     unit: STRING
+   - defaultValue: '536870912'
+     divisibilityFactor: 1
+     mode: readwrite
+     name: loose_consensus_log_cache_size
+     optional: .*
+     restart: true
+     unit: INT
+   - defaultValue: '10000'
+     divisibilityFactor: 1
+     mode: readwrite
+     name: loose_consensus_max_delay_index
+     optional: .*
+     restart: true
+     unit: INT
+   - defaultValue: '20971520'
+     divisibilityFactor: 1
+     mode: readwrite
+     name: loose_consensus_max_log_size
+     optional: .*
+     restart: true
+     unit: INT
+   - defaultValue: '131072'
+     divisibilityFactor: 1
+     mode: readwrite
+     name: loose_consensus_max_packet_size
+     optional: .*
+     restart: true
+     unit: INT
+   - defaultValue: '268435456'
+     divisibilityFactor: 1
+     mode: readwrite
+     name: loose_consensus_prefetch_cache_size
+     optional: .*
+     restart: true
+     unit: INT
+   - defaultValue: '8'
+     divisibilityFactor: 1
+     mode: readwrite
+     name: loose_consensus_worker_thread_cnt
+     optional: .*
+     restart: true
+     unit: INT
+   - defaultValue: '1'
+     divisibilityFactor: 1
+     mode: readwrite
+     name: loose_implicit_primary_key
+     optional: '[0-1]'
+     restart: false
+     unit: INT
+   - defaultValue: '86400'
+     divisibilityFactor: 1
+     mode: readwrite
+     name: loose_information_schema_stats_expiry
+     optional: '[0-31536000]'
+     restart: false
+     unit: INT
+   - defaultValue: 'OFF'
+     divisibilityFactor: 0
+     mode: readwrite
+     name: loose_innodb_buffer_pool_in_core_file
+     optional: '[OFF|ON]'
+     restart: false
+     unit: STRING
+   - defaultValue: '64'
+     divisibilityFactor: 1
+     mode: readwrite
+     name: loose_innodb_doublewrite_pages
+     optional: '[0-512]'
+     restart: true
+     unit: INT
+   - defaultValue: 'OFF'
+     divisibilityFactor: 0
+     mode: readwrite
+     name: loose_innodb_lizard_stat_enabled
+     optional: '[ON|OFF]'
+     restart: true
+     unit: STRING
+   - defaultValue: 'ON'
+     divisibilityFactor: 0
+     mode: readwrite
+     name: loose_innodb_log_compressed_pages
+     optional: '[ON|OFF]'
+     restart: false
+     unit: STRING
+   - defaultValue: 'OFF'
+     divisibilityFactor: 0
+     mode: readwrite
+     name: loose_innodb_log_optimize_ddl
+     optional: '[ON|OFF]'
+     restart: false
+     unit: STRING
+   - defaultValue: '4096'
+     divisibilityFactor: 1
+     mode: readwrite
+     name: loose_innodb_log_write_ahead_size
+     optional: '[512-16384]'
+     restart: false
+     unit: INT
+   - defaultValue: 'ON'
+     divisibilityFactor: 0
+     mode: readwrite
+     name: loose_innodb_multi_blocks_enabled
+     optional: '[ON|OFF]'
+     restart: false
+     unit: STRING
+   - defaultValue: 'ON'
+     divisibilityFactor: 0
+     mode: readwrite
+     name: loose_innodb_numa_interleave
+     optional: '[ON|OFF]'
+     restart: true
+     unit: STRING
+   - defaultValue: '1'
+     divisibilityFactor: 1
+     mode: readwrite
+     name: loose_innodb_parallel_read_threads
+     optional: '[0-256]'
+     restart: false
+     unit: INT
+   - defaultValue: '1800'
+     divisibilityFactor: 1
+     mode: readwrite
+     name: loose_innodb_undo_retention
+     optional: '[0-172800]'
+     restart: false
+     unit: INT
+   - defaultValue: '1024'
+     divisibilityFactor: 1
+     mode: readwrite
+     name: loose_innodb_undo_space_reserved_size
+     optional: '[0-20480]'
+     restart: false
+     unit: INT
+   - defaultValue: '102400'
+     divisibilityFactor: 1
+     mode: readwrite
+     name: loose_innodb_undo_space_supremum_size
+     optional: '[0-524288]'
+     restart: false
+     unit: INT
+   - defaultValue: TempTable
+     divisibilityFactor: 0
+     mode: readwrite
+     name: loose_internal_tmp_mem_storage_engine
+     optional: '[TempTable|MEMORY]'
+     restart: false
+     unit: STRING
+   - defaultValue: index_merge=on,index_merge_union=on,index_merge_sort_union=on,index_merge_intersection=on,engine_condition_pushdown=on,index_condition_pushdown=on,mrr=on,mrr_cost_based=on,block_nested_loop=on,batched_key_access=off,materialization=on,semijoin=on,loosescan=on,firstmatch=on,subquery_materialization_cost_based=on,use_index_extensions=on
+     divisibilityFactor: 0
+     mode: readwrite
+     name: loose_optimizer_switch
+     optional: .*
+     restart: false
+     unit: STRING
+   - defaultValue: enabled=off,one_line=off
+     divisibilityFactor: 0
+     mode: readwrite
+     name: loose_optimizer_trace
+     optional: .*
+     restart: false
+     unit: STRING
+   - defaultValue: greedy_search=on,range_optimizer=on,dynamic_range=on,repeated_subselect=on
+     divisibilityFactor: 0
+     mode: readwrite
+     name: loose_optimizer_trace_features
+     optional: .*
+     restart: false
+     unit: STRING
+   - defaultValue: 'ON'
+     divisibilityFactor: 0
+     mode: readwrite
+     name: loose_performance_point_lock_rwlock_enabled
+     optional: '[ON|OFF]'
+     restart: false
+     unit: STRING
+   - defaultValue: '10000'
+     divisibilityFactor: 1
+     mode: readwrite
+     name: loose_performance_schema_accounts_size
+     optional: '[-1-1048576]'
+     restart: true
+     unit: INT
+   - defaultValue: 'ON'
+     divisibilityFactor: 0
+     mode: readwrite
+     name: loose_performance_schema_consumer_events_stages_current
+     optional: '[OFF|ON]'
+     restart: false
+     unit: STRING
+   - defaultValue: 'ON'
+     divisibilityFactor: 0
+     mode: readwrite
+     name: loose_performance_schema_consumer_events_stages_history
+     optional: '[OFF|ON]'
+     restart: false
+     unit: STRING
+   - defaultValue: 'ON'
+     divisibilityFactor: 0
+     mode: readwrite
+     name: loose_performance_schema_consumer_events_stages_history_long
+     optional: '[OFF|ON]'
+     restart: false
+     unit: STRING
+   - defaultValue: 'OFF'
+     divisibilityFactor: 0
+     mode: readwrite
+     name: loose_performance_schema_consumer_events_statements_current
+     optional: '[OFF|ON]'
+     restart: false
+     unit: STRING
+   - defaultValue: 'OFF'
+     divisibilityFactor: 0
+     mode: readwrite
+     name: loose_performance_schema_consumer_events_statements_history
+     optional: '[OFF|ON]'
+     restart: false
+     unit: STRING
+   - defaultValue: 'OFF'
+     divisibilityFactor: 0
+     mode: readwrite
+     name: loose_performance_schema_consumer_events_statements_history_long
+     optional: '[OFF|ON]'
+     restart: false
+     unit: STRING
+   - defaultValue: 'OFF'
+     divisibilityFactor: 0
+     mode: readwrite
+     name: loose_performance_schema_consumer_events_transactions_current
+     optional: '[OFF|ON]'
+     restart: false
+     unit: STRING
+   - defaultValue: 'OFF'
+     divisibilityFactor: 0
+     mode: readwrite
+     name: loose_performance_schema_consumer_events_transactions_history
+     optional: '[OFF|ON]'
+     restart: false
+     unit: STRING
+ - defaultValue: 'OFF' + divisibilityFactor: 0 + mode: readwrite + name: loose_performance_schema_consumer_events_transactions_history_long + optional: '[OFF|ON]' + restart: false + unit: STRING + - defaultValue: 'OFF' + divisibilityFactor: 0 + mode: readwrite + name: loose_performance_schema_consumer_events_waits_current + optional: '[OFF|ON]' + restart: false + unit: STRING + - defaultValue: 'OFF' + divisibilityFactor: 0 + mode: readwrite + name: loose_performance_schema_consumer_events_waits_history + optional: '[OFF|ON]' + restart: false + unit: STRING + - defaultValue: 'OFF' + divisibilityFactor: 0 + mode: readwrite + name: loose_performance_schema_consumer_events_waits_history_long + optional: '[OFF|ON]' + restart: false + unit: STRING + - defaultValue: 'OFF' + divisibilityFactor: 0 + mode: readwrite + name: loose_performance_schema_consumer_global_instrumentation + optional: '[OFF|ON]' + restart: false + unit: STRING + - defaultValue: 'OFF' + divisibilityFactor: 0 + mode: readwrite + name: loose_performance_schema_consumer_statements_digest + optional: '[OFF|ON]' + restart: false + unit: STRING + - defaultValue: 'OFF' + divisibilityFactor: 0 + mode: readwrite + name: loose_performance_schema_consumer_thread_instrumentation + optional: '[OFF|ON]' + restart: false + unit: STRING + - defaultValue: '10000' + divisibilityFactor: 1 + mode: readwrite + name: loose_performance_schema_digests_size + optional: '[-1-1048576]' + restart: true + unit: INT + - defaultValue: '0' + divisibilityFactor: 1 + mode: readwrite + name: loose_performance_schema_error_size + optional: '[0-1048576]' + restart: true + unit: INT + - defaultValue: '0' + divisibilityFactor: 1 + mode: readwrite + name: loose_performance_schema_events_stages_history_long_size + optional: '[-1-1048576]' + restart: true + unit: INT + - defaultValue: '0' + divisibilityFactor: 1 + mode: readwrite + name: loose_performance_schema_events_stages_history_size + optional: '[-1-1024]' + restart: true + unit: INT + - defaultValue: '0' + divisibilityFactor: 1 + mode: readwrite + name: loose_performance_schema_events_statements_history_long_size + optional: '[-1-1048576]' + restart: true + unit: INT + - defaultValue: '0' + divisibilityFactor: 1 + mode: readwrite + name: loose_performance_schema_events_statements_history_size + optional: '[-1-1024]' + restart: true + unit: INT + - defaultValue: '0' + divisibilityFactor: 1 + mode: readwrite + name: loose_performance_schema_events_transactions_history_long_size + optional: '[-1-1048576]' + restart: true + unit: INT + - defaultValue: '0' + divisibilityFactor: 1 + mode: readwrite + name: loose_performance_schema_events_transactions_history_size + optional: '[-1-1024]' + restart: true + unit: INT + - defaultValue: '0' + divisibilityFactor: 1 + mode: readwrite + name: loose_performance_schema_events_waits_history_long_size + optional: '[-1-1048576]' + restart: true + unit: INT + - defaultValue: '0' + divisibilityFactor: 1 + mode: readwrite + name: loose_performance_schema_events_waits_history_size + optional: '[-1-1024]' + restart: true + unit: INT + - defaultValue: '10000' + divisibilityFactor: 1 + mode: readwrite + name: loose_performance_schema_hosts_size + optional: '[-1-1048576]' + restart: true + unit: INT + - defaultValue: '''%%%%=OFF''' + divisibilityFactor: 0 + mode: readwrite + name: loose_performance_schema_instrument + optional: .* + restart: true + unit: STRING + - defaultValue: '''wait/lock/metadata/sql/mdl=ON''' + divisibilityFactor: 0 + mode: readwrite + name: 
loose_performance-schema_instrument + optional: .* + restart: true + unit: STRING + - defaultValue: '''memory/%%%%=COUNTED''' + divisibilityFactor: 0 + mode: readwrite + name: loose_performance_schema-instrument + optional: .* + restart: true + unit: STRING + - defaultValue: '0' + divisibilityFactor: 1 + mode: readwrite + name: loose_performance_schema_max_cond_classes + optional: '[0-256]' + restart: true + unit: INT + - defaultValue: '10000' + divisibilityFactor: 1 + mode: readwrite + name: loose_performance_schema_max_cond_instances + optional: '[-1-1048576]' + restart: true + unit: INT + - defaultValue: '0' + divisibilityFactor: 1 + mode: readwrite + name: loose_performance_schema_max_digest_length + optional: '[0-1048576]' + restart: true + unit: INT + - defaultValue: '0' + divisibilityFactor: 1 + mode: readwrite + name: loose_performance_schema_max_digest_sample_age + optional: '[0-1048576]' + restart: true + unit: INT + - defaultValue: '0' + divisibilityFactor: 1 + mode: readwrite + name: loose_performance_schema_max_file_classes + optional: '[0-256]' + restart: true + unit: INT + - defaultValue: '0' + divisibilityFactor: 1 + mode: readwrite + name: loose_performance_schema_max_file_handles + optional: '[-1-32768]' + restart: true + unit: INT + - defaultValue: '1000' + divisibilityFactor: 1 + mode: readwrite + name: loose_performance_schema_max_file_instances + optional: '[-1-1048576]' + restart: true + unit: INT + - defaultValue: '10000' + divisibilityFactor: 1 + mode: readwrite + name: loose_performance_schema_max_index_stat + optional: '[-1-1048576]' + restart: true + unit: INT + - defaultValue: '0' + divisibilityFactor: 1 + mode: readwrite + name: loose_performance_schema_max_memory_classes + optional: '[0-1024]' + restart: true + unit: INT + - defaultValue: '10000' + divisibilityFactor: 1 + mode: readwrite + name: loose_performance_schema_max_metadata_locks + optional: '[-1-1048576]' + restart: true + unit: INT + - defaultValue: '0' + divisibilityFactor: 1 + mode: readwrite + name: loose_performance_schema_max_mutex_classes + optional: '[0-256]' + restart: true + unit: INT + - defaultValue: '10000' + divisibilityFactor: 1 + mode: readwrite + name: loose_performance_schema_max_mutex_instances + optional: '[-1-104857600]' + restart: true + unit: INT + - defaultValue: '1000' + divisibilityFactor: 1 + mode: readwrite + name: loose_performance_schema_max_prepared_statements_instances + optional: '[-1-4194304]' + restart: true + unit: INT + - defaultValue: '10000' + divisibilityFactor: 1 + mode: readwrite + name: loose_performance_schema_max_program_instances + optional: '[-1-1048576]' + restart: true + unit: INT + - defaultValue: '0' + divisibilityFactor: 1 + mode: readwrite + name: loose_performance_schema_max_rwlock_classes + optional: '[0-256]' + restart: true + unit: INT + - defaultValue: '10000' + divisibilityFactor: 1 + mode: readwrite + name: loose_performance_schema_max_rwlock_instances + optional: '[-1-104857600]' + restart: true + unit: INT + - defaultValue: '0' + divisibilityFactor: 1 + mode: readwrite + name: loose_performance_schema_max_socket_classes + optional: '[0-256]' + restart: true + unit: INT + - defaultValue: '1000' + divisibilityFactor: 1 + mode: readwrite + name: loose_performance_schema_max_socket_instances + optional: '[-1-1048576]' + restart: true + unit: INT + - defaultValue: '0' + divisibilityFactor: 1 + mode: readwrite + name: loose_performance_schema_max_sql_text_length + optional: '[0-1048576]' + restart: true + unit: INT + - defaultValue: '0' + 
divisibilityFactor: 1 + mode: readwrite + name: loose_performance_schema_max_stage_classes + optional: '[0-256]' + restart: true + unit: INT + - defaultValue: '0' + divisibilityFactor: 1 + mode: readwrite + name: loose_performance_schema_max_statement_classes + optional: '[0-256]' + restart: true + unit: INT + - defaultValue: '1' + divisibilityFactor: 1 + mode: readwrite + name: loose_performance_schema_max_statement_stack + optional: '[0-256]' + restart: true + unit: INT + - defaultValue: '10000' + divisibilityFactor: 1 + mode: readwrite + name: loose_performance_schema_max_table_handles + optional: '[-1-1048576]' + restart: true + unit: INT + - defaultValue: '1000' + divisibilityFactor: 1 + mode: readwrite + name: loose_performance_schema_max_table_instances + optional: '[-1-1048576]' + restart: true + unit: INT + - defaultValue: '10000' + divisibilityFactor: 1 + mode: readwrite + name: loose_performance_schema_max_table_lock_stat + optional: '[-1-1048576]' + restart: true + unit: INT + - defaultValue: '0' + divisibilityFactor: 1 + mode: readwrite + name: loose_performance_schema_max_thread_classes + optional: '[0-256]' + restart: true + unit: INT + - defaultValue: '10000' + divisibilityFactor: 1 + mode: readwrite + name: loose_performance_schema_max_thread_instances + optional: '[-1-1048576]' + restart: true + unit: INT + - defaultValue: '0' + divisibilityFactor: 1 + mode: readwrite + name: loose_performance_schema_session_connect_attrs_size + optional: '[-1-1048576]' + restart: true + unit: INT + - defaultValue: '10000' + divisibilityFactor: 1 + mode: readwrite + name: loose_performance_schema_setup_actors_size + optional: '[-1-1048576]' + restart: true + unit: INT + - defaultValue: '10000' + divisibilityFactor: 1 + mode: readwrite + name: loose_performance_schema_setup_objects_size + optional: '[-1-1048576]' + restart: true + unit: INT + - defaultValue: '10000' + divisibilityFactor: 1 + mode: readwrite + name: loose_performance_schema_users_size + optional: '[-1-1048576]' + restart: true + unit: INT + - defaultValue: 'OFF' + divisibilityFactor: 1 + mode: readwrite + name: loose_persist_binlog_to_redo + optional: '[ON|OFF]' + restart: false + unit: STRING + - defaultValue: '1048576' + divisibilityFactor: 1 + mode: readwrite + name: loose_persist_binlog_to_redo_size_limit + optional: '[0-10485760]' + restart: false + unit: STRING + - defaultValue: '16777216' + divisibilityFactor: 1 + mode: readwrite + name: loose_rds_audit_log_buffer_size + optional: '[16777216-104857600]' + restart: false + unit: INT + - defaultValue: 'OFF' + divisibilityFactor: 0 + mode: readwrite + name: loose_rds_audit_log_enabled + optional: '[ON|OFF]' + restart: false + unit: STRING + - defaultValue: '8192' + divisibilityFactor: 1 + mode: readwrite + name: loose_rds_audit_log_event_buffer_size + optional: '[0-32768]' + restart: false + unit: INT + - defaultValue: '100000' + divisibilityFactor: 1 + mode: readwrite + name: loose_rds_audit_log_row_limit + optional: '[0-100000000]' + restart: false + unit: INT + - defaultValue: MYSQL_V1 + divisibilityFactor: 0 + mode: readwrite + name: loose_rds_audit_log_version + optional: '[MYSQL_V1|MYSQL_V3]' + restart: false + unit: STRING + - defaultValue: 'ON' + divisibilityFactor: 0 + mode: readwrite + name: loose_recovery_apply_binlog + optional: '[ON|OFF]' + restart: true + unit: STRING + - defaultValue: '3000' + divisibilityFactor: 1 + mode: readwrite + name: loose_replica_read_timeout + optional: '[0-2147483647]' + restart: true + unit: INT + - defaultValue: '' + 
divisibilityFactor: 0 + mode: readwrite + name: loose_rotate_log_table_last_name + optional: .* + restart: true + unit: STRING + - defaultValue: '"*"' + divisibilityFactor: 0 + mode: readwrite + name: loose_session_track_system_variables + optional: .* + restart: false + unit: STRING + - defaultValue: 'OFF' + divisibilityFactor: 0 + mode: readwrite + name: loose_session_track_transaction_info + optional: '[STATE|CHARACTERISTICS|OFF]' + restart: false + unit: STRING + - defaultValue: '8' + divisibilityFactor: 1 + mode: readwrite + name: loose_slave_parallel_workers + optional: '[0-1024]' + restart: false + unit: INT + - defaultValue: '0' + divisibilityFactor: 0 + mode: readwrite + name: low_priority_updates + optional: '[0|1]' + restart: false + unit: STRING + - defaultValue: '1' + divisibilityFactor: 0 + mode: readwrite + name: lower_case_table_names + optional: '[0|1]' + restart: true + unit: STRING + - defaultValue: TABLE + divisibilityFactor: 0 + mode: readwrite + name: master_info_repository + optional: '[TABLE|FILE]' + restart: false + unit: STRING + - defaultValue: 'OFF' + divisibilityFactor: 0 + mode: readwrite + name: master_verify_checksum + optional: '[ON|OFF]' + restart: false + unit: STRING + - defaultValue: '1073741824' + divisibilityFactor: 1 + mode: readwrite + name: max_allowed_packet + optional: '[16384-1073741824]' + restart: false + unit: INT + - defaultValue: '18446744073709551615' + divisibilityFactor: 1 + mode: readwrite + name: max_binlog_cache_size + optional: '[4096-18446744073709547520]' + restart: false + unit: INT + - defaultValue: '18446744073709551615' + divisibilityFactor: 4096 + mode: readwrite + name: max_binlog_stmt_cache_size + optional: '[4096-18446744073709547520]' + restart: false + unit: INT + - defaultValue: '65536' + divisibilityFactor: 1 + mode: readwrite + name: max_connect_errors + optional: '[0-4294967295]' + restart: false + unit: INT + - defaultValue: '5532' + divisibilityFactor: 1 + mode: readwrite + name: max_connections + optional: .* + restart: true + unit: INT + - defaultValue: '1024' + divisibilityFactor: 1 + mode: readwrite + name: max_error_count + optional: '[0-65535]' + restart: false + unit: INT + - defaultValue: '0' + divisibilityFactor: 1 + mode: readwrite + name: max_execution_time + optional: '[0-4294967295]' + restart: false + unit: INT + - defaultValue: '67108864' + divisibilityFactor: 1024 + mode: readwrite + name: max_heap_table_size + optional: '[16384-1844674407370954752]' + restart: false + unit: INT + - defaultValue: '18446744073709551615' + divisibilityFactor: 1 + mode: readwrite + name: max_join_size + optional: '[1-18446744073709551615]' + restart: false + unit: INT + - defaultValue: '4096' + divisibilityFactor: 1 + mode: readwrite + name: max_length_for_sort_data + optional: '[0-838860]' + restart: false + unit: INT + - defaultValue: '65536' + divisibilityFactor: 1 + mode: readwrite + name: max_points_in_geometry + optional: '[3-1048576]' + restart: false + unit: INT + - defaultValue: '16382' + divisibilityFactor: 1 + mode: readwrite + name: max_prepared_stmt_count + optional: '[0-1048576]' + restart: false + unit: INT + - defaultValue: '18446744073709551615' + divisibilityFactor: 1 + mode: readwrite + name: max_seeks_for_key + optional: '[1-18446744073709551615]' + restart: false + unit: INT + - defaultValue: '1024' + divisibilityFactor: 1 + mode: readwrite + name: max_sort_length + optional: '[4-8388608]' + restart: false + unit: INT + - defaultValue: '0' + divisibilityFactor: 1 + mode: readwrite + name: 
max_sp_recursion_depth + optional: '[0-255]' + restart: false + unit: INT + - defaultValue: '5000' + divisibilityFactor: 1 + mode: readwrite + name: max_user_connections + optional: .* + restart: true + unit: INT + - defaultValue: '102400' + divisibilityFactor: 1 + mode: readwrite + name: max_write_lock_count + optional: '[1-102400]' + restart: false + unit: INT + - defaultValue: '0' + divisibilityFactor: 1 + mode: readwrite + name: min_examined_row_limit + optional: '[0-4294967295]' + restart: false + unit: INT + - defaultValue: '262144' + divisibilityFactor: 1 + mode: readwrite + name: myisam_sort_buffer_size + optional: '[262144-16777216]' + restart: false + unit: INT + - defaultValue: 'OFF' + divisibilityFactor: 0 + mode: readwrite + name: mysql_native_password_proxy_users + optional: '[ON|OFF]' + restart: false + unit: STRING + - defaultValue: '16384' + divisibilityFactor: 1024 + mode: readwrite + name: net_buffer_length + optional: '[1024-1048576]' + restart: false + unit: INT + - defaultValue: '30' + divisibilityFactor: 1 + mode: readwrite + name: net_read_timeout + optional: '[1-18446744073709551615]' + restart: false + unit: INT + - defaultValue: '10' + divisibilityFactor: 1 + mode: readwrite + name: net_retry_count + optional: '[1-4294967295]' + restart: false + unit: INT + - defaultValue: '60' + divisibilityFactor: 1 + mode: readwrite + name: net_write_timeout + optional: '[1-18446744073709551615]' + restart: false + unit: INT + - defaultValue: '2' + divisibilityFactor: 1 + mode: readwrite + name: ngram_token_size + optional: '[0-20]' + restart: true + unit: INT + - defaultValue: '65535' + divisibilityFactor: 1 + mode: readwrite + name: open_files_limit + optional: '[1-2147483647]' + restart: true + unit: INT + - defaultValue: 'ON' + divisibilityFactor: 0 + mode: readwrite + name: opt_indexstat + optional: '[ON|OFF]' + restart: true + unit: STRING + - defaultValue: 'ON' + divisibilityFactor: 0 + mode: readwrite + name: opt_tablestat + optional: '[ON|OFF]' + restart: true + unit: STRING + - defaultValue: '1' + divisibilityFactor: 0 + mode: readwrite + name: optimizer_prune_level + optional: '[0|1]' + restart: false + unit: STRING + - defaultValue: '62' + divisibilityFactor: 1 + mode: readwrite + name: optimizer_search_depth + optional: '[0-62]' + restart: false + unit: INT + - defaultValue: '1' + divisibilityFactor: 1 + mode: readwrite + name: optimizer_trace_limit + optional: '[0-4294967295]' + restart: false + unit: INT + - defaultValue: '1048576' + divisibilityFactor: 1 + mode: readwrite + name: optimizer_trace_max_mem_size + optional: '[0-4294967295]' + restart: false + unit: INT + - defaultValue: '-1' + divisibilityFactor: 1 + mode: readwrite + name: optimizer_trace_offset + optional: '[-2147483648-2147483647]' + restart: false + unit: INT + - defaultValue: 'ON' + divisibilityFactor: 0 + mode: readwrite + name: performance_schema + optional: '[ON|OFF]' + restart: true + unit: STRING + - defaultValue: '32768' + divisibilityFactor: 1 + mode: readwrite + name: preload_buffer_size + optional: '[1024-1073741824]' + restart: false + unit: INT + - defaultValue: '8192' + divisibilityFactor: 1024 + mode: readwrite + name: query_alloc_block_size + optional: '[1024-16384]' + restart: false + unit: INT + - defaultValue: '8192' + divisibilityFactor: 1024 + mode: readwrite + name: query_prealloc_size + optional: '[8192-1048576]' + restart: false + unit: INT + - defaultValue: '4096' + divisibilityFactor: 1 + mode: readwrite + name: range_alloc_block_size + optional:
'[4096-18446744073709551615]' + restart: false + unit: INT + - defaultValue: '8388608' + divisibilityFactor: 1 + mode: readwrite + name: range_optimizer_max_mem_size + optional: '[0-18446744073709551615]' + restart: false + unit: INT + - defaultValue: '{LEAST(DBInstanceClassMemory/1048576*128, 262144)}' + divisibilityFactor: 1 + mode: readwrite + name: read_buffer_size + optional: '[8200-2147479552]' + restart: false + unit: INT + - defaultValue: '442368' + divisibilityFactor: 1 + mode: readwrite + name: read_rnd_buffer_size + optional: .* + restart: true + unit: INT + - defaultValue: TABLE + divisibilityFactor: 0 + mode: readwrite + name: relay_log_info_repository + optional: '[TABLE|FILE]' + restart: false + unit: STRING + - defaultValue: 'OFF' + divisibilityFactor: 0 + mode: readwrite + name: relay_log_purge + optional: .* + restart: true + unit: STRING + - defaultValue: 'OFF' + divisibilityFactor: 0 + mode: readwrite + name: relay_log_recovery + optional: '[ON|OFF]' + restart: true + unit: STRING + - defaultValue: 'OFF' + divisibilityFactor: 0 + mode: readwrite + name: replicate_same_server_id + optional: .* + restart: true + unit: STRING + - defaultValue: 'OFF' + divisibilityFactor: 0 + mode: readwrite + name: session_track_gtids + optional: '[OFF|OWN_GTID|ALL_GTIDS]' + restart: false + unit: STRING + - defaultValue: 'ON' + divisibilityFactor: 0 + mode: readwrite + name: session_track_schema + optional: '[ON|OFF]' + restart: false + unit: STRING + - defaultValue: 'OFF' + divisibilityFactor: 0 + mode: readwrite + name: session_track_state_change + optional: '[ON|OFF]' + restart: false + unit: STRING + - defaultValue: 'OFF' + divisibilityFactor: 0 + mode: readwrite + name: sha256_password_proxy_users + optional: '[ON|OFF]' + restart: false + unit: STRING + - defaultValue: 'OFF' + divisibilityFactor: 0 + mode: readwrite + name: show_old_temporals + optional: '[ON|OFF]' + restart: false + unit: STRING + - defaultValue: 'OFF' + divisibilityFactor: 0 + mode: readwrite + name: skip_slave_start + optional: .* + restart: true + unit: STRING + - defaultValue: 'ON' + divisibilityFactor: 0 + mode: readwrite + name: skip_ssl + optional: .* + restart: true + unit: STRING + - defaultValue: strict + divisibilityFactor: 0 + mode: readwrite + name: slave_exec_mode + optional: strict + restart: false + unit: STRING + - defaultValue: '4' + divisibilityFactor: 1 + mode: readwrite + name: slave_net_timeout + optional: '[15-300]' + restart: false + unit: INT + - defaultValue: LOGICAL_CLOCK + divisibilityFactor: 0 + mode: readwrite + name: slave_parallel_type + optional: '[DATABASE|LOGICAL_CLOCK]' + restart: true + unit: STRING + - defaultValue: '1073741824' + divisibilityFactor: 1 + mode: readwrite + name: slave_pending_jobs_size_max + optional: .* + restart: true + unit: INT + - defaultValue: 'OFF' + divisibilityFactor: 0 + mode: readwrite + name: slave_sql_verify_checksum + optional: .* + restart: true + unit: STRING + - defaultValue: '' + divisibilityFactor: 0 + mode: readwrite + name: slave_type_conversions + optional: '[s*|ALL_LOSSY|ALL_NON_LOSSY|ALL_SIGNED|ALL_UNSIGNED]' + restart: true + unit: STRING + - defaultValue: '2' + divisibilityFactor: 1 + mode: readwrite + name: slow_launch_time + optional: '[1-1024]' + restart: false + unit: INT + - defaultValue: 'OFF' + divisibilityFactor: 0 + mode: readwrite + name: slow_query_log + optional: '[ON|OFF]' + restart: false + unit: STRING + - defaultValue: '868352' + divisibilityFactor: 1 + mode: readwrite + name: sort_buffer_size + optional: 
'[32768-4294967295]' + restart: false + unit: INT + - defaultValue: NO_ENGINE_SUBSTITUTION + divisibilityFactor: 0 + mode: readwrite + name: sql_mode + optional: (s*|REAL_AS_FLOAT|PIPES_AS_CONCAT|ANSI_QUOTES|IGNORE_SPACE|ONLY_FULL_GROUP_BY|NO_UNSIGNED_SUBTRACTION|NO_DIR_IN_CREATE|ANSI|NO_AUTO_VALUE_ON_ZERO|NO_BACKSLASH_ESCAPES|STRICT_TRANS_TABLES|STRICT_ALL_TABLES|NO_ZERO_IN_DATE|NO_ZERO_DATE|ALLOW_INVALID_DATES|ERROR_FOR_DIVISION_BY_ZERO|TRADITIONAL|HIGH_NOT_PRECEDENCE|NO_ENGINE_SUBSTITUTION|PAD_CHAR_TO_FULL_LENGTH)(,REAL_AS_FLOAT|,PIPES_AS_CONCAT|,ANSI_QUOTES|,IGNORE_SPACE|,ONLY_FULL_GROUP_BY|,NO_UNSIGNED_SUBTRACTION|,NO_DIR_IN_CREATE|,ANSI|,NO_AUTO_VALUE_ON_ZERO|,NO_BACKSLASH_ESCAPES|,STRICT_TRANS_TABLES|,STRICT_ALL_TABLES|,NO_ZERO_IN_DATE|,NO_ZERO_DATE|,ALLOW_INVALID_DATES|,ERROR_FOR_DIVISION_BY_ZERO|,TRADITIONAL|,HIGH_NOT_PRECEDENCE|,NO_ENGINE_SUBSTITUTION|,PAD_CHAR_TO_FULL_LENGTH)* + restart: false + unit: STRING + - defaultValue: '256' + divisibilityFactor: 1 + mode: readwrite + name: stored_program_cache + optional: '[16-524288]' + restart: false + unit: INT + - defaultValue: '1' + divisibilityFactor: 1 + mode: readwrite + name: sync_binlog + optional: .* + restart: true + unit: INT + - defaultValue: '10000' + divisibilityFactor: 1 + mode: readwrite + name: sync_master_info + optional: '[0-18446744073709551615]' + restart: false + unit: INT + - defaultValue: '1' + divisibilityFactor: 1 + mode: readwrite + name: sync_relay_log + optional: .* + restart: true + unit: INT + - defaultValue: '10000' + divisibilityFactor: 1 + mode: readwrite + name: sync_relay_log_info + optional: '[0-18446744073709551615]' + restart: false + unit: INT + - defaultValue: '{LEAST(DBInstanceClassMemory/1073741824*512, 2048)}' + divisibilityFactor: 1 + mode: readwrite + name: table_definition_cache + optional: '[400-524288]' + restart: false + unit: INT + - defaultValue: '{LEAST(DBInstanceClassMemory/1073741824*512, 8192)}' + divisibilityFactor: 1 + mode: readwrite + name: table_open_cache + optional: '[1-524288]' + restart: false + unit: INT + - defaultValue: '16' + divisibilityFactor: 1 + mode: readwrite + name: table_open_cache_instances + optional: '[1-64]' + restart: true + unit: INT + - defaultValue: '1073741824' + divisibilityFactor: 1 + mode: readwrite + name: temptable_max_ram + optional: '[2097152-107374182400]' + restart: false + unit: INT + - defaultValue: '100' + divisibilityFactor: 1 + mode: readwrite + name: thread_cache_size + optional: '[0-16384]' + restart: false + unit: INT + - defaultValue: '262144' + divisibilityFactor: 1024 + mode: readwrite + name: thread_stack + optional: '[131072-2147483647]' + restart: true + unit: INT + - defaultValue: TLSv1,TLSv1.1,TLSv1.2 + divisibilityFactor: 0 + mode: readwrite + name: tls_version + optional: '[TLSv1,TLSv1.1,TLSv1.2|TLSv1,TLSv1.1|TLSv1.2]' + restart: true + unit: STRING + - defaultValue: '2097152' + divisibilityFactor: 1 + mode: readwrite + name: tmp_table_size + optional: '[262144-134217728]' + restart: false + unit: INT + - defaultValue: '8192' + divisibilityFactor: 1024 + mode: readwrite + name: transaction_alloc_block_size + optional: '[1024-131072]' + restart: false + unit: INT + - defaultValue: REPEATABLE-READ + divisibilityFactor: 0 + mode: readwrite + name: transaction_isolation + optional: '[READ-UNCOMMITTED|READ-COMMITTED|REPEATABLE-READ|SERIALIZABLE]' + restart: false + unit: STRING + - defaultValue: '4096' + divisibilityFactor: 1024 + mode: readwrite + name: transaction_prealloc_size + optional: '[1024-131072]' + restart: false + 
unit: INT + - defaultValue: XXHASH64 + divisibilityFactor: 1 + mode: readwrite + name: transaction_write_set_extraction + optional: '[OFF|MURMUR32|XXHASH64]' + restart: false + unit: STRING + - defaultValue: 'YES' + divisibilityFactor: 0 + mode: readwrite + name: updatable_views_with_limit + optional: '[YES|NO]' + restart: false + unit: STRING + - defaultValue: '28800' + divisibilityFactor: 1 + mode: readwrite + name: wait_timeout + optional: '[1-31536000]' + restart: false + unit: INT diff --git a/charts/polardbx-operator/templates/webhook/admission-webhook-configuration.yaml b/charts/polardbx-operator/templates/webhook/admission-webhook-configuration.yaml index 37dcb40..a2e0f35 100644 --- a/charts/polardbx-operator/templates/webhook/admission-webhook-configuration.yaml +++ b/charts/polardbx-operator/templates/webhook/admission-webhook-configuration.yaml @@ -63,6 +63,25 @@ webhooks: resources: - polardbxparameters scope: "Namespaced" +- admissionReviewVersions: + - "v1" + clientConfig: + service: + name: kubernetes + namespace: default + path: /apis/admission.polardbx.aliyun.com/v1/validate-polardbx-aliyun-com-v1-polardbxbackup + name: "polardbxbackup-validate.polardbx.aliyun.com" + sideEffects: None + rules: + - apiGroups: + - polardbx.aliyun.com + apiVersions: + - v1 + operations: + - CREATE + resources: + - polardbxbackups + scope: "Namespaced" --- apiVersion: admissionregistration.k8s.io/v1 kind: MutatingWebhookConfiguration diff --git a/charts/polardbx-operator/values.yaml b/charts/polardbx-operator/values.yaml index 2a052de..dcb2a1d 100644 --- a/charts/polardbx-operator/values.yaml +++ b/charts/polardbx-operator/values.yaml @@ -11,9 +11,10 @@ images: probeProxy: probe-proxy polardbxExporter: polardbx-exporter polardbxInit: polardbx-init + polardbxJob: polardbx-job # Default image tag. Use app version if not specified or 'latest' if useLatestImage is true. -imageTag: +imageTag: v1.4.0-beta # Uses the latest images for operator components. useLatestImage: false @@ -28,9 +29,9 @@ imagePullSecrets: clusterDefaults: version: latest images: - galaxysql: galaxysql - galaxyengine: galaxyengine - galaxycdc: galaxycdc + galaxysql: polardbx-sql + galaxyengine: polardbx-engine + galaxycdc: polardbx-cdc # Configuration of Kubernetes hosts. 
node: @@ -99,7 +100,7 @@ hostPathFileService: sinks: - name: default type: oss - endpoint: xxx + endpoint: xxx accessKey: xxx accessSecret: xxxxx bucket: xxx diff --git a/cmd/polardbx-filestream-cli/main.go b/cmd/polardbx-filestream-cli/main.go index d44f555..55bc88e 100644 --- a/cmd/polardbx-filestream-cli/main.go +++ b/cmd/polardbx-filestream-cli/main.go @@ -137,6 +137,11 @@ func main() { if err != nil { printErrAndExit(err, metadata) } + } else if strings.HasPrefix(strings.ToLower(action), "list") { + _, err := client.List(os.Stdout, metadata) + if err != nil { + printErrAndExit(err, metadata) + } } else { printErrAndExit(errors.New("invalid action"), metadata) } diff --git a/cmd/polardbx-hpfs/main.go b/cmd/polardbx-hpfs/main.go index dfeea49..020756e 100644 --- a/cmd/polardbx-hpfs/main.go +++ b/cmd/polardbx-hpfs/main.go @@ -21,6 +21,8 @@ import ( "flag" "fmt" "github.com/alibaba/polardbx-operator/pkg/hpfs" + "github.com/alibaba/polardbx-operator/pkg/hpfs/backupbinlog" + "github.com/alibaba/polardbx-operator/pkg/hpfs/config" "github.com/alibaba/polardbx-operator/pkg/hpfs/discovery" "github.com/alibaba/polardbx-operator/pkg/hpfs/filestream" "github.com/alibaba/polardbx-operator/pkg/hpfs/local" @@ -30,6 +32,7 @@ import ( "os" "strconv" "strings" + "time" "sigs.k8s.io/controller-runtime/pkg/log/zap" @@ -192,7 +195,7 @@ func startHpfs() { func startFileStreamServer() { go func() { log.Info("Start filestream server") - fileServer := filestream.NewFileServer("0.0.0.0", filestreamServerPort, filestreamRootPath, filestream.GlobalFlowControl) + fileServer := filestream.NewFileServer("", filestreamServerPort, filestreamRootPath, filestream.GlobalFlowControl) err := fileServer.Start() if err != nil { log.Error(err, "Failed to start file server") @@ -201,6 +204,21 @@ func startFileStreamServer() { }() } +func startLoadConfig() { + config.InitConfig() + go func() { + for { + time.Sleep(70 * time.Second) + config.ReloadConfig() + } + }() +} + +func startAllWatchers() { + backupbinlog.SetLocalFilestreamSeverPort(filestreamServerPort) + backupbinlog.StartAllWatchers() +} + func main() { // Grab the file lock. if len(lockFile) > 0 { @@ -223,6 +241,10 @@ func main() { }) flowControl.Start() filestream.GlobalFlowControl = flowControl + // Initialize config from the configmap and reload it periodically. + startLoadConfig() + // Start the periodic binlog-backup watchers. + startAllWatchers() // start file stream server startFileStreamServer() // Start hpfs.
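The `startLoadConfig` helper above pairs an initial `config.InitConfig()` with a goroutine that calls `config.ReloadConfig()` every 70 seconds for the life of the process. The following is a minimal, self-contained sketch of the same pattern, not the patch's code: `initConfig` and `reloadConfig` are hypothetical stand-ins for the `pkg/hpfs/config` functions, and the ticker-plus-context shape is an illustration of how the loop can be made stoppable.

```go
package main

import (
	"context"
	"fmt"
	"time"
)

// initConfig and reloadConfig are hypothetical stand-ins for
// config.InitConfig and config.ReloadConfig from pkg/hpfs/config.
func initConfig() error   { fmt.Println("config loaded"); return nil }
func reloadConfig() error { fmt.Println("config reloaded"); return nil }

// startLoadConfig loads the config once, then reloads it on a fixed
// period (70s matches the patch). The context makes the loop stoppable;
// the patch's version simply runs until the process exits.
func startLoadConfig(ctx context.Context, period time.Duration) error {
	if err := initConfig(); err != nil {
		return err
	}
	go func() {
		ticker := time.NewTicker(period)
		defer ticker.Stop()
		for {
			select {
			case <-ctx.Done():
				return
			case <-ticker.C:
				if err := reloadConfig(); err != nil {
					// Keep watching; the next tick retries.
					fmt.Println("reload failed:", err)
				}
			}
		}
	}()
	return nil
}

func main() {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()
	if err := startLoadConfig(ctx, 70*time.Second); err != nil {
		panic(err)
	}
	time.Sleep(time.Second) // let the demo run briefly before exiting
}
```

A cancellable context keeps the reload cadence of the patch while letting shutdown paths (or tests) stop the goroutine cleanly instead of leaking it.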
diff --git a/cmd/polardbx-job/main.go b/cmd/polardbx-job/main.go new file mode 100644 index 0000000..0a4c9a9 --- /dev/null +++ b/cmd/polardbx-job/main.go @@ -0,0 +1,76 @@ +package main + +import ( + "flag" + "fmt" + "github.com/alibaba/polardbx-operator/pkg/hpfs/backupbinlog" + "github.com/alibaba/polardbx-operator/pkg/pitr" + "os" + "os/signal" + "sigs.k8s.io/controller-runtime/pkg/log/zap" + "syscall" +) + +type JobType string + +const ( + PitrHeartbeatJobType JobType = "PitrHeartbeat" + PitrPrepareBinlogs JobType = "PitrPrepareBinlogs" +) + +var ( + jobType string +) + +func init() { + flag.StringVar(&jobType, "job-type", "PitrHeartbeat", "the job type") + flag.Parse() +} + +func main() { + log := zap.New(zap.UseDevMode(true)) + log.Info(fmt.Sprintf("jobType=%s", jobType)) + ch := make(chan os.Signal, 1) + signal.Notify(ch, syscall.SIGHUP, syscall.SIGTERM, syscall.SIGINT) + exitActions := make([]func(), 0) + waitActions := make([]func(), 0) + switch JobType(jobType) { + case PitrHeartbeatJobType: + heartbeat := backupbinlog.NewHeatBeat() + heartbeat.Do() + exitActions = append(exitActions, func() { + heartbeat.Cancel() + }) + waitActions = append(waitActions, func() { + heartbeat.Wait() + }) + case PitrPrepareBinlogs: + waitGroup := pitr.RunAsync() + exitActions = append(exitActions, func() { + pitr.Exit() + }) + waitActions = append(waitActions, func() { + waitGroup.Wait() + }) + default: + panic("invalid job type") + } + + go func() { + select { + case <-ch: + defer os.Exit(1) + for _, exitAction := range exitActions { + exitAction() + } + for _, waitAction := range waitActions { + waitAction() + } + } + }() + + for _, waitAction := range waitActions { + waitAction() + } + +} diff --git a/docs/en/index.md b/docs/en/index.md deleted file mode 100644 index 6d3f665..0000000 --- a/docs/en/index.md +++ /dev/null @@ -1 +0,0 @@ -WIP \ No newline at end of file diff --git a/go.mod b/go.mod index 7a2b6fb..9a7a205 100644 --- a/go.mod +++ b/go.mod @@ -48,9 +48,13 @@ require ( ) require ( + github.com/eapache/queue v1.1.0 github.com/itchyny/timefmt-go v0.1.4 github.com/onsi/ginkgo v1.16.5 + github.com/pkg/errors v0.9.1 github.com/prometheus/common v0.26.0 + github.com/robfig/cron v1.2.0 + go.uber.org/atomic v1.7.0 golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4 golang.org/x/text v0.7.0 gomodules.xyz/jsonpatch/v2 v2.2.0 @@ -90,7 +94,6 @@ require ( github.com/ncw/directio v1.0.5 // indirect github.com/nxadm/tail v1.4.8 // indirect github.com/opencontainers/go-digest v1.0.0 // indirect - github.com/pkg/errors v0.9.1 // indirect github.com/pmezard/go-difflib v1.0.0 // indirect github.com/power-devops/perfstat v0.0.0-20210106213030-5aafc221ea8c // indirect github.com/prometheus/procfs v0.6.0 // indirect @@ -99,7 +102,6 @@ require ( github.com/tklauser/go-sysconf v0.3.9 // indirect github.com/tklauser/numcpus v0.3.0 // indirect github.com/yusufpapurcu/wmi v1.2.2 // indirect - go.uber.org/atomic v1.7.0 // indirect go.uber.org/multierr v1.6.0 // indirect golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4 // indirect golang.org/x/net v0.7.0 // indirect diff --git a/go.sum b/go.sum index 3924882..747c2ce 100644 --- a/go.sum +++ b/go.sum @@ -176,6 +176,7 @@ github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25Kn github.com/eapache/go-resiliency v1.1.0/go.mod h1:kFI+JgMyC7bLPUVY133qvEBtVayf5mFgVsvEsIPBvNs= github.com/eapache/go-resiliency v1.2.0/go.mod h1:kFI+JgMyC7bLPUVY133qvEBtVayf5mFgVsvEsIPBvNs= github.com/eapache/go-xerial-snappy
v0.0.0-20180814174437-776d5712da21/go.mod h1:+020luEh2TKB4/GOp8oxxtq0Daoen/Cii55CzbTV6DU= +github.com/eapache/queue v1.1.0 h1:YOEu7KNc61ntiQlcEeUIoDTJ2o8mQznoNvUhiigpIqc= github.com/eapache/queue v1.1.0/go.mod h1:6eCeP0CKFpHLu8blIFXhExK/dRa7WDZfr6jVFPTqq+I= github.com/eclipse/paho.mqtt.golang v1.3.0/go.mod h1:eTzb4gxwwyWpqBUHGQZ4ABAV7+Jgm1PklsYT/eo8Hcc= github.com/edsrzf/mmap-go v1.0.0/go.mod h1:YO35OhQPt3KJa3ryjFM5Bs14WD66h8eGKpfaBNrHW5M= @@ -703,6 +704,8 @@ github.com/rcrowley/go-metrics v0.0.0-20200313005456-10cdbea86bc0/go.mod h1:bCqn github.com/remyoudompheng/bigfft v0.0.0-20200410134404-eec4a21b6bb0 h1:OdAsTTz6OkFY5QxjkYwrChwuRruF69c169dPK26NUlk= github.com/remyoudompheng/bigfft v0.0.0-20200410134404-eec4a21b6bb0/go.mod h1:qqbHyh8v60DhA7CoWK5oRCqLrMHRGoxYCSS9EjAz6Eo= github.com/rjeczalik/notify v0.9.2/go.mod h1:aErll2f0sUX9PXZnVNyeiObbmTlk5jnMoCa4QEjJeqM= +github.com/robfig/cron v1.2.0 h1:ZjScXvvxeQ63Dbyxy76Fj3AT3Ut0aKsyd2/tl3DTMuQ= +github.com/robfig/cron v1.2.0/go.mod h1:JGuDeoQd7Z6yL4zQhZ3OPEVHB7fL6Ka6skscFHfmt2k= github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg= github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= github.com/rs/cors v1.7.0/go.mod h1:gFx+x8UowdsKA9AchylcLynDq+nNFfI8FkUZdN/jGCU= diff --git a/hack/make-rules/lib/build_env.py b/hack/make-rules/lib/build_env.py index 5141dd0..7b0f426 100644 --- a/hack/make-rules/lib/build_env.py +++ b/hack/make-rules/lib/build_env.py @@ -107,6 +107,7 @@ def _parent_path(path: str, parent_idx: int) -> str: golang=BuildGolangEnv(goflags=''), targets=[ BuildTarget(target='cmd/polardbx-hpfs', binary='polardbx-hpfs', image='polardbx-hpfs', image_build_path=None), + BuildTarget(target='cmd/polardbx-job', binary='polardbx-job', image='polardbx-job', image_build_path=None), BuildTarget(target='cmd/polardbx-init', binary='polardbx-init', image='polardbx-init', image_build_path=None), BuildTarget(target='cmd/polardbx-exporter', binary='polardbx-exporter', image='polardbx-exporter',image_build_path=None), BuildTarget(target='cmd/probe-proxy', binary='probe-proxy', image='probe-proxy', image_build_path=None), diff --git a/hack/manifest.sh b/hack/manifest.sh index b033cef..f4f263a 100755 --- a/hack/manifest.sh +++ b/hack/manifest.sh @@ -38,7 +38,7 @@ if [ ! -n "$2" ]; then exit fi -TARGETS="xstore-tools polardbx-operator probe-proxy polardbx-exporter polardbx-init polardbx-hpfs polardbx-logstash" +TARGETS="xstore-tools polardbx-operator probe-proxy polardbx-exporter polardbx-init polardbx-hpfs polardbx-job polardbx-logstash" # Build images make build REPO="${REGISTRY}" ARCH=arm64 TAG="${TAG}"-arm64 diff --git a/pkg/binlogtool/algo/locate_heartbeat.go b/pkg/binlogtool/algo/locate_heartbeat.go index a5f343b..404ba49 100644 --- a/pkg/binlogtool/algo/locate_heartbeat.go +++ b/pkg/binlogtool/algo/locate_heartbeat.go @@ -1,5 +1,3 @@ -//go:build polardbx - /* Copyright 2022 Alibaba Group Holding Limited. diff --git a/pkg/binlogtool/algo/locate_heartbeat_test.go b/pkg/binlogtool/algo/locate_heartbeat_test.go index 1ab3088..30097cd 100644 --- a/pkg/binlogtool/algo/locate_heartbeat_test.go +++ b/pkg/binlogtool/algo/locate_heartbeat_test.go @@ -1,5 +1,3 @@ -//go:build polardbx - /* Copyright 2022 Alibaba Group Holding Limited. @@ -19,6 +17,7 @@ limitations under the License.
package algo import ( + "github.com/alibaba/polardbx-operator/pkg/binlogtool/binlog" "os" "testing" diff --git a/pkg/binlogtool/algo/seek_consistent_point.go b/pkg/binlogtool/algo/seek_consistent_point.go index abaf893..932ace8 100644 --- a/pkg/binlogtool/algo/seek_consistent_point.go +++ b/pkg/binlogtool/algo/seek_consistent_point.go @@ -1,5 +1,3 @@ -//go:build polardbx - /* Copyright 2022 Alibaba Group Holding Limited. @@ -19,6 +17,8 @@ limitations under the License. package algo import ( + "bytes" + "encoding/binary" "fmt" "github.com/alibaba/polardbx-operator/pkg/binlogtool/binlog" "sync" @@ -258,3 +258,36 @@ func NewSeekConsistentPoint(txEventParsers map[string]tx.TransactionEventParser, heartbeatTxid: heartbeatTxid, } } + +func SerializeCpResult(recoverableTxs []uint64, borders map[string]binlog.EventOffset) ([]byte, error) { + byteBuf := &bytes.Buffer{} + if err := binary.Write(byteBuf, binary.LittleEndian, uint32(len(recoverableTxs))); err != nil { + return nil, err + } + for _, txid := range recoverableTxs { + if err := binary.Write(byteBuf, binary.LittleEndian, txid); err != nil { + return nil, err + } + } + if err := binary.Write(byteBuf, binary.LittleEndian, uint16(len(borders))); err != nil { + return nil, err + } + for streamName, offset := range borders { + if err := binary.Write(byteBuf, binary.LittleEndian, uint8(len(streamName))); err != nil { + return nil, err + } + if _, err := byteBuf.Write([]byte(streamName)); err != nil { + return nil, err + } + if err := binary.Write(byteBuf, binary.LittleEndian, uint8(len(offset.File))); err != nil { + return nil, err + } + if _, err := byteBuf.Write([]byte(offset.File)); err != nil { + return nil, err + } + if err := binary.Write(byteBuf, binary.LittleEndian, offset.Offset); err != nil { + return nil, err + } + } + return byteBuf.Bytes(), nil +} diff --git a/pkg/binlogtool/binlog/write.go b/pkg/binlogtool/binlog/write.go index a7f7256..3bba4b6 100644 --- a/pkg/binlogtool/binlog/write.go +++ b/pkg/binlogtool/binlog/write.go @@ -58,10 +58,9 @@ func (b *rawLogEventWriter) Write(ev event.LogEvent) error { algo := data[len(data)-1] b.binlogChecksumAlgorithm = spec.BinlogChecksumAlgorithm(algo) } - b.parseFirstEvent = false } - if b.binlogChecksumAlgorithm == spec.BinlogChecksumAlgorithmCrc32 { + if b.binlogChecksumAlgorithm == spec.BinlogChecksumAlgorithmCrc32 || b.parseFirstEvent { // Configure and write checksum var checksum uint32 checksum = crc32.Update(checksum, crc32.IEEETable, headerBytes) @@ -69,6 +68,8 @@ func (b *rawLogEventWriter) Write(ev event.LogEvent) error { binary.Write(b.w, binary.LittleEndian, checksum) } + b.parseFirstEvent = false + return nil } diff --git a/pkg/binlogtool/cmd/seekcp.go b/pkg/binlogtool/cmd/seekcp.go index 3b13b64..2b9c885 100644 --- a/pkg/binlogtool/cmd/seekcp.go +++ b/pkg/binlogtool/cmd/seekcp.go @@ -21,7 +21,6 @@ package cmd import ( "bufio" "compress/gzip" - "encoding/binary" "errors" "fmt" "io" @@ -256,37 +255,11 @@ func writeRecoverableConsistentPoint(recoverableTxs []uint64, borders map[string return err } defer gw.Close() - - if err := binary.Write(gw, binary.LittleEndian, uint32(len(recoverableTxs))); err != nil { - return err - } - for _, txid := range recoverableTxs { - if err := binary.Write(gw, binary.LittleEndian, txid); err != nil { - return err - } - } - - if err := binary.Write(gw, binary.LittleEndian, uint16(len(borders))); err != nil { + bytes, err := algo.SerializeCpResult(recoverableTxs, borders) + if err != nil { return err } - for streamName, offset := range borders { - if 
err := binary.Write(gw, binary.LittleEndian, uint8(len(streamName))); err != nil { - return err - } - if _, err := gw.Write([]byte(streamName)); err != nil { - return err - } - if err := binary.Write(gw, binary.LittleEndian, uint8(len(offset.File))); err != nil { - return err - } - if _, err := gw.Write([]byte(offset.File)); err != nil { - return err - } - if err := binary.Write(gw, binary.LittleEndian, offset.Offset); err != nil { - return err - } - } - + if _, err := gw.Write(bytes); err != nil { return err } return nil } diff --git a/pkg/binlogtool/system/system.go b/pkg/binlogtool/system/system.go index 0b11a73..26fc80c 100644 --- a/pkg/binlogtool/system/system.go +++ b/pkg/binlogtool/system/system.go @@ -1,5 +1,3 @@ -//go:build polardbx - /* Copyright 2022 Alibaba Group Holding Limited. diff --git a/pkg/binlogtool/tx/binary.go b/pkg/binlogtool/tx/binary.go index 8e8f74b..1d88869 100644 --- a/pkg/binlogtool/tx/binary.go +++ b/pkg/binlogtool/tx/binary.go @@ -1,5 +1,3 @@ -//go:build polardbx - /* Copyright 2022 Alibaba Group Holding Limited. diff --git a/pkg/binlogtool/tx/event.go b/pkg/binlogtool/tx/event.go index d06c64b..a7f496b 100644 --- a/pkg/binlogtool/tx/event.go +++ b/pkg/binlogtool/tx/event.go @@ -1,5 +1,3 @@ -//go:build polardbx - /* Copyright 2022 Alibaba Group Holding Limited. diff --git a/pkg/binlogtool/tx/parser.go b/pkg/binlogtool/tx/parser.go index ed305a2..e3d1e81 100644 --- a/pkg/binlogtool/tx/parser.go +++ b/pkg/binlogtool/tx/parser.go @@ -1,5 +1,3 @@ -//go:build polardbx - /* Copyright 2022 Alibaba Group Holding Limited. @@ -221,6 +219,9 @@ func (s *transactionEventParser) processOne(h EventHandler) error { } case spec.XID_EVENT: + if txLogTableWriteRowsEvent == nil { + return nil + } beginEv, prepareEv, commitEv := &Event{ Raw: beginQueryEvent, File: beginQueryEventOffset.File, @@ -264,6 +265,7 @@ func (s *transactionEventParser) processOne(h EventHandler) error { if bytes.Equal(queryEvent.Query, []byte("BEGIN")) { beginQueryEvent = logEvent beginQueryEventOffset = offset + txLogTableWriteRowsEvent = nil } else { xid, t, err := ParseXIDAndType(queryEvent.Query) if err != nil { diff --git a/pkg/binlogtool/tx/parser_test.go b/pkg/binlogtool/tx/parser_test.go index 5b856a9..eb292a9 100644 --- a/pkg/binlogtool/tx/parser_test.go +++ b/pkg/binlogtool/tx/parser_test.go @@ -1,5 +1,3 @@ -//go:build polardbx - /* Copyright 2022 Alibaba Group Holding Limited. @@ -21,6 +19,7 @@ package tx import ( "bufio" "fmt" + "github.com/alibaba/polardbx-operator/pkg/binlogtool/binlog" "os" "testing" ) diff --git a/pkg/binlogtool/tx/xa.go b/pkg/binlogtool/tx/xa.go index 444f4f1..1a07737 100644 --- a/pkg/binlogtool/tx/xa.go +++ b/pkg/binlogtool/tx/xa.go @@ -1,5 +1,3 @@ -//go:build polardbx - /* Copyright 2022 Alibaba Group Holding Limited.
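The `SerializeCpResult` helper introduced in pkg/binlogtool/algo/seek_consistent_point.go above fixes the on-disk layout of a consistent point: a little-endian uint32 count of recoverable txids, the txids as uint64 values, a uint16 border count, and then, per stream, a uint8-length-prefixed stream name and binlog file name followed by the offset. Below is a hedged sketch of the matching reader side; it is not part of the patch, and the local `eventOffset` type (with a uint64 `Offset`, inferred from how `binary.Write` is applied to `offset.Offset`) stands in for `binlog.EventOffset` to keep the example self-contained.

```go
package main

import (
	"bytes"
	"encoding/binary"
	"fmt"
	"io"
)

// eventOffset is a local stand-in for binlog.EventOffset; the field
// types are assumptions based on how SerializeCpResult writes them.
type eventOffset struct {
	File   string
	Offset uint64
}

// deserializeCpResult decodes the layout written by SerializeCpResult.
func deserializeCpResult(r io.Reader) ([]uint64, map[string]eventOffset, error) {
	var txCount uint32
	if err := binary.Read(r, binary.LittleEndian, &txCount); err != nil {
		return nil, nil, err
	}
	txs := make([]uint64, txCount)
	for i := range txs {
		if err := binary.Read(r, binary.LittleEndian, &txs[i]); err != nil {
			return nil, nil, err
		}
	}
	var borderCount uint16
	if err := binary.Read(r, binary.LittleEndian, &borderCount); err != nil {
		return nil, nil, err
	}
	borders := make(map[string]eventOffset, borderCount)
	for i := 0; i < int(borderCount); i++ {
		name, err := readLenPrefixed(r)
		if err != nil {
			return nil, nil, err
		}
		file, err := readLenPrefixed(r)
		if err != nil {
			return nil, nil, err
		}
		var off uint64
		if err := binary.Read(r, binary.LittleEndian, &off); err != nil {
			return nil, nil, err
		}
		borders[name] = eventOffset{File: file, Offset: off}
	}
	return txs, borders, nil
}

// readLenPrefixed reads a uint8 length followed by that many bytes.
func readLenPrefixed(r io.Reader) (string, error) {
	var n uint8
	if err := binary.Read(r, binary.LittleEndian, &n); err != nil {
		return "", err
	}
	buf := make([]byte, n)
	if _, err := io.ReadFull(r, buf); err != nil {
		return "", err
	}
	return string(buf), nil
}

func main() {
	// Round-trip a tiny hand-built example: one txid, one stream border.
	buf := &bytes.Buffer{}
	binary.Write(buf, binary.LittleEndian, uint32(1))
	binary.Write(buf, binary.LittleEndian, uint64(42))
	binary.Write(buf, binary.LittleEndian, uint16(1))
	buf.WriteByte(2) // len("s1")
	buf.WriteString("s1")
	buf.WriteByte(10) // len("binlog.001")
	buf.WriteString("binlog.001")
	binary.Write(buf, binary.LittleEndian, uint64(4))
	txs, borders, err := deserializeCpResult(buf)
	fmt.Println(txs, borders, err)
}
```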
diff --git a/pkg/binlogtool/utils/binary.go b/pkg/binlogtool/utils/binary.go index 182bced..f36efc5 100644 --- a/pkg/binlogtool/utils/binary.go +++ b/pkg/binlogtool/utils/binary.go @@ -24,15 +24,15 @@ import ( "golang.org/x/exp/constraints" ) -func forceConvert[T constraints.Integer | constraints.Float, R constraints.Integer | constraints.Float](x T) R { - return *(*R)(unsafe.Pointer(&x)) +func forceConvert[T constraints.Integer, R constraints.Integer](x T) R { + return R(x) } func init() { _ = endian.Native } -func ReadNumber[T constraints.Integer | constraints.Float](order binary.ByteOrder, data *T, bs []byte) { +func ReadNumber[T constraints.Integer](order binary.ByteOrder, data *T, bs []byte) { size := int(unsafe.Sizeof(T(0))) switch size { diff --git a/pkg/binlogtool/utils/binary_little.go b/pkg/binlogtool/utils/binary_little.go index 59bb55a..5e42372 100644 --- a/pkg/binlogtool/utils/binary_little.go +++ b/pkg/binlogtool/utils/binary_little.go @@ -37,6 +37,6 @@ func ReadNumberLittleEndianHack[T constraints.Integer | constraints.Float](data *data = (*(*[]T)(unsafe.Pointer(&bs)))[0] } -func ReadNumberBigEndianHack[T constraints.Integer | constraints.Float](data *T, bs []byte) { +func ReadNumberBigEndianHack[T constraints.Integer](data *T, bs []byte) { ReadNumber(binary.BigEndian, data, bs) } diff --git a/pkg/binlogtool/utils/binary_others.go b/pkg/binlogtool/utils/binary_others.go index 4c75ccd..1f43462 100644 --- a/pkg/binlogtool/utils/binary_others.go +++ b/pkg/binlogtool/utils/binary_others.go @@ -24,10 +24,10 @@ import ( "golang.org/x/exp/constraints" ) -func ReadNumberLittleEndianHack[T constraints.Integer | constraints.Float](data *T, bs []byte) { +func ReadNumberLittleEndianHack[T constraints.Integer](data *T, bs []byte) { ReadNumber(binary.BigEndian, data, bs) } -func ReadNumberBigEndianHack[T constraints.Integer | constraints.Float](data *T, bs []byte) { +func ReadNumberBigEndianHack[T constraints.Integer](data *T, bs []byte) { ReadNumber(binary.BigEndian, data, bs) } diff --git a/pkg/featuregate/featuregates.go b/pkg/featuregate/featuregates.go index 5bfe81a..b9271cc 100644 --- a/pkg/featuregate/featuregates.go +++ b/pkg/featuregate/featuregates.go @@ -67,7 +67,9 @@ var ( EnableGalaxyClusterMode = declareFeatureGate("EnableGalaxyCluster", true, false, "Enable cluster mode on galaxy store engine.") EnforceQoSGuaranteed = declareFeatureGate("EnforceQoSGuaranteed", false, false, "Enforce pod's QoS to Guaranteed.") ResetTrustIpsBeforeStart = declareFeatureGate("ResetTrustIpsBeforeStart", false, true, "Reset trust ips in CNs to avoid security problems.") - EnableXStoreWithPodService = declareFeatureGate("EnableXStoreWithPodService", true, false, "Use services for pods in xstore.") + EnableXStoreWithPodService = declareFeatureGate("EnableXStoreWithPodService", true, true, "Use services for pods in xstore.") + EnforceClusterIpXStorePod = declareFeatureGate("EnforceClusterIpXStorePod", false, false, "Use cluster ip services for pods in old xstore.") + EnableAutoRebuildFollower = declareFeatureGate("EnableAutoRebuildFollower", false, false, "Enable creating rebuild task for follower if it is unhealthy.") ) var extraFeatureGates []string diff --git a/pkg/hpfs/backupbinlog/action.go b/pkg/hpfs/backupbinlog/action.go new file mode 100644 index 0000000..b73a46e --- /dev/null +++ b/pkg/hpfs/backupbinlog/action.go @@ -0,0 +1,256 @@ +package backupbinlog + +import ( + "bufio" + "bytes" + "crypto/sha256" + "encoding/hex" + "encoding/json" + "fmt" + 
"github.com/alibaba/polardbx-operator/pkg/binlogtool/binlog" + binlogEvent "github.com/alibaba/polardbx-operator/pkg/binlogtool/binlog/event" + "github.com/alibaba/polardbx-operator/pkg/binlogtool/binlog/spec" + . "github.com/alibaba/polardbx-operator/pkg/hpfs/common" + "github.com/alibaba/polardbx-operator/pkg/hpfs/config" + "github.com/alibaba/polardbx-operator/pkg/hpfs/filestream" + "github.com/google/uuid" + "io" + "k8s.io/apimachinery/pkg/util/net" + "os" + "strconv" + "sync" + "sync/atomic" + "time" +) + +const ( + BufferSizeBytes = 8 << 10 // 8KB + FilestreamIp = "127.0.0.1" + BinlogFilepathFormat = "%s/%s/%s/%s/%s/%s/%s/%s/%s/binlog-file/%s" + BinlogMetaFilepathFormat = "%s/%s/%s/%s/%s/%s/%s/%s/%s/binlog-meta/%s" + BatchSize = 1000 +) + +type Callback func(watcher *Watcher, file *BinlogFile) bool + +var filestreamPort int + +func SetLocalFilestreamSeverPort(port int) { + filestreamPort = port +} + +//BeforeUpload print some info. set RequestId +func BeforeUpload(w *Watcher, binlogFile *BinlogFile) bool { + w.uploadLogger = w.logger.WithValues("trace", uuid.New().String(), "filepath", binlogFile.Filepath) + infoJson, _ := json.Marshal(binlogFile) + fileInfo, err := os.Stat(binlogFile.Filepath) + if err != nil { + w.uploadLogger.Error(err, "failed to stat file") + return false + } + binlogFile.FileLastModifiedAt = fileInfo.ModTime() + binlogFile.Size = fileInfo.Size() + w.uploadLogger.Info("BeforeUpload", "binlogFile", string(infoJson)) + return true +} + +//AfterUpload print some info. set RequestId +func AfterUpload(w *Watcher, binlogFile *BinlogFile) bool { + w.uploadLogger = nil + return true +} + +//FetchStartIndex parse start index from the beginning of the binlog file +func FetchStartIndex(w *Watcher, binlogFile *BinlogFile) bool { + logger := w.uploadLogger.WithValues("action", "FetchStartIndex") + logger.Info("begin") + f, err := os.OpenFile(binlogFile.Filepath, os.O_RDONLY, os.ModePerm) + if err != nil { + logger.Error(err, "failed to open file") + return false + } + defer f.Close() + headBytes, err := ReadBytes(f, BufferSizeBytes) + if len(headBytes) > 0 { + binlogFile.StartIndex, binlogFile.EventTimestamp, err = GetBinlogFileBeginInfo(headBytes, binlogFile.Filename, binlogFile.BinlogChecksum) + if err != nil { + logger.Error(err, "failed to get binlog file begin info") + return false + } + logger.Info("success", "startIndex", binlogFile.StartIndex) + return true + } + logger.Error(err, "failed") + return false +} + +func GetBinlogFileBeginInfo(beginBytes []byte, filename string, binlogChecksum string) (uint64, uint64, error) { + opts := []binlog.LogEventScannerOption{ + binlog.WithBinlogFile(filename), + binlog.WithChecksumAlgorithm(binlogChecksum), + binlog.WithLogEventHeaderFilter(func(header binlogEvent.LogEventHeader) bool { + return header.EventTypeCode() == spec.PREVIOUS_CONSENSUS_INDEX_LOG_EVENT + }), + } + scanner, err := binlog.NewLogEventScanner(bufio.NewReader(bytes.NewReader(beginBytes)), opts...) + if err != nil { + return 0, 0, err + } + _, ev, err := scanner.Next() + if err != nil { + return 0, 0, err + } + consensusEvent, ok := ev.EventData().(*binlogEvent.PreviousConsensusIndexEvent) + if !ok { + return 0, 0, err + } + return consensusEvent.Index, uint64(ev.EventHeader().EventTimestamp()), nil +} + +// Upload read the file only once. do 1.compute sha256 2.get start consensus log index 3. upload the file finally. 
+func Upload(w *Watcher, binlogFile *BinlogFile) bool { + logger := w.uploadLogger.WithValues("action", "Upload") + if binlogFile.SinkType == config.SinkTypeNone { + w.uploadLogger.Info("skip") + return true + } + logger.Info("begin") + f, err := os.OpenFile(binlogFile.Filepath, os.O_RDONLY, os.ModePerm) + if err != nil { + logger.Error(err, "failed to open file") + return false + } + defer f.Close() + buf := make([]byte, BufferSizeBytes) + hash := sha256.New() + reader, writer := io.Pipe() + defer reader.Close() + defer writer.Close() + + var waitGroup sync.WaitGroup + var errValue atomic.Value + binlogFile.CreatedAt = time.Now() + binlogFile.UpdatedAt = binlogFile.CreatedAt + waitGroup.Add(1) + go func() { + defer reader.Close() + defer waitGroup.Done() + // stream the file content to the remote sink + err := uploadBinlogFile(reader, binlogFile) + if err != nil { + logger.Error(err, "failed to upload remote") + errValue.Store(err) + } + }() + + for { + cnt, err := f.Read(buf) + if err != nil { + if net.IsProbableEOF(err) { + writer.Close() + break + } + logger.Error(err, "failed to read file") + return false + } + if cnt > 0 { + writer.Write(buf[:cnt]) + hash.Write(buf[:cnt]) + } + } + waitGroup.Wait() + if errValue.Load() != nil { + return false + } + hashBytes := hash.Sum(nil) + binlogFile.Sha256 = hex.EncodeToString(hashBytes) + err = uploadBinlogFileMeta(binlogFile) + if err != nil { + logger.Error(err, "failed to upload binlog meta") + return false + } + logger.Info("success", "sha256", binlogFile.Sha256, "size", binlogFile.Size) + return true +} + +// uploadBinlogFile uploads the binlog content through the local filestream server +func uploadBinlogFile(reader io.Reader, binlogFile *BinlogFile) error { + // create a client to the local filestream server + client := filestream.NewFileClient(FilestreamIp, filestreamPort, nil) + action := filestream.UploadOss + if binlogFile.SinkType == config.SinkTypeSftp { + action = filestream.UploadSsh + } + //upload binlogfile + binlogFileMetadata := filestream.ActionMetadata{ + Action: filestream.Action(action), + Filepath: getBinlogFilepath(binlogFile), + RequestId: uuid.New().String(), + Sink: binlogFile.SinkName, + OssBufferSize: strconv.FormatInt(binlogFile.Size, 10), + } + uploadedLen, err := client.Upload(reader, binlogFileMetadata) + if err != nil { + return err + } + if uploadedLen != binlogFile.Size { + return fmt.Errorf("not the same len contentSize=%d, uploadSize=%d", binlogFile.Size, uploadedLen) + } + return nil +} + +func uploadBinlogFileMeta(binlogFile *BinlogFile) error { + //upload binlogfile meta + client := filestream.NewFileClient(FilestreamIp, filestreamPort, nil) + action := filestream.UploadOss + if binlogFile.SinkType == config.SinkTypeSftp { + action = filestream.UploadSsh + } + binlogMetaJsonBytes, _ := json.Marshal(binlogFile) + binlogMetaFileMetadata := filestream.ActionMetadata{ + Action: filestream.Action(action), + Filepath: getBinlogMetaFilepath(binlogFile), + RequestId: uuid.New().String(), + Sink: binlogFile.SinkName, + OssBufferSize: fmt.Sprintf("%d", len(binlogMetaJsonBytes)), + } + reader := bytes.NewReader(binlogMetaJsonBytes) + uploadedLen, err := client.Upload(reader, binlogMetaFileMetadata) + if err != nil { + return err + } + jsonByteLen := int64(len(binlogMetaJsonBytes)) + if uploadedLen != jsonByteLen { + return fmt.Errorf("not the same len contentSize=%d, uploadSize=%d", jsonByteLen, uploadedLen) + } + return nil +} + +func getBatchName(num int64) string { + times := num / BatchSize + return fmt.Sprintf("%d_%d", times*BatchSize, (times+1)*BatchSize) +} + +func getBinlogFilepath(binlogFile *BinlogFile)
string { + batchName := getBatchName(binlogFile.Num) + return fmt.Sprintf(BinlogFilepathFormat, config.GetBinlogStoragePathPrefix(), binlogFile.Namespace, binlogFile.PxcName, binlogFile.PxcUid, binlogFile.XStoreName, binlogFile.XStoreUid, binlogFile.PodName, binlogFile.Version, batchName, binlogFile.Filename) +} + +func getBinlogMetaFilepath(binlogFile *BinlogFile) string { + batchName := getBatchName(binlogFile.Num) + filename := binlogFile.Filename + ".txt" + return fmt.Sprintf(BinlogMetaFilepathFormat, config.GetBinlogStoragePathPrefix(), binlogFile.Namespace, binlogFile.PxcName, binlogFile.PxcUid, binlogFile.XStoreName, binlogFile.XStoreUid, binlogFile.PodName, binlogFile.Version, batchName, filename) +} + +// RecordUpload add an upload record into db +func RecordUpload(w *Watcher, binlogFile *BinlogFile) bool { + logger := w.uploadLogger.WithValues("action", "RecordUpload") + logger.Info("begin") + err := AddRecord(w.db, *binlogFile) + if err != nil { + logger.Error(err, "failed to record upload file") + //ignore the error + } + logger.Info("success") + return true +} diff --git a/pkg/hpfs/backupbinlog/dao.go b/pkg/hpfs/backupbinlog/dao.go new file mode 100644 index 0000000..4e51cd0 --- /dev/null +++ b/pkg/hpfs/backupbinlog/dao.go @@ -0,0 +1,84 @@ +package backupbinlog + +import ( + "database/sql" + _ "modernc.org/sqlite" +) + +const ( + TableName = "upload_record" + CreateTableSQL = `CREATE TABLE IF NOT EXISTS ` + TableName + ` ( + id BIGINT not null, + pxc_name varchar(200) not null, + pxc_uid varchar(200) not null, + xstore_name varchar(200) not null, + pod_name varchar(200) not null, + version varchar(200) not null, + filepath varchar(300) not null, + filename varchar(50) not null, + num BIGINT not null, + start_index BIGINT, + sink_name varchar(50) not null, + sink_type varchar(50) not null, + binlog_checksum varchar(50) not null, + hash_val varchar(200), + status INT DEFAULT 0 , + err_message TEXT, + file_mod_at datetime default CURRENT_TIMESTAMP, + created_at datetime default CURRENT_TIMESTAMP, + updated_at datetime default CURRENT_TIMESTAMP, + PRIMARY key (id) + )` + CreateIndexSQL = `CREATE INDEX IF NOT EXISTS num_idx on ` + TableName + `(num)` + ReplaceRecordSQL = "REPLACE INTO " + TableName + "(id,pxc_name,pxc_uid,xstore_name,pod_name,version,filepath,filename,num,start_index,hash_val,status,err_message,updated_at,sink_name,sink_type,binlog_checksum,file_mod_at) values(?,?,?,?,?,?,?,?,?,?,?,?,?,datetime('now'),?,?,?,?)" + SelectSQL = "SELECT id,pxc_name,pxc_uid,xstore_name,pod_name,version,filepath,filename,num,start_index,hash_val,status,err_message,created_at,updated_at,sink_name,sink_type,binlog_checksum,file_mod_at FROM " + TableName + " WHERE id=?" + // IdBitNumMask use the low bit num of binlog no. 
for record id + IdBitNumMask = 0x0ffff + SelectExpiredOneSQL = "" +) + +func GetDb(dbFile string) (*sql.DB, error) { + db, err := sql.Open("sqlite", dbFile) + if err != nil { + return nil, err + } + return db, nil +} + +func TryCreateTableOfUploadRecord(db *sql.DB) error { + _, err := db.Exec(CreateTableSQL) + if err != nil { + return err + } + _, err = db.Exec(CreateIndexSQL) + if err != nil { + return err + } + return nil +} + +func getRecordId(num int64) int64 { + return num & IdBitNumMask +} + +func AddRecord(db *sql.DB, binlogFile BinlogFile) error { + id := getRecordId(binlogFile.Num) + _, err := db.Exec(ReplaceRecordSQL, id, binlogFile.PxcName, binlogFile.PxcUid, binlogFile.XStoreName, binlogFile.PodName, binlogFile.Version, binlogFile.Filepath, binlogFile.Filename, binlogFile.Num, binlogFile.StartIndex, binlogFile.Sha256, binlogFile.Status, binlogFile.ErrMsg, binlogFile.SinkName, binlogFile.SinkType, binlogFile.BinlogChecksum, binlogFile.FileLastModifiedAt) + if err != nil { + return err + } + return nil +} + +func FindRecord(db *sql.DB, num int64) (*BinlogFile, error) { + binlogFile := BinlogFile{} + var id int64 + err := db.QueryRow(SelectSQL, num).Scan(&id, &binlogFile.PxcName, &binlogFile.PxcUid, &binlogFile.XStoreName, &binlogFile.PodName, &binlogFile.Version, &binlogFile.Filepath, &binlogFile.Filename, &binlogFile.Num, &binlogFile.StartIndex, &binlogFile.Sha256, &binlogFile.Status, &binlogFile.ErrMsg, &binlogFile.CreatedAt, &binlogFile.UpdatedAt, &binlogFile.SinkName, &binlogFile.SinkType, &binlogFile.BinlogChecksum, &binlogFile.FileLastModifiedAt) + if err != nil { + if sql.ErrNoRows == err { + return nil, nil + } + return nil, err + } + return &binlogFile, nil +} diff --git a/pkg/hpfs/backupbinlog/heartbeat.go b/pkg/hpfs/backupbinlog/heartbeat.go new file mode 100644 index 0000000..80610ef --- /dev/null +++ b/pkg/hpfs/backupbinlog/heartbeat.go @@ -0,0 +1,171 @@ +package backupbinlog + +import ( + "context" + "database/sql" + "fmt" + "github.com/go-logr/logr" + "os" + "sigs.k8s.io/controller-runtime/pkg/log/zap" + "strconv" + "sync" + "time" +) + +const ( + Sname = "pitr_sname" + heartbeatTableDDL = "/*+TDDL:cmd_extra(ENABLE_ASYNC_DDL=FALSE)*/\n" + + "CREATE TABLE IF NOT EXISTS `__cdc_heartbeat__` (\n" + + " `id` bigint(20) NOT NULL AUTO_INCREMENT BY GROUP,\n" + + " `sname` varchar(10) DEFAULT NULL,\n" + + " `gmt_modified` datetime(3) DEFAULT NULL,\n" + + " PRIMARY KEY (`id`)\n) ENGINE = InnoDB DEFAULT CHARSET = utf8mb4 broadcast" + PitrHeartbeatId = 111 +) + +const ( + EnvHeartbeatPrefix = "heartbeat_" + EnvHeartbeatHost = EnvHeartbeatPrefix + "host" + EnvHeartbeatPort = EnvHeartbeatPrefix + "port" + EnvHeartbeatUser = EnvHeartbeatPrefix + "user" + EnvHeartbeatPassword = EnvHeartbeatPrefix + "password" + EnvHeartbeatInterval = EnvHeartbeatPrefix + "interval" + EnvMaxRetryCount = EnvHeartbeatPrefix + "max_retry_count" +) + +type HeartBeat struct { + interval time.Duration + host string + port int + user string + pwd string + sql []string + maxRetryCount int + ctx context.Context + cancelFunc context.CancelFunc + lock sync.Mutex + logger logr.Logger +} + +func checkValid(values ...string) { + for _, value := range values { + if value == "" { + panic("invalid value") + } + } +} + +func NewHeatBeat() *HeartBeat { + host := os.Getenv(EnvHeartbeatHost) + portStr := os.Getenv(EnvHeartbeatPort) + user := os.Getenv(EnvHeartbeatUser) + pwd := os.Getenv(EnvHeartbeatPassword) + intervalStr := os.Getenv(EnvHeartbeatInterval) + maxRetryCountStr := os.Getenv(EnvMaxRetryCount) + logger := 
zap.New(zap.UseDevMode(true)).WithName("HeatBeat") + logger.Info("env values", "host", host, "port", portStr, "user", user, "interval", intervalStr, "maxRetryCount", maxRetryCountStr) + checkValid(host, portStr, user, intervalStr, maxRetryCountStr) + parsedPort, err := strconv.ParseInt(portStr, 10, 64) + if err != nil { + panic("failed to parse port=" + portStr) + } + interval, err := time.ParseDuration(intervalStr) + if err != nil { + panic("failed to parse interval=" + intervalStr) + } + maxRetryCount, err := strconv.ParseInt(maxRetryCountStr, 10, 64) + if err != nil { + panic("failed to parse maxRetryCount=" + maxRetryCountStr) + } + + ctx, cancelFunc := context.WithCancel(context.Background()) + return &HeartBeat{ + interval: interval, + sql: []string{"set drds_transaction_policy='TSO'", fmt.Sprintf("replace into `__cdc_heartbeat__`(id,Sname, gmt_modified) values(%d,'%s', now())", PitrHeartbeatId, Sname)}, + host: host, + port: int(parsedPort), + user: user, + pwd: pwd, + maxRetryCount: int(maxRetryCount), + ctx: ctx, + cancelFunc: cancelFunc, + logger: logger, + lock: sync.Mutex{}, + } +} + +func (h *HeartBeat) Cancel() { + h.lock.Lock() + defer h.lock.Unlock() + if h.cancelFunc != nil { + h.logger.Info("cancelled") + h.cancelFunc() + } +} + +func (h *HeartBeat) connect() *sql.DB { + db, err := sql.Open("mysql", fmt.Sprintf("%s:%s@tcp(%s:%d)/__cdc__?timeout=5s&readTimeout=5s", h.user, h.pwd, h.host, h.port)) + if err != nil { + panic(err) + } + return db +} + +func (h *HeartBeat) Do() { + go func() { + defer h.Cancel() + db := h.connect() + defer db.Close() + lastTime := time.Now().Add(-h.interval) + _, err := db.ExecContext(h.ctx, heartbeatTableDDL) + if err != nil { + panic(err) + } + retryCount := 0 + for retryCount < h.maxRetryCount { + nextDuration := int64(h.interval.Milliseconds()) - (time.Now().UnixMilli() - lastTime.UnixMilli()) + select { + case <-time.After(time.Duration(nextDuration) * time.Millisecond): + h.logger.Info("heartbeat") + lastTime = time.Now() + case <-h.ctx.Done(): + return + } + conn, err := db.Conn(h.ctx) + if err != nil { + h.logger.Error(err, "failed to get conn") + retryCount = retryCount + 1 + return + } + tx, err := conn.BeginTx(h.ctx, &sql.TxOptions{}) + if err != nil { + h.logger.Error(err, "fail to begin tx") + retryCount = retryCount + 1 + conn.Close() + continue + } + commit := true + for _, query := range h.sql { + _, err := tx.ExecContext(h.ctx, query) + if err != nil { + h.logger.Error(err, fmt.Sprintf("failed to exec query=%s", query)) + retryCount = retryCount + 1 + tx.Rollback() + commit = false + break + } + } + if commit { + retryCount = 0 + tx.Commit() + } + conn.Close() + } + }() +} + +func (h *HeartBeat) Wait() { + if h.ctx != nil { + <-h.ctx.Done() + } +} diff --git a/pkg/hpfs/backupbinlog/meta.go b/pkg/hpfs/backupbinlog/meta.go new file mode 100644 index 0000000..b779705 --- /dev/null +++ b/pkg/hpfs/backupbinlog/meta.go @@ -0,0 +1,59 @@ +package backupbinlog + +import "time" + +const ( + InfoNamespace = "namespace" + InfoPxcName = "pxc_name" + InfoPxcUid = "pxc_uid" + InfoXStoreName = "xstore_name" + InfoPodName = "pod_name" + InfoVersion = "version" + InfoBinlogChecksum = "binlog_checksum" + InfoSinkType = "sink_type" + InfoSinkName = "sink_name" + InfoXStoreUid = "xstore_uid" + InfoUploadLatest = "upload_latest" + InfoLocalExpireLogSeconds = "local_expire_log_seconds" + InfoMaxLocalBinlogCount = "max_local_binlog_count" + InfoForbidPurge = "forbid_purge" +) + +type Info struct { + Namespace string `json:"namespace,omitempty"` + 
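
The Do() loop above implements interval-aligned heartbeats with a retry budget that resets after every successful round, and a context that both cancels the loop and signals Wait(). A minimal, self-contained sketch of that pattern (runHeartbeat and all names here are illustrative, not part of the patch):

package main

import (
	"context"
	"fmt"
	"time"
)

// runHeartbeat fires beat() roughly every interval, stops when ctx is done,
// and gives up after maxRetry consecutive failures. A success resets the
// retry budget, mirroring the retryCount handling in Do() above.
func runHeartbeat(ctx context.Context, interval time.Duration, maxRetry int, beat func() error) {
	lastTime := time.Now().Add(-interval)
	retry := 0
	for retry < maxRetry {
		next := interval - time.Since(lastTime)
		select {
		case <-time.After(next): // negative durations fire immediately
			lastTime = time.Now()
		case <-ctx.Done():
			return
		}
		if err := beat(); err != nil {
			retry++
			continue
		}
		retry = 0 // a successful heartbeat clears the retry budget
	}
}

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), 550*time.Millisecond)
	defer cancel()
	runHeartbeat(ctx, 100*time.Millisecond, 3, func() error {
		fmt.Println("beat at", time.Now().Format("15:04:05.000"))
		return nil
	})
}
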
XStoreName string `json:"xstore_name,omitempty"` + PodName string `json:"pod_name,omitempty"` + Version string `json:"version,omitempty"` + SinkName string `json:"sinkName,omitempty"` + SinkType string `json:"sinkType,omitempty"` + BinlogChecksum string `json:"binlogChecksum,omitempty"` + XStoreUid string `json:"xstoreUid,omitempty"` + PxcName string `json:"pxcName,omitempty"` + PxcUid string `json:"pxcUid,omitempty"` + UploadLatest *bool `json:"uploadLatest,omitempty"` + LocalExpireLogSeconds int64 `json:"localExpireLogSeconds,omitempty"` + MaxLocalBinlogCount int64 `json:"maxLocalBinlogCount,omitempty"` + ForbidPurge bool `json:"forbidPurge,omitempty"` +} + +type BinlogFile struct { + Info `json:"info,omitempty"` + BinlogNum `json:"binlog_num,omitempty"` + StartIndex uint64 `json:"start_index,omitempty"` + EventTimestamp uint64 `json:"event_timestamp,omitempty"` + Sha256 string `json:"sha256,omitempty"` + Size int64 `json:"size,omitempty"` + //Status 0:success + Status int `json:"status,omitempty"` + ErrMsg string `json:"err_msg,omitempty"` + FileLastModifiedAt time.Time `json:"file_last_modified_at,omitempty"` + CreatedAt time.Time `json:"created_at,omitempty"` + UpdatedAt time.Time `json:"updated_at,omitempty"` +} + +type BinlogNum struct { + Num int64 `json:"num,omitempty"` + Filename string `json:"filename,omitempty"` + Filepath string `json:"filepath,omitempty"` + Latest bool `json:"latest,omitempty"` +} diff --git a/pkg/hpfs/backupbinlog/purge.go b/pkg/hpfs/backupbinlog/purge.go new file mode 100644 index 0000000..1e08b28 --- /dev/null +++ b/pkg/hpfs/backupbinlog/purge.go @@ -0,0 +1,96 @@ +package backupbinlog + +import ( + "database/sql" + "fmt" + _ "github.com/go-sql-driver/mysql" + "github.com/pkg/errors" + "os" + "path/filepath" + "strings" +) + +const ( + ShowConsensusLogSQL = "show consensus logs" + ShowVersion = "select @@version" + PurgeConsensusLogSQLFormat57 = "purge local consensus_log before %d" + PurgeConsensusLogSQLFormat80 = "call dbms_consensus.local_purge_log(%d)" +) + +func FindMysqlSockByLogDir(logDir string, rootDirs []string) (string, error) { + var relativeLogDir string + for _, rootDir := range rootDirs { + if strings.HasPrefix(logDir, rootDir) { + relativeLogDir = logDir[len(rootDir):] + if filepath.IsAbs(relativeLogDir) { + relativeLogDir = relativeLogDir[1:] + } + } + } + relativeMysqlSockPath := filepath.Join(filepath.Dir(relativeLogDir), "run", "mysql.sock") + for _, rootDir := range rootDirs { + absMysqlSockPath := filepath.Join(rootDir, relativeMysqlSockPath) + _, err := os.Stat(absMysqlSockPath) + if err != nil { + if errors.Is(err, os.ErrNotExist) { + continue + } + return "", err + } + return absMysqlSockPath, nil + } + return "", os.ErrNotExist +} + +func GetMysqlDb(mysqlSockPath string) (*sql.DB, error) { + db, err := sql.Open("mysql", fmt.Sprintf("root@unix(%s)/", mysqlSockPath)) + if err != nil { + return nil, err + } + return db, nil +} + +func Purge(db *sql.DB, beforeIndex uint64) error { + //check version + var version string + err := db.QueryRow(ShowVersion).Scan(&version) + if err != nil { + return err + } + if strings.HasPrefix(version, "5") { + _, err = db.Exec(fmt.Sprintf(PurgeConsensusLogSQLFormat57, beforeIndex)) + } else { + _, err = db.Exec(fmt.Sprintf(PurgeConsensusLogSQLFormat80, beforeIndex)) + } + if err != nil { + return err + } + return nil +} + +type ConsensusLogRow struct { + LogName string + FileSize uint64 + StartLogIndex uint64 +} + +func ShowConsensusLogs(db *sql.DB) ([]ConsensusLogRow, error) { + rows, err := 
db.Query(ShowConsensusLogSQL) + if err != nil { + return nil, err + } + result := make([]ConsensusLogRow, 0) + defer rows.Close() + for { + if !rows.Next() { + break + } + row := ConsensusLogRow{} + err := rows.Scan(&row.LogName, &row.FileSize, &row.StartLogIndex) + if err != nil { + return nil, err + } + result = append(result, row) + } + return result, nil +} diff --git a/pkg/hpfs/backupbinlog/purge_test.go b/pkg/hpfs/backupbinlog/purge_test.go new file mode 100644 index 0000000..af817fd --- /dev/null +++ b/pkg/hpfs/backupbinlog/purge_test.go @@ -0,0 +1,13 @@ +package backupbinlog + +import ( + "fmt" + "testing" +) + +func TestFindMysqlSock(t *testing.T) { + logDir := "/Users/busu/tmp/mysqldata/log" + rootDirs := []string{"/Users/busu/", "/Users/dingfeng"} + mysqlSockPath, _ := FindMysqlSockByLogDir(logDir, rootDirs) + fmt.Println(mysqlSockPath) +} diff --git a/pkg/hpfs/backupbinlog/start.go b/pkg/hpfs/backupbinlog/start.go new file mode 100644 index 0000000..c61b2c4 --- /dev/null +++ b/pkg/hpfs/backupbinlog/start.go @@ -0,0 +1,76 @@ +package backupbinlog + +import ( + . "github.com/alibaba/polardbx-operator/pkg/hpfs/config" + "os" + "path/filepath" + "sigs.k8s.io/controller-runtime/pkg/log/zap" + "time" +) + +func StartAllWatchers() { + go func() { + logger := zap.New(zap.UseDevMode(true)).WithName("StartAllWatchers") + for { + config := GetConfig() + if config.BackupBinlogConfig != nil { + for i := 0; i < len(config.BackupBinlogConfig.RootDirectories); i++ { + //key: namespace, value: pod name list + pods := map[string][]string{} + rootDir := config.BackupBinlogConfig.RootDirectories[i] + if filepath.Base(rootDir) == "xstore" && isDir(rootDir) && filepath.IsAbs(rootDir) { + namespaceDirectoryEntries, err := os.ReadDir(rootDir) + if err != nil { + logger.Error(err, "failed to read dir", "dir", rootDir) + continue + } + for _, dirEntry := range namespaceDirectoryEntries { + if dirEntry.IsDir() { + podNameDirectoryEntries, err := os.ReadDir(filepath.Join(rootDir, dirEntry.Name())) + if err != nil { + logger.Error(err, "failed to read dir", "dir", rootDir) + continue + } + podNames := make([]string, 0) + for _, podNameDirEntry := range podNameDirectoryEntries { + if podNameDirEntry.IsDir() { + podNames = append(podNames, podNameDirEntry.Name()) + } + } + pods[dirEntry.Name()] = podNames + } + } + } + for k, v := range pods { + for _, pod := range v { + watcherWorkDir := filepath.Join(rootDir, k, pod, "log") + infoFilepath := filepath.Join(watcherWorkDir, InfoFilename) + _, err := os.Stat(infoFilepath) + if err != nil { + logger.Error(err, "cannot stat file", "filepath", infoFilepath) + continue + } + indexFilepath := filepath.Join(watcherWorkDir, IndexFilename) + _, err = os.Stat(indexFilepath) + if err != nil { + logger.Error(err, "cannot stat file", "indexFilepath", indexFilepath) + continue + } + NewWatcher(watcherWorkDir, BeforeUpload, FetchStartIndex, Upload, RecordUpload, AfterUpload).Start() + } + + } + } + } + time.Sleep(30 * time.Second) + } + }() +} + +func isDir(filepath string) bool { + s, err := os.Stat(filepath) + if err != nil { + return false + } + return s.IsDir() +} diff --git a/pkg/hpfs/backupbinlog/watcher.go b/pkg/hpfs/backupbinlog/watcher.go new file mode 100644 index 0000000..a93d88d --- /dev/null +++ b/pkg/hpfs/backupbinlog/watcher.go @@ -0,0 +1,524 @@ +package backupbinlog + +import ( + "context" + "crypto/md5" + "database/sql" + "encoding/json" + "fmt" + "github.com/alibaba/polardbx-operator/pkg/hpfs/config" + "github.com/go-logr/logr" + "github.com/pkg/errors" + 
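
StartAllWatchers above derives watcher work directories from a fixed on-disk layout, <root>/<namespace>/<pod>/log, and only watches a pod when both marker files are present. A condensed, runnable sketch of that scan (findWatchDirs is an illustrative name; the root path is hypothetical, while the marker filenames match the constants used in this package):

package main

import (
	"fmt"
	"os"
	"path/filepath"
)

// findWatchDirs walks rootDir two levels deep (namespace, then pod name) and
// keeps only log directories that contain both the info file and the
// mysql_bin.index file, just like the scan in StartAllWatchers.
func findWatchDirs(rootDir, infoFile, indexFile string) ([]string, error) {
	var dirs []string
	namespaces, err := os.ReadDir(rootDir)
	if err != nil {
		return nil, err
	}
	for _, ns := range namespaces {
		if !ns.IsDir() {
			continue
		}
		pods, err := os.ReadDir(filepath.Join(rootDir, ns.Name()))
		if err != nil {
			continue // an unreadable namespace dir is skipped, not fatal
		}
		for _, pod := range pods {
			if !pod.IsDir() {
				continue
			}
			logDir := filepath.Join(rootDir, ns.Name(), pod.Name(), "log")
			if _, err := os.Stat(filepath.Join(logDir, infoFile)); err != nil {
				continue
			}
			if _, err := os.Stat(filepath.Join(logDir, indexFile)); err != nil {
				continue
			}
			dirs = append(dirs, logDir)
		}
	}
	return dirs, nil
}

func main() {
	dirs, err := findWatchDirs("/data/xstore", "polarx_backupbinloginfo.txt", "mysql_bin.index")
	fmt.Println(dirs, err)
}
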
"os" + "path" + "path/filepath" + "regexp" + "runtime/debug" + "sigs.k8s.io/controller-runtime/pkg/log/zap" + "sort" + "strconv" + "strings" + "sync" + "time" +) + +const ( + PERIOD = 5 * time.Second + PurgePeriod = 1 * time.Minute + InfoFilename = "polarx_backupbinloginfo.txt" + InfoVersionFilename = "polarx_backupbinloginfo_version.txt" + IndexFilename = "mysql_bin.index" + UploadRecordsDbFile = "polarx_uploadrecord.db" +) + +var ( + lock = sync.Mutex{} + registeredWatchers = map[string]*Watcher{} +) + +func UploadLatestBinlogFile(workDir string) bool { + setWatcherUploadLatest(workDir) + return checkIfFinishUploadLatest(workDir) +} + +func GetWatcherInfoHash(workDir string) string { + lock.Lock() + defer lock.Unlock() + watcher, ok := registeredWatchers[workDir] + if ok { + return watcher.GetHash() + } + return "" +} + +func setWatcherUploadLatest(workDir string) { + lock.Lock() + defer lock.Unlock() + watcher, ok := registeredWatchers[workDir] + if ok { + watcher.SetUploadLatest(true) + } +} + +func checkIfFinishUploadLatest(workDir string) bool { + indexFilepath := path.Join(workDir, IndexFilename) + _, err := os.Stat(indexFilepath) + if errors.Is(err, os.ErrNotExist) { + return true + } + watcher := NewWatcher(workDir, nil) + binlogNums, err := watcher.readLogIndex(indexFilepath) + if err != nil { + fmt.Printf("err : %+v\n", err) + return true + } + if len(binlogNums) > 0 { + maxBinlogNum := binlogNums[len(binlogNums)-1] + dbFilepath := path.Join(workDir, UploadRecordsDbFile) + db, err := GetDb(dbFilepath) + if err != nil { + fmt.Printf("err : %+v\n", err) + return true + } + defer db.Close() + record, err := FindRecord(db, maxBinlogNum.Num) + if err != nil { + fmt.Printf("err : %+v\n", err) + return true + } + if record == nil || record.Status != 0 { + return false + } + } + return true +} + +func register(watcher *Watcher) bool { + lock.Lock() + defer lock.Unlock() + _, ok := registeredWatchers[watcher.workDir] + if !ok { + registeredWatchers[watcher.workDir] = watcher + return true + } + return false +} + +func unregister(watcher *Watcher) { + lock.Lock() + defer lock.Unlock() + delete(registeredWatchers, watcher.workDir) +} + +type Watcher struct { + workDir string + actions []Callback + ctx context.Context + cancelFunc context.CancelFunc + logger logr.Logger + uploadLogger logr.Logger + db *sql.DB + mysqlDb *sql.DB + lastUploadedBinlogNum int64 + uploadLatest bool + uploadLatestCount uint64 + lock sync.Mutex + lastPurgeTime time.Time + infoHash string +} + +func NewWatcher(workDir string, actions ...Callback) *Watcher { + ctx, cancelFunc := context.WithCancel(context.Background()) + return &Watcher{ + workDir: workDir, + actions: actions, + ctx: ctx, + cancelFunc: cancelFunc, + logger: zap.New(zap.UseDevMode(true)).WithName("MysqlBinlogFileWatcher").WithValues("WorkDirectory", workDir), + lock: sync.Mutex{}, + } +} + +func (w *Watcher) SetHash(hash string) { + w.lock.Lock() + defer w.lock.Unlock() + w.infoHash = hash +} + +func (w *Watcher) GetHash() string { + w.lock.Lock() + defer w.lock.Unlock() + return w.infoHash +} + +func (w *Watcher) SetUploadLatest(uploadLatest bool) { + w.lock.Lock() + defer w.lock.Unlock() + w.uploadLatest = uploadLatest + w.uploadLatestCount = 0 +} + +func (w *Watcher) IsUploadLatest() bool { + w.lock.Lock() + defer w.lock.Unlock() + return w.uploadLatest +} + +func (w *Watcher) UploadLatestCount() uint64 { + w.lock.Lock() + defer w.lock.Unlock() + return w.uploadLatestCount +} + +func (w *Watcher) TryIncrementUploadLatestCount() bool { + 
w.lock.Lock() + defer w.lock.Unlock() + if w.uploadLatest { + w.uploadLatestCount += 1 + } + return w.uploadLatest +} + +func (w *Watcher) checkFiles(filepaths ...string) bool { + for _, filepath := range filepaths { + _, err := os.Stat(filepath) + if err != nil { + w.logger.Error(err, "failed to check file", "filepath", filepath) + return false + } + } + return true +} + +func (w *Watcher) readFileLines(filepath string) ([]string, error) { + bytes, err := os.ReadFile(filepath) + if err != nil { + w.logger.Error(err, "Failed to readfile", "filepath", filepath) + return nil, err + } + content := string(bytes) + result := strings.Split(content, "\n") + return result, nil +} + +func (w *Watcher) readLogIndex(indexFilepath string) ([]BinlogNum, error) { + lines, err := w.readFileLines(indexFilepath) + if err != nil { + return nil, err + } + reg := regexp.MustCompile(`^mysql_bin.(\w+)$`) + binlogNums := make([]BinlogNum, 0, len(lines)) + for _, line := range lines { + if len(strings.TrimSpace(line)) == 0 { + continue + } + filename := filepath.Base(line) + matchedResult := reg.FindStringSubmatch(filename) + if len(matchedResult) == 2 { + num, err := strconv.ParseInt(matchedResult[1], 10, 63) + if err != nil { + w.logger.Error(err, "filename suffix is not a number", "filename", filename) + return nil, err + } + binlogNums = append(binlogNums, BinlogNum{ + Num: num, + Filename: filename, + Filepath: filepath.Join(filepath.Dir(indexFilepath), filename), + }) + } + } + sort.Slice(binlogNums, func(i, j int) bool { + return binlogNums[i].Num < binlogNums[j].Num + }) + return binlogNums, nil +} + +func (w *Watcher) readInfo(filepath string) (*Info, error) { + lines, err := w.readFileLines(filepath) + if err != nil { + return nil, err + } + infoMap := map[string]string{} + for _, line := range lines { + if len(strings.TrimSpace(line)) == 0 { + continue + } + kv := strings.Split(line, "=") + if len(kv) != 2 { + err := fmt.Errorf("invalid field, line %s", line) + w.logger.Error(err, "failed to parse field line", "filepath", filepath) + return nil, err + } + infoMap[strings.TrimSpace(kv[0])] = strings.TrimSpace(kv[1]) + } + namespace, namespaceOk := infoMap[InfoNamespace] + xstoreName, xstoreNameOk := infoMap[InfoXStoreName] + podName, podNameOk := infoMap[InfoPodName] + version, versionOk := infoMap[InfoVersion] + binlogChecksum, binlogChecksumOk := infoMap[InfoBinlogChecksum] + sinkName, sinkNameOk := infoMap[InfoSinkName] + sinkType, sinkTypeOk := infoMap[InfoSinkType] + xstoreUid, xstoreUidOk := infoMap[InfoXStoreUid] + pxcName, pxcNameOk := infoMap[InfoPxcName] + pxcUid, pxcUidOk := infoMap[InfoPxcUid] + uploadLatest, uploadLatestOk := infoMap[InfoUploadLatest] + //InfoExpireLogDays InfoMaxLocalBinlogCount + expireLogSeconds, expireLogSecondsOk := infoMap[InfoLocalExpireLogSeconds] + maxLocalBinlogCount, maxLocalBinlogCountOk := infoMap[InfoMaxLocalBinlogCount] + forbidPurgeStr, forbidPurgeOk := infoMap[InfoForbidPurge] + if !(namespaceOk && xstoreNameOk && podNameOk && versionOk && binlogChecksumOk && sinkNameOk && sinkTypeOk && xstoreUidOk && pxcNameOk && pxcUidOk && forbidPurgeOk) { + err := fmt.Errorf("invalid info") + w.logger.Error(err, InfoNamespace, namespace, InfoXStoreName, xstoreName, InfoPodName, podName, InfoVersion, version, InfoBinlogChecksum, binlogChecksum, InfoSinkName, sinkName, InfoSinkType, sinkType, InfoXStoreUid, xstoreUid, InfoPxcName, pxcName, InfoPxcUid, pxcUid) + return nil, err + } + forbidPurge, err := strconv.ParseBool(forbidPurgeStr) + if err != nil { + 
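
readLogIndex above turns mysql_bin.index into a slice of BinlogNum ordered by the numeric filename suffix. A small, self-contained sketch of the same parsing (the sample lines are hypothetical; the regexp here escapes the dot for strictness):

package main

import (
	"fmt"
	"path/filepath"
	"regexp"
	"sort"
	"strconv"
)

func main() {
	// Hypothetical index content; real files list one binlog path per line.
	indexLines := []string{
		"./mysql_bin.000003",
		"./mysql_bin.000001",
		"",
		"./mysql_bin.000002",
	}
	re := regexp.MustCompile(`^mysql_bin\.(\w+)$`)
	nums := make([]int64, 0, len(indexLines))
	for _, line := range indexLines {
		name := filepath.Base(line)
		m := re.FindStringSubmatch(name)
		if len(m) != 2 {
			continue // skip blank lines and unrelated entries
		}
		n, err := strconv.ParseInt(m[1], 10, 64)
		if err != nil {
			continue // a non-numeric suffix is ignored in this sketch
		}
		nums = append(nums, n)
	}
	sort.Slice(nums, func(i, j int) bool { return nums[i] < nums[j] })
	fmt.Println(nums) // [1 2 3]
}
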
w.logger.Error(err, fmt.Sprintf("failed to parse forbid purge, str value = %s", forbidPurgeStr)) + return nil, err + } + var infoUploadLatest *bool + if uploadLatestOk { + uploadLatest = strings.TrimSpace(uploadLatest) + val := strings.EqualFold(uploadLatest, "true") || strings.EqualFold(uploadLatest, "1") + infoUploadLatest = &val + } + + var infoExpireLogSeconds int64 = int64(config.GetConfig().BackupBinlogConfig.GetExpireLogHours() * 3600) + if expireLogSecondsOk { + val, err := strconv.ParseInt(expireLogSeconds, 10, 64) + if err != nil { + w.logger.Error(err, fmt.Sprintf("failed to parse expireLogSeconds , strValue=%s", expireLogSeconds)) + } else { + infoExpireLogSeconds = val + } + } + + var infoMaxLocalBinlogCount int64 = config.GetConfig().BackupBinlogConfig.GetMaxLocalBinlogCount() + if maxLocalBinlogCountOk { + val, err := strconv.ParseInt(maxLocalBinlogCount, 10, 63) + if err != nil { + w.logger.Error(err, fmt.Sprintf("failed to parse maxLocalBinlogCount , strValue=%s", maxLocalBinlogCount)) + } else { + infoMaxLocalBinlogCount = val + } + } + info := &Info{ + Namespace: namespace, + XStoreName: xstoreName, + SinkName: sinkName, + SinkType: sinkType, + BinlogChecksum: binlogChecksum, + XStoreUid: xstoreUid, + PxcName: pxcName, + PxcUid: pxcUid, + LocalExpireLogSeconds: infoExpireLogSeconds, + MaxLocalBinlogCount: infoMaxLocalBinlogCount, + ForbidPurge: forbidPurge, + } + infoJson, _ := json.Marshal(info) + w.SetHash(fmt.Sprintf("%x", md5.Sum(infoJson))) + info.Version = version + info.PodName = podName + if info.UploadLatest != infoUploadLatest { + info.UploadLatest = infoUploadLatest + infoJson, _ = json.Marshal(info) + } + w.logger.Info("readInfo: " + string(infoJson)) + return info, nil +} + +func (w *Watcher) Start() { + go func() { + defer func() { + if err := recover(); err != nil { + w.logger.Info("Skip panic", "err", err, "stack", string(debug.Stack())) + } + }() + if !register(w) { + return + } + defer unregister(w) + infoFilepath := path.Join(w.workDir, InfoFilename) + indexFilepath := path.Join(w.workDir, IndexFilename) + if !w.checkFiles(infoFilepath, indexFilepath) { + return + } + dbFilepath := path.Join(w.workDir, UploadRecordsDbFile) + db, err := GetDb(dbFilepath) + if err != nil { + w.logger.Error(err, "failed to GetDb", "DbFilepath", dbFilepath) + return + } + defer db.Close() + err = TryCreateTableOfUploadRecord(db) + if err != nil { + w.logger.Error(err, "TryCreateTableOfUploadRecord") + return + } + w.db = db + + // find mysql socket filepath + mysqlSockPath, err := FindMysqlSockByLogDir(w.workDir, config.GetConfig().BackupBinlogConfig.RootDirectories) + if err != nil { + w.logger.Error(err, "failed to find mysql sock by log dir", "logDir", w.workDir, "rootDirs", config.GetConfig().BackupBinlogConfig.RootDirectories) + return + } + mysqlDb, err := GetMysqlDb(mysqlSockPath) + if err != nil { + w.logger.Error(err, fmt.Sprintf("failed to get mysql db mysqlSockFilepath=%s", mysqlSockPath)) + return + } + w.mysqlDb = mysqlDb + + for { + binlogNums, err := w.readLogIndex(indexFilepath) + if err != nil { + return + } + info, err := w.readInfo(infoFilepath) + if err != nil { + return + } + if info.UploadLatest != nil { + w.SetUploadLatest(*info.UploadLatest) + } + + // purge consensus logs + if w.lastPurgeTime.Add(PurgePeriod).Before(time.Now()) { + go func() { + defer func() { + if err := recover(); err != nil { + w.logger.Info("Skip panic", "err", err, "stack", string(debug.Stack())) + } + }() + if len(binlogNums) > 1 { + err := w.tryPurge(w.db, w.mysqlDb, *info, 
binlogNums[:len(binlogNums)-1])
+					if err != nil {
+						w.logger.Error(err, "failed to purge")
+					}
+
+				}()
+				w.lastPurgeTime = time.Now()
+			}
+
+			// upload consensus logs
+		binlogNumOut:
+			for binlogNumArrayIndex, binlogNum := range binlogNums {
+				if binlogNum.Num <= w.lastUploadedBinlogNum {
+					continue
+				}
+				var thisLatestBinlogUploaded bool
+				if binlogNumArrayIndex == len(binlogNums)-1 {
+					if w.IsUploadLatest() {
+						thisLatestBinlogUploaded = true
+					} else {
+						// do not upload the latest binlog file unless explicitly requested
+						continue
+					}
+				}
+				binlogFile, err := FindRecord(db, binlogNum.Num)
+				if err != nil {
+					w.logger.Error(err, "failed to find record", "num", binlogNum.Num)
+					return
+				}
+				if binlogFile == nil || binlogFile.Status != 0 {
+					newBinlogFile := BinlogFile{
+						Info:      *info,
+						BinlogNum: binlogNum,
+					}
+					if w.actions != nil {
+						for _, action := range w.actions {
+							if !action(w, &newBinlogFile) {
+								// stop executing the action chain if one action fails
+								w.logger.Info("action execution interrupted", "binlog filepath", newBinlogFile.Filepath)
+								break binlogNumOut
+							}
+						}
+					}
+				}
+				w.lastUploadedBinlogNum = binlogNum.Num
+				if thisLatestBinlogUploaded {
+					w.TryIncrementUploadLatestCount()
+				}
+			}
+			select {
+			case <-w.ctx.Done():
+				return
+			case <-time.After(PERIOD):
+				break
+			}
+		}
+	}()
+}
+
+func (w *Watcher) tryPurge(sqliteDb *sql.DB, mysqlDb *sql.DB, info Info, binlogNums []BinlogNum) error {
+	w.logger.Info("begin tryPurge consensus logs")
+	defer w.logger.Info("end tryPurge consensus logs")
+	if info.ForbidPurge {
+		w.logger.Info("purge forbidden, ignoring this trigger")
+		return nil
+	}
+	var purgeBinlogNum *BinlogNum
+	countPurgeCnt := len(binlogNums) - int(info.MaxLocalBinlogCount)
+	if countPurgeCnt > 0 {
+		// purge is needed because the local binlog count exceeds the limit
+		purgeBinlogNum = &binlogNums[countPurgeCnt-1]
+	}
+	expireTime := time.Now().Add(time.Duration(-info.LocalExpireLogSeconds) * time.Second)
+	for _, binlogNum := range binlogNums {
+		fileInfo, err := os.Stat(binlogNum.Filepath)
+		if err != nil {
+			// os.ErrNotExist is expected after an earlier purge; any other stat
+			// error also leaves fileInfo nil, so skip the file either way
+			continue
+		}
+		if fileInfo.ModTime().Before(expireTime) {
+			binlogNum := binlogNum // pin the loop variable before taking its address
+			if purgeBinlogNum == nil {
+				purgeBinlogNum = &binlogNum
+			} else if binlogNum.Num > purgeBinlogNum.Num {
+				purgeBinlogNum = &binlogNum
+			}
+		}
+	}
+	if purgeBinlogNum != nil {
+		// check if the binlog has been uploaded
+		w.logger.Info(fmt.Sprintf("check the uploaded status of %s", purgeBinlogNum.Filename))
+		uploadedRecord, err := FindRecord(sqliteDb, purgeBinlogNum.Num)
+		if err != nil {
+			w.logger.Error(err, "failed to find the binlog", "binlog filename", purgeBinlogNum.Filename)
+			return err
+		}
+		if uploadedRecord != nil && uploadedRecord.Status == 0 {
+			// do purge
+			consensusLogRows, err := ShowConsensusLogs(w.mysqlDb)
+			if err != nil {
+				w.logger.Error(err, "failed to show consensus logs")
+				return err
+			}
+			sort.Slice(consensusLogRows, func(i, j int) bool {
+				return consensusLogRows[i].StartLogIndex < consensusLogRows[j].StartLogIndex
+			})
+			for i, consensusLogRow := range consensusLogRows {
+				if consensusLogRow.LogName == purgeBinlogNum.Filename {
+					if len(consensusLogRows) > i+1 {
+						purgeToIndex := consensusLogRows[i+1].StartLogIndex
+						w.logger.Info(fmt.Sprintf("exec purge local consensus_log before %d", purgeToIndex))
+						err := Purge(w.mysqlDb, purgeToIndex)
+						if err != nil {
+							w.logger.Error(err, "failed to purge local consensus_log")
+							return err
+						}
+						w.logger.Info("succeeded to purge local consensus_log")
+					}
+					break
+				}
+			}
+		}
+	}
+	return nil
+}
+
+func (w *Watcher) Stop() {
+	if w.cancelFunc != nil {
+		w.cancelFunc()
+	}
+}
diff --git 
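
tryPurge above never purges the newest consensus log: it finds the file that follows the chosen candidate in StartLogIndex order and purges strictly before that file's start index. A compact model of that lookup (row and purgeToIndex are illustrative names, not part of the patch):

package main

import (
	"fmt"
	"sort"
)

type row struct {
	LogName       string
	StartLogIndex uint64
}

// purgeToIndex returns the consensus index to purge before, i.e. the start
// index of the file that follows target, or 0 when target is the newest file
// (in which case nothing is purged).
func purgeToIndex(rows []row, target string) uint64 {
	sort.Slice(rows, func(i, j int) bool { return rows[i].StartLogIndex < rows[j].StartLogIndex })
	for i, r := range rows {
		if r.LogName == target && i+1 < len(rows) {
			return rows[i+1].StartLogIndex
		}
	}
	return 0
}

func main() {
	rows := []row{
		{"mysql_bin.000002", 101},
		{"mysql_bin.000001", 1},
		{"mysql_bin.000003", 205},
	}
	// Purging "before 205" removes everything up to the end of 000002.
	fmt.Println(purgeToIndex(rows, "mysql_bin.000002")) // 205
	fmt.Println(purgeToIndex(rows, "mysql_bin.000003")) // 0: newest file is kept
}
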
a/pkg/hpfs/backupbinlog/watcher_test.go b/pkg/hpfs/backupbinlog/watcher_test.go new file mode 100644 index 0000000..1d2425a --- /dev/null +++ b/pkg/hpfs/backupbinlog/watcher_test.go @@ -0,0 +1,11 @@ +package backupbinlog + +import ( + "testing" + "time" +) + +func TestWatcher(t *testing.T) { + NewWatcher("/Users/busu/tmp/litewatchertest", BeforeUpload, FetchStartIndex, Upload, RecordUpload, AfterUpload).Start() + time.Sleep(1 * time.Hour) +} diff --git a/pkg/hpfs/common/common.go b/pkg/hpfs/common/common.go new file mode 100644 index 0000000..22ad781 --- /dev/null +++ b/pkg/hpfs/common/common.go @@ -0,0 +1,61 @@ +/* +Copyright 2021 Alibaba Group Holding Limited. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package common + +import ( + "encoding/binary" + "fmt" + "io" + "strconv" + "strings" +) + +const ( + AffectedFiles = "AffectedFiles" +) + +func ReadBytes(reader io.Reader, len uint64) ([]byte, error) { + bytes := make([]byte, len) + for readLen := uint64(0); readLen < len; { + nowReadLen, err := reader.Read(bytes[readLen:]) + if nowReadLen == 0 { + return bytes[:readLen], err + } + readLen += uint64(nowReadLen) + } + return bytes, nil +} + +func ReadInt64(reader io.Reader) (int64, error) { + bytes, err := ReadBytes(reader, 8) + if err != nil { + return int64(0), err + } + return int64(binary.BigEndian.Uint64(bytes)), nil +} + +func ParseNetAddr(addr string) (string, int) { + strs := strings.Split(strings.Trim(addr, " "), ":") + if len(strs) != 2 { + panic(fmt.Sprintf("invalid addr %s", addr)) + } + port, err := strconv.Atoi(strs[1]) + if err != nil { + panic(fmt.Sprintf("invalid addr %s %v", addr, err)) + } + return strs[0], port +} diff --git a/pkg/hpfs/filestream/common_test.go b/pkg/hpfs/common/common_test.go similarity index 99% rename from pkg/hpfs/filestream/common_test.go rename to pkg/hpfs/common/common_test.go index 755fe02..6a4a325 100644 --- a/pkg/hpfs/filestream/common_test.go +++ b/pkg/hpfs/common/common_test.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -package filestream +package common import ( "encoding/json" diff --git a/pkg/hpfs/filestream/config.go b/pkg/hpfs/config/config.go similarity index 56% rename from pkg/hpfs/filestream/config.go rename to pkg/hpfs/config/config.go index 6534ecc..36d8453 100644 --- a/pkg/hpfs/filestream/config.go +++ b/pkg/hpfs/config/config.go @@ -14,13 +14,14 @@ See the License for the specific language governing permissions and limitations under the License. 
*/ -package filestream +package config import ( "errors" "fmt" "k8s.io/apimachinery/pkg/util/yaml" "os" + "path/filepath" "reflect" "sync/atomic" "time" @@ -29,8 +30,11 @@ import ( var ConfigFilepath = "/config/config.yaml" const ( - SinkTypeOss = "oss" - SinkTypeSftp = "sftp" + SinkTypeOss = "oss" + SinkTypeSftp = "sftp" + SinkTypeNone = "none" + DefaultLocalExpireLogHours float64 = 7 + DefaultMaxLocalBinlogCount = 50 ) type OssSink struct { @@ -54,8 +58,30 @@ type Sink struct { SftpSink } +type BackupBinlogConfig struct { + RootDirectories []string `json:"rootDirectories,omitempty"` + StoragePathPrefix string `json:"storagePathPrefix,omitempty"` + LocalExpireLogHours *float64 `json:"localExpireLogHours,omitempty"` + MaxLocalBinlogCount *int64 `json:"maxLocalBinlogCount,omitempty"` +} + +func (bbc *BackupBinlogConfig) GetExpireLogHours() float64 { + if bbc.LocalExpireLogHours == nil { + return DefaultLocalExpireLogHours + } + return *bbc.LocalExpireLogHours +} + +func (bbc *BackupBinlogConfig) GetMaxLocalBinlogCount() int64 { + if bbc.MaxLocalBinlogCount == nil { + return DefaultMaxLocalBinlogCount + } + return *bbc.MaxLocalBinlogCount +} + type Config struct { - Sinks []Sink `json:"sinks,omitempty"` + Sinks []Sink `json:"sinks,omitempty"` + BackupBinlogConfig *BackupBinlogConfig `json:"backupBinlogConfig,omitempty"` } var configValue atomic.Value @@ -108,3 +134,24 @@ func ReloadConfig() { configValue.Swap(config) fmt.Println(time.Now().Format("2006-01-02 15:04:05") + " filestream config changed") } + +func GetBinlogStoragePathPrefix() string { + prefix := "polardbx-binlogbackup" + if GetConfig().BackupBinlogConfig != nil && GetConfig().BackupBinlogConfig.StoragePathPrefix != "" { + prefix = GetConfig().BackupBinlogConfig.StoragePathPrefix + } + return prefix +} + +func GetXStorePodBinlogStorageDirectory(namespace string, pxcName string, pxcUid string, xStoreName string, xStoreUid string, podName string) string { + prefix := GetBinlogStoragePathPrefix() + if xStoreName == "" { + return filepath.Join(prefix, namespace, pxcName, pxcUid) + } + return filepath.Join(prefix, namespace, pxcName, pxcUid, xStoreName, xStoreUid, podName) +} + +func GetPxcBinlogStorageDirectory(namespace string, pxcName string, pxcUid string) string { + prefix := GetBinlogStoragePathPrefix() + return filepath.Join(prefix, namespace, pxcName, pxcUid) +} diff --git a/pkg/hpfs/filestream/config_test.go b/pkg/hpfs/config/config_test.go similarity index 99% rename from pkg/hpfs/filestream/config_test.go rename to pkg/hpfs/config/config_test.go index 41a93fc..7f89bac 100644 --- a/pkg/hpfs/filestream/config_test.go +++ b/pkg/hpfs/config/config_test.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -package filestream +package config import ( "os" diff --git a/pkg/hpfs/filestream/client.go b/pkg/hpfs/filestream/client.go index 0a89905..b052d9d 100644 --- a/pkg/hpfs/filestream/client.go +++ b/pkg/hpfs/filestream/client.go @@ -20,7 +20,10 @@ import ( "encoding/binary" "errors" "fmt" + . 
"github.com/alibaba/polardbx-operator/pkg/hpfs/common" + "go.uber.org/atomic" "io" + net2 "k8s.io/apimachinery/pkg/util/net" "net" "os" "time" @@ -33,6 +36,8 @@ type FileClient struct { port int flowControl FlowControl returnConn net.Conn + waitChan chan error + lastLen atomic.Uint64 } func NewFileClient(host string, port int, flowControl FlowControl) *FileClient { @@ -43,6 +48,10 @@ func NewFileClient(host string, port int, flowControl FlowControl) *FileClient { } } +func (f *FileClient) GetLastLen() uint64 { + return f.lastLen.Load() +} + func (f *FileClient) addr() string { return fmt.Sprintf("%s:%d", f.host, f.port) } @@ -78,6 +87,9 @@ func (f *FileClient) Upload(reader io.Reader, actionMetadata ActionMetadata) (in conn.SetReadDeadline(time.Now().Add(60 * time.Second)) len, err := ReadInt64(conn) if err != nil { + if net2.IsProbableEOF(err) && lastWrittenLen == written { + return lastWrittenLen, nil + } return lastWrittenLen, err } lastWrittenLen = len @@ -129,7 +141,15 @@ func (f *FileClient) Download(writer io.Writer, actionMetadata ActionMetadata) ( return len, err } bytes, err := ReadBytes(conn, 8) + if err != nil { + f.waitChan <- err + return 0, err + } + if f.waitChan != nil { + f.waitChan <- nil + } len := binary.BigEndian.Uint64(bytes) + f.lastLen.Store(len) copiedLen, err := f.copy(conn, writer) if err != nil { return copiedLen, err @@ -140,6 +160,30 @@ func (f *FileClient) Download(writer io.Writer, actionMetadata ActionMetadata) ( return copiedLen, nil } +func (f *FileClient) InitWaitChan() { + if f.waitChan != nil { + close(f.waitChan) + } + f.waitChan = make(chan error, 1) +} + +func (f *FileClient) WaitForDownload() error { + if f.waitChan != nil { + select { + case <-time.After(20 * time.Second): + return errors.New("timeout") + case err := <-f.waitChan: + return err + } + } + return nil +} + +// List aims to list files in Filepath of ActionMetadata, the only difference with Download is Action +func (f *FileClient) List(writer io.Writer, actionMetadata ActionMetadata) (int64, error) { + return f.Download(writer, actionMetadata) +} + func (f *FileClient) writeMagicNumber(conn net.Conn) { bytes := make([]byte, 4) binary.BigEndian.PutUint32(bytes, MagicNumber) diff --git a/pkg/hpfs/filestream/common.go b/pkg/hpfs/filestream/common.go index 71f8604..e66953d 100644 --- a/pkg/hpfs/filestream/common.go +++ b/pkg/hpfs/filestream/common.go @@ -1,57 +1,30 @@ -/* -Copyright 2021 Alibaba Group Holding Limited. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - package filestream import ( - "encoding/binary" - "fmt" - "io" - "strconv" - "strings" + "encoding/json" + "github.com/alibaba/polardbx-operator/pkg/hpfs/config" + "github.com/alibaba/polardbx-operator/pkg/hpfs/discovery" + "os" ) -func ReadBytes(reader io.Reader, len uint64) ([]byte, error) { - bytes := make([]byte, len) - for readLen := uint64(0); readLen < len; { - nowReadLen, err := reader.Read(bytes[readLen:]) - if nowReadLen == 0 { - return bytes, err - } - readLen += uint64(nowReadLen) +func GetClientActionBySinkType(sinkType string) Action { + switch sinkType { + case config.SinkTypeOss: + return DownloadOss + case config.SinkTypeSftp: + return DownloadSsh } - return bytes, nil + return InvalidAction } -func ReadInt64(reader io.Reader) (int64, error) { - bytes, err := ReadBytes(reader, 8) +func GetHostInfoFromConfig(filepath string) (map[string]discovery.HostInfo, error) { + data, err := os.ReadFile(filepath) if err != nil { - return int64(0), err + return nil, err } - return int64(binary.BigEndian.Uint64(bytes)), nil -} - -func ParseNetAddr(addr string) (string, int) { - strs := strings.Split(strings.Trim(addr, " "), ":") - if len(strs) != 2 { - panic(fmt.Sprintf("invalid addr %s", addr)) - } - port, err := strconv.Atoi(strs[1]) - if err != nil { - panic(fmt.Sprintf("invalid addr %s %v", addr, err)) + result := map[string]discovery.HostInfo{} + if err = json.Unmarshal(data, &result); err != nil { + return nil, err } - return strs[0], port + return result, nil } diff --git a/pkg/hpfs/filestream/request.go b/pkg/hpfs/filestream/request.go index ab9ca8c..1534a20 100644 --- a/pkg/hpfs/filestream/request.go +++ b/pkg/hpfs/filestream/request.go @@ -27,9 +27,12 @@ const ( DownloadRemote Action = "downloadRemote" UploadOss Action = "uploadOss" DownloadOss Action = "downloadOss" - CheckTask Action = "CheckTask" + ListOss Action = "listOss" + CheckTask Action = "checkTask" UploadSsh Action = "uploadSsh" - DownloadSsh Action = "DownloadSsh" + DownloadSsh Action = "downloadSsh" + ListSsh Action = "listSsh" + InvalidAction Action = "" ) const ( @@ -40,7 +43,7 @@ const ( const ( MetaDataLenLen = 4 - MetaFiledLen = 10 + MetaFiledLen = 11 MetadataActionOffset = 0 MetadataInstanceIdOffset = 1 MetadataFilenameOffset = 2 @@ -51,6 +54,7 @@ const ( MetadataSinkOffset = 7 MetadataRequestIdOffset = 8 MetadataOssBufferSizeOffset = 9 + MetadataLimitSize = 10 ) var ActionLocal2Remote2 = map[Action]Action{ @@ -71,9 +75,25 @@ type ActionMetadata struct { Sink string `json:"sink,omitempty"` RequestId string `json:"requestId,omitempty"` OssBufferSize string `json:"ossBufferSize,omitempty"` + LimitSize string `json:"limitSize,omitempty"` redirect bool } func (action *ActionMetadata) ToString() string { - return strings.Join([]string{string(action.Action), action.InstanceId, action.Filename, action.RedirectAddr, action.Filepath, action.RetentionTime, action.Stream, action.Sink, action.RequestId, action.OssBufferSize}, ",") + return strings.Join([]string{string(action.Action), action.InstanceId, action.Filename, action.RedirectAddr, action.Filepath, action.RetentionTime, action.Stream, action.Sink, action.RequestId, action.OssBufferSize, action.LimitSize}, ",") +} + +const ( + RemoteNodePrefix = "RemoteNode=" +) + +func GetRemoteAddrByNodeName(nodeName string) string { + return RemoteNodePrefix + nodeName +} + +func GetNodeNameFromRemoteAddr(addr string) string { + if strings.HasPrefix(addr, RemoteNodePrefix) { + return addr[len(RemoteNodePrefix):] + } + return "" } diff --git 
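
The RemoteNodePrefix convention introduced in request.go lets RedirectAddr carry either a plain host:port or a node name to be resolved against the hpfs node registry. A tiny round-trip sketch (the prefix value is copied from the patch; the function names here are illustrative):

package main

import (
	"fmt"
	"strings"
)

const remoteNodePrefix = "RemoteNode="

// toRemoteAddr mirrors GetRemoteAddrByNodeName above: the node name is
// smuggled through the address field behind a recognizable prefix.
func toRemoteAddr(nodeName string) string { return remoteNodePrefix + nodeName }

// fromRemoteAddr mirrors GetNodeNameFromRemoteAddr: a missing prefix yields
// "", which tells the caller to fall back to host:port parsing.
func fromRemoteAddr(addr string) string {
	if strings.HasPrefix(addr, remoteNodePrefix) {
		return addr[len(remoteNodePrefix):]
	}
	return ""
}

func main() {
	addr := toRemoteAddr("cn-node-1")
	fmt.Println(addr)                           // RemoteNode=cn-node-1
	fmt.Println(fromRemoteAddr(addr))           // cn-node-1
	fmt.Println(fromRemoteAddr("1.2.3.4:6543")) // "" -> treated as host:port
}
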
a/pkg/hpfs/filestream/server.go b/pkg/hpfs/filestream/server.go index 0cd3a05..46560fa 100644 --- a/pkg/hpfs/filestream/server.go +++ b/pkg/hpfs/filestream/server.go @@ -18,13 +18,19 @@ package filestream import ( "archive/tar" + "bytes" "context" "encoding/binary" + "encoding/json" "errors" "fmt" + . "github.com/alibaba/polardbx-operator/pkg/hpfs/common" + . "github.com/alibaba/polardbx-operator/pkg/hpfs/config" + "github.com/alibaba/polardbx-operator/pkg/hpfs/discovery" "github.com/alibaba/polardbx-operator/pkg/hpfs/remote" polarxJson "github.com/alibaba/polardbx-operator/pkg/util/json" polarxMap "github.com/alibaba/polardbx-operator/pkg/util/map" + polarxPath "github.com/alibaba/polardbx-operator/pkg/util/path" "github.com/go-logr/logr" "github.com/google/uuid" "github.com/pkg/sftp" @@ -34,6 +40,7 @@ import ( "os" "path/filepath" "sigs.k8s.io/controller-runtime/pkg/log/zap" + "strconv" "strings" "sync" "time" @@ -82,13 +89,6 @@ func NewFileServer(host string, port int, fileRootPath string, flowControl FlowC } func (f *FileServer) Start() error { - InitConfig() - go func() { - for { - time.Sleep(70 * time.Second) - ReloadConfig() - } - }() listen, err := net.Listen(NetType, fmt.Sprintf("%s:%d", f.host, f.port)) if err != nil { f.logger.Error(err, "Failed to listen") @@ -141,11 +141,11 @@ func (f *FileServer) handleRequest(conn net.Conn) error { case strings.ToLower(string(UploadLocal)): f.markTask(logger, metadata, TaskStateDoing) err := f.processUploadLocal(logger, metadata, conn) - f.processTaskResult(err, metadata) + f.processTaskResult(logger, err, metadata) case strings.ToLower(string(UploadRemote)): f.markTask(logger, metadata, TaskStateDoing) err := f.processUploadRemote(logger, metadata, conn) - f.processTaskResult(err, metadata) + f.processTaskResult(logger, err, metadata) case strings.ToLower(string(DownloadLocal)): f.processDownloadLocal(logger, metadata, conn) case strings.ToLower(string(DownloadRemote)): @@ -153,15 +153,19 @@ func (f *FileServer) handleRequest(conn net.Conn) error { case strings.ToLower(string(UploadOss)): f.markTask(logger, metadata, TaskStateDoing) err := f.processUploadOss(logger, metadata, conn) - f.processTaskResult(err, metadata) + f.processTaskResult(logger, err, metadata) case strings.ToLower(string(DownloadOss)): f.processDownloadOss(logger, metadata, conn) + case strings.ToLower(string(ListOss)): + f.processListOss(logger, metadata, conn) case strings.ToLower(string(UploadSsh)): f.markTask(logger, metadata, TaskStateDoing) err := f.processUploadSsh(logger, metadata, conn) - f.processTaskResult(err, metadata) + f.processTaskResult(logger, err, metadata) case strings.ToLower(string(DownloadSsh)): f.processDownloadSsh(logger, metadata, conn) + case strings.ToLower(string(ListSsh)): + f.processListSsh(logger, metadata, conn) case strings.ToLower(string(CheckTask)): f.processCheckTask(logger, metadata, conn) default: @@ -176,9 +180,10 @@ func (f *FileServer) markTask(logger logr.Logger, metadata ActionMetadata, value TaskMap.Store(metadata.RequestId, value) } -func (f *FileServer) processTaskResult(err error, metadata ActionMetadata) { +func (f *FileServer) processTaskResult(logger logr.Logger, err error, metadata ActionMetadata) { taskState := TaskStateSuccess if err != nil { + logger.Error(err, "Failed to process task result") taskState = TaskStateFailed } TaskMap.Store(metadata.RequestId, taskState) @@ -238,6 +243,9 @@ func (f *FileServer) processUploadSsh(logger logr.Logger, metadata ActionMetadat metadata.Filepath = filepath } destFilepath := 
metadata.Filepath
+	if !strings.HasPrefix(destFilepath, "/") {
+		destFilepath = filepath.Join(sink.RootPath, destFilepath)
+	}
 	sshConn, err := getSshConn(*sink)
 	if err != nil {
 		logger.Error(err, "failed to get ssh conn")
@@ -281,6 +289,9 @@
 		metadata.Filepath = filepath
 	}
 	destFilepath := metadata.Filepath
+	if !strings.HasPrefix(destFilepath, "/") {
+		destFilepath = filepath.Join(sink.RootPath, destFilepath)
+	}
 	sshConn, err := getSshConn(*sink)
 	if err != nil {
 		logger.Error(err, "failed to get ssh conn")
@@ -317,11 +328,87 @@
 	sizeBytes := make([]byte, 8)
 	binary.BigEndian.PutUint64(sizeBytes, uint64(size))
 	writer.Write(sizeBytes[:])
-	len, _ := f.flowControl.LimitFlow(fd, writer, nil)
+	var reader io.Reader = fd
+	if metadata.LimitSize != "" {
+		limitSize, _ := strconv.ParseInt(metadata.LimitSize, 10, 64)
+		reader = io.LimitReader(reader, limitSize)
+	}
+	len, _ := f.flowControl.LimitFlow(reader, writer, nil)
+	logger.Info("limitFlow", "len", len)
+	return nil
+}
+
+func (f *FileServer) processListSsh(logger logr.Logger, metadata ActionMetadata, writer io.Writer) error {
+	sink, err := GetSink(metadata.Sink, SinkTypeSftp)
+	if err != nil {
+		logger.Error(err, "fail to get sink", "sinkName", metadata.Sink)
+		return err
+	}
+	if metadata.Filepath == "" {
+		logger.Info("no path provided, only root path will be used: " + sink.RootPath)
+	}
+	metadata.Filepath = filepath.Join(sink.RootPath, metadata.Filepath)
+	sshConn, err := getSshConn(*sink)
+	if err != nil {
+		logger.Error(err, "failed to get ssh conn")
+		return err
+	}
+	defer sshConn.Close()
+	client, err := sftp.NewClient(sshConn)
+	if err != nil {
+		logger.Error(err, "failed to get sftp client")
+		return err
+	}
+	defer client.Close()
+	entries, err := client.ReadDir(metadata.Filepath)
+	if err != nil {
+		logger.Error(err, "failed to read dir: "+metadata.Filepath)
+		return err
+	}
+	// just extract the name from each os.FileInfo; the slice must start with
+	// length 0, otherwise append would leave len(entries) leading empty strings
+	entryNames := make([]string, 0, len(entries))
+	for _, entry := range entries {
+		entryNames = append(entryNames, entry.Name())
+	}
+	// encode the slice using json
+	encodedEntryNames, err := json.Marshal(entryNames)
+	if err != nil {
+		logger.Error(err, "failed to encode entry names")
+		return err
+	}
+	size := len(encodedEntryNames)
+	sizeBytes := make([]byte, 8)
+	binary.BigEndian.PutUint64(sizeBytes, uint64(size))
+	writer.Write(sizeBytes[:])
+	len, _ := f.flowControl.LimitFlow(bytes.NewReader(encodedEntryNames), writer, nil)
 	logger.Info("limitFlow", "len", len)
 	return nil
 }
 
+func sftpRemove(client *sftp.Client, file string) error {
+	fileInfo, err := client.Stat(file)
+	if err != nil {
+		return err
+	}
+	if fileInfo.IsDir() {
+		fileInfos, err := client.ReadDir(file)
+		if err != nil {
+			return err
+		}
+		for _, fi := range fileInfos {
+			err = sftpRemove(client, filepath.Join(file, fi.Name()))
+			if err != nil {
+				return err
+			}
+		}
+	}
+	err = client.Remove(file)
+	if err != nil {
+		return err
+	}
+	return nil
+}
+
 func getOssAuth(sink Sink) map[string]string {
 	return map[string]string{
 		"endpoint": sink.Endpoint,
@@ -402,18 +489,29 @@
 		filepath := filepath.Join(metadata.InstanceId, metadata.Filename)
 		metadata.Filepath = filepath
 	}
-	reader, writer := io.Pipe()
+	pReader, writer := io.Pipe()
 	defer func() {
 		writer.Close()
-		reader.Close()
+		pReader.Close()
 	}()
 	var wg sync.WaitGroup
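
The LimitSize handling added to the download paths above is plain io.LimitReader wrapping: when the metadata carries a decimal byte count, at most that many bytes flow to the client. A runnable sketch of the same idea (the values here are hypothetical):

package main

import (
	"fmt"
	"io"
	"os"
	"strconv"
	"strings"
)

func main() {
	payload := strings.NewReader("0123456789")
	limitSize := "4" // as it would arrive in ActionMetadata.LimitSize
	var reader io.Reader = payload
	if limitSize != "" {
		n, err := strconv.ParseInt(limitSize, 10, 64)
		if err == nil {
			// cap the stream at n bytes, exactly as the handlers above do
			reader = io.LimitReader(reader, n)
		}
	}
	written, _ := io.Copy(os.Stdout, reader) // prints "0123"
	fmt.Println()
	fmt.Println("written:", written) // 4
}
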
wg.Add(1) go func() { defer func() { - reader.Close() + err := recover() + if err != nil { + logger.Info("panic", "err", err) + } + }() + defer func() { + pReader.Close() wg.Done() }() + var reader io.Reader = pReader + if metadata.LimitSize != "" { + limitSize, _ := strconv.ParseInt(metadata.LimitSize, 10, 64) + reader = io.LimitReader(reader, limitSize) + } len, _ := f.flowControl.LimitFlow(reader, conn, nil) logger.Info("limitFlow", "len", len) }() @@ -436,8 +534,57 @@ func (f *FileServer) processDownloadOss(logger logr.Logger, metadata ActionMetad return nil } +func (f *FileServer) processListOss(logger logr.Logger, metadata ActionMetadata, conn net.Conn) error { + sink, err := GetSink(metadata.Sink, SinkTypeOss) + if err != nil { + logger.Error(err, "fail to get sink", "sinkName", metadata.Sink) + return err + } + fileService, err := remote.GetFileService("aliyun-oss") + if err != nil { + logger.Error(err, "Failed to get file service of aliyun-oss") + return err + } + if metadata.Filepath != "" && metadata.Filepath[len(metadata.Filepath)-1] != '/' { + metadata.Filepath = polarxPath.NewPathFromStringSequence(metadata.Filepath, "") + } + logger.Info("filepath to be listed: " + metadata.Filepath) + reader, writer := io.Pipe() + defer func() { + writer.Close() + reader.Close() + }() + var wg sync.WaitGroup + wg.Add(1) + go func() { + defer func() { + reader.Close() + wg.Done() + }() + len, _ := f.flowControl.LimitFlow(reader, conn, nil) + logger.Info("limitFlow", "len", len) + }() + ctx := context.Background() + nowOssParams := polarxMap.MergeMap(map[string]string{}, OssParams, false).(map[string]string) + nowOssParams["bucket"] = sink.Bucket + ossAuth := getOssAuth(*sink) + ft, err := fileService.ListFiles(ctx, writer, metadata.Filepath, ossAuth, nowOssParams) + if err != nil { + logger.Error(err, "Failed to list file from oss ") + return err + } + err = ft.Wait() + writer.Close() + if err != nil { + logger.Error(err, "Failed to list file from oss ") + return err + } + wg.Wait() + return nil +} + func (f *FileServer) processUploadRemote(logger logr.Logger, metadata ActionMetadata, conn net.Conn) error { - host, port := ParseNetAddr(metadata.RedirectAddr) + host, port := f.parseNetAddr(metadata.RedirectAddr) fileClient := NewFileClient(host, port, f.flowControl) remoteMetadata := metadata remoteMetadata.Action = ActionLocal2Remote2[metadata.Action] @@ -529,6 +676,9 @@ func (f *FileServer) writeTarFiles(logger logr.Logger, dirPath string, metadata }() len, err := f.flowControl.LimitFlow(conn, pipeWriter, conn) logger.Info("limitFlow", "len", len) + if err == io.EOF { + return nil + } return err } @@ -657,13 +807,34 @@ func (f *FileServer) processDownloadLocal(logger logr.Logger, metadata ActionMet sizeBytes := make([]byte, 8) binary.BigEndian.PutUint64(sizeBytes, uint64(size)) writer.Write(sizeBytes[:]) - len, _ := f.flowControl.LimitFlow(fd, writer, nil) + var reader io.Reader = fd + if metadata.LimitSize != "" { + limitSize, _ := strconv.ParseInt(metadata.LimitSize, 10, 64) + reader = io.LimitReader(reader, limitSize) + } + len, _ := f.flowControl.LimitFlow(reader, writer, nil) logger.Info("limitFlow", "len", len) return nil } +func (f *FileServer) parseNetAddr(addr string) (string, int) { + nodeName := GetNodeNameFromRemoteAddr(addr) + if nodeName != "" { + hostInfos, err := GetHostInfoFromConfig(discovery.HpfsNodeJsonFilePath) + if err != nil { + panic(err) + } + hostInfo, ok := hostInfos[nodeName] + if !ok { + panic(fmt.Sprintf("failed to find node %s in host infos", nodeName)) + } + 
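
processListOss above streams its result through an io.Pipe: a goroutine drains the pipe toward the client connection while the file service writes into it, and a WaitGroup keeps the handler alive until the drain finishes. A condensed stand-alone model of that pattern (bytes.Buffer stands in for the net.Conn and the flow-controlled copy):

package main

import (
	"bytes"
	"fmt"
	"io"
	"sync"
)

func main() {
	reader, writer := io.Pipe()
	var out bytes.Buffer // stands in for the client connection

	var wg sync.WaitGroup
	wg.Add(1)
	go func() {
		defer func() {
			reader.Close()
			wg.Done()
		}()
		// stands in for f.flowControl.LimitFlow(reader, conn, nil)
		n, _ := io.Copy(&out, reader)
		fmt.Println("copied bytes:", n)
	}()

	// The producer side: writes whatever the remote service returns.
	writer.Write([]byte(`["mysql_bin.000001","mysql_bin.000002"]`))
	writer.Close() // signals EOF to the draining goroutine
	wg.Wait()
	fmt.Println(out.String())
}
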
return hostInfo.HpfsHost, int(hostInfo.FsPort) + } + return ParseNetAddr(addr) +} + func (f *FileServer) processDownloadRemote(logger logr.Logger, metadata ActionMetadata, conn net.Conn) error { - host, port := ParseNetAddr(metadata.RedirectAddr) + host, port := f.parseNetAddr(metadata.RedirectAddr) fileClient := NewFileClient(host, port, f.flowControl) remoteMetadata := metadata remoteMetadata.Action = ActionLocal2Remote2[metadata.Action] @@ -720,6 +891,7 @@ func (f *FileServer) readMetadata(reader io.Reader) (actionMeta ActionMetadata, Sink: metadata[MetadataSinkOffset], RequestId: metadata[MetadataRequestIdOffset], OssBufferSize: metadata[MetadataOssBufferSizeOffset], + LimitSize: metadata[MetadataLimitSize], } return } diff --git a/pkg/hpfs/filestream/xbstream_chunk.go b/pkg/hpfs/filestream/xbstream_chunk.go index 9f24761..e07e150 100644 --- a/pkg/hpfs/filestream/xbstream_chunk.go +++ b/pkg/hpfs/filestream/xbstream_chunk.go @@ -20,6 +20,7 @@ import ( "bytes" "encoding/binary" "fmt" + . "github.com/alibaba/polardbx-operator/pkg/hpfs/common" "hash" "hash/crc32" "io" diff --git a/pkg/hpfs/hpfs_grpc.go b/pkg/hpfs/hpfs_grpc.go index de7556a..79259f6 100644 --- a/pkg/hpfs/hpfs_grpc.go +++ b/pkg/hpfs/hpfs_grpc.go @@ -17,15 +17,21 @@ limitations under the License. package hpfs import ( + "bytes" "context" "errors" "fmt" + "github.com/alibaba/polardbx-operator/pkg/hpfs/backupbinlog" + "github.com/alibaba/polardbx-operator/pkg/hpfs/common" + "github.com/alibaba/polardbx-operator/pkg/hpfs/config" "os" "os/exec" "os/user" "path" + "path/filepath" "strconv" "strings" + "time" "github.com/go-logr/logr" protobuf "github.com/golang/protobuf/proto" @@ -739,3 +745,193 @@ func (r *rpcService) ControlCgroupsBlkio(ctx context.Context, request *proto.Con return &proto.ControlCgroupsBlkioResponse{Status: r.ok("")}, nil } + +func (r *rpcService) OpenBackupBinlog(ctx context.Context, request *proto.OpenBackupBinlogRequest) (*proto.OpenBackupBinlogResponse, error) { + infoVersionFilepath := filepath.Join(request.GetLogDir(), backupbinlog.InfoVersionFilename) + versionFileExists, err := r.localFileService.IsExists(infoVersionFilepath) + if err != nil { + return &proto.OpenBackupBinlogResponse{Status: r.fail(err)}, nil + } + version := backupbinlog.InfoVersion + "=" + strconv.FormatInt(time.Now().Unix(), 10) + if !versionFileExists { + r.Info("the version file does not exists, create one", "filepath", infoVersionFilepath) + os.WriteFile(infoVersionFilepath, []byte(version), 0644) + } else { + data, err := os.ReadFile(infoVersionFilepath) + if err != nil { + return &proto.OpenBackupBinlogResponse{Status: r.fail(err)}, nil + } + version = string(data) + } + infoFilepath := filepath.Join(request.GetLogDir(), backupbinlog.InfoFilename) + exists, err := r.localFileService.IsExists(infoFilepath) + if err != nil { + return &proto.OpenBackupBinlogResponse{Status: r.fail(err)}, nil + } + if exists { + r.Info("the file exists, do update", "filepath", infoFilepath) + } + buf := bytes.Buffer{} + buf.Write([]byte(request.GetContent())) + buf.Write([]byte(version)) + buf.Write([]byte("\n")) + err = os.WriteFile(infoFilepath, buf.Bytes(), 0644) + if err != nil { + r.Error(err, "failed to write file", "filepath", infoFilepath) + return &proto.OpenBackupBinlogResponse{Status: r.fail(err)}, nil + } + return &proto.OpenBackupBinlogResponse{Status: r.ok("")}, nil +} + +func (r *rpcService) CloseBackupBinlog(ctx context.Context, request *proto.CloseBackupBinlogRequest) (*proto.CloseBackupBinlogResponse, error) { + infoFilepath := 
filepath.Join(request.GetLogDir(), backupbinlog.InfoFilename)
+	exists, err := r.localFileService.IsExists(infoFilepath)
+	if err != nil {
+		return &proto.CloseBackupBinlogResponse{Status: r.fail(err)}, nil
+	}
+	if exists {
+		err := os.Remove(infoFilepath)
+		if err != nil {
+			r.Error(err, "failed to remove backup binlog infoFile", "filepath", infoFilepath)
+			return &proto.CloseBackupBinlogResponse{Status: r.fail(err)}, nil
+		}
+	}
+	return &proto.CloseBackupBinlogResponse{Status: r.ok("")}, nil
+}
+
+func (r *rpcService) UploadLatestBinlogFile(ctx context.Context, request *proto.UploadLatestBinlogFileRequest) (*proto.UploadLatestBinlogFileResponse, error) {
+	done := backupbinlog.UploadLatestBinlogFile(request.GetLogDir())
+	r.Info(fmt.Sprintf("latest binlog file upload, done=%v", done))
+	return &proto.UploadLatestBinlogFileResponse{Status: r.ok(""), Done: done}, nil
+}
+
+func (r *rpcService) GetWatcherInfoHash(ctx context.Context, request *proto.GetWatcherInfoHashRequest) (*proto.GetWatcherInfoHashResponse, error) {
+	hash := backupbinlog.GetWatcherInfoHash(request.GetLogDir())
+	return &proto.GetWatcherInfoHashResponse{Status: r.ok(""), Hash: hash}, nil
+}
+
+func GetFileServiceParam(sinkName string, sinkType string) (err error, params map[string]string, auth map[string]string, fileServiceName string, returnSink config.Sink) {
+	var sinkPtr *config.Sink
+	for _, sink := range config.GetConfig().Sinks {
+		if sink.Name == sinkName && sink.Type == sinkType {
+			sinkPtr = &sink
+			returnSink = sink
+			break
+		}
+	}
+	if sinkPtr == nil {
+		err = fmt.Errorf("sink not found. type=%s name=%s", sinkType, sinkName)
+		return
+	}
+	auth = map[string]string{}
+	params = map[string]string{}
+	if sinkPtr.Type == config.SinkTypeOss {
+		auth["endpoint"] = sinkPtr.Endpoint
+		auth["access_key"] = sinkPtr.AccessKey
+		auth["access_secret"] = sinkPtr.AccessSecret
+		params["bucket"] = sinkPtr.Bucket
+		fileServiceName = "aliyun-oss"
+	} else if sinkPtr.Type == config.SinkTypeSftp {
+		auth["port"] = strconv.FormatInt(int64(sinkPtr.Port), 10)
+		auth["host"] = sinkPtr.Host
+		auth["username"] = sinkPtr.User
+		auth["password"] = sinkPtr.Password
+		fileServiceName = "sftp"
+	}
+	return
+}
+
+func (r *rpcService) DeleteBinlogFilesBefore(ctx context.Context, request *proto.DeleteBinlogFilesBeforeRequest) (*proto.DeleteBinlogFilesBeforeResponse, error) {
+	err, params, auth, fileServiceName, sink := GetFileServiceParam(request.GetSinkName(), request.GetSinkType())
+	if err != nil {
+		return &proto.DeleteBinlogFilesBeforeResponse{Status: r.fail(err)}, nil
+	}
+	params["deadline"] = strconv.FormatInt(request.GetUnixTime(), 10)
+	pxcBinlogDir := config.GetPxcBinlogStorageDirectory(request.GetNamespace(), request.GetPxcName(), request.GetPxcUid())
+	if sink.RootPath != "" && !strings.HasPrefix(pxcBinlogDir, "/") {
+		pxcBinlogDir = filepath.Join(sink.RootPath, pxcBinlogDir)
+	}
+	expiredFiles := make([]string, 0)
+	expiredFilesPtr := &expiredFiles
+	fileService, err := remote.GetFileService(fileServiceName)
+	if err != nil {
+		r.Error(err, "Failed to get file service")
+		return &proto.DeleteBinlogFilesBeforeResponse{Status: r.fail(err)}, nil
+	}
+	ctx = context.WithValue(ctx, common.AffectedFiles, expiredFilesPtr)
+	ft, err := fileService.DeleteExpiredFile(ctx, pxcBinlogDir, auth, params)
+	if err == nil {
+		err = ft.Wait()
+	}
+	if err != nil {
+		return &proto.DeleteBinlogFilesBeforeResponse{Status: r.fail(err)}, nil
+	}
+	return &proto.DeleteBinlogFilesBeforeResponse{Status: r.ok(""), DeletedFiles: *expiredFilesPtr}, nil
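
GetFileServiceParam above dispatches a named sink to the matching file-service implementation and splits its settings into auth and params maps. A self-contained sketch of that mapping (the sink struct here is a trimmed stand-in for config.Sink, covering only the fields the dispatch uses):

package main

import (
	"fmt"
	"strconv"
)

type sink struct {
	Name, Type, Endpoint, AccessKey, AccessSecret, Bucket, Host, User, Password string
	Port                                                                        int
}

// fileServiceParams mirrors the sink-type dispatch above: OSS sinks map to
// the "aliyun-oss" file service, SFTP sinks to "sftp".
func fileServiceParams(s sink) (name string, auth, params map[string]string) {
	auth, params = map[string]string{}, map[string]string{}
	switch s.Type {
	case "oss":
		auth["endpoint"], auth["access_key"], auth["access_secret"] = s.Endpoint, s.AccessKey, s.AccessSecret
		params["bucket"] = s.Bucket
		name = "aliyun-oss"
	case "sftp":
		auth["host"], auth["port"] = s.Host, strconv.Itoa(s.Port)
		auth["username"], auth["password"] = s.User, s.Password
		name = "sftp"
	}
	return
}

func main() {
	name, auth, params := fileServiceParams(sink{Name: "default", Type: "oss", Bucket: "backup-bucket"})
	fmt.Println(name, params["bucket"], len(auth)) // aliyun-oss backup-bucket 3
}
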
+}
+
+func (r *rpcService) ListLocalBinlogList(ctx context.Context, request *proto.ListLocalBinlogListRequest) (*proto.ListLocalBinlogListResponse, error) {
+	logDir := request.GetLogDir()
+	exists, err := r.localFileService.IsExists(logDir)
+	if err != nil {
+		return &proto.ListLocalBinlogListResponse{Status: r.fail(err)}, nil
+	}
+	if !exists {
+		return &proto.ListLocalBinlogListResponse{Status: r.fail(fmt.Errorf("not found filepath = %s", logDir))}, nil
+	}
+	versionFilepath := filepath.Join(logDir, backupbinlog.InfoVersionFilename)
+	versionBytes, err := os.ReadFile(versionFilepath)
+	if err != nil {
+		return &proto.ListLocalBinlogListResponse{Status: r.fail(err)}, nil
+	}
+	index := bytes.IndexByte(versionBytes, '=')
+	var version string
+	if index >= 0 {
+		version = string(versionBytes[index+1:])
+	}
+	indexFilepath := filepath.Join(logDir, backupbinlog.IndexFilename)
+	indexBytes, err := os.ReadFile(indexFilepath)
+	if err != nil {
+		return &proto.ListLocalBinlogListResponse{Status: r.fail(err)}, nil
+	}
+	indexFileContent := string(indexBytes)
+	binlogFiles := make([]string, 0, 20)
+	for _, file := range strings.Split(indexFileContent, "\n") {
+		if len(file) > 0 {
+			binlogFiles = append(binlogFiles, file)
+		}
+	}
+	return &proto.ListLocalBinlogListResponse{
+		Version:     version,
+		BinlogFiles: binlogFiles,
+		Status:      r.ok(""),
+	}, nil
+}
+
+func (r *rpcService) ListRemoteBinlogList(ctx context.Context, request *proto.ListRemoteBinlogListRequest) (*proto.ListRemoteBinlogListResponse, error) {
+	err, params, auth, fileServiceName, sink := GetFileServiceParam(request.GetSinkName(), request.GetSinkType())
+	if err != nil {
+		return &proto.ListRemoteBinlogListResponse{Status: r.fail(err)}, nil
+	}
+	fileService, err := remote.GetFileService(fileServiceName)
+	if err != nil {
+		r.Error(err, "Failed to get file service")
+		return &proto.ListRemoteBinlogListResponse{Status: r.fail(err)}, nil
+	}
+	resultFiles := make([]string, 0)
+	resultFilesPtr := &resultFiles
+	ctx = context.WithValue(ctx, common.AffectedFiles, resultFilesPtr)
+	xstoreBinlogDir := config.GetXStorePodBinlogStorageDirectory(request.GetNamespace(), request.GetPxcName(), request.GetPxcUid(), request.GetXStoreName(), request.GetXStoreUid(), request.GetPodName())
+	if sink.RootPath != "" && !strings.HasPrefix(xstoreBinlogDir, "/") {
+		xstoreBinlogDir = filepath.Join(sink.RootPath, xstoreBinlogDir)
+	}
+	params["deadline"] = strconv.FormatInt(time.Now().Unix()+3600, 10)
+	ft, err := fileService.ListAllFiles(ctx, xstoreBinlogDir, auth, params)
+	if err == nil {
+		err = ft.Wait()
+	}
+	if err != nil {
+		return &proto.ListRemoteBinlogListResponse{Status: r.fail(err)}, nil
+	}
+	return &proto.ListRemoteBinlogListResponse{Status: r.ok(""), Files: *resultFilesPtr}, nil
+}
diff --git a/pkg/hpfs/hpfs_proxy.go b/pkg/hpfs/hpfs_proxy.go
index a81fec2..b4a73be 100644
--- a/pkg/hpfs/hpfs_proxy.go
+++ b/pkg/hpfs/hpfs_proxy.go
@@ -41,6 +41,81 @@ type proxy struct {
 	local proto.HpfsServiceServer
 }
 
+func (p *proxy) GetWatcherInfoHash(ctx context.Context, request *proto.GetWatcherInfoHashRequest) (*proto.GetWatcherInfoHashResponse, error) {
+	resp, err := p.executeOnHost(ctx, "GetWatcherInfoHash", request.Host, request,
+		func(c proto.HpfsServiceClient) (protobuf.Message, error) {
+			return c.GetWatcherInfoHash(ctx, request)
+		},
+		func(s proto.HpfsServiceServer) (protobuf.Message, error) {
+			return s.GetWatcherInfoHash(ctx, request)
+		},
+	)
+	if err != nil {
+		return nil, err
+	}
+	return 
resp.(*proto.GetWatcherInfoHashResponse), err +} + +func (p *proxy) OpenBackupBinlog(ctx context.Context, request *proto.OpenBackupBinlogRequest) (*proto.OpenBackupBinlogResponse, error) { + resp, err := p.executeOnHost(ctx, "OpenBackupBinlog", request.Host, request, + func(c proto.HpfsServiceClient) (protobuf.Message, error) { + return c.OpenBackupBinlog(ctx, request) + }, + func(s proto.HpfsServiceServer) (protobuf.Message, error) { + return s.OpenBackupBinlog(ctx, request) + }, + ) + if err != nil { + return nil, err + } + return resp.(*proto.OpenBackupBinlogResponse), err +} + +func (p *proxy) CloseBackupBinlog(ctx context.Context, request *proto.CloseBackupBinlogRequest) (*proto.CloseBackupBinlogResponse, error) { + resp, err := p.executeOnHost(ctx, "CloseBackupBinlog", request.Host, request, + func(c proto.HpfsServiceClient) (protobuf.Message, error) { + return c.CloseBackupBinlog(ctx, request) + }, + func(s proto.HpfsServiceServer) (protobuf.Message, error) { + return s.CloseBackupBinlog(ctx, request) + }, + ) + if err != nil { + return nil, err + } + return resp.(*proto.CloseBackupBinlogResponse), err +} + +func (p *proxy) UploadLatestBinlogFile(ctx context.Context, request *proto.UploadLatestBinlogFileRequest) (*proto.UploadLatestBinlogFileResponse, error) { + resp, err := p.executeOnHost(ctx, "UploadLatestBinlogFile", request.Host, request, + func(c proto.HpfsServiceClient) (protobuf.Message, error) { + return c.UploadLatestBinlogFile(ctx, request) + }, + func(s proto.HpfsServiceServer) (protobuf.Message, error) { + return s.UploadLatestBinlogFile(ctx, request) + }, + ) + if err != nil { + return nil, err + } + return resp.(*proto.UploadLatestBinlogFileResponse), err +} + +func (p *proxy) DeleteBinlogFilesBefore(ctx context.Context, request *proto.DeleteBinlogFilesBeforeRequest) (*proto.DeleteBinlogFilesBeforeResponse, error) { + resp, err := p.executeOnHost(ctx, "DeleteBinlogFilesBefore", nil, request, + func(c proto.HpfsServiceClient) (protobuf.Message, error) { + return c.DeleteBinlogFilesBefore(ctx, request) + }, + func(s proto.HpfsServiceServer) (protobuf.Message, error) { + return s.DeleteBinlogFilesBefore(ctx, request) + }, + ) + if err != nil { + return nil, err + } + return resp.(*proto.DeleteBinlogFilesBeforeResponse), err +} + func (p *proxy) executeOnHost(ctx context.Context, api string, host *proto.Host, request protobuf.Message, remoteFn func(c proto.HpfsServiceClient) (protobuf.Message, error), localFn func(s proto.HpfsServiceServer) (protobuf.Message, error)) (protobuf.Message, error) { @@ -345,6 +420,38 @@ func (p *proxy) ShowDiskInfo(ctx context.Context, request *proto.ShowDiskInfoReq return resp.(*proto.ShowDiskInfoResponse), err } +func (p *proxy) ListLocalBinlogList(ctx context.Context, request *proto.ListLocalBinlogListRequest) (*proto.ListLocalBinlogListResponse, error) { + resp, err := p.executeOnHost(ctx, "ListLocalBinlogList", request.Host, request, + func(c proto.HpfsServiceClient) (protobuf.Message, error) { + return c.ListLocalBinlogList(ctx, request) + }, + func(s proto.HpfsServiceServer) (protobuf.Message, error) { + return s.ListLocalBinlogList(ctx, request) + }, + ) + + if err != nil { + return nil, err + } + return resp.(*proto.ListLocalBinlogListResponse), err +} + +func (p *proxy) ListRemoteBinlogList(ctx context.Context, request *proto.ListRemoteBinlogListRequest) (*proto.ListRemoteBinlogListResponse, error) { + resp, err := p.executeOnHost(ctx, "ListRemoteBinlogList", nil, request, + func(c proto.HpfsServiceClient) (protobuf.Message, error) { 
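+			// Remote branch: taken when executeOnHost dispatches the call to a peer
+			// hpfs agent; with the nil host passed above, the local branch below is
+			// expected to serve the request instead.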
+ return c.ListRemoteBinlogList(ctx, request) + }, + func(s proto.HpfsServiceServer) (protobuf.Message, error) { + return s.ListRemoteBinlogList(ctx, request) + }, + ) + + if err != nil { + return nil, err + } + return resp.(*proto.ListRemoteBinlogListResponse), err +} + func NewHpfsServiceServer(hostDiscovery discovery.HostDiscovery, localFileService local.LocalFileService, taskManager task.Manager) proto.HpfsServiceServer { logger := zap.New(zap.UseDevMode(true)) diff --git a/pkg/hpfs/proto/hpfs.pb.go b/pkg/hpfs/proto/hpfs.pb.go index a59b0cb..c467c72 100644 --- a/pkg/hpfs/proto/hpfs.pb.go +++ b/pkg/hpfs/proto/hpfs.pb.go @@ -3471,6 +3471,903 @@ func (x *ControlCgroupsBlkioResponse) GetStatus() *Status { return nil } +type OpenBackupBinlogRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Target host + Host *Host `protobuf:"bytes,1,opt,name=host,proto3" json:"host,omitempty"` + //the log directory of the pod on the host + LogDir string `protobuf:"bytes,2,opt,name=log_dir,json=logDir,proto3" json:"log_dir,omitempty"` + //the info file content + Content string `protobuf:"bytes,3,opt,name=content,proto3" json:"content,omitempty"` +} + +func (x *OpenBackupBinlogRequest) Reset() { + *x = OpenBackupBinlogRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_hpfs_proto_msgTypes[51] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *OpenBackupBinlogRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*OpenBackupBinlogRequest) ProtoMessage() {} + +func (x *OpenBackupBinlogRequest) ProtoReflect() protoreflect.Message { + mi := &file_hpfs_proto_msgTypes[51] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use OpenBackupBinlogRequest.ProtoReflect.Descriptor instead. +func (*OpenBackupBinlogRequest) Descriptor() ([]byte, []int) { + return file_hpfs_proto_rawDescGZIP(), []int{51} +} + +func (x *OpenBackupBinlogRequest) GetHost() *Host { + if x != nil { + return x.Host + } + return nil +} + +func (x *OpenBackupBinlogRequest) GetLogDir() string { + if x != nil { + return x.LogDir + } + return "" +} + +func (x *OpenBackupBinlogRequest) GetContent() string { + if x != nil { + return x.Content + } + return "" +} + +type OpenBackupBinlogResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Operation status. + Status *Status `protobuf:"bytes,1,opt,name=status,proto3" json:"status,omitempty"` +} + +func (x *OpenBackupBinlogResponse) Reset() { + *x = OpenBackupBinlogResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_hpfs_proto_msgTypes[52] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *OpenBackupBinlogResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*OpenBackupBinlogResponse) ProtoMessage() {} + +func (x *OpenBackupBinlogResponse) ProtoReflect() protoreflect.Message { + mi := &file_hpfs_proto_msgTypes[52] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use OpenBackupBinlogResponse.ProtoReflect.Descriptor instead. 
+func (*OpenBackupBinlogResponse) Descriptor() ([]byte, []int) { + return file_hpfs_proto_rawDescGZIP(), []int{52} +} + +func (x *OpenBackupBinlogResponse) GetStatus() *Status { + if x != nil { + return x.Status + } + return nil +} + +type CloseBackupBinlogRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Target host + Host *Host `protobuf:"bytes,1,opt,name=host,proto3" json:"host,omitempty"` + //the log directory of the pod on the host + LogDir string `protobuf:"bytes,2,opt,name=log_dir,json=logDir,proto3" json:"log_dir,omitempty"` +} + +func (x *CloseBackupBinlogRequest) Reset() { + *x = CloseBackupBinlogRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_hpfs_proto_msgTypes[53] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *CloseBackupBinlogRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*CloseBackupBinlogRequest) ProtoMessage() {} + +func (x *CloseBackupBinlogRequest) ProtoReflect() protoreflect.Message { + mi := &file_hpfs_proto_msgTypes[53] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use CloseBackupBinlogRequest.ProtoReflect.Descriptor instead. +func (*CloseBackupBinlogRequest) Descriptor() ([]byte, []int) { + return file_hpfs_proto_rawDescGZIP(), []int{53} +} + +func (x *CloseBackupBinlogRequest) GetHost() *Host { + if x != nil { + return x.Host + } + return nil +} + +func (x *CloseBackupBinlogRequest) GetLogDir() string { + if x != nil { + return x.LogDir + } + return "" +} + +type CloseBackupBinlogResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Operation status. + Status *Status `protobuf:"bytes,1,opt,name=status,proto3" json:"status,omitempty"` +} + +func (x *CloseBackupBinlogResponse) Reset() { + *x = CloseBackupBinlogResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_hpfs_proto_msgTypes[54] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *CloseBackupBinlogResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*CloseBackupBinlogResponse) ProtoMessage() {} + +func (x *CloseBackupBinlogResponse) ProtoReflect() protoreflect.Message { + mi := &file_hpfs_proto_msgTypes[54] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use CloseBackupBinlogResponse.ProtoReflect.Descriptor instead. 
+func (*CloseBackupBinlogResponse) Descriptor() ([]byte, []int) { + return file_hpfs_proto_rawDescGZIP(), []int{54} +} + +func (x *CloseBackupBinlogResponse) GetStatus() *Status { + if x != nil { + return x.Status + } + return nil +} + +type UploadLatestBinlogFileRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Target host + Host *Host `protobuf:"bytes,1,opt,name=host,proto3" json:"host,omitempty"` + //the log directory of the pod on the host + LogDir string `protobuf:"bytes,2,opt,name=log_dir,json=logDir,proto3" json:"log_dir,omitempty"` +} + +func (x *UploadLatestBinlogFileRequest) Reset() { + *x = UploadLatestBinlogFileRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_hpfs_proto_msgTypes[55] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *UploadLatestBinlogFileRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*UploadLatestBinlogFileRequest) ProtoMessage() {} + +func (x *UploadLatestBinlogFileRequest) ProtoReflect() protoreflect.Message { + mi := &file_hpfs_proto_msgTypes[55] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use UploadLatestBinlogFileRequest.ProtoReflect.Descriptor instead. +func (*UploadLatestBinlogFileRequest) Descriptor() ([]byte, []int) { + return file_hpfs_proto_rawDescGZIP(), []int{55} +} + +func (x *UploadLatestBinlogFileRequest) GetHost() *Host { + if x != nil { + return x.Host + } + return nil +} + +func (x *UploadLatestBinlogFileRequest) GetLogDir() string { + if x != nil { + return x.LogDir + } + return "" +} + +type UploadLatestBinlogFileResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Operation status. + Status *Status `protobuf:"bytes,1,opt,name=status,proto3" json:"status,omitempty"` + // check if the latest binlog file is uploaded + Done bool `protobuf:"varint,2,opt,name=done,proto3" json:"done,omitempty"` +} + +func (x *UploadLatestBinlogFileResponse) Reset() { + *x = UploadLatestBinlogFileResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_hpfs_proto_msgTypes[56] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *UploadLatestBinlogFileResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*UploadLatestBinlogFileResponse) ProtoMessage() {} + +func (x *UploadLatestBinlogFileResponse) ProtoReflect() protoreflect.Message { + mi := &file_hpfs_proto_msgTypes[56] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use UploadLatestBinlogFileResponse.ProtoReflect.Descriptor instead. 
+func (*UploadLatestBinlogFileResponse) Descriptor() ([]byte, []int) { + return file_hpfs_proto_rawDescGZIP(), []int{56} +} + +func (x *UploadLatestBinlogFileResponse) GetStatus() *Status { + if x != nil { + return x.Status + } + return nil +} + +func (x *UploadLatestBinlogFileResponse) GetDone() bool { + if x != nil { + return x.Done + } + return false +} + +type GetWatcherInfoHashRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Target host + Host *Host `protobuf:"bytes,1,opt,name=host,proto3" json:"host,omitempty"` + //log dir. also worker dir of the watcher + LogDir string `protobuf:"bytes,2,opt,name=logDir,proto3" json:"logDir,omitempty"` +} + +func (x *GetWatcherInfoHashRequest) Reset() { + *x = GetWatcherInfoHashRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_hpfs_proto_msgTypes[57] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *GetWatcherInfoHashRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GetWatcherInfoHashRequest) ProtoMessage() {} + +func (x *GetWatcherInfoHashRequest) ProtoReflect() protoreflect.Message { + mi := &file_hpfs_proto_msgTypes[57] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GetWatcherInfoHashRequest.ProtoReflect.Descriptor instead. +func (*GetWatcherInfoHashRequest) Descriptor() ([]byte, []int) { + return file_hpfs_proto_rawDescGZIP(), []int{57} +} + +func (x *GetWatcherInfoHashRequest) GetHost() *Host { + if x != nil { + return x.Host + } + return nil +} + +func (x *GetWatcherInfoHashRequest) GetLogDir() string { + if x != nil { + return x.LogDir + } + return "" +} + +type GetWatcherInfoHashResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Operation status. + Status *Status `protobuf:"bytes,1,opt,name=status,proto3" json:"status,omitempty"` + // Hash value of info file + Hash string `protobuf:"bytes,2,opt,name=hash,proto3" json:"hash,omitempty"` +} + +func (x *GetWatcherInfoHashResponse) Reset() { + *x = GetWatcherInfoHashResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_hpfs_proto_msgTypes[58] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *GetWatcherInfoHashResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GetWatcherInfoHashResponse) ProtoMessage() {} + +func (x *GetWatcherInfoHashResponse) ProtoReflect() protoreflect.Message { + mi := &file_hpfs_proto_msgTypes[58] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GetWatcherInfoHashResponse.ProtoReflect.Descriptor instead. 
+func (*GetWatcherInfoHashResponse) Descriptor() ([]byte, []int) { + return file_hpfs_proto_rawDescGZIP(), []int{58} +} + +func (x *GetWatcherInfoHashResponse) GetStatus() *Status { + if x != nil { + return x.Status + } + return nil +} + +func (x *GetWatcherInfoHashResponse) GetHash() string { + if x != nil { + return x.Hash + } + return "" +} + +type DeleteBinlogFilesBeforeRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + //the k8s namespace + Namespace string `protobuf:"bytes,1,opt,name=namespace,proto3" json:"namespace,omitempty"` + // the instance name of PolarDB-X + PxcName string `protobuf:"bytes,2,opt,name=pxcName,proto3" json:"pxcName,omitempty"` + //the object uid of PolarDB-X + PxcUid string `protobuf:"bytes,3,opt,name=pxcUid,proto3" json:"pxcUid,omitempty"` + //the timestamp unit: seconds + UnixTime int64 `protobuf:"varint,4,opt,name=unixTime,proto3" json:"unixTime,omitempty"` + //the sink type + SinkType string `protobuf:"bytes,5,opt,name=sinkType,proto3" json:"sinkType,omitempty"` + //the sink name + SinkName string `protobuf:"bytes,6,opt,name=sinkName,proto3" json:"sinkName,omitempty"` +} + +func (x *DeleteBinlogFilesBeforeRequest) Reset() { + *x = DeleteBinlogFilesBeforeRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_hpfs_proto_msgTypes[59] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *DeleteBinlogFilesBeforeRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*DeleteBinlogFilesBeforeRequest) ProtoMessage() {} + +func (x *DeleteBinlogFilesBeforeRequest) ProtoReflect() protoreflect.Message { + mi := &file_hpfs_proto_msgTypes[59] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use DeleteBinlogFilesBeforeRequest.ProtoReflect.Descriptor instead. +func (*DeleteBinlogFilesBeforeRequest) Descriptor() ([]byte, []int) { + return file_hpfs_proto_rawDescGZIP(), []int{59} +} + +func (x *DeleteBinlogFilesBeforeRequest) GetNamespace() string { + if x != nil { + return x.Namespace + } + return "" +} + +func (x *DeleteBinlogFilesBeforeRequest) GetPxcName() string { + if x != nil { + return x.PxcName + } + return "" +} + +func (x *DeleteBinlogFilesBeforeRequest) GetPxcUid() string { + if x != nil { + return x.PxcUid + } + return "" +} + +func (x *DeleteBinlogFilesBeforeRequest) GetUnixTime() int64 { + if x != nil { + return x.UnixTime + } + return 0 +} + +func (x *DeleteBinlogFilesBeforeRequest) GetSinkType() string { + if x != nil { + return x.SinkType + } + return "" +} + +func (x *DeleteBinlogFilesBeforeRequest) GetSinkName() string { + if x != nil { + return x.SinkName + } + return "" +} + +type DeleteBinlogFilesBeforeResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Operation status. 
+ Status *Status `protobuf:"bytes,1,opt,name=status,proto3" json:"status,omitempty"` + // delete file list + DeletedFiles []string `protobuf:"bytes,2,rep,name=deletedFiles,proto3" json:"deletedFiles,omitempty"` + // check if the latest binlog file is uploaded + Done bool `protobuf:"varint,3,opt,name=done,proto3" json:"done,omitempty"` +} + +func (x *DeleteBinlogFilesBeforeResponse) Reset() { + *x = DeleteBinlogFilesBeforeResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_hpfs_proto_msgTypes[60] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *DeleteBinlogFilesBeforeResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*DeleteBinlogFilesBeforeResponse) ProtoMessage() {} + +func (x *DeleteBinlogFilesBeforeResponse) ProtoReflect() protoreflect.Message { + mi := &file_hpfs_proto_msgTypes[60] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use DeleteBinlogFilesBeforeResponse.ProtoReflect.Descriptor instead. +func (*DeleteBinlogFilesBeforeResponse) Descriptor() ([]byte, []int) { + return file_hpfs_proto_rawDescGZIP(), []int{60} +} + +func (x *DeleteBinlogFilesBeforeResponse) GetStatus() *Status { + if x != nil { + return x.Status + } + return nil +} + +func (x *DeleteBinlogFilesBeforeResponse) GetDeletedFiles() []string { + if x != nil { + return x.DeletedFiles + } + return nil +} + +func (x *DeleteBinlogFilesBeforeResponse) GetDone() bool { + if x != nil { + return x.Done + } + return false +} + +type ListLocalBinlogListRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Target host + Host *Host `protobuf:"bytes,1,opt,name=host,proto3" json:"host,omitempty"` + //log dir. also worker dir of the watcher + LogDir string `protobuf:"bytes,2,opt,name=logDir,proto3" json:"logDir,omitempty"` +} + +func (x *ListLocalBinlogListRequest) Reset() { + *x = ListLocalBinlogListRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_hpfs_proto_msgTypes[61] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ListLocalBinlogListRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ListLocalBinlogListRequest) ProtoMessage() {} + +func (x *ListLocalBinlogListRequest) ProtoReflect() protoreflect.Message { + mi := &file_hpfs_proto_msgTypes[61] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ListLocalBinlogListRequest.ProtoReflect.Descriptor instead. +func (*ListLocalBinlogListRequest) Descriptor() ([]byte, []int) { + return file_hpfs_proto_rawDescGZIP(), []int{61} +} + +func (x *ListLocalBinlogListRequest) GetHost() *Host { + if x != nil { + return x.Host + } + return nil +} + +func (x *ListLocalBinlogListRequest) GetLogDir() string { + if x != nil { + return x.LogDir + } + return "" +} + +type ListLocalBinlogListResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Operation status. 
+ Status *Status `protobuf:"bytes,1,opt,name=status,proto3" json:"status,omitempty"` + //backup binlog version + Version string `protobuf:"bytes,2,opt,name=version,proto3" json:"version,omitempty"` + // binlog file list + BinlogFiles []string `protobuf:"bytes,3,rep,name=binlogFiles,proto3" json:"binlogFiles,omitempty"` +} + +func (x *ListLocalBinlogListResponse) Reset() { + *x = ListLocalBinlogListResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_hpfs_proto_msgTypes[62] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ListLocalBinlogListResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ListLocalBinlogListResponse) ProtoMessage() {} + +func (x *ListLocalBinlogListResponse) ProtoReflect() protoreflect.Message { + mi := &file_hpfs_proto_msgTypes[62] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ListLocalBinlogListResponse.ProtoReflect.Descriptor instead. +func (*ListLocalBinlogListResponse) Descriptor() ([]byte, []int) { + return file_hpfs_proto_rawDescGZIP(), []int{62} +} + +func (x *ListLocalBinlogListResponse) GetStatus() *Status { + if x != nil { + return x.Status + } + return nil +} + +func (x *ListLocalBinlogListResponse) GetVersion() string { + if x != nil { + return x.Version + } + return "" +} + +func (x *ListLocalBinlogListResponse) GetBinlogFiles() []string { + if x != nil { + return x.BinlogFiles + } + return nil +} + +type ListRemoteBinlogListRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + //the k8s namespace + Namespace string `protobuf:"bytes,1,opt,name=namespace,proto3" json:"namespace,omitempty"` + // the instance name of PolarDB-X + PxcName string `protobuf:"bytes,2,opt,name=pxcName,proto3" json:"pxcName,omitempty"` + //the object uid of PolarDB-X + PxcUid string `protobuf:"bytes,3,opt,name=pxcUid,proto3" json:"pxcUid,omitempty"` + // the instance name of xstore + XStoreName string `protobuf:"bytes,4,opt,name=xStoreName,proto3" json:"xStoreName,omitempty"` + //the object uid of xstore + XStoreUid string `protobuf:"bytes,5,opt,name=xStoreUid,proto3" json:"xStoreUid,omitempty"` + //the pod name + PodName string `protobuf:"bytes,6,opt,name=podName,proto3" json:"podName,omitempty"` + //the sink name + SinkName string `protobuf:"bytes,7,opt,name=sinkName,proto3" json:"sinkName,omitempty"` + //the sink type + SinkType string `protobuf:"bytes,8,opt,name=sinkType,proto3" json:"sinkType,omitempty"` +} + +func (x *ListRemoteBinlogListRequest) Reset() { + *x = ListRemoteBinlogListRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_hpfs_proto_msgTypes[63] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ListRemoteBinlogListRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ListRemoteBinlogListRequest) ProtoMessage() {} + +func (x *ListRemoteBinlogListRequest) ProtoReflect() protoreflect.Message { + mi := &file_hpfs_proto_msgTypes[63] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ListRemoteBinlogListRequest.ProtoReflect.Descriptor instead. 
+func (*ListRemoteBinlogListRequest) Descriptor() ([]byte, []int) { + return file_hpfs_proto_rawDescGZIP(), []int{63} +} + +func (x *ListRemoteBinlogListRequest) GetNamespace() string { + if x != nil { + return x.Namespace + } + return "" +} + +func (x *ListRemoteBinlogListRequest) GetPxcName() string { + if x != nil { + return x.PxcName + } + return "" +} + +func (x *ListRemoteBinlogListRequest) GetPxcUid() string { + if x != nil { + return x.PxcUid + } + return "" +} + +func (x *ListRemoteBinlogListRequest) GetXStoreName() string { + if x != nil { + return x.XStoreName + } + return "" +} + +func (x *ListRemoteBinlogListRequest) GetXStoreUid() string { + if x != nil { + return x.XStoreUid + } + return "" +} + +func (x *ListRemoteBinlogListRequest) GetPodName() string { + if x != nil { + return x.PodName + } + return "" +} + +func (x *ListRemoteBinlogListRequest) GetSinkName() string { + if x != nil { + return x.SinkName + } + return "" +} + +func (x *ListRemoteBinlogListRequest) GetSinkType() string { + if x != nil { + return x.SinkType + } + return "" +} + +type ListRemoteBinlogListResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Operation status. + Status *Status `protobuf:"bytes,1,opt,name=status,proto3" json:"status,omitempty"` + //the files + Files []string `protobuf:"bytes,2,rep,name=files,proto3" json:"files,omitempty"` +} + +func (x *ListRemoteBinlogListResponse) Reset() { + *x = ListRemoteBinlogListResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_hpfs_proto_msgTypes[64] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ListRemoteBinlogListResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ListRemoteBinlogListResponse) ProtoMessage() {} + +func (x *ListRemoteBinlogListResponse) ProtoReflect() protoreflect.Message { + mi := &file_hpfs_proto_msgTypes[64] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ListRemoteBinlogListResponse.ProtoReflect.Descriptor instead. 
+func (*ListRemoteBinlogListResponse) Descriptor() ([]byte, []int) { + return file_hpfs_proto_rawDescGZIP(), []int{64} +} + +func (x *ListRemoteBinlogListResponse) GetStatus() *Status { + if x != nil { + return x.Status + } + return nil +} + +func (x *ListRemoteBinlogListResponse) GetFiles() []string { + if x != nil { + return x.Files + } + return nil +} + var File_hpfs_proto protoreflect.FileDescriptor var file_hpfs_proto_rawDesc = []byte{ @@ -3831,102 +4728,244 @@ var file_hpfs_proto_rawDesc = []byte{ 0x42, 0x6c, 0x6b, 0x69, 0x6f, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x25, 0x0a, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0d, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x06, 0x73, 0x74, - 0x61, 0x74, 0x75, 0x73, 0x2a, 0x69, 0x0a, 0x0a, 0x54, 0x61, 0x73, 0x6b, 0x53, 0x74, 0x61, 0x74, - 0x75, 0x73, 0x12, 0x0b, 0x0a, 0x07, 0x50, 0x45, 0x4e, 0x44, 0x49, 0x4e, 0x47, 0x10, 0x00, 0x12, - 0x0b, 0x0a, 0x07, 0x52, 0x55, 0x4e, 0x4e, 0x49, 0x4e, 0x47, 0x10, 0x01, 0x12, 0x0b, 0x0a, 0x07, - 0x53, 0x55, 0x43, 0x43, 0x45, 0x53, 0x53, 0x10, 0x02, 0x12, 0x0a, 0x0a, 0x06, 0x46, 0x41, 0x49, - 0x4c, 0x45, 0x44, 0x10, 0x03, 0x12, 0x0d, 0x0a, 0x09, 0x43, 0x41, 0x4e, 0x43, 0x45, 0x4c, 0x49, - 0x4e, 0x47, 0x10, 0x04, 0x12, 0x0c, 0x0a, 0x08, 0x43, 0x41, 0x4e, 0x43, 0x45, 0x4c, 0x45, 0x44, - 0x10, 0x05, 0x12, 0x0b, 0x0a, 0x07, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x09, 0x2a, - 0x46, 0x0a, 0x08, 0x42, 0x6c, 0x6b, 0x69, 0x6f, 0x4b, 0x65, 0x79, 0x12, 0x0d, 0x0a, 0x09, 0x49, - 0x4f, 0x50, 0x53, 0x5f, 0x52, 0x45, 0x41, 0x44, 0x10, 0x00, 0x12, 0x0e, 0x0a, 0x0a, 0x49, 0x4f, - 0x50, 0x53, 0x5f, 0x57, 0x52, 0x49, 0x54, 0x45, 0x10, 0x01, 0x12, 0x0c, 0x0a, 0x08, 0x42, 0x50, - 0x53, 0x5f, 0x52, 0x45, 0x41, 0x44, 0x10, 0x02, 0x12, 0x0d, 0x0a, 0x09, 0x42, 0x50, 0x53, 0x5f, - 0x57, 0x52, 0x49, 0x54, 0x45, 0x10, 0x03, 0x32, 0xff, 0x09, 0x0a, 0x0b, 0x48, 0x70, 0x66, 0x73, - 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x52, 0x0a, 0x0f, 0x43, 0x72, 0x65, 0x61, 0x74, - 0x65, 0x44, 0x69, 0x72, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x79, 0x12, 0x1d, 0x2e, 0x70, 0x72, 0x6f, - 0x74, 0x6f, 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x44, 0x69, 0x72, 0x65, 0x63, 0x74, 0x6f, - 0x72, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1e, 0x2e, 0x70, 0x72, 0x6f, 0x74, - 0x6f, 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x44, 0x69, 0x72, 0x65, 0x63, 0x74, 0x6f, 0x72, - 0x79, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x43, 0x0a, 0x0a, 0x43, - 0x72, 0x65, 0x61, 0x74, 0x65, 0x46, 0x69, 0x6c, 0x65, 0x12, 0x18, 0x2e, 0x70, 0x72, 0x6f, 0x74, - 0x6f, 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x46, 0x69, 0x6c, 0x65, 0x52, 0x65, 0x71, 0x75, - 0x65, 0x73, 0x74, 0x1a, 0x19, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x43, 0x72, 0x65, 0x61, - 0x74, 0x65, 0x46, 0x69, 0x6c, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, - 0x12, 0x5b, 0x0a, 0x12, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x53, 0x79, 0x6d, 0x62, 0x6f, 0x6c, - 0x69, 0x63, 0x4c, 0x69, 0x6e, 0x6b, 0x12, 0x20, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x43, - 0x72, 0x65, 0x61, 0x74, 0x65, 0x53, 0x79, 0x6d, 0x62, 0x6f, 0x6c, 0x69, 0x63, 0x4c, 0x69, 0x6e, - 0x6b, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x21, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x53, 0x79, 0x6d, 0x62, 0x6f, 0x6c, 0x69, 0x63, 0x4c, - 0x69, 0x6e, 0x6b, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x4c, 0x0a, - 0x0d, 0x4c, 
0x69, 0x73, 0x74, 0x44, 0x69, 0x72, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x79, 0x12, 0x1b, - 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x44, 0x69, 0x72, 0x65, 0x63, - 0x74, 0x6f, 0x72, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1c, 0x2e, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x44, 0x69, 0x72, 0x65, 0x63, 0x74, 0x6f, 0x72, - 0x79, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x52, 0x0a, 0x0f, 0x52, - 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x44, 0x69, 0x72, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x79, 0x12, 0x1d, - 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x52, 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x44, 0x69, 0x72, + 0x61, 0x74, 0x75, 0x73, 0x22, 0x6d, 0x0a, 0x17, 0x4f, 0x70, 0x65, 0x6e, 0x42, 0x61, 0x63, 0x6b, + 0x75, 0x70, 0x42, 0x69, 0x6e, 0x6c, 0x6f, 0x67, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, + 0x1f, 0x0a, 0x04, 0x68, 0x6f, 0x73, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0b, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x48, 0x6f, 0x73, 0x74, 0x52, 0x04, 0x68, 0x6f, 0x73, 0x74, + 0x12, 0x17, 0x0a, 0x07, 0x6c, 0x6f, 0x67, 0x5f, 0x64, 0x69, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x06, 0x6c, 0x6f, 0x67, 0x44, 0x69, 0x72, 0x12, 0x18, 0x0a, 0x07, 0x63, 0x6f, 0x6e, + 0x74, 0x65, 0x6e, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x63, 0x6f, 0x6e, 0x74, + 0x65, 0x6e, 0x74, 0x22, 0x41, 0x0a, 0x18, 0x4f, 0x70, 0x65, 0x6e, 0x42, 0x61, 0x63, 0x6b, 0x75, + 0x70, 0x42, 0x69, 0x6e, 0x6c, 0x6f, 0x67, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, + 0x25, 0x0a, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x0d, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x06, + 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x22, 0x54, 0x0a, 0x18, 0x43, 0x6c, 0x6f, 0x73, 0x65, 0x42, + 0x61, 0x63, 0x6b, 0x75, 0x70, 0x42, 0x69, 0x6e, 0x6c, 0x6f, 0x67, 0x52, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x12, 0x1f, 0x0a, 0x04, 0x68, 0x6f, 0x73, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x0b, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x48, 0x6f, 0x73, 0x74, 0x52, 0x04, 0x68, + 0x6f, 0x73, 0x74, 0x12, 0x17, 0x0a, 0x07, 0x6c, 0x6f, 0x67, 0x5f, 0x64, 0x69, 0x72, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x6c, 0x6f, 0x67, 0x44, 0x69, 0x72, 0x22, 0x42, 0x0a, 0x19, + 0x43, 0x6c, 0x6f, 0x73, 0x65, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x42, 0x69, 0x6e, 0x6c, 0x6f, + 0x67, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x25, 0x0a, 0x06, 0x73, 0x74, 0x61, + 0x74, 0x75, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0d, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, + 0x22, 0x59, 0x0a, 0x1d, 0x55, 0x70, 0x6c, 0x6f, 0x61, 0x64, 0x4c, 0x61, 0x74, 0x65, 0x73, 0x74, + 0x42, 0x69, 0x6e, 0x6c, 0x6f, 0x67, 0x46, 0x69, 0x6c, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x12, 0x1f, 0x0a, 0x04, 0x68, 0x6f, 0x73, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x0b, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x48, 0x6f, 0x73, 0x74, 0x52, 0x04, 0x68, 0x6f, + 0x73, 0x74, 0x12, 0x17, 0x0a, 0x07, 0x6c, 0x6f, 0x67, 0x5f, 0x64, 0x69, 0x72, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x06, 0x6c, 0x6f, 0x67, 0x44, 0x69, 0x72, 0x22, 0x5b, 0x0a, 0x1e, 0x55, + 0x70, 0x6c, 0x6f, 0x61, 0x64, 0x4c, 0x61, 0x74, 0x65, 0x73, 0x74, 0x42, 0x69, 0x6e, 0x6c, 0x6f, + 0x67, 0x46, 0x69, 0x6c, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x25, 0x0a, + 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 
0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0d, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x06, 0x73, 0x74, + 0x61, 0x74, 0x75, 0x73, 0x12, 0x12, 0x0a, 0x04, 0x64, 0x6f, 0x6e, 0x65, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x08, 0x52, 0x04, 0x64, 0x6f, 0x6e, 0x65, 0x22, 0x54, 0x0a, 0x19, 0x47, 0x65, 0x74, 0x57, + 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x49, 0x6e, 0x66, 0x6f, 0x48, 0x61, 0x73, 0x68, 0x52, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1f, 0x0a, 0x04, 0x68, 0x6f, 0x73, 0x74, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x0b, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x48, 0x6f, 0x73, 0x74, + 0x52, 0x04, 0x68, 0x6f, 0x73, 0x74, 0x12, 0x16, 0x0a, 0x06, 0x6c, 0x6f, 0x67, 0x44, 0x69, 0x72, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x6c, 0x6f, 0x67, 0x44, 0x69, 0x72, 0x22, 0x57, + 0x0a, 0x1a, 0x47, 0x65, 0x74, 0x57, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x49, 0x6e, 0x66, 0x6f, + 0x48, 0x61, 0x73, 0x68, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x25, 0x0a, 0x06, + 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0d, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x06, 0x73, 0x74, 0x61, + 0x74, 0x75, 0x73, 0x12, 0x12, 0x0a, 0x04, 0x68, 0x61, 0x73, 0x68, 0x18, 0x02, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x04, 0x68, 0x61, 0x73, 0x68, 0x22, 0xc4, 0x01, 0x0a, 0x1e, 0x44, 0x65, 0x6c, 0x65, + 0x74, 0x65, 0x42, 0x69, 0x6e, 0x6c, 0x6f, 0x67, 0x46, 0x69, 0x6c, 0x65, 0x73, 0x42, 0x65, 0x66, + 0x6f, 0x72, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1c, 0x0a, 0x09, 0x6e, 0x61, + 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x6e, + 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x70, 0x78, 0x63, 0x4e, + 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x70, 0x78, 0x63, 0x4e, 0x61, + 0x6d, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x70, 0x78, 0x63, 0x55, 0x69, 0x64, 0x18, 0x03, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x06, 0x70, 0x78, 0x63, 0x55, 0x69, 0x64, 0x12, 0x1a, 0x0a, 0x08, 0x75, 0x6e, + 0x69, 0x78, 0x54, 0x69, 0x6d, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x03, 0x52, 0x08, 0x75, 0x6e, + 0x69, 0x78, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x1a, 0x0a, 0x08, 0x73, 0x69, 0x6e, 0x6b, 0x54, 0x79, + 0x70, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x73, 0x69, 0x6e, 0x6b, 0x54, 0x79, + 0x70, 0x65, 0x12, 0x1a, 0x0a, 0x08, 0x73, 0x69, 0x6e, 0x6b, 0x4e, 0x61, 0x6d, 0x65, 0x18, 0x06, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x73, 0x69, 0x6e, 0x6b, 0x4e, 0x61, 0x6d, 0x65, 0x22, 0x80, + 0x01, 0x0a, 0x1f, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x42, 0x69, 0x6e, 0x6c, 0x6f, 0x67, 0x46, + 0x69, 0x6c, 0x65, 0x73, 0x42, 0x65, 0x66, 0x6f, 0x72, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, + 0x73, 0x65, 0x12, 0x25, 0x0a, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x0d, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x75, + 0x73, 0x52, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x22, 0x0a, 0x0c, 0x64, 0x65, 0x6c, + 0x65, 0x74, 0x65, 0x64, 0x46, 0x69, 0x6c, 0x65, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x09, 0x52, + 0x0c, 0x64, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x64, 0x46, 0x69, 0x6c, 0x65, 0x73, 0x12, 0x12, 0x0a, + 0x04, 0x64, 0x6f, 0x6e, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, 0x04, 0x64, 0x6f, 0x6e, + 0x65, 0x22, 0x55, 0x0a, 0x1a, 0x4c, 0x69, 0x73, 0x74, 0x4c, 0x6f, 0x63, 0x61, 0x6c, 0x42, 0x69, + 0x6e, 0x6c, 0x6f, 0x67, 0x4c, 0x69, 0x73, 0x74, 0x52, 0x65, 
0x71, 0x75, 0x65, 0x73, 0x74, 0x12, + 0x1f, 0x0a, 0x04, 0x68, 0x6f, 0x73, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0b, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x48, 0x6f, 0x73, 0x74, 0x52, 0x04, 0x68, 0x6f, 0x73, 0x74, + 0x12, 0x16, 0x0a, 0x06, 0x6c, 0x6f, 0x67, 0x44, 0x69, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x06, 0x6c, 0x6f, 0x67, 0x44, 0x69, 0x72, 0x22, 0x80, 0x01, 0x0a, 0x1b, 0x4c, 0x69, 0x73, + 0x74, 0x4c, 0x6f, 0x63, 0x61, 0x6c, 0x42, 0x69, 0x6e, 0x6c, 0x6f, 0x67, 0x4c, 0x69, 0x73, 0x74, + 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x25, 0x0a, 0x06, 0x73, 0x74, 0x61, 0x74, + 0x75, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0d, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x2e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, + 0x18, 0x0a, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x20, 0x0a, 0x0b, 0x62, 0x69, 0x6e, + 0x6c, 0x6f, 0x67, 0x46, 0x69, 0x6c, 0x65, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0b, + 0x62, 0x69, 0x6e, 0x6c, 0x6f, 0x67, 0x46, 0x69, 0x6c, 0x65, 0x73, 0x22, 0xfd, 0x01, 0x0a, 0x1b, + 0x4c, 0x69, 0x73, 0x74, 0x52, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x42, 0x69, 0x6e, 0x6c, 0x6f, 0x67, + 0x4c, 0x69, 0x73, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1c, 0x0a, 0x09, 0x6e, + 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, + 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x70, 0x78, 0x63, + 0x4e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x70, 0x78, 0x63, 0x4e, + 0x61, 0x6d, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x70, 0x78, 0x63, 0x55, 0x69, 0x64, 0x18, 0x03, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x06, 0x70, 0x78, 0x63, 0x55, 0x69, 0x64, 0x12, 0x1e, 0x0a, 0x0a, 0x78, + 0x53, 0x74, 0x6f, 0x72, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x0a, 0x78, 0x53, 0x74, 0x6f, 0x72, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x1c, 0x0a, 0x09, 0x78, + 0x53, 0x74, 0x6f, 0x72, 0x65, 0x55, 0x69, 0x64, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, + 0x78, 0x53, 0x74, 0x6f, 0x72, 0x65, 0x55, 0x69, 0x64, 0x12, 0x18, 0x0a, 0x07, 0x70, 0x6f, 0x64, + 0x4e, 0x61, 0x6d, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x70, 0x6f, 0x64, 0x4e, + 0x61, 0x6d, 0x65, 0x12, 0x1a, 0x0a, 0x08, 0x73, 0x69, 0x6e, 0x6b, 0x4e, 0x61, 0x6d, 0x65, 0x18, + 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x73, 0x69, 0x6e, 0x6b, 0x4e, 0x61, 0x6d, 0x65, 0x12, + 0x1a, 0x0a, 0x08, 0x73, 0x69, 0x6e, 0x6b, 0x54, 0x79, 0x70, 0x65, 0x18, 0x08, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x08, 0x73, 0x69, 0x6e, 0x6b, 0x54, 0x79, 0x70, 0x65, 0x22, 0x5b, 0x0a, 0x1c, 0x4c, + 0x69, 0x73, 0x74, 0x52, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x42, 0x69, 0x6e, 0x6c, 0x6f, 0x67, 0x4c, + 0x69, 0x73, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x25, 0x0a, 0x06, 0x73, + 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0d, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x06, 0x73, 0x74, 0x61, 0x74, + 0x75, 0x73, 0x12, 0x14, 0x0a, 0x05, 0x66, 0x69, 0x6c, 0x65, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, + 0x09, 0x52, 0x05, 0x66, 0x69, 0x6c, 0x65, 0x73, 0x2a, 0x69, 0x0a, 0x0a, 0x54, 0x61, 0x73, 0x6b, + 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x0b, 0x0a, 0x07, 0x50, 0x45, 0x4e, 0x44, 0x49, 0x4e, + 0x47, 0x10, 0x00, 0x12, 0x0b, 0x0a, 0x07, 0x52, 0x55, 0x4e, 0x4e, 0x49, 0x4e, 0x47, 
0x10, 0x01, + 0x12, 0x0b, 0x0a, 0x07, 0x53, 0x55, 0x43, 0x43, 0x45, 0x53, 0x53, 0x10, 0x02, 0x12, 0x0a, 0x0a, + 0x06, 0x46, 0x41, 0x49, 0x4c, 0x45, 0x44, 0x10, 0x03, 0x12, 0x0d, 0x0a, 0x09, 0x43, 0x41, 0x4e, + 0x43, 0x45, 0x4c, 0x49, 0x4e, 0x47, 0x10, 0x04, 0x12, 0x0c, 0x0a, 0x08, 0x43, 0x41, 0x4e, 0x43, + 0x45, 0x4c, 0x45, 0x44, 0x10, 0x05, 0x12, 0x0b, 0x0a, 0x07, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, + 0x4e, 0x10, 0x09, 0x2a, 0x46, 0x0a, 0x08, 0x42, 0x6c, 0x6b, 0x69, 0x6f, 0x4b, 0x65, 0x79, 0x12, + 0x0d, 0x0a, 0x09, 0x49, 0x4f, 0x50, 0x53, 0x5f, 0x52, 0x45, 0x41, 0x44, 0x10, 0x00, 0x12, 0x0e, + 0x0a, 0x0a, 0x49, 0x4f, 0x50, 0x53, 0x5f, 0x57, 0x52, 0x49, 0x54, 0x45, 0x10, 0x01, 0x12, 0x0c, + 0x0a, 0x08, 0x42, 0x50, 0x53, 0x5f, 0x52, 0x45, 0x41, 0x44, 0x10, 0x02, 0x12, 0x0d, 0x0a, 0x09, + 0x42, 0x50, 0x53, 0x5f, 0x57, 0x52, 0x49, 0x54, 0x45, 0x10, 0x03, 0x32, 0xa5, 0x0f, 0x0a, 0x0b, + 0x48, 0x70, 0x66, 0x73, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x52, 0x0a, 0x0f, 0x43, + 0x72, 0x65, 0x61, 0x74, 0x65, 0x44, 0x69, 0x72, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x79, 0x12, 0x1d, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x44, 0x69, 0x72, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1e, 0x2e, - 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x52, 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x44, 0x69, 0x72, 0x65, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x44, 0x69, 0x72, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x79, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, - 0x43, 0x0a, 0x0a, 0x52, 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x46, 0x69, 0x6c, 0x65, 0x12, 0x18, 0x2e, - 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x52, 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x46, 0x69, 0x6c, 0x65, + 0x43, 0x0a, 0x0a, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x46, 0x69, 0x6c, 0x65, 0x12, 0x18, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x46, 0x69, 0x6c, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x19, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, - 0x52, 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x46, 0x69, 0x6c, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, - 0x73, 0x65, 0x22, 0x00, 0x12, 0x49, 0x0a, 0x0c, 0x54, 0x72, 0x75, 0x6e, 0x63, 0x61, 0x74, 0x65, - 0x46, 0x69, 0x6c, 0x65, 0x12, 0x1a, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x54, 0x72, 0x75, - 0x6e, 0x63, 0x61, 0x74, 0x65, 0x46, 0x69, 0x6c, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, - 0x1a, 0x1b, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x54, 0x72, 0x75, 0x6e, 0x63, 0x61, 0x74, - 0x65, 0x46, 0x69, 0x6c, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, - 0x42, 0x0a, 0x0d, 0x44, 0x6f, 0x77, 0x6e, 0x6c, 0x6f, 0x61, 0x64, 0x46, 0x69, 0x6c, 0x65, 0x73, - 0x12, 0x16, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x44, 0x6f, 0x77, 0x6e, 0x6c, 0x6f, 0x61, - 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x17, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x2e, 0x44, 0x6f, 0x77, 0x6e, 0x6c, 0x6f, 0x61, 0x64, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, - 0x65, 0x22, 0x00, 0x12, 0x3c, 0x0a, 0x0b, 0x55, 0x70, 0x6c, 0x6f, 0x61, 0x64, 0x46, 0x69, 0x6c, - 0x65, 0x73, 0x12, 0x14, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x55, 0x70, 0x6c, 0x6f, 0x61, - 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x15, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x2e, 0x55, 0x70, 0x6c, 0x6f, 0x61, 0x64, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, - 0x00, 0x12, 0x55, 0x0a, 0x10, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x52, 0x65, 0x6d, 0x6f, 0x74, - 0x65, 0x46, 0x69, 
0x6c, 0x65, 0x12, 0x1e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x44, 0x65, - 0x6c, 0x65, 0x74, 0x65, 0x52, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x46, 0x69, 0x6c, 0x65, 0x52, 0x65, - 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1f, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x44, 0x65, - 0x6c, 0x65, 0x74, 0x65, 0x52, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x46, 0x69, 0x6c, 0x65, 0x52, 0x65, - 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x5e, 0x0a, 0x13, 0x53, 0x68, 0x6f, 0x77, - 0x41, 0x73, 0x79, 0x6e, 0x63, 0x54, 0x61, 0x73, 0x6b, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, - 0x21, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x53, 0x68, 0x6f, 0x77, 0x41, 0x73, 0x79, 0x6e, - 0x63, 0x54, 0x61, 0x73, 0x6b, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, - 0x73, 0x74, 0x1a, 0x22, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x53, 0x68, 0x6f, 0x77, 0x41, - 0x73, 0x79, 0x6e, 0x63, 0x54, 0x61, 0x73, 0x6b, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, - 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x52, 0x0a, 0x0f, 0x43, 0x61, 0x6e, 0x63, - 0x65, 0x6c, 0x41, 0x73, 0x79, 0x6e, 0x63, 0x54, 0x61, 0x73, 0x6b, 0x12, 0x1d, 0x2e, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x2e, 0x43, 0x61, 0x6e, 0x63, 0x65, 0x6c, 0x41, 0x73, 0x79, 0x6e, 0x63, 0x54, - 0x61, 0x73, 0x6b, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1e, 0x2e, 0x70, 0x72, 0x6f, - 0x74, 0x6f, 0x2e, 0x43, 0x61, 0x6e, 0x63, 0x65, 0x6c, 0x41, 0x73, 0x79, 0x6e, 0x63, 0x54, 0x61, - 0x73, 0x6b, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x42, 0x0a, 0x0d, - 0x54, 0x72, 0x61, 0x6e, 0x73, 0x66, 0x65, 0x72, 0x46, 0x69, 0x6c, 0x65, 0x73, 0x12, 0x16, 0x2e, - 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x54, 0x72, 0x61, 0x6e, 0x73, 0x66, 0x65, 0x72, 0x52, 0x65, - 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x17, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x54, 0x72, - 0x61, 0x6e, 0x73, 0x66, 0x65, 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, - 0x12, 0x4c, 0x0a, 0x0d, 0x53, 0x68, 0x6f, 0x77, 0x44, 0x69, 0x73, 0x6b, 0x55, 0x73, 0x61, 0x67, - 0x65, 0x12, 0x1b, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x53, 0x68, 0x6f, 0x77, 0x44, 0x69, - 0x73, 0x6b, 0x55, 0x73, 0x61, 0x67, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1c, - 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x53, 0x68, 0x6f, 0x77, 0x44, 0x69, 0x73, 0x6b, 0x55, - 0x73, 0x61, 0x67, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x49, - 0x0a, 0x0c, 0x53, 0x68, 0x6f, 0x77, 0x44, 0x69, 0x73, 0x6b, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x1a, + 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x46, 0x69, 0x6c, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, + 0x73, 0x65, 0x22, 0x00, 0x12, 0x5b, 0x0a, 0x12, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x53, 0x79, + 0x6d, 0x62, 0x6f, 0x6c, 0x69, 0x63, 0x4c, 0x69, 0x6e, 0x6b, 0x12, 0x20, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x53, 0x79, 0x6d, 0x62, 0x6f, 0x6c, 0x69, + 0x63, 0x4c, 0x69, 0x6e, 0x6b, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x21, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x53, 0x79, 0x6d, 0x62, 0x6f, + 0x6c, 0x69, 0x63, 0x4c, 0x69, 0x6e, 0x6b, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, + 0x00, 0x12, 0x4c, 0x0a, 0x0d, 0x4c, 0x69, 0x73, 0x74, 0x44, 0x69, 0x72, 0x65, 0x63, 0x74, 0x6f, + 0x72, 0x79, 0x12, 0x1b, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x44, + 0x69, 0x72, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, + 0x1c, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 
0x2e, 0x4c, 0x69, 0x73, 0x74, 0x44, 0x69, 0x72, 0x65, + 0x63, 0x74, 0x6f, 0x72, 0x79, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, + 0x52, 0x0a, 0x0f, 0x52, 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x44, 0x69, 0x72, 0x65, 0x63, 0x74, 0x6f, + 0x72, 0x79, 0x12, 0x1d, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x52, 0x65, 0x6d, 0x6f, 0x76, + 0x65, 0x44, 0x69, 0x72, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x1a, 0x1e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x52, 0x65, 0x6d, 0x6f, 0x76, 0x65, + 0x44, 0x69, 0x72, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x79, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, + 0x65, 0x22, 0x00, 0x12, 0x43, 0x0a, 0x0a, 0x52, 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x46, 0x69, 0x6c, + 0x65, 0x12, 0x18, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x52, 0x65, 0x6d, 0x6f, 0x76, 0x65, + 0x46, 0x69, 0x6c, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x19, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x2e, 0x52, 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x46, 0x69, 0x6c, 0x65, 0x52, 0x65, + 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x49, 0x0a, 0x0c, 0x54, 0x72, 0x75, 0x6e, + 0x63, 0x61, 0x74, 0x65, 0x46, 0x69, 0x6c, 0x65, 0x12, 0x1a, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x2e, 0x54, 0x72, 0x75, 0x6e, 0x63, 0x61, 0x74, 0x65, 0x46, 0x69, 0x6c, 0x65, 0x52, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1b, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x54, 0x72, 0x75, + 0x6e, 0x63, 0x61, 0x74, 0x65, 0x46, 0x69, 0x6c, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, + 0x65, 0x22, 0x00, 0x12, 0x42, 0x0a, 0x0d, 0x44, 0x6f, 0x77, 0x6e, 0x6c, 0x6f, 0x61, 0x64, 0x46, + 0x69, 0x6c, 0x65, 0x73, 0x12, 0x16, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x44, 0x6f, 0x77, + 0x6e, 0x6c, 0x6f, 0x61, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x17, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x44, 0x6f, 0x77, 0x6e, 0x6c, 0x6f, 0x61, 0x64, 0x52, 0x65, 0x73, + 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x3c, 0x0a, 0x0b, 0x55, 0x70, 0x6c, 0x6f, 0x61, + 0x64, 0x46, 0x69, 0x6c, 0x65, 0x73, 0x12, 0x14, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x55, + 0x70, 0x6c, 0x6f, 0x61, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x15, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x55, 0x70, 0x6c, 0x6f, 0x61, 0x64, 0x52, 0x65, 0x73, 0x70, 0x6f, + 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x55, 0x0a, 0x10, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x52, + 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x46, 0x69, 0x6c, 0x65, 0x12, 0x1e, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x2e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x52, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x46, 0x69, + 0x6c, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1f, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x2e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x52, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x46, 0x69, + 0x6c, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x5e, 0x0a, 0x13, + 0x53, 0x68, 0x6f, 0x77, 0x41, 0x73, 0x79, 0x6e, 0x63, 0x54, 0x61, 0x73, 0x6b, 0x53, 0x74, 0x61, + 0x74, 0x75, 0x73, 0x12, 0x21, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x53, 0x68, 0x6f, 0x77, + 0x41, 0x73, 0x79, 0x6e, 0x63, 0x54, 0x61, 0x73, 0x6b, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x22, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x53, + 0x68, 0x6f, 0x77, 0x41, 0x73, 0x79, 0x6e, 0x63, 0x54, 0x61, 0x73, 0x6b, 0x53, 0x74, 0x61, 0x74, + 0x75, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x52, 0x0a, 0x0f, + 0x43, 0x61, 0x6e, 0x63, 0x65, 0x6c, 0x41, 0x73, 0x79, 0x6e, 0x63, 
0x54, 0x61, 0x73, 0x6b, 0x12, + 0x1d, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x43, 0x61, 0x6e, 0x63, 0x65, 0x6c, 0x41, 0x73, + 0x79, 0x6e, 0x63, 0x54, 0x61, 0x73, 0x6b, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1e, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x43, 0x61, 0x6e, 0x63, 0x65, 0x6c, 0x41, 0x73, 0x79, + 0x6e, 0x63, 0x54, 0x61, 0x73, 0x6b, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, + 0x12, 0x42, 0x0a, 0x0d, 0x54, 0x72, 0x61, 0x6e, 0x73, 0x66, 0x65, 0x72, 0x46, 0x69, 0x6c, 0x65, + 0x73, 0x12, 0x16, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x54, 0x72, 0x61, 0x6e, 0x73, 0x66, + 0x65, 0x72, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x17, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x2e, 0x54, 0x72, 0x61, 0x6e, 0x73, 0x66, 0x65, 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, + 0x73, 0x65, 0x22, 0x00, 0x12, 0x4c, 0x0a, 0x0d, 0x53, 0x68, 0x6f, 0x77, 0x44, 0x69, 0x73, 0x6b, + 0x55, 0x73, 0x61, 0x67, 0x65, 0x12, 0x1b, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x53, 0x68, + 0x6f, 0x77, 0x44, 0x69, 0x73, 0x6b, 0x55, 0x73, 0x61, 0x67, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x1a, 0x1c, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x53, 0x68, 0x6f, 0x77, 0x44, + 0x69, 0x73, 0x6b, 0x55, 0x73, 0x61, 0x67, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, + 0x22, 0x00, 0x12, 0x49, 0x0a, 0x0c, 0x53, 0x68, 0x6f, 0x77, 0x44, 0x69, 0x73, 0x6b, 0x49, 0x6e, + 0x66, 0x6f, 0x12, 0x1a, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x53, 0x68, 0x6f, 0x77, 0x44, + 0x69, 0x73, 0x6b, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1b, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x53, 0x68, 0x6f, 0x77, 0x44, 0x69, 0x73, 0x6b, 0x49, - 0x6e, 0x66, 0x6f, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1b, 0x2e, 0x70, 0x72, 0x6f, - 0x74, 0x6f, 0x2e, 0x53, 0x68, 0x6f, 0x77, 0x44, 0x69, 0x73, 0x6b, 0x49, 0x6e, 0x66, 0x6f, 0x52, - 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x5e, 0x0a, 0x13, 0x43, 0x6f, 0x6e, + 0x6e, 0x66, 0x6f, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x5e, 0x0a, + 0x13, 0x43, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x43, 0x67, 0x72, 0x6f, 0x75, 0x70, 0x73, 0x42, + 0x6c, 0x6b, 0x69, 0x6f, 0x12, 0x21, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x43, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x43, 0x67, 0x72, 0x6f, 0x75, 0x70, 0x73, 0x42, 0x6c, 0x6b, 0x69, 0x6f, - 0x12, 0x21, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x43, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, - 0x43, 0x67, 0x72, 0x6f, 0x75, 0x70, 0x73, 0x42, 0x6c, 0x6b, 0x69, 0x6f, 0x52, 0x65, 0x71, 0x75, - 0x65, 0x73, 0x74, 0x1a, 0x22, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x43, 0x6f, 0x6e, 0x74, - 0x72, 0x6f, 0x6c, 0x43, 0x67, 0x72, 0x6f, 0x75, 0x70, 0x73, 0x42, 0x6c, 0x6b, 0x69, 0x6f, 0x52, - 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x42, 0x35, 0x5a, 0x33, 0x67, 0x69, 0x74, - 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x61, 0x6c, 0x69, 0x62, 0x61, 0x62, 0x61, 0x2f, - 0x70, 0x6f, 0x6c, 0x61, 0x72, 0x64, 0x62, 0x78, 0x2d, 0x6f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x6f, - 0x72, 0x2f, 0x70, 0x6b, 0x67, 0x2f, 0x68, 0x70, 0x66, 0x73, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, + 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x22, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, + 0x43, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x43, 0x67, 0x72, 0x6f, 0x75, 0x70, 0x73, 0x42, 0x6c, + 0x6b, 0x69, 0x6f, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x55, 0x0a, + 0x10, 0x4f, 0x70, 0x65, 0x6e, 0x42, 0x61, 
0x63, 0x6b, 0x75, 0x70, 0x42, 0x69, 0x6e, 0x6c, 0x6f, + 0x67, 0x12, 0x1e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x4f, 0x70, 0x65, 0x6e, 0x42, 0x61, + 0x63, 0x6b, 0x75, 0x70, 0x42, 0x69, 0x6e, 0x6c, 0x6f, 0x67, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x1a, 0x1f, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x4f, 0x70, 0x65, 0x6e, 0x42, 0x61, + 0x63, 0x6b, 0x75, 0x70, 0x42, 0x69, 0x6e, 0x6c, 0x6f, 0x67, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, + 0x73, 0x65, 0x22, 0x00, 0x12, 0x58, 0x0a, 0x11, 0x43, 0x6c, 0x6f, 0x73, 0x65, 0x42, 0x61, 0x63, + 0x6b, 0x75, 0x70, 0x42, 0x69, 0x6e, 0x6c, 0x6f, 0x67, 0x12, 0x1f, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x2e, 0x43, 0x6c, 0x6f, 0x73, 0x65, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x42, 0x69, 0x6e, + 0x6c, 0x6f, 0x67, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x20, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x2e, 0x43, 0x6c, 0x6f, 0x73, 0x65, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x42, 0x69, + 0x6e, 0x6c, 0x6f, 0x67, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x67, + 0x0a, 0x16, 0x55, 0x70, 0x6c, 0x6f, 0x61, 0x64, 0x4c, 0x61, 0x74, 0x65, 0x73, 0x74, 0x42, 0x69, + 0x6e, 0x6c, 0x6f, 0x67, 0x46, 0x69, 0x6c, 0x65, 0x12, 0x24, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x2e, 0x55, 0x70, 0x6c, 0x6f, 0x61, 0x64, 0x4c, 0x61, 0x74, 0x65, 0x73, 0x74, 0x42, 0x69, 0x6e, + 0x6c, 0x6f, 0x67, 0x46, 0x69, 0x6c, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x25, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x55, 0x70, 0x6c, 0x6f, 0x61, 0x64, 0x4c, 0x61, 0x74, + 0x65, 0x73, 0x74, 0x42, 0x69, 0x6e, 0x6c, 0x6f, 0x67, 0x46, 0x69, 0x6c, 0x65, 0x52, 0x65, 0x73, + 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x5b, 0x0a, 0x12, 0x47, 0x65, 0x74, 0x57, 0x61, + 0x74, 0x63, 0x68, 0x65, 0x72, 0x49, 0x6e, 0x66, 0x6f, 0x48, 0x61, 0x73, 0x68, 0x12, 0x20, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x47, 0x65, 0x74, 0x57, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, + 0x49, 0x6e, 0x66, 0x6f, 0x48, 0x61, 0x73, 0x68, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, + 0x21, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x47, 0x65, 0x74, 0x57, 0x61, 0x74, 0x63, 0x68, + 0x65, 0x72, 0x49, 0x6e, 0x66, 0x6f, 0x48, 0x61, 0x73, 0x68, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, + 0x73, 0x65, 0x22, 0x00, 0x12, 0x6a, 0x0a, 0x17, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x42, 0x69, + 0x6e, 0x6c, 0x6f, 0x67, 0x46, 0x69, 0x6c, 0x65, 0x73, 0x42, 0x65, 0x66, 0x6f, 0x72, 0x65, 0x12, + 0x25, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x42, 0x69, + 0x6e, 0x6c, 0x6f, 0x67, 0x46, 0x69, 0x6c, 0x65, 0x73, 0x42, 0x65, 0x66, 0x6f, 0x72, 0x65, 0x52, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x26, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x44, + 0x65, 0x6c, 0x65, 0x74, 0x65, 0x42, 0x69, 0x6e, 0x6c, 0x6f, 0x67, 0x46, 0x69, 0x6c, 0x65, 0x73, + 0x42, 0x65, 0x66, 0x6f, 0x72, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, + 0x12, 0x5e, 0x0a, 0x13, 0x4c, 0x69, 0x73, 0x74, 0x4c, 0x6f, 0x63, 0x61, 0x6c, 0x42, 0x69, 0x6e, + 0x6c, 0x6f, 0x67, 0x4c, 0x69, 0x73, 0x74, 0x12, 0x21, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, + 0x4c, 0x69, 0x73, 0x74, 0x4c, 0x6f, 0x63, 0x61, 0x6c, 0x42, 0x69, 0x6e, 0x6c, 0x6f, 0x67, 0x4c, + 0x69, 0x73, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x22, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x4c, 0x6f, 0x63, 0x61, 0x6c, 0x42, 0x69, 0x6e, 0x6c, + 0x6f, 0x67, 0x4c, 0x69, 0x73, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, + 0x12, 0x61, 0x0a, 0x14, 0x4c, 0x69, 0x73, 0x74, 0x52, 0x65, 0x6d, 
0x6f, 0x74, 0x65, 0x42, 0x69, + 0x6e, 0x6c, 0x6f, 0x67, 0x4c, 0x69, 0x73, 0x74, 0x12, 0x22, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x52, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x42, 0x69, 0x6e, 0x6c, 0x6f, + 0x67, 0x4c, 0x69, 0x73, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x23, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x52, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x42, + 0x69, 0x6e, 0x6c, 0x6f, 0x67, 0x4c, 0x69, 0x73, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, + 0x65, 0x22, 0x00, 0x42, 0x35, 0x5a, 0x33, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, + 0x6d, 0x2f, 0x61, 0x6c, 0x69, 0x62, 0x61, 0x62, 0x61, 0x2f, 0x70, 0x6f, 0x6c, 0x61, 0x72, 0x64, + 0x62, 0x78, 0x2d, 0x6f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x6f, 0x72, 0x2f, 0x70, 0x6b, 0x67, 0x2f, + 0x68, 0x70, 0x66, 0x73, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x33, } var ( @@ -3942,169 +4981,209 @@ func file_hpfs_proto_rawDescGZIP() []byte { } var file_hpfs_proto_enumTypes = make([]protoimpl.EnumInfo, 3) -var file_hpfs_proto_msgTypes = make([]protoimpl.MessageInfo, 53) +var file_hpfs_proto_msgTypes = make([]protoimpl.MessageInfo, 67) var file_hpfs_proto_goTypes = []interface{}{ - (TaskStatus)(0), // 0: proto.TaskStatus - (BlkioKey)(0), // 1: proto.BlkioKey - (Status_StatusCode)(0), // 2: proto.Status.StatusCode - (*Host)(nil), // 3: proto.Host - (*FileInfo)(nil), // 4: proto.FileInfo - (*FileStats)(nil), // 5: proto.FileStats - (*File)(nil), // 6: proto.File - (*Status)(nil), // 7: proto.Status - (*FileRequest)(nil), // 8: proto.FileRequest - (*CreateFileOptions)(nil), // 9: proto.CreateFileOptions - (*CreateFileRequest)(nil), // 10: proto.CreateFileRequest - (*CreateFileResponse)(nil), // 11: proto.CreateFileResponse - (*CreateDirectoryOptions)(nil), // 12: proto.CreateDirectoryOptions - (*CreateDirectoryRequest)(nil), // 13: proto.CreateDirectoryRequest - (*CreateDirectoryResponse)(nil), // 14: proto.CreateDirectoryResponse - (*CreateSymbolicLinkOptions)(nil), // 15: proto.CreateSymbolicLinkOptions - (*CreateSymbolicLinkRequest)(nil), // 16: proto.CreateSymbolicLinkRequest - (*CreateSymbolicLinkResponse)(nil), // 17: proto.CreateSymbolicLinkResponse - (*RemoveOptions)(nil), // 18: proto.RemoveOptions - (*ListDirectoryRequest)(nil), // 19: proto.ListDirectoryRequest - (*ListDirectoryResponse)(nil), // 20: proto.ListDirectoryResponse - (*RemoveDirectoryRequest)(nil), // 21: proto.RemoveDirectoryRequest - (*RemoveDirectoryResponse)(nil), // 22: proto.RemoveDirectoryResponse - (*RemoveFileRequest)(nil), // 23: proto.RemoveFileRequest - (*RemoveFileResponse)(nil), // 24: proto.RemoveFileResponse - (*TruncateFileOptions)(nil), // 25: proto.TruncateFileOptions - (*TruncateFileRequest)(nil), // 26: proto.TruncateFileRequest - (*TruncateFileResponse)(nil), // 27: proto.TruncateFileResponse - (*RemoteFsEndpoint)(nil), // 28: proto.RemoteFsEndpoint - (*AsyncTask)(nil), // 29: proto.AsyncTask - (*AsyncTaskRequest)(nil), // 30: proto.AsyncTaskRequest - (*DownloadSource)(nil), // 31: proto.DownloadSource - (*DownloadTask)(nil), // 32: proto.DownloadTask - (*DownloadRequest)(nil), // 33: proto.DownloadRequest - (*DownloadResponse)(nil), // 34: proto.DownloadResponse - (*TransferRequest)(nil), // 35: proto.TransferRequest - (*TransferResponse)(nil), // 36: proto.TransferResponse - (*ShowDiskUsageRequest)(nil), // 37: proto.ShowDiskUsageRequest - (*ShowDiskUsageResponse)(nil), // 38: proto.ShowDiskUsageResponse - (*ShowAsyncTaskStatusRequest)(nil), // 39: 
proto.ShowAsyncTaskStatusRequest - (*ShowAsyncTaskStatusResponse)(nil), // 40: proto.ShowAsyncTaskStatusResponse - (*CancelAsyncTaskRequest)(nil), // 41: proto.CancelAsyncTaskRequest - (*CancelAsyncTaskResponse)(nil), // 42: proto.CancelAsyncTaskResponse - (*UploadTarget)(nil), // 43: proto.UploadTarget - (*UploadRequest)(nil), // 44: proto.UploadRequest - (*UploadResponse)(nil), // 45: proto.UploadResponse - (*DeleteRemoteFileRequest)(nil), // 46: proto.DeleteRemoteFileRequest - (*DeleteRemoteFileResponse)(nil), // 47: proto.DeleteRemoteFileResponse - (*DiskInfo)(nil), // 48: proto.DiskInfo - (*ShowDiskInfoRequest)(nil), // 49: proto.ShowDiskInfoRequest - (*ShowDiskInfoResponse)(nil), // 50: proto.ShowDiskInfoResponse - (*BlkioCtrl)(nil), // 51: proto.BlkioCtrl - (*ControlCgroupsBlkioRequest)(nil), // 52: proto.ControlCgroupsBlkioRequest - (*ControlCgroupsBlkioResponse)(nil), // 53: proto.ControlCgroupsBlkioResponse - nil, // 54: proto.RemoteFsEndpoint.AuthEntry - nil, // 55: proto.RemoteFsEndpoint.OtherEntry - (*timestamp.Timestamp)(nil), // 56: google.protobuf.Timestamp + (TaskStatus)(0), // 0: proto.TaskStatus + (BlkioKey)(0), // 1: proto.BlkioKey + (Status_StatusCode)(0), // 2: proto.Status.StatusCode + (*Host)(nil), // 3: proto.Host + (*FileInfo)(nil), // 4: proto.FileInfo + (*FileStats)(nil), // 5: proto.FileStats + (*File)(nil), // 6: proto.File + (*Status)(nil), // 7: proto.Status + (*FileRequest)(nil), // 8: proto.FileRequest + (*CreateFileOptions)(nil), // 9: proto.CreateFileOptions + (*CreateFileRequest)(nil), // 10: proto.CreateFileRequest + (*CreateFileResponse)(nil), // 11: proto.CreateFileResponse + (*CreateDirectoryOptions)(nil), // 12: proto.CreateDirectoryOptions + (*CreateDirectoryRequest)(nil), // 13: proto.CreateDirectoryRequest + (*CreateDirectoryResponse)(nil), // 14: proto.CreateDirectoryResponse + (*CreateSymbolicLinkOptions)(nil), // 15: proto.CreateSymbolicLinkOptions + (*CreateSymbolicLinkRequest)(nil), // 16: proto.CreateSymbolicLinkRequest + (*CreateSymbolicLinkResponse)(nil), // 17: proto.CreateSymbolicLinkResponse + (*RemoveOptions)(nil), // 18: proto.RemoveOptions + (*ListDirectoryRequest)(nil), // 19: proto.ListDirectoryRequest + (*ListDirectoryResponse)(nil), // 20: proto.ListDirectoryResponse + (*RemoveDirectoryRequest)(nil), // 21: proto.RemoveDirectoryRequest + (*RemoveDirectoryResponse)(nil), // 22: proto.RemoveDirectoryResponse + (*RemoveFileRequest)(nil), // 23: proto.RemoveFileRequest + (*RemoveFileResponse)(nil), // 24: proto.RemoveFileResponse + (*TruncateFileOptions)(nil), // 25: proto.TruncateFileOptions + (*TruncateFileRequest)(nil), // 26: proto.TruncateFileRequest + (*TruncateFileResponse)(nil), // 27: proto.TruncateFileResponse + (*RemoteFsEndpoint)(nil), // 28: proto.RemoteFsEndpoint + (*AsyncTask)(nil), // 29: proto.AsyncTask + (*AsyncTaskRequest)(nil), // 30: proto.AsyncTaskRequest + (*DownloadSource)(nil), // 31: proto.DownloadSource + (*DownloadTask)(nil), // 32: proto.DownloadTask + (*DownloadRequest)(nil), // 33: proto.DownloadRequest + (*DownloadResponse)(nil), // 34: proto.DownloadResponse + (*TransferRequest)(nil), // 35: proto.TransferRequest + (*TransferResponse)(nil), // 36: proto.TransferResponse + (*ShowDiskUsageRequest)(nil), // 37: proto.ShowDiskUsageRequest + (*ShowDiskUsageResponse)(nil), // 38: proto.ShowDiskUsageResponse + (*ShowAsyncTaskStatusRequest)(nil), // 39: proto.ShowAsyncTaskStatusRequest + (*ShowAsyncTaskStatusResponse)(nil), // 40: proto.ShowAsyncTaskStatusResponse + (*CancelAsyncTaskRequest)(nil), // 41: 
proto.CancelAsyncTaskRequest + (*CancelAsyncTaskResponse)(nil), // 42: proto.CancelAsyncTaskResponse + (*UploadTarget)(nil), // 43: proto.UploadTarget + (*UploadRequest)(nil), // 44: proto.UploadRequest + (*UploadResponse)(nil), // 45: proto.UploadResponse + (*DeleteRemoteFileRequest)(nil), // 46: proto.DeleteRemoteFileRequest + (*DeleteRemoteFileResponse)(nil), // 47: proto.DeleteRemoteFileResponse + (*DiskInfo)(nil), // 48: proto.DiskInfo + (*ShowDiskInfoRequest)(nil), // 49: proto.ShowDiskInfoRequest + (*ShowDiskInfoResponse)(nil), // 50: proto.ShowDiskInfoResponse + (*BlkioCtrl)(nil), // 51: proto.BlkioCtrl + (*ControlCgroupsBlkioRequest)(nil), // 52: proto.ControlCgroupsBlkioRequest + (*ControlCgroupsBlkioResponse)(nil), // 53: proto.ControlCgroupsBlkioResponse + (*OpenBackupBinlogRequest)(nil), // 54: proto.OpenBackupBinlogRequest + (*OpenBackupBinlogResponse)(nil), // 55: proto.OpenBackupBinlogResponse + (*CloseBackupBinlogRequest)(nil), // 56: proto.CloseBackupBinlogRequest + (*CloseBackupBinlogResponse)(nil), // 57: proto.CloseBackupBinlogResponse + (*UploadLatestBinlogFileRequest)(nil), // 58: proto.UploadLatestBinlogFileRequest + (*UploadLatestBinlogFileResponse)(nil), // 59: proto.UploadLatestBinlogFileResponse + (*GetWatcherInfoHashRequest)(nil), // 60: proto.GetWatcherInfoHashRequest + (*GetWatcherInfoHashResponse)(nil), // 61: proto.GetWatcherInfoHashResponse + (*DeleteBinlogFilesBeforeRequest)(nil), // 62: proto.DeleteBinlogFilesBeforeRequest + (*DeleteBinlogFilesBeforeResponse)(nil), // 63: proto.DeleteBinlogFilesBeforeResponse + (*ListLocalBinlogListRequest)(nil), // 64: proto.ListLocalBinlogListRequest + (*ListLocalBinlogListResponse)(nil), // 65: proto.ListLocalBinlogListResponse + (*ListRemoteBinlogListRequest)(nil), // 66: proto.ListRemoteBinlogListRequest + (*ListRemoteBinlogListResponse)(nil), // 67: proto.ListRemoteBinlogListResponse + nil, // 68: proto.RemoteFsEndpoint.AuthEntry + nil, // 69: proto.RemoteFsEndpoint.OtherEntry + (*timestamp.Timestamp)(nil), // 70: google.protobuf.Timestamp } var file_hpfs_proto_depIdxs = []int32{ - 56, // 0: proto.FileInfo.mod_time:type_name -> google.protobuf.Timestamp - 5, // 1: proto.File.stats:type_name -> proto.FileStats - 2, // 2: proto.Status.code:type_name -> proto.Status.StatusCode - 3, // 3: proto.CreateFileRequest.host:type_name -> proto.Host - 9, // 4: proto.CreateFileRequest.options:type_name -> proto.CreateFileOptions - 8, // 5: proto.CreateFileRequest.file:type_name -> proto.FileRequest - 7, // 6: proto.CreateFileResponse.status:type_name -> proto.Status - 3, // 7: proto.CreateDirectoryRequest.host:type_name -> proto.Host - 12, // 8: proto.CreateDirectoryRequest.options:type_name -> proto.CreateDirectoryOptions - 8, // 9: proto.CreateDirectoryRequest.directory:type_name -> proto.FileRequest - 7, // 10: proto.CreateDirectoryResponse.status:type_name -> proto.Status - 3, // 11: proto.CreateSymbolicLinkRequest.host:type_name -> proto.Host - 15, // 12: proto.CreateSymbolicLinkRequest.options:type_name -> proto.CreateSymbolicLinkOptions - 8, // 13: proto.CreateSymbolicLinkRequest.link_path:type_name -> proto.FileRequest - 7, // 14: proto.CreateSymbolicLinkResponse.status:type_name -> proto.Status - 3, // 15: proto.ListDirectoryRequest.host:type_name -> proto.Host - 7, // 16: proto.ListDirectoryResponse.status:type_name -> proto.Status - 4, // 17: proto.ListDirectoryResponse.files:type_name -> proto.FileInfo - 3, // 18: proto.RemoveDirectoryRequest.host:type_name -> proto.Host - 18, // 19: 
proto.RemoveDirectoryRequest.options:type_name -> proto.RemoveOptions - 7, // 20: proto.RemoveDirectoryResponse.status:type_name -> proto.Status - 3, // 21: proto.RemoveFileRequest.host:type_name -> proto.Host - 18, // 22: proto.RemoveFileRequest.options:type_name -> proto.RemoveOptions - 7, // 23: proto.RemoveFileResponse.status:type_name -> proto.Status - 3, // 24: proto.TruncateFileRequest.host:type_name -> proto.Host - 25, // 25: proto.TruncateFileRequest.options:type_name -> proto.TruncateFileOptions - 7, // 26: proto.TruncateFileResponse.status:type_name -> proto.Status - 54, // 27: proto.RemoteFsEndpoint.auth:type_name -> proto.RemoteFsEndpoint.AuthEntry - 55, // 28: proto.RemoteFsEndpoint.other:type_name -> proto.RemoteFsEndpoint.OtherEntry - 28, // 29: proto.DownloadSource.endpoint:type_name -> proto.RemoteFsEndpoint - 31, // 30: proto.DownloadTask.source:type_name -> proto.DownloadSource - 3, // 31: proto.DownloadRequest.host:type_name -> proto.Host - 30, // 32: proto.DownloadRequest.async_task:type_name -> proto.AsyncTaskRequest - 32, // 33: proto.DownloadRequest.tasks:type_name -> proto.DownloadTask - 7, // 34: proto.DownloadResponse.status:type_name -> proto.Status - 29, // 35: proto.DownloadResponse.task:type_name -> proto.AsyncTask - 3, // 36: proto.TransferRequest.src_host:type_name -> proto.Host - 3, // 37: proto.TransferRequest.dest_host:type_name -> proto.Host - 30, // 38: proto.TransferRequest.async_task:type_name -> proto.AsyncTaskRequest - 7, // 39: proto.TransferResponse.status:type_name -> proto.Status - 29, // 40: proto.TransferResponse.task:type_name -> proto.AsyncTask - 3, // 41: proto.ShowDiskUsageRequest.host:type_name -> proto.Host - 7, // 42: proto.ShowDiskUsageResponse.status:type_name -> proto.Status - 3, // 43: proto.ShowAsyncTaskStatusRequest.host:type_name -> proto.Host - 29, // 44: proto.ShowAsyncTaskStatusRequest.task:type_name -> proto.AsyncTask - 7, // 45: proto.ShowAsyncTaskStatusResponse.status:type_name -> proto.Status - 0, // 46: proto.ShowAsyncTaskStatusResponse.task_status:type_name -> proto.TaskStatus - 3, // 47: proto.CancelAsyncTaskRequest.host:type_name -> proto.Host - 29, // 48: proto.CancelAsyncTaskRequest.task:type_name -> proto.AsyncTask - 7, // 49: proto.CancelAsyncTaskResponse.status:type_name -> proto.Status - 28, // 50: proto.UploadTarget.endpoint:type_name -> proto.RemoteFsEndpoint - 3, // 51: proto.UploadRequest.host:type_name -> proto.Host - 30, // 52: proto.UploadRequest.async_task:type_name -> proto.AsyncTaskRequest - 43, // 53: proto.UploadRequest.target:type_name -> proto.UploadTarget - 7, // 54: proto.UploadResponse.status:type_name -> proto.Status - 29, // 55: proto.UploadResponse.task:type_name -> proto.AsyncTask - 28, // 56: proto.DeleteRemoteFileRequest.target:type_name -> proto.RemoteFsEndpoint - 7, // 57: proto.DeleteRemoteFileResponse.status:type_name -> proto.Status - 3, // 58: proto.ShowDiskInfoRequest.host:type_name -> proto.Host - 7, // 59: proto.ShowDiskInfoResponse.status:type_name -> proto.Status - 48, // 60: proto.ShowDiskInfoResponse.info:type_name -> proto.DiskInfo - 1, // 61: proto.BlkioCtrl.key:type_name -> proto.BlkioKey - 3, // 62: proto.ControlCgroupsBlkioRequest.host:type_name -> proto.Host - 51, // 63: proto.ControlCgroupsBlkioRequest.controls:type_name -> proto.BlkioCtrl - 7, // 64: proto.ControlCgroupsBlkioResponse.status:type_name -> proto.Status - 13, // 65: proto.HpfsService.CreateDirectory:input_type -> proto.CreateDirectoryRequest - 10, // 66: proto.HpfsService.CreateFile:input_type -> 
proto.CreateFileRequest - 16, // 67: proto.HpfsService.CreateSymbolicLink:input_type -> proto.CreateSymbolicLinkRequest - 19, // 68: proto.HpfsService.ListDirectory:input_type -> proto.ListDirectoryRequest - 21, // 69: proto.HpfsService.RemoveDirectory:input_type -> proto.RemoveDirectoryRequest - 23, // 70: proto.HpfsService.RemoveFile:input_type -> proto.RemoveFileRequest - 26, // 71: proto.HpfsService.TruncateFile:input_type -> proto.TruncateFileRequest - 33, // 72: proto.HpfsService.DownloadFiles:input_type -> proto.DownloadRequest - 44, // 73: proto.HpfsService.UploadFiles:input_type -> proto.UploadRequest - 46, // 74: proto.HpfsService.DeleteRemoteFile:input_type -> proto.DeleteRemoteFileRequest - 39, // 75: proto.HpfsService.ShowAsyncTaskStatus:input_type -> proto.ShowAsyncTaskStatusRequest - 41, // 76: proto.HpfsService.CancelAsyncTask:input_type -> proto.CancelAsyncTaskRequest - 35, // 77: proto.HpfsService.TransferFiles:input_type -> proto.TransferRequest - 37, // 78: proto.HpfsService.ShowDiskUsage:input_type -> proto.ShowDiskUsageRequest - 49, // 79: proto.HpfsService.ShowDiskInfo:input_type -> proto.ShowDiskInfoRequest - 52, // 80: proto.HpfsService.ControlCgroupsBlkio:input_type -> proto.ControlCgroupsBlkioRequest - 14, // 81: proto.HpfsService.CreateDirectory:output_type -> proto.CreateDirectoryResponse - 11, // 82: proto.HpfsService.CreateFile:output_type -> proto.CreateFileResponse - 17, // 83: proto.HpfsService.CreateSymbolicLink:output_type -> proto.CreateSymbolicLinkResponse - 20, // 84: proto.HpfsService.ListDirectory:output_type -> proto.ListDirectoryResponse - 22, // 85: proto.HpfsService.RemoveDirectory:output_type -> proto.RemoveDirectoryResponse - 24, // 86: proto.HpfsService.RemoveFile:output_type -> proto.RemoveFileResponse - 27, // 87: proto.HpfsService.TruncateFile:output_type -> proto.TruncateFileResponse - 34, // 88: proto.HpfsService.DownloadFiles:output_type -> proto.DownloadResponse - 45, // 89: proto.HpfsService.UploadFiles:output_type -> proto.UploadResponse - 47, // 90: proto.HpfsService.DeleteRemoteFile:output_type -> proto.DeleteRemoteFileResponse - 40, // 91: proto.HpfsService.ShowAsyncTaskStatus:output_type -> proto.ShowAsyncTaskStatusResponse - 42, // 92: proto.HpfsService.CancelAsyncTask:output_type -> proto.CancelAsyncTaskResponse - 36, // 93: proto.HpfsService.TransferFiles:output_type -> proto.TransferResponse - 38, // 94: proto.HpfsService.ShowDiskUsage:output_type -> proto.ShowDiskUsageResponse - 50, // 95: proto.HpfsService.ShowDiskInfo:output_type -> proto.ShowDiskInfoResponse - 53, // 96: proto.HpfsService.ControlCgroupsBlkio:output_type -> proto.ControlCgroupsBlkioResponse - 81, // [81:97] is the sub-list for method output_type - 65, // [65:81] is the sub-list for method input_type - 65, // [65:65] is the sub-list for extension type_name - 65, // [65:65] is the sub-list for extension extendee - 0, // [0:65] is the sub-list for field type_name + 70, // 0: proto.FileInfo.mod_time:type_name -> google.protobuf.Timestamp + 5, // 1: proto.File.stats:type_name -> proto.FileStats + 2, // 2: proto.Status.code:type_name -> proto.Status.StatusCode + 3, // 3: proto.CreateFileRequest.host:type_name -> proto.Host + 9, // 4: proto.CreateFileRequest.options:type_name -> proto.CreateFileOptions + 8, // 5: proto.CreateFileRequest.file:type_name -> proto.FileRequest + 7, // 6: proto.CreateFileResponse.status:type_name -> proto.Status + 3, // 7: proto.CreateDirectoryRequest.host:type_name -> proto.Host + 12, // 8: 
proto.CreateDirectoryRequest.options:type_name -> proto.CreateDirectoryOptions + 8, // 9: proto.CreateDirectoryRequest.directory:type_name -> proto.FileRequest + 7, // 10: proto.CreateDirectoryResponse.status:type_name -> proto.Status + 3, // 11: proto.CreateSymbolicLinkRequest.host:type_name -> proto.Host + 15, // 12: proto.CreateSymbolicLinkRequest.options:type_name -> proto.CreateSymbolicLinkOptions + 8, // 13: proto.CreateSymbolicLinkRequest.link_path:type_name -> proto.FileRequest + 7, // 14: proto.CreateSymbolicLinkResponse.status:type_name -> proto.Status + 3, // 15: proto.ListDirectoryRequest.host:type_name -> proto.Host + 7, // 16: proto.ListDirectoryResponse.status:type_name -> proto.Status + 4, // 17: proto.ListDirectoryResponse.files:type_name -> proto.FileInfo + 3, // 18: proto.RemoveDirectoryRequest.host:type_name -> proto.Host + 18, // 19: proto.RemoveDirectoryRequest.options:type_name -> proto.RemoveOptions + 7, // 20: proto.RemoveDirectoryResponse.status:type_name -> proto.Status + 3, // 21: proto.RemoveFileRequest.host:type_name -> proto.Host + 18, // 22: proto.RemoveFileRequest.options:type_name -> proto.RemoveOptions + 7, // 23: proto.RemoveFileResponse.status:type_name -> proto.Status + 3, // 24: proto.TruncateFileRequest.host:type_name -> proto.Host + 25, // 25: proto.TruncateFileRequest.options:type_name -> proto.TruncateFileOptions + 7, // 26: proto.TruncateFileResponse.status:type_name -> proto.Status + 68, // 27: proto.RemoteFsEndpoint.auth:type_name -> proto.RemoteFsEndpoint.AuthEntry + 69, // 28: proto.RemoteFsEndpoint.other:type_name -> proto.RemoteFsEndpoint.OtherEntry + 28, // 29: proto.DownloadSource.endpoint:type_name -> proto.RemoteFsEndpoint + 31, // 30: proto.DownloadTask.source:type_name -> proto.DownloadSource + 3, // 31: proto.DownloadRequest.host:type_name -> proto.Host + 30, // 32: proto.DownloadRequest.async_task:type_name -> proto.AsyncTaskRequest + 32, // 33: proto.DownloadRequest.tasks:type_name -> proto.DownloadTask + 7, // 34: proto.DownloadResponse.status:type_name -> proto.Status + 29, // 35: proto.DownloadResponse.task:type_name -> proto.AsyncTask + 3, // 36: proto.TransferRequest.src_host:type_name -> proto.Host + 3, // 37: proto.TransferRequest.dest_host:type_name -> proto.Host + 30, // 38: proto.TransferRequest.async_task:type_name -> proto.AsyncTaskRequest + 7, // 39: proto.TransferResponse.status:type_name -> proto.Status + 29, // 40: proto.TransferResponse.task:type_name -> proto.AsyncTask + 3, // 41: proto.ShowDiskUsageRequest.host:type_name -> proto.Host + 7, // 42: proto.ShowDiskUsageResponse.status:type_name -> proto.Status + 3, // 43: proto.ShowAsyncTaskStatusRequest.host:type_name -> proto.Host + 29, // 44: proto.ShowAsyncTaskStatusRequest.task:type_name -> proto.AsyncTask + 7, // 45: proto.ShowAsyncTaskStatusResponse.status:type_name -> proto.Status + 0, // 46: proto.ShowAsyncTaskStatusResponse.task_status:type_name -> proto.TaskStatus + 3, // 47: proto.CancelAsyncTaskRequest.host:type_name -> proto.Host + 29, // 48: proto.CancelAsyncTaskRequest.task:type_name -> proto.AsyncTask + 7, // 49: proto.CancelAsyncTaskResponse.status:type_name -> proto.Status + 28, // 50: proto.UploadTarget.endpoint:type_name -> proto.RemoteFsEndpoint + 3, // 51: proto.UploadRequest.host:type_name -> proto.Host + 30, // 52: proto.UploadRequest.async_task:type_name -> proto.AsyncTaskRequest + 43, // 53: proto.UploadRequest.target:type_name -> proto.UploadTarget + 7, // 54: proto.UploadResponse.status:type_name -> proto.Status + 29, // 55: 
proto.UploadResponse.task:type_name -> proto.AsyncTask + 28, // 56: proto.DeleteRemoteFileRequest.target:type_name -> proto.RemoteFsEndpoint + 7, // 57: proto.DeleteRemoteFileResponse.status:type_name -> proto.Status + 3, // 58: proto.ShowDiskInfoRequest.host:type_name -> proto.Host + 7, // 59: proto.ShowDiskInfoResponse.status:type_name -> proto.Status + 48, // 60: proto.ShowDiskInfoResponse.info:type_name -> proto.DiskInfo + 1, // 61: proto.BlkioCtrl.key:type_name -> proto.BlkioKey + 3, // 62: proto.ControlCgroupsBlkioRequest.host:type_name -> proto.Host + 51, // 63: proto.ControlCgroupsBlkioRequest.controls:type_name -> proto.BlkioCtrl + 7, // 64: proto.ControlCgroupsBlkioResponse.status:type_name -> proto.Status + 3, // 65: proto.OpenBackupBinlogRequest.host:type_name -> proto.Host + 7, // 66: proto.OpenBackupBinlogResponse.status:type_name -> proto.Status + 3, // 67: proto.CloseBackupBinlogRequest.host:type_name -> proto.Host + 7, // 68: proto.CloseBackupBinlogResponse.status:type_name -> proto.Status + 3, // 69: proto.UploadLatestBinlogFileRequest.host:type_name -> proto.Host + 7, // 70: proto.UploadLatestBinlogFileResponse.status:type_name -> proto.Status + 3, // 71: proto.GetWatcherInfoHashRequest.host:type_name -> proto.Host + 7, // 72: proto.GetWatcherInfoHashResponse.status:type_name -> proto.Status + 7, // 73: proto.DeleteBinlogFilesBeforeResponse.status:type_name -> proto.Status + 3, // 74: proto.ListLocalBinlogListRequest.host:type_name -> proto.Host + 7, // 75: proto.ListLocalBinlogListResponse.status:type_name -> proto.Status + 7, // 76: proto.ListRemoteBinlogListResponse.status:type_name -> proto.Status + 13, // 77: proto.HpfsService.CreateDirectory:input_type -> proto.CreateDirectoryRequest + 10, // 78: proto.HpfsService.CreateFile:input_type -> proto.CreateFileRequest + 16, // 79: proto.HpfsService.CreateSymbolicLink:input_type -> proto.CreateSymbolicLinkRequest + 19, // 80: proto.HpfsService.ListDirectory:input_type -> proto.ListDirectoryRequest + 21, // 81: proto.HpfsService.RemoveDirectory:input_type -> proto.RemoveDirectoryRequest + 23, // 82: proto.HpfsService.RemoveFile:input_type -> proto.RemoveFileRequest + 26, // 83: proto.HpfsService.TruncateFile:input_type -> proto.TruncateFileRequest + 33, // 84: proto.HpfsService.DownloadFiles:input_type -> proto.DownloadRequest + 44, // 85: proto.HpfsService.UploadFiles:input_type -> proto.UploadRequest + 46, // 86: proto.HpfsService.DeleteRemoteFile:input_type -> proto.DeleteRemoteFileRequest + 39, // 87: proto.HpfsService.ShowAsyncTaskStatus:input_type -> proto.ShowAsyncTaskStatusRequest + 41, // 88: proto.HpfsService.CancelAsyncTask:input_type -> proto.CancelAsyncTaskRequest + 35, // 89: proto.HpfsService.TransferFiles:input_type -> proto.TransferRequest + 37, // 90: proto.HpfsService.ShowDiskUsage:input_type -> proto.ShowDiskUsageRequest + 49, // 91: proto.HpfsService.ShowDiskInfo:input_type -> proto.ShowDiskInfoRequest + 52, // 92: proto.HpfsService.ControlCgroupsBlkio:input_type -> proto.ControlCgroupsBlkioRequest + 54, // 93: proto.HpfsService.OpenBackupBinlog:input_type -> proto.OpenBackupBinlogRequest + 56, // 94: proto.HpfsService.CloseBackupBinlog:input_type -> proto.CloseBackupBinlogRequest + 58, // 95: proto.HpfsService.UploadLatestBinlogFile:input_type -> proto.UploadLatestBinlogFileRequest + 60, // 96: proto.HpfsService.GetWatcherInfoHash:input_type -> proto.GetWatcherInfoHashRequest + 62, // 97: proto.HpfsService.DeleteBinlogFilesBefore:input_type -> proto.DeleteBinlogFilesBeforeRequest + 64, // 98: 
proto.HpfsService.ListLocalBinlogList:input_type -> proto.ListLocalBinlogListRequest + 66, // 99: proto.HpfsService.ListRemoteBinlogList:input_type -> proto.ListRemoteBinlogListRequest + 14, // 100: proto.HpfsService.CreateDirectory:output_type -> proto.CreateDirectoryResponse + 11, // 101: proto.HpfsService.CreateFile:output_type -> proto.CreateFileResponse + 17, // 102: proto.HpfsService.CreateSymbolicLink:output_type -> proto.CreateSymbolicLinkResponse + 20, // 103: proto.HpfsService.ListDirectory:output_type -> proto.ListDirectoryResponse + 22, // 104: proto.HpfsService.RemoveDirectory:output_type -> proto.RemoveDirectoryResponse + 24, // 105: proto.HpfsService.RemoveFile:output_type -> proto.RemoveFileResponse + 27, // 106: proto.HpfsService.TruncateFile:output_type -> proto.TruncateFileResponse + 34, // 107: proto.HpfsService.DownloadFiles:output_type -> proto.DownloadResponse + 45, // 108: proto.HpfsService.UploadFiles:output_type -> proto.UploadResponse + 47, // 109: proto.HpfsService.DeleteRemoteFile:output_type -> proto.DeleteRemoteFileResponse + 40, // 110: proto.HpfsService.ShowAsyncTaskStatus:output_type -> proto.ShowAsyncTaskStatusResponse + 42, // 111: proto.HpfsService.CancelAsyncTask:output_type -> proto.CancelAsyncTaskResponse + 36, // 112: proto.HpfsService.TransferFiles:output_type -> proto.TransferResponse + 38, // 113: proto.HpfsService.ShowDiskUsage:output_type -> proto.ShowDiskUsageResponse + 50, // 114: proto.HpfsService.ShowDiskInfo:output_type -> proto.ShowDiskInfoResponse + 53, // 115: proto.HpfsService.ControlCgroupsBlkio:output_type -> proto.ControlCgroupsBlkioResponse + 55, // 116: proto.HpfsService.OpenBackupBinlog:output_type -> proto.OpenBackupBinlogResponse + 57, // 117: proto.HpfsService.CloseBackupBinlog:output_type -> proto.CloseBackupBinlogResponse + 59, // 118: proto.HpfsService.UploadLatestBinlogFile:output_type -> proto.UploadLatestBinlogFileResponse + 61, // 119: proto.HpfsService.GetWatcherInfoHash:output_type -> proto.GetWatcherInfoHashResponse + 63, // 120: proto.HpfsService.DeleteBinlogFilesBefore:output_type -> proto.DeleteBinlogFilesBeforeResponse + 65, // 121: proto.HpfsService.ListLocalBinlogList:output_type -> proto.ListLocalBinlogListResponse + 67, // 122: proto.HpfsService.ListRemoteBinlogList:output_type -> proto.ListRemoteBinlogListResponse + 100, // [100:123] is the sub-list for method output_type + 77, // [77:100] is the sub-list for method input_type + 77, // [77:77] is the sub-list for extension type_name + 77, // [77:77] is the sub-list for extension extendee + 0, // [0:77] is the sub-list for field type_name } func init() { file_hpfs_proto_init() } @@ -4341,8 +5420,176 @@ func file_hpfs_proto_init() { return nil } } - file_hpfs_proto_msgTypes[19].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*RemoveDirectoryResponse); i { + file_hpfs_proto_msgTypes[19].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*RemoveDirectoryResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_hpfs_proto_msgTypes[20].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*RemoveFileRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_hpfs_proto_msgTypes[21].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*RemoveFileResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache 
+ case 2: + return &v.unknownFields + default: + return nil + } + } + file_hpfs_proto_msgTypes[22].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*TruncateFileOptions); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_hpfs_proto_msgTypes[23].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*TruncateFileRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_hpfs_proto_msgTypes[24].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*TruncateFileResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_hpfs_proto_msgTypes[25].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*RemoteFsEndpoint); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_hpfs_proto_msgTypes[26].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*AsyncTask); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_hpfs_proto_msgTypes[27].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*AsyncTaskRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_hpfs_proto_msgTypes[28].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*DownloadSource); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_hpfs_proto_msgTypes[29].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*DownloadTask); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_hpfs_proto_msgTypes[30].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*DownloadRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_hpfs_proto_msgTypes[31].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*DownloadResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_hpfs_proto_msgTypes[32].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*TransferRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_hpfs_proto_msgTypes[33].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*TransferResponse); i { case 0: return &v.state case 1: @@ -4353,8 +5600,8 @@ func file_hpfs_proto_init() { return nil } } - file_hpfs_proto_msgTypes[20].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*RemoveFileRequest); i { + file_hpfs_proto_msgTypes[34].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ShowDiskUsageRequest); i { case 0: return &v.state case 1: @@ -4365,8 +5612,8 @@ func file_hpfs_proto_init() { return nil } } - file_hpfs_proto_msgTypes[21].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*RemoveFileResponse); i { + 
file_hpfs_proto_msgTypes[35].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ShowDiskUsageResponse); i { case 0: return &v.state case 1: @@ -4377,8 +5624,8 @@ func file_hpfs_proto_init() { return nil } } - file_hpfs_proto_msgTypes[22].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*TruncateFileOptions); i { + file_hpfs_proto_msgTypes[36].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ShowAsyncTaskStatusRequest); i { case 0: return &v.state case 1: @@ -4389,8 +5636,8 @@ func file_hpfs_proto_init() { return nil } } - file_hpfs_proto_msgTypes[23].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*TruncateFileRequest); i { + file_hpfs_proto_msgTypes[37].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ShowAsyncTaskStatusResponse); i { case 0: return &v.state case 1: @@ -4401,8 +5648,8 @@ func file_hpfs_proto_init() { return nil } } - file_hpfs_proto_msgTypes[24].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*TruncateFileResponse); i { + file_hpfs_proto_msgTypes[38].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*CancelAsyncTaskRequest); i { case 0: return &v.state case 1: @@ -4413,8 +5660,8 @@ func file_hpfs_proto_init() { return nil } } - file_hpfs_proto_msgTypes[25].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*RemoteFsEndpoint); i { + file_hpfs_proto_msgTypes[39].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*CancelAsyncTaskResponse); i { case 0: return &v.state case 1: @@ -4425,8 +5672,8 @@ func file_hpfs_proto_init() { return nil } } - file_hpfs_proto_msgTypes[26].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*AsyncTask); i { + file_hpfs_proto_msgTypes[40].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*UploadTarget); i { case 0: return &v.state case 1: @@ -4437,8 +5684,8 @@ func file_hpfs_proto_init() { return nil } } - file_hpfs_proto_msgTypes[27].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*AsyncTaskRequest); i { + file_hpfs_proto_msgTypes[41].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*UploadRequest); i { case 0: return &v.state case 1: @@ -4449,8 +5696,8 @@ func file_hpfs_proto_init() { return nil } } - file_hpfs_proto_msgTypes[28].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*DownloadSource); i { + file_hpfs_proto_msgTypes[42].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*UploadResponse); i { case 0: return &v.state case 1: @@ -4461,8 +5708,8 @@ func file_hpfs_proto_init() { return nil } } - file_hpfs_proto_msgTypes[29].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*DownloadTask); i { + file_hpfs_proto_msgTypes[43].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*DeleteRemoteFileRequest); i { case 0: return &v.state case 1: @@ -4473,8 +5720,8 @@ func file_hpfs_proto_init() { return nil } } - file_hpfs_proto_msgTypes[30].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*DownloadRequest); i { + file_hpfs_proto_msgTypes[44].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*DeleteRemoteFileResponse); i { case 0: return &v.state case 1: @@ -4485,8 +5732,8 @@ func file_hpfs_proto_init() { return nil } } - file_hpfs_proto_msgTypes[31].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*DownloadResponse); i { + file_hpfs_proto_msgTypes[45].Exporter = 
func(v interface{}, i int) interface{} { + switch v := v.(*DiskInfo); i { case 0: return &v.state case 1: @@ -4497,8 +5744,8 @@ func file_hpfs_proto_init() { return nil } } - file_hpfs_proto_msgTypes[32].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*TransferRequest); i { + file_hpfs_proto_msgTypes[46].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ShowDiskInfoRequest); i { case 0: return &v.state case 1: @@ -4509,8 +5756,8 @@ func file_hpfs_proto_init() { return nil } } - file_hpfs_proto_msgTypes[33].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*TransferResponse); i { + file_hpfs_proto_msgTypes[47].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ShowDiskInfoResponse); i { case 0: return &v.state case 1: @@ -4521,8 +5768,8 @@ func file_hpfs_proto_init() { return nil } } - file_hpfs_proto_msgTypes[34].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ShowDiskUsageRequest); i { + file_hpfs_proto_msgTypes[48].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*BlkioCtrl); i { case 0: return &v.state case 1: @@ -4533,8 +5780,8 @@ func file_hpfs_proto_init() { return nil } } - file_hpfs_proto_msgTypes[35].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ShowDiskUsageResponse); i { + file_hpfs_proto_msgTypes[49].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ControlCgroupsBlkioRequest); i { case 0: return &v.state case 1: @@ -4545,8 +5792,8 @@ func file_hpfs_proto_init() { return nil } } - file_hpfs_proto_msgTypes[36].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ShowAsyncTaskStatusRequest); i { + file_hpfs_proto_msgTypes[50].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ControlCgroupsBlkioResponse); i { case 0: return &v.state case 1: @@ -4557,8 +5804,8 @@ func file_hpfs_proto_init() { return nil } } - file_hpfs_proto_msgTypes[37].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ShowAsyncTaskStatusResponse); i { + file_hpfs_proto_msgTypes[51].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*OpenBackupBinlogRequest); i { case 0: return &v.state case 1: @@ -4569,8 +5816,8 @@ func file_hpfs_proto_init() { return nil } } - file_hpfs_proto_msgTypes[38].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*CancelAsyncTaskRequest); i { + file_hpfs_proto_msgTypes[52].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*OpenBackupBinlogResponse); i { case 0: return &v.state case 1: @@ -4581,8 +5828,8 @@ func file_hpfs_proto_init() { return nil } } - file_hpfs_proto_msgTypes[39].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*CancelAsyncTaskResponse); i { + file_hpfs_proto_msgTypes[53].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*CloseBackupBinlogRequest); i { case 0: return &v.state case 1: @@ -4593,8 +5840,8 @@ func file_hpfs_proto_init() { return nil } } - file_hpfs_proto_msgTypes[40].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*UploadTarget); i { + file_hpfs_proto_msgTypes[54].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*CloseBackupBinlogResponse); i { case 0: return &v.state case 1: @@ -4605,8 +5852,8 @@ func file_hpfs_proto_init() { return nil } } - file_hpfs_proto_msgTypes[41].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*UploadRequest); i { + file_hpfs_proto_msgTypes[55].Exporter = 
func(v interface{}, i int) interface{} { + switch v := v.(*UploadLatestBinlogFileRequest); i { case 0: return &v.state case 1: @@ -4617,8 +5864,8 @@ func file_hpfs_proto_init() { return nil } } - file_hpfs_proto_msgTypes[42].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*UploadResponse); i { + file_hpfs_proto_msgTypes[56].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*UploadLatestBinlogFileResponse); i { case 0: return &v.state case 1: @@ -4629,8 +5876,8 @@ func file_hpfs_proto_init() { return nil } } - file_hpfs_proto_msgTypes[43].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*DeleteRemoteFileRequest); i { + file_hpfs_proto_msgTypes[57].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*GetWatcherInfoHashRequest); i { case 0: return &v.state case 1: @@ -4641,8 +5888,8 @@ func file_hpfs_proto_init() { return nil } } - file_hpfs_proto_msgTypes[44].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*DeleteRemoteFileResponse); i { + file_hpfs_proto_msgTypes[58].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*GetWatcherInfoHashResponse); i { case 0: return &v.state case 1: @@ -4653,8 +5900,8 @@ func file_hpfs_proto_init() { return nil } } - file_hpfs_proto_msgTypes[45].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*DiskInfo); i { + file_hpfs_proto_msgTypes[59].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*DeleteBinlogFilesBeforeRequest); i { case 0: return &v.state case 1: @@ -4665,8 +5912,8 @@ func file_hpfs_proto_init() { return nil } } - file_hpfs_proto_msgTypes[46].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ShowDiskInfoRequest); i { + file_hpfs_proto_msgTypes[60].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*DeleteBinlogFilesBeforeResponse); i { case 0: return &v.state case 1: @@ -4677,8 +5924,8 @@ func file_hpfs_proto_init() { return nil } } - file_hpfs_proto_msgTypes[47].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ShowDiskInfoResponse); i { + file_hpfs_proto_msgTypes[61].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ListLocalBinlogListRequest); i { case 0: return &v.state case 1: @@ -4689,8 +5936,8 @@ func file_hpfs_proto_init() { return nil } } - file_hpfs_proto_msgTypes[48].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*BlkioCtrl); i { + file_hpfs_proto_msgTypes[62].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ListLocalBinlogListResponse); i { case 0: return &v.state case 1: @@ -4701,8 +5948,8 @@ func file_hpfs_proto_init() { return nil } } - file_hpfs_proto_msgTypes[49].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ControlCgroupsBlkioRequest); i { + file_hpfs_proto_msgTypes[63].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ListRemoteBinlogListRequest); i { case 0: return &v.state case 1: @@ -4713,8 +5960,8 @@ func file_hpfs_proto_init() { return nil } } - file_hpfs_proto_msgTypes[50].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ControlCgroupsBlkioResponse); i { + file_hpfs_proto_msgTypes[64].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ListRemoteBinlogListResponse); i { case 0: return &v.state case 1: @@ -4749,7 +5996,7 @@ func file_hpfs_proto_init() { GoPackagePath: reflect.TypeOf(x{}).PkgPath(), RawDescriptor: file_hpfs_proto_rawDesc, NumEnums: 3, - NumMessages: 53, + 
NumMessages: 67, NumExtensions: 0, NumServices: 1, }, @@ -4793,6 +6040,14 @@ type HpfsServiceClient interface { ShowDiskInfo(ctx context.Context, in *ShowDiskInfoRequest, opts ...grpc.CallOption) (*ShowDiskInfoResponse, error) // Control blkio cgroups for kubernetes pods. ControlCgroupsBlkio(ctx context.Context, in *ControlCgroupsBlkioRequest, opts ...grpc.CallOption) (*ControlCgroupsBlkioResponse, error) + //backup binlog + OpenBackupBinlog(ctx context.Context, in *OpenBackupBinlogRequest, opts ...grpc.CallOption) (*OpenBackupBinlogResponse, error) + CloseBackupBinlog(ctx context.Context, in *CloseBackupBinlogRequest, opts ...grpc.CallOption) (*CloseBackupBinlogResponse, error) + UploadLatestBinlogFile(ctx context.Context, in *UploadLatestBinlogFileRequest, opts ...grpc.CallOption) (*UploadLatestBinlogFileResponse, error) + GetWatcherInfoHash(ctx context.Context, in *GetWatcherInfoHashRequest, opts ...grpc.CallOption) (*GetWatcherInfoHashResponse, error) + DeleteBinlogFilesBefore(ctx context.Context, in *DeleteBinlogFilesBeforeRequest, opts ...grpc.CallOption) (*DeleteBinlogFilesBeforeResponse, error) + ListLocalBinlogList(ctx context.Context, in *ListLocalBinlogListRequest, opts ...grpc.CallOption) (*ListLocalBinlogListResponse, error) + ListRemoteBinlogList(ctx context.Context, in *ListRemoteBinlogListRequest, opts ...grpc.CallOption) (*ListRemoteBinlogListResponse, error) } type hpfsServiceClient struct { @@ -4947,6 +6202,69 @@ func (c *hpfsServiceClient) ControlCgroupsBlkio(ctx context.Context, in *Control return out, nil } +func (c *hpfsServiceClient) OpenBackupBinlog(ctx context.Context, in *OpenBackupBinlogRequest, opts ...grpc.CallOption) (*OpenBackupBinlogResponse, error) { + out := new(OpenBackupBinlogResponse) + err := c.cc.Invoke(ctx, "/proto.HpfsService/OpenBackupBinlog", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *hpfsServiceClient) CloseBackupBinlog(ctx context.Context, in *CloseBackupBinlogRequest, opts ...grpc.CallOption) (*CloseBackupBinlogResponse, error) { + out := new(CloseBackupBinlogResponse) + err := c.cc.Invoke(ctx, "/proto.HpfsService/CloseBackupBinlog", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *hpfsServiceClient) UploadLatestBinlogFile(ctx context.Context, in *UploadLatestBinlogFileRequest, opts ...grpc.CallOption) (*UploadLatestBinlogFileResponse, error) { + out := new(UploadLatestBinlogFileResponse) + err := c.cc.Invoke(ctx, "/proto.HpfsService/UploadLatestBinlogFile", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *hpfsServiceClient) GetWatcherInfoHash(ctx context.Context, in *GetWatcherInfoHashRequest, opts ...grpc.CallOption) (*GetWatcherInfoHashResponse, error) { + out := new(GetWatcherInfoHashResponse) + err := c.cc.Invoke(ctx, "/proto.HpfsService/GetWatcherInfoHash", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *hpfsServiceClient) DeleteBinlogFilesBefore(ctx context.Context, in *DeleteBinlogFilesBeforeRequest, opts ...grpc.CallOption) (*DeleteBinlogFilesBeforeResponse, error) { + out := new(DeleteBinlogFilesBeforeResponse) + err := c.cc.Invoke(ctx, "/proto.HpfsService/DeleteBinlogFilesBefore", in, out, opts...) 
+ if err != nil { + return nil, err + } + return out, nil +} + +func (c *hpfsServiceClient) ListLocalBinlogList(ctx context.Context, in *ListLocalBinlogListRequest, opts ...grpc.CallOption) (*ListLocalBinlogListResponse, error) { + out := new(ListLocalBinlogListResponse) + err := c.cc.Invoke(ctx, "/proto.HpfsService/ListLocalBinlogList", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *hpfsServiceClient) ListRemoteBinlogList(ctx context.Context, in *ListRemoteBinlogListRequest, opts ...grpc.CallOption) (*ListRemoteBinlogListResponse, error) { + out := new(ListRemoteBinlogListResponse) + err := c.cc.Invoke(ctx, "/proto.HpfsService/ListRemoteBinlogList", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + // HpfsServiceServer is the server API for HpfsService service. type HpfsServiceServer interface { CreateDirectory(context.Context, *CreateDirectoryRequest) (*CreateDirectoryResponse, error) @@ -4966,6 +6284,14 @@ type HpfsServiceServer interface { ShowDiskInfo(context.Context, *ShowDiskInfoRequest) (*ShowDiskInfoResponse, error) // Control blkio cgroups for kubernetes pods. ControlCgroupsBlkio(context.Context, *ControlCgroupsBlkioRequest) (*ControlCgroupsBlkioResponse, error) + //backup binlog + OpenBackupBinlog(context.Context, *OpenBackupBinlogRequest) (*OpenBackupBinlogResponse, error) + CloseBackupBinlog(context.Context, *CloseBackupBinlogRequest) (*CloseBackupBinlogResponse, error) + UploadLatestBinlogFile(context.Context, *UploadLatestBinlogFileRequest) (*UploadLatestBinlogFileResponse, error) + GetWatcherInfoHash(context.Context, *GetWatcherInfoHashRequest) (*GetWatcherInfoHashResponse, error) + DeleteBinlogFilesBefore(context.Context, *DeleteBinlogFilesBeforeRequest) (*DeleteBinlogFilesBeforeResponse, error) + ListLocalBinlogList(context.Context, *ListLocalBinlogListRequest) (*ListLocalBinlogListResponse, error) + ListRemoteBinlogList(context.Context, *ListRemoteBinlogListRequest) (*ListRemoteBinlogListResponse, error) } // UnimplementedHpfsServiceServer can be embedded to have forward compatible implementations. 
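For context, all seven new backup-binlog RPCs are plain unary calls, so driving them from client code follows the same pattern as the existing HpfsService methods. Below is a minimal sketch, not part of this patch: the endpoint address, log directory, and info-file content are hypothetical placeholders, the Host message is left unfilled because its fields sit outside this hunk, and the generated NewHpfsServiceClient constructor and getter methods are assumed from the standard (pre-v2 service split) protoc-gen-go output used in this file.

package main

import (
	"context"
	"log"
	"time"

	"google.golang.org/grpc"

	"github.com/alibaba/polardbx-operator/pkg/hpfs/proto"
)

func main() {
	// Assumed host-local hpfs endpoint; the address/port is a placeholder.
	conn, err := grpc.Dial("127.0.0.1:6543", grpc.WithInsecure())
	if err != nil {
		log.Fatalf("dial hpfs: %v", err)
	}
	defer conn.Close()

	client := proto.NewHpfsServiceClient(conn)
	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
	defer cancel()

	// Target host; its fields are defined elsewhere in hpfs.proto and elided here.
	host := &proto.Host{}

	// Start watching binlog files under the pod's log directory (path is hypothetical).
	openResp, err := client.OpenBackupBinlog(ctx, &proto.OpenBackupBinlogRequest{
		Host:    host,
		LogDir:  "/data/mysql/log",
		Content: "example watcher info",
	})
	if err != nil {
		log.Fatalf("OpenBackupBinlog: %v", err)
	}
	log.Printf("open backup binlog: %v", openResp.GetStatus().GetCode())

	// Flush the newest binlog file to the configured sink; Done reports completion.
	upResp, err := client.UploadLatestBinlogFile(ctx, &proto.UploadLatestBinlogFileRequest{
		Host:   host,
		LogDir: "/data/mysql/log",
	})
	if err != nil {
		log.Fatalf("UploadLatestBinlogFile: %v", err)
	}
	log.Printf("latest binlog uploaded: %v", upResp.GetDone())
}

DeleteBinlogFilesBefore and the two list RPCs follow the same call shape; only the request and response messages differ.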
@@ -5020,6 +6346,27 @@ func (*UnimplementedHpfsServiceServer) ShowDiskInfo(context.Context, *ShowDiskIn func (*UnimplementedHpfsServiceServer) ControlCgroupsBlkio(context.Context, *ControlCgroupsBlkioRequest) (*ControlCgroupsBlkioResponse, error) { return nil, status.Errorf(codes.Unimplemented, "method ControlCgroupsBlkio not implemented") } +func (*UnimplementedHpfsServiceServer) OpenBackupBinlog(context.Context, *OpenBackupBinlogRequest) (*OpenBackupBinlogResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method OpenBackupBinlog not implemented") +} +func (*UnimplementedHpfsServiceServer) CloseBackupBinlog(context.Context, *CloseBackupBinlogRequest) (*CloseBackupBinlogResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method CloseBackupBinlog not implemented") +} +func (*UnimplementedHpfsServiceServer) UploadLatestBinlogFile(context.Context, *UploadLatestBinlogFileRequest) (*UploadLatestBinlogFileResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method UploadLatestBinlogFile not implemented") +} +func (*UnimplementedHpfsServiceServer) GetWatcherInfoHash(context.Context, *GetWatcherInfoHashRequest) (*GetWatcherInfoHashResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method GetWatcherInfoHash not implemented") +} +func (*UnimplementedHpfsServiceServer) DeleteBinlogFilesBefore(context.Context, *DeleteBinlogFilesBeforeRequest) (*DeleteBinlogFilesBeforeResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method DeleteBinlogFilesBefore not implemented") +} +func (*UnimplementedHpfsServiceServer) ListLocalBinlogList(context.Context, *ListLocalBinlogListRequest) (*ListLocalBinlogListResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method ListLocalBinlogList not implemented") +} +func (*UnimplementedHpfsServiceServer) ListRemoteBinlogList(context.Context, *ListRemoteBinlogListRequest) (*ListRemoteBinlogListResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method ListRemoteBinlogList not implemented") +} func RegisterHpfsServiceServer(s *grpc.Server, srv HpfsServiceServer) { s.RegisterService(&_HpfsService_serviceDesc, srv) @@ -5313,6 +6660,132 @@ func _HpfsService_ControlCgroupsBlkio_Handler(srv interface{}, ctx context.Conte return interceptor(ctx, in, info, handler) } +func _HpfsService_OpenBackupBinlog_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(OpenBackupBinlogRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(HpfsServiceServer).OpenBackupBinlog(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/proto.HpfsService/OpenBackupBinlog", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(HpfsServiceServer).OpenBackupBinlog(ctx, req.(*OpenBackupBinlogRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _HpfsService_CloseBackupBinlog_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(CloseBackupBinlogRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(HpfsServiceServer).CloseBackupBinlog(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/proto.HpfsService/CloseBackupBinlog", + } + handler := func(ctx context.Context, req interface{}) (interface{}, 
error) { + return srv.(HpfsServiceServer).CloseBackupBinlog(ctx, req.(*CloseBackupBinlogRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _HpfsService_UploadLatestBinlogFile_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(UploadLatestBinlogFileRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(HpfsServiceServer).UploadLatestBinlogFile(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/proto.HpfsService/UploadLatestBinlogFile", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(HpfsServiceServer).UploadLatestBinlogFile(ctx, req.(*UploadLatestBinlogFileRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _HpfsService_GetWatcherInfoHash_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(GetWatcherInfoHashRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(HpfsServiceServer).GetWatcherInfoHash(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/proto.HpfsService/GetWatcherInfoHash", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(HpfsServiceServer).GetWatcherInfoHash(ctx, req.(*GetWatcherInfoHashRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _HpfsService_DeleteBinlogFilesBefore_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(DeleteBinlogFilesBeforeRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(HpfsServiceServer).DeleteBinlogFilesBefore(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/proto.HpfsService/DeleteBinlogFilesBefore", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(HpfsServiceServer).DeleteBinlogFilesBefore(ctx, req.(*DeleteBinlogFilesBeforeRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _HpfsService_ListLocalBinlogList_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ListLocalBinlogListRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(HpfsServiceServer).ListLocalBinlogList(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/proto.HpfsService/ListLocalBinlogList", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(HpfsServiceServer).ListLocalBinlogList(ctx, req.(*ListLocalBinlogListRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _HpfsService_ListRemoteBinlogList_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ListRemoteBinlogListRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(HpfsServiceServer).ListRemoteBinlogList(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/proto.HpfsService/ListRemoteBinlogList", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return 
srv.(HpfsServiceServer).ListRemoteBinlogList(ctx, req.(*ListRemoteBinlogListRequest)) + } + return interceptor(ctx, in, info, handler) +} + var _HpfsService_serviceDesc = grpc.ServiceDesc{ ServiceName: "proto.HpfsService", HandlerType: (*HpfsServiceServer)(nil), @@ -5381,6 +6854,34 @@ var _HpfsService_serviceDesc = grpc.ServiceDesc{ MethodName: "ControlCgroupsBlkio", Handler: _HpfsService_ControlCgroupsBlkio_Handler, }, + { + MethodName: "OpenBackupBinlog", + Handler: _HpfsService_OpenBackupBinlog_Handler, + }, + { + MethodName: "CloseBackupBinlog", + Handler: _HpfsService_CloseBackupBinlog_Handler, + }, + { + MethodName: "UploadLatestBinlogFile", + Handler: _HpfsService_UploadLatestBinlogFile_Handler, + }, + { + MethodName: "GetWatcherInfoHash", + Handler: _HpfsService_GetWatcherInfoHash_Handler, + }, + { + MethodName: "DeleteBinlogFilesBefore", + Handler: _HpfsService_DeleteBinlogFilesBefore_Handler, + }, + { + MethodName: "ListLocalBinlogList", + Handler: _HpfsService_ListLocalBinlogList_Handler, + }, + { + MethodName: "ListRemoteBinlogList", + Handler: _HpfsService_ListRemoteBinlogList_Handler, + }, }, Streams: []grpc.StreamDesc{}, Metadata: "hpfs.proto", diff --git a/pkg/hpfs/proto/hpfs.proto b/pkg/hpfs/proto/hpfs.proto index 093e9cd..2d66d3d 100644 --- a/pkg/hpfs/proto/hpfs.proto +++ b/pkg/hpfs/proto/hpfs.proto @@ -24,23 +24,32 @@ option go_package = "github.com/alibaba/polardbx-operator/pkg/hpfs/proto"; //@formatter:off service HpfsService { - rpc CreateDirectory(CreateDirectoryRequest) returns (CreateDirectoryResponse) {} - rpc CreateFile(CreateFileRequest) returns (CreateFileResponse) {} - rpc CreateSymbolicLink(CreateSymbolicLinkRequest) returns (CreateSymbolicLinkResponse) {} - rpc ListDirectory(ListDirectoryRequest) returns (ListDirectoryResponse) {} - rpc RemoveDirectory(RemoveDirectoryRequest) returns (RemoveDirectoryResponse) {} - rpc RemoveFile(RemoveFileRequest) returns (RemoveFileResponse) {} - rpc TruncateFile(TruncateFileRequest) returns (TruncateFileResponse) {} - rpc DownloadFiles(DownloadRequest) returns (DownloadResponse) {} - rpc UploadFiles(UploadRequest) returns (UploadResponse) {} - rpc DeleteRemoteFile(DeleteRemoteFileRequest) returns (DeleteRemoteFileResponse) {} - rpc ShowAsyncTaskStatus(ShowAsyncTaskStatusRequest) returns (ShowAsyncTaskStatusResponse) {} - rpc CancelAsyncTask(CancelAsyncTaskRequest) returns (CancelAsyncTaskResponse) {} - rpc TransferFiles(TransferRequest) returns (TransferResponse) {} - rpc ShowDiskUsage(ShowDiskUsageRequest) returns (ShowDiskUsageResponse) {} - rpc ShowDiskInfo(ShowDiskInfoRequest) returns (ShowDiskInfoResponse) {} - // Control blkio cgroups for kubernetes pods. 
- rpc ControlCgroupsBlkio(ControlCgroupsBlkioRequest) returns (ControlCgroupsBlkioResponse) {} + rpc CreateDirectory(CreateDirectoryRequest) returns (CreateDirectoryResponse) {} + rpc CreateFile(CreateFileRequest) returns (CreateFileResponse) {} + rpc CreateSymbolicLink(CreateSymbolicLinkRequest) returns (CreateSymbolicLinkResponse) {} + rpc ListDirectory(ListDirectoryRequest) returns (ListDirectoryResponse) {} + rpc RemoveDirectory(RemoveDirectoryRequest) returns (RemoveDirectoryResponse) {} + rpc RemoveFile(RemoveFileRequest) returns (RemoveFileResponse) {} + rpc TruncateFile(TruncateFileRequest) returns (TruncateFileResponse) {} + rpc DownloadFiles(DownloadRequest) returns (DownloadResponse) {} + rpc UploadFiles(UploadRequest) returns (UploadResponse) {} + rpc DeleteRemoteFile(DeleteRemoteFileRequest) returns (DeleteRemoteFileResponse) {} + rpc ShowAsyncTaskStatus(ShowAsyncTaskStatusRequest) returns (ShowAsyncTaskStatusResponse) {} + rpc CancelAsyncTask(CancelAsyncTaskRequest) returns (CancelAsyncTaskResponse) {} + rpc TransferFiles(TransferRequest) returns (TransferResponse) {} + rpc ShowDiskUsage(ShowDiskUsageRequest) returns (ShowDiskUsageResponse) {} + rpc ShowDiskInfo(ShowDiskInfoRequest) returns (ShowDiskInfoResponse) {} + // Control blkio cgroups for kubernetes pods. + rpc ControlCgroupsBlkio(ControlCgroupsBlkioRequest) returns (ControlCgroupsBlkioResponse) {} + + //backup binlog + rpc OpenBackupBinlog(OpenBackupBinlogRequest) returns (OpenBackupBinlogResponse){} + rpc CloseBackupBinlog(CloseBackupBinlogRequest) returns (CloseBackupBinlogResponse){} + rpc UploadLatestBinlogFile(UploadLatestBinlogFileRequest) returns (UploadLatestBinlogFileResponse){} + rpc GetWatcherInfoHash(GetWatcherInfoHashRequest) returns (GetWatcherInfoHashResponse){} + rpc DeleteBinlogFilesBefore(DeleteBinlogFilesBeforeRequest) returns (DeleteBinlogFilesBeforeResponse){} + rpc ListLocalBinlogList(ListLocalBinlogListRequest) returns (ListLocalBinlogListResponse){} + rpc ListRemoteBinlogList(ListRemoteBinlogListRequest) returns (ListRemoteBinlogListResponse){} } //@formatter:on @@ -579,4 +588,150 @@ message ControlCgroupsBlkioRequest { message ControlCgroupsBlkioResponse { // Operation status. Status status = 1; +} + +message OpenBackupBinlogRequest{ + // Target host + Host host = 1; + + //the log directory of the pod on the host + string log_dir = 2; + + //the info file content + string content = 3; +} + +message OpenBackupBinlogResponse{ + // Operation status. + Status status = 1; +} + +message CloseBackupBinlogRequest{ + // Target host + Host host = 1; + + //the log directory of the pod on the host + string log_dir = 2; +} + +message CloseBackupBinlogResponse{ + // Operation status. + Status status = 1; +} + +message UploadLatestBinlogFileRequest{ + // Target host + Host host = 1; + + //the log directory of the pod on the host + string log_dir = 2; +} + +message UploadLatestBinlogFileResponse{ + // Operation status. + Status status = 1; + + // check if the latest binlog file is uploaded + bool done = 2; +} + + +message GetWatcherInfoHashRequest{ + // Target host + Host host = 1; + + //log dir. also worker dir of the watcher + string logDir = 2; +} + +message GetWatcherInfoHashResponse{ + // Operation status. 
+ Status status = 1; + + // Hash value of info file + string hash = 2; +} + +message DeleteBinlogFilesBeforeRequest{ + //the k8s namespace + string namespace = 1; + + // the instance name of PolarDB-X + string pxcName = 2; + + //the object uid of PolarDB-X + string pxcUid = 3; + + //the timestamp unit: seconds + int64 unixTime = 4; + + //the sink type + string sinkType = 5; + + //the sink name + string sinkName = 6; +} + +message DeleteBinlogFilesBeforeResponse{ + // Operation status. + Status status = 1; + + // delete file list + repeated string deletedFiles = 2; + + // whether the delete operation has completed + bool done = 3; +} + +message ListLocalBinlogListRequest{ + // Target host + Host host = 1; + + //log dir. also worker dir of the watcher + string logDir = 2; +} + +message ListLocalBinlogListResponse{ + // Operation status. + Status status = 1; + + //backup binlog version + string version = 2; + + // binlog file list + repeated string binlogFiles = 3; +} + +message ListRemoteBinlogListRequest{ + //the k8s namespace + string namespace = 1; + + // the instance name of PolarDB-X + string pxcName = 2; + + //the object uid of PolarDB-X + string pxcUid = 3; + + // the instance name of xstore + string xStoreName = 4; + + //the object uid of xstore + string xStoreUid = 5; + + //the pod name + string podName = 6; + + //the sink name + string sinkName = 7; + + //the sink type + string sinkType = 8; +} + +message ListRemoteBinlogListResponse{ + // Operation status. + Status status = 1; + + //the files + repeated string files = 2; } \ No newline at end of file diff --git a/pkg/hpfs/remote/aliyun_oss.go b/pkg/hpfs/remote/aliyun_oss.go index 15bd136..00d2f5b 100644 --- a/pkg/hpfs/remote/aliyun_oss.go +++ b/pkg/hpfs/remote/aliyun_oss.go @@ -17,10 +17,15 @@ limitations under the License. 
package remote import ( + "bytes" "context" + "encoding/json" "fmt" + "github.com/alibaba/polardbx-operator/pkg/hpfs/common" polarxIo "github.com/alibaba/polardbx-operator/pkg/util/io" + polarxPath "github.com/alibaba/polardbx-operator/pkg/util/path" "github.com/aliyun/aliyun-oss-go-sdk/oss" + "github.com/eapache/queue" "io" "os" "path/filepath" @@ -58,10 +63,41 @@ func (o *aliyunOssFs) DeleteFile(ctx context.Context, path string, auth, params if err != nil { return fmt.Errorf("failed to open oss bucket: %w", err) } - return bucket.DeleteObject(path) } +func (o *aliyunOssFs) DeleteExpiredFile(ctx context.Context, path string, auth, params map[string]string) (FileTask, error) { + ossCtx, err := newAliyunOssContext(ctx, auth, params) + if err != nil { + return nil, err + } + client, err := o.newClient(ossCtx) + if err != nil { + return nil, fmt.Errorf("failed to create oss client: %w", err) + } + bucket, err := client.Bucket(ossCtx.bucket) + if err != nil { + return nil, fmt.Errorf("failed to open oss bucket: %w", err) + } + ft := newFileTask(ctx) + go func() { + err := o.ListFileWithDeadline(path, bucket, ossCtx.deadline, func(objs []string) error { + _, err := bucket.DeleteObjects(objs) + if err != nil { + return err + } + for _, obj := range objs { + if val, ok := ctx.Value(common.AffectedFiles).(*[]string); ok { + *val = append(*val, obj) + } + } + return nil + }) + ft.complete(err) + }() + return ft, nil +} + type ossProgressListener4FileTask struct { *fileTask } @@ -183,7 +219,12 @@ func (o *aliyunOssFs) UploadFile(ctx context.Context, reader io.Reader, path str }() var uploadedLen int64 parts := make([]oss.UploadPart, 0) + emptyBytes := make([]byte, 0) for { + // The zero-length read is an EOF probe: it blocks until the writer has data or closes the pipe, consuming nothing, so the loop stops before uploading an empty part. + _, err := pipeReader.Read(emptyBytes) + if err != nil { + break + } limitedReader := io.LimitReader(pipeReader, limitReaderSize) uploadPart, err := bucket.UploadPart(imur, limitedReader, limitReaderSize, partIndex, opts...) 
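+ // Parts accumulate in 'parts'; the multipart upload is presumably finalized via bucket.CompleteMultipartUpload after the loop (outside this hunk). 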
partIndex++ @@ -458,6 +499,135 @@ func (o *aliyunOssFs) DownloadFile(ctx context.Context, writer io.Writer, path s return ft, nil } +func (o *aliyunOssFs) ListFiles(ctx context.Context, writer io.Writer, path string, auth, params map[string]string) (FileTask, error) { + ossCtx, err := newAliyunOssContext(ctx, auth, params) + if err != nil { + return nil, err + } + + client, err := o.newClient(ossCtx) + if err != nil { + return nil, fmt.Errorf("failed to create oss client: %w", err) + } + bucket, err := client.Bucket(ossCtx.bucket) + if err != nil { + return nil, fmt.Errorf("failed to open oss bucket: %w", err) + } + + ft := newFileTask(ctx) + go func() { + // List the entries under path using the OSS SDK + entryNames := make([]string, 0) + marker := "" + prefix := oss.Prefix(path) + delimiter := oss.Delimiter("/") + for { + lsRes, err := bucket.ListObjects(oss.Marker(marker), prefix, delimiter) + if err != nil { + ft.complete(fmt.Errorf("failed to list oss objects in path %s: %w", path, err)) + return + } + for _, object := range lsRes.Objects { // file + entryNames = append(entryNames, polarxPath.GetBaseNameFromPath(object.Key)) + } + for _, dir := range lsRes.CommonPrefixes { // subdirectory + entryNames = append(entryNames, polarxPath.GetBaseNameFromPath(dir)) + } + if lsRes.IsTruncated { + marker = lsRes.NextMarker + } else { + break + } + } + + // Encode the entry list and send it to the writer + encodedEntryNames, err := json.Marshal(entryNames) + if err != nil { + ft.complete(fmt.Errorf("failed to encode entry name slice: %w", err)) + return + } + if ossCtx.writeLen { + bytesCount := int64(len(encodedEntryNames)) + err := polarxIo.WriteUint64(writer, uint64(bytesCount)) + if err != nil { + ft.complete(fmt.Errorf("failed to send content bytes count: %w", err)) + return + } + _, err = io.CopyN(writer, bytes.NewReader(encodedEntryNames), bytesCount) + } else { + _, err = io.Copy(writer, bytes.NewReader(encodedEntryNames)) + } + if err != nil { + ft.complete(fmt.Errorf("failed to copy content: %w", err)) + return + } + ft.complete(nil) + }() + return ft, nil +} + +// ListFileWithDeadline walks the subtree under path breadth-first and passes batches of objects last modified before deadline to callback. +func (o *aliyunOssFs) ListFileWithDeadline(path string, bucket *oss.Bucket, deadline int64, callback func([]string) error) error { + marker := "" + delimiter := oss.Delimiter("/") + fileQueue := queue.New() + fileQueue.Add([]string{path, marker}) + for fileQueue.Length() != 0 { + element := fileQueue.Remove().([]string) + lsRes, err := bucket.ListObjects(oss.Marker(element[1]), oss.Prefix(element[0]), delimiter) + if err != nil { + return err + } + if lsRes.IsTruncated { + fileQueue.Add([]string{element[0], lsRes.NextMarker}) + } + for _, commonPrefix := range lsRes.CommonPrefixes { + fileQueue.Add([]string{commonPrefix, ""}) + } + objs := make([]string, 0) + for _, obj := range lsRes.Objects { + if obj.LastModified.Unix() < deadline { + // expired; collect for the callback + objs = append(objs, obj.Key) + } + } + if len(objs) > 0 { + err := callback(objs) + if err != nil { + return err + } + } + } + return nil +} + +func (o *aliyunOssFs) ListAllFiles(ctx context.Context, path string, auth, params map[string]string) (FileTask, error) { + ossCtx, err := newAliyunOssContext(ctx, auth, params) + if err != nil { + return nil, err + } + client, err := o.newClient(ossCtx) + if err != nil { + return nil, fmt.Errorf("failed to create oss client: %w", err) + } + bucket, err := client.Bucket(ossCtx.bucket) + if err != nil { + return nil, fmt.Errorf("failed to open oss bucket: %w", err) + } + ft := newFileTask(ctx) + go func() { + err := o.ListFileWithDeadline(path, bucket, 
ossCtx.deadline, func(objs []string) error { + for _, obj := range objs { + if val, ok := ctx.Value(common.AffectedFiles).(*[]string); ok { + *val = append(*val, obj) + } + } + return nil + }) + ft.complete(err) + }() + return ft, nil +} + type aliyunOssContext struct { ctx context.Context @@ -469,6 +639,7 @@ type aliyunOssContext struct { writeLen bool bufferSize int64 useTmpFile bool + deadline int64 } func newAliyunOssContext(ctx context.Context, auth, params map[string]string) (*aliyunOssContext, error) { @@ -496,6 +667,14 @@ func newAliyunOssContext(ctx context.Context, auth, params map[string]string) (* } useTmpFile = toUseTmpFile } + var deadline int64 = 0 + if val, ok := params["deadline"]; ok { + parsedDeadline, err := strconv.ParseInt(val, 10, 64) + if err != nil { + return nil, err + } + deadline = parsedDeadline + } ossCtx := &aliyunOssContext{ ctx: ctx, endpoint: auth["endpoint"], @@ -505,6 +684,7 @@ func newAliyunOssContext(ctx context.Context, auth, params map[string]string) (* writeLen: writeLen, bufferSize: bufferSize, useTmpFile: useTmpFile, + deadline: deadline, } if t, ok := params["retention-time"]; ok { diff --git a/pkg/hpfs/remote/aliyun_oss_test.go b/pkg/hpfs/remote/aliyun_oss_test.go new file mode 100644 index 0000000..d074a4e --- /dev/null +++ b/pkg/hpfs/remote/aliyun_oss_test.go @@ -0,0 +1,47 @@ +package remote + +import ( + "context" + "fmt" + "github.com/alibaba/polardbx-operator/pkg/hpfs/common" + "strconv" + "testing" + "time" +) + +func TestDeleteExpiredFilesOnOss(t *testing.T) { + auth := map[string]string{} + params := map[string]string{} + + auth["endpoint"] = "oss-cn-beijing.aliyuncs.com" + auth["access_key"] = "" + auth["access_secret"] = "" + params["bucket"] = "beijing-busu" + params["deadline"] = strconv.FormatInt(time.Now().Unix(), 10) + fileService, _ := GetFileService("aliyun-oss") + + expiredFiles := make([]string, 0) + expiredFilesPtr := &expiredFiles + ctx := context.WithValue(context.Background(), common.AffectedFiles, expiredFilesPtr) + ft, _ := fileService.DeleteExpiredFile(ctx, "binlogbackup/default/rebuild-demo/67c43e24-c18e-4821-82bd-996db340bf01/", auth, params) + ft.Wait() + val, _ := ctx.Value(common.AffectedFiles).(*[]string) + fmt.Println(*val) +} + +func TestDeleteExpiredFilesOnSftp(t *testing.T) { + auth := map[string]string{} + auth["port"] = "22" + auth["host"] = "11.165.72.152" + auth["username"] = "root" + auth["password"] = "ATP@linux2016" + params := map[string]string{} + params["deadline"] = strconv.FormatInt(time.Now().Unix(), 10) + fileService, _ := GetFileService("sftp") + expiredFiles := make([]string, 0) + expiredFilesPtr := &expiredFiles + ctx := context.WithValue(context.Background(), common.AffectedFiles, expiredFilesPtr) + ft, _ := fileService.DeleteExpiredFile(ctx, "busuhhhh", auth, params) + ft.Wait() + fmt.Println(*expiredFilesPtr) +} diff --git a/pkg/hpfs/remote/ftp.go b/pkg/hpfs/remote/ftp.go index 6cba632..9ba1437 100644 --- a/pkg/hpfs/remote/ftp.go +++ b/pkg/hpfs/remote/ftp.go @@ -114,3 +114,15 @@ func (f *ftpFs) DownloadFile(ctx context.Context, writer io.Writer, path string, return ft, nil } + +func (f *ftpFs) DeleteExpiredFile(ctx context.Context, path string, auth, params map[string]string) (FileTask, error) { + panic("Not implemented") +} + +func (f *ftpFs) ListFiles(ctx context.Context, writer io.Writer, path string, auth, params map[string]string) (FileTask, error) { + panic("Not implemented") +} + +func (o *ftpFs) ListAllFiles(ctx context.Context, path string, auth, params map[string]string) 
(FileTask, error) { + panic("Not implemented") +} diff --git a/pkg/hpfs/remote/hdfs.go b/pkg/hpfs/remote/hdfs.go index 9593d94..d29663a 100644 --- a/pkg/hpfs/remote/hdfs.go +++ b/pkg/hpfs/remote/hdfs.go @@ -133,3 +133,15 @@ func (h *hdfsFs) DownloadFile(ctx context.Context, writer io.Writer, path string return ft, nil } + +func (h *hdfsFs) DeleteExpiredFile(ctx context.Context, path string, auth, params map[string]string) (FileTask, error) { + panic("Not implemented") +} + +func (h *hdfsFs) ListFiles(ctx context.Context, writer io.Writer, path string, auth, params map[string]string) (FileTask, error) { + panic("Not implemented") +} + +func (h *hdfsFs) ListAllFiles(ctx context.Context, path string, auth, params map[string]string) (FileTask, error) { + panic("Not implemented") +} diff --git a/pkg/hpfs/remote/remote.go b/pkg/hpfs/remote/remote.go index a93a06f..69cf611 100644 --- a/pkg/hpfs/remote/remote.go +++ b/pkg/hpfs/remote/remote.go @@ -31,6 +31,9 @@ type FileService interface { DeleteFile(ctx context.Context, path string, auth, params map[string]string) error UploadFile(ctx context.Context, reader io.Reader, path string, auth, params map[string]string) (FileTask, error) DownloadFile(ctx context.Context, writer io.Writer, path string, auth, params map[string]string) (FileTask, error) + DeleteExpiredFile(ctx context.Context, path string, auth, params map[string]string) (FileTask, error) + ListFiles(ctx context.Context, writer io.Writer, path string, auth, params map[string]string) (FileTask, error) + ListAllFiles(ctx context.Context, path string, auth, params map[string]string) (FileTask, error) } type fileTask struct { diff --git a/pkg/hpfs/remote/sftp.go b/pkg/hpfs/remote/sftp.go index 0f4dc48..cf7428c 100644 --- a/pkg/hpfs/remote/sftp.go +++ b/pkg/hpfs/remote/sftp.go @@ -19,8 +19,12 @@ package remote import ( "context" "fmt" + "github.com/alibaba/polardbx-operator/pkg/hpfs/common" + "github.com/eapache/queue" + "github.com/pkg/errors" "io" "os" + "path/filepath" "strconv" "time" @@ -66,7 +70,8 @@ func (s *sftpFs) newSshConn(sftpCtx *sftpContext) (*ssh.Client, error) { Auth: []ssh.AuthMethod{ ssh.Password(sftpCtx.password), }, - Timeout: 2 * time.Second, + Timeout: 2 * time.Second, + // Host key verification is skipped; the remote server's identity is not checked. + HostKeyCallback: ssh.InsecureIgnoreHostKey(), }) } @@ -164,3 +169,136 @@ func (s *sftpFs) DownloadFile(ctx context.Context, writer io.Writer, path string return ft, nil } + +func (s *sftpFs) DeleteExpiredFile(ctx context.Context, path string, auth, params map[string]string) (FileTask, error) { + sftpCtx, err := newSftpContext(ctx, auth, params) + if err != nil { + return nil, err + } + + conn, err := s.newSshConn(sftpCtx) + if err != nil { + return nil, fmt.Errorf("ssh connection failure: %w", err) + } + + ft := newFileTask(ctx) + var deadline int64 + if val, ok := params["deadline"]; ok { + parsedDeadline, err := strconv.ParseInt(val, 10, 64) + if err != nil { + return nil, err + } + deadline = parsedDeadline + } else { + return nil, fmt.Errorf("deadline param is required") + } + go func() { + defer conn.Close() + client, err := sftp.NewClient(conn) + if err != nil { + ft.complete(err) + return + } + defer client.Close() + dirs, files, err := s.ListFileWithDeadline(client, path, deadline) + if err != nil { + ft.complete(err) + return + } + for _, file := range files { + err := client.Remove(file) + if err != nil { + ft.complete(err) + return + } + if val, ok := ctx.Value(common.AffectedFiles).(*[]string); ok { + *val = append(*val, file) + } + } + if len(dirs) > 0 { + for i := len(dirs) - 1; i >= 0; i-- { + fileInfos, err := client.ReadDir(dirs[i]) + if 
err != nil { + ft.complete(errors.Wrap(err, fmt.Sprintf("failed to readdir filepath=%s", dirs[i]))) + return + } + if len(fileInfos) == 0 { + err := client.Remove(dirs[i]) + if err != nil { + ft.complete(errors.Wrap(err, fmt.Sprintf("failed to remove filepath=%s", dirs[i]))) + return + } + } + } + } + ft.complete(nil) + }() + return ft, nil +} + +// ListFileWithDeadline walks path breadth-first and returns all directories visited plus all regular files last modified before deadline. +func (s *sftpFs) ListFileWithDeadline(client *sftp.Client, path string, deadline int64) ([]string, []string, error) { + fileQueue := queue.New() + fileQueue.Add(path) + var dirs []string + var files []string + for fileQueue.Length() > 0 { + currentPath := fileQueue.Remove().(string) + dirs = append(dirs, currentPath) + fileInfos, err := client.ReadDir(currentPath) + if err != nil { + return nil, nil, err + } + for _, fi := range fileInfos { + newFilepath := filepath.Join(currentPath, fi.Name()) + if fi.IsDir() { + fileQueue.Add(newFilepath) + } else { + if fi.ModTime().Unix() < deadline { + files = append(files, newFilepath) + } + } + } + } + return dirs, files, nil +} + +func (s *sftpFs) ListFiles(ctx context.Context, writer io.Writer, path string, auth, params map[string]string) (FileTask, error) { + panic("Not implemented") +} + +func (s *sftpFs) ListAllFiles(ctx context.Context, path string, auth, params map[string]string) (FileTask, error) { + sftpCtx, err := newSftpContext(ctx, auth, params) + if err != nil { + return nil, err + } + + conn, err := s.newSshConn(sftpCtx) + if err != nil { + return nil, fmt.Errorf("ssh connection failure: %w", err) + } + + ft := newFileTask(ctx) + var deadline int64 + if val, ok := params["deadline"]; ok { + parsedDeadline, err := strconv.ParseInt(val, 10, 64) + if err != nil { + return nil, err + } + deadline = parsedDeadline + } else { + return nil, fmt.Errorf("deadline param is required") + } + go func() { + defer conn.Close() + client, err := sftp.NewClient(conn) + if err != nil { + ft.complete(err) + return + } + defer client.Close() + _, files, err := s.ListFileWithDeadline(client, path, deadline) + if err != nil { + ft.complete(err) + return + } + for _, file := range files { + if val, ok := ctx.Value(common.AffectedFiles).(*[]string); ok { + *val = append(*val, file) + } + } + ft.complete(nil) + }() + return ft, nil +} diff --git a/pkg/k8s/control/context.go b/pkg/k8s/control/context.go index e90a011..e9f125e 100644 --- a/pkg/k8s/control/context.go +++ b/pkg/k8s/control/context.go @@ -152,7 +152,7 @@ func (rc *BaseReconcileContext) ExecuteCommandOn(pod *corev1.Pod, container stri opts.setDefaults() logger := opts.Logger - logger.Info("Executing command", "pod", pod.Namespace, "container", container, "command", command, "timeout", opts.Timeout) + logger.Info("Executing command", "pod", pod.Name, "container", container, "command", command, "timeout", opts.Timeout) // Start execute req := rc.clientSet. 
diff --git a/pkg/k8s/helper/pod.go b/pkg/k8s/helper/pod.go index 612ac4a..f591076 100644 --- a/pkg/k8s/helper/pod.go +++ b/pkg/k8s/helper/pod.go @@ -18,6 +18,7 @@ package helper import ( corev1 "k8s.io/api/core/v1" + "time" ) func IsPodRunning(pod *corev1.Pod) bool { @@ -84,11 +85,13 @@ func MustGetContainerFromPod(pod *corev1.Pod, name string) *corev1.Container { } func IsPodDeletedOrFailed(po *corev1.Pod) bool { - return IsPodDeleted(po) || IsPodFailed(po) + return IsPodDeleted(po, 32*time.Second) || IsPodFailed(po) } -func IsPodDeleted(po *corev1.Pod) bool { - return po != nil && !po.DeletionTimestamp.IsZero() +// IsPodDeleted reports whether the pod was marked for deletion at least delay ago. +func IsPodDeleted(po *corev1.Pod, delay time.Duration) bool { + now := time.Now() + now = now.Add(-delay) + return po != nil && !po.DeletionTimestamp.IsZero() && po.DeletionTimestamp.UTC().Before(now) } func IsPodFailed(po *corev1.Pod) bool { diff --git a/pkg/k8s/helper/service.go b/pkg/k8s/helper/service.go index 9adf4a9..36606d7 100644 --- a/pkg/k8s/helper/service.go +++ b/pkg/k8s/helper/service.go @@ -48,6 +48,14 @@ func GetClusterAddrFromService(svc *corev1.Service, port string) (string, error) return fmt.Sprintf("%s:%d", svc.Spec.ClusterIP, svcPort.Port), nil } +func GetClusterIpPortFromService(svc *corev1.Service, port string) (string, int32, error) { + svcPort := GetPortFromService(svc, port) + if svcPort == nil { + return "", -1, errors.New("port not found: " + port) + } + return svc.Spec.ClusterIP, svcPort.Port, nil +} + func GetServiceDNSRecord(svcName, namespace string, withinNamespace bool) string { if withinNamespace { return svcName diff --git a/pkg/k8s/prometheus/TBD b/pkg/k8s/prometheus/TBD new file mode 100644 index 0000000..e69de29 diff --git a/pkg/meta/core/gms/manager.go b/pkg/meta/core/gms/manager.go index 78c21a1..2fa8564 100644 --- a/pkg/meta/core/gms/manager.go +++ b/pkg/meta/core/gms/manager.go @@ -139,7 +139,14 @@ var ( xcluster80VersionPattern = regexp.MustCompile("8\\.0\\.\\S+-X-Cluster-\\S+") ) -func GetStorageType(engine string, version string) (StorageType, error) { +func GetStorageType(engine string, version string, annotationStorageType string) (StorageType, error) { + // An explicit storage type from the annotation overrides engine/version detection. + if annotationStorageType != "" { + storageType, err := strconv.ParseInt(annotationStorageType, 10, 32) + if err != nil { + return 0, err + } + return StorageType(int32(storageType)), nil + } switch engine { case "xcluster": if xcluster80VersionPattern.MatchString(version) { diff --git a/pkg/meta/core/group/group_manager.go b/pkg/meta/core/group/group_manager.go index 31fb239..c8bc767 100644 --- a/pkg/meta/core/group/group_manager.go +++ b/pkg/meta/core/group/group_manager.go @@ -120,6 +120,7 @@ type GroupManager interface { GetTrans(column string, table string) (map[string]bool, error) IsTransCommited(column string, table string) error SendHeartBeat(sname string) error + CheckFileStorageCompatibility() (bool, error) ListFileStorage() ([]polardbx.FileStorageInfo, error) CreateFileStorage(info polardbx.FileStorageInfo, config config.Config) error DropFileStorage(fileStorageName string) error @@ -158,16 +159,32 @@ func (m *groupManager) SendHeartBeat(sname string) error { retryLimits := 10 retry := 0 for retry < retryLimits { + retry++ tx, err := conn.BeginTx(m.ctx, &sql.TxOptions{}) - now := time.Now() - _, err = tx.ExecContext(m.ctx, fmt.Sprintf("replace into `__cdc__`.`__cdc_heartbeat__`(id, sname, gmt_modified) values(%d, %s, '%s')", now.Unix(), sname, now.Format("2006-01-02 15:04:05"))) if err != nil { - tx.Rollback() - } else { - tx.Commit() - break + continue + } + 
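+ // Run the heartbeat write as a TSO transaction: the policy statement and the replace execute on the same tx, and only a clean commit ends the retry loop below. 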
+ now := time.Now() + queries := []string{ + "set drds_transaction_policy='TSO'", + fmt.Sprintf("replace into `__cdc__`.`__cdc_heartbeat__`(id, sname, gmt_modified) values(%d, %s, '%s')", + now.Unix(), sname, now.Format("2006-01-02 15:04:05")), + } + + for _, query := range queries { + _, err = tx.ExecContext(m.ctx, query) + if err != nil { + _ = tx.Rollback() + break + } + } + if err == nil { + err = tx.Commit() + if err == nil { + return nil + } } - retry++ } return err } @@ -614,6 +631,23 @@ func (m *groupManager) Close() error { return nil } +func (m *groupManager) CheckFileStorageCompatibility() (bool, error) { + conn, err := m.getConn("") + if err != nil { + return false, err + } + defer dbutil.DeferClose(conn) + + // Check compatibility + showTablesStmt := `SHOW TABLES FROM metadb LIKE 'file_storage_info'` + rs, err := conn.QueryContext(m.ctx, showTablesStmt) + if err != nil { + return false, err + } + defer dbutil.DeferClose(rs) + return rs.Next(), nil +} + func (m *groupManager) ListFileStorage() ([]polardbx.FileStorageInfo, error) { var fileStorageInfoList []polardbx.FileStorageInfo conn, err := m.getConn("") @@ -775,13 +809,13 @@ func (m *groupManager) GetBinlogOffset() (string, error) { if !rs.Next() { return "", errors.New("no rows returned") } - var file_name, file_size, binlog_do_db, binlog_ignore_db, executed_gtid_set string + var fileName, fileSize, binlogDoDb, binlogIgnoreDb, executedGtidSet string - err = rs.Scan(&file_name, &file_size, &binlog_do_db, &binlog_ignore_db, &executed_gtid_set) + err = rs.Scan(&fileName, &fileSize, &binlogDoDb, &binlogIgnoreDb, &executedGtidSet) if err != nil { return "", nil } - ans := file_name + ":" + file_size + ans := fileName + ":" + fileSize return ans, nil } diff --git a/pkg/operator/v1/config/config.go b/pkg/operator/v1/config/config.go index 997cab6..d0bba9e 100644 --- a/pkg/operator/v1/config/config.go +++ b/pkg/operator/v1/config/config.go @@ -17,7 +17,9 @@ limitations under the License. 
package config import ( + "strconv" "strings" + "time" "github.com/distribution/distribution/reference" @@ -28,6 +30,7 @@ type config struct { ImagesConfig imagesConfig `json:"images,omitempty"` SchedulerConfig schedulerConfig `json:"scheduler,omitempty"` ClusterConfig clusterConfig `json:"cluster,omitempty"` + BackupConfig backupConfig `json:"backup,omitempty"` StoreConfig storeConfig `json:"store,omitempty"` SecurityConfig securityConfig `json:"security,omitempty"` OssConfig ossConfig `json:"oss,omitempty"` @@ -46,6 +49,10 @@ func (c *config) Cluster() ClusterConfig { return &c.ClusterConfig } +func (c *config) Backup() BackupConfig { + return &c.BackupConfig +} + func (c *config) Store() StoreConfig { return &c.StoreConfig } @@ -146,6 +153,11 @@ func (c *imagesConfig) DefaultImageForStore(engine, container string, version st return newImage(image, c.Repo, version) } +func (c *imagesConfig) DefaultJobImage() string { + image := c.Common["job"] + return newImage(image, c.Repo, "") +} + type schedulerConfig struct { EnableMaster bool `json:"enable_master,omitempty"` } @@ -158,6 +170,7 @@ type clusterConfig struct { OptionEnableExporters bool `json:"enable_exporters,omitempty"` OptionEnableAliyunAckResourceController bool `json:"enable_aliyun_ack_resource_controller,omitempty"` OptionEnableDebugModeForComputeNodes bool `json:"enable_debug_mode_for_compute_nodes,omitempty"` + OptionEnableRunModeCheck bool `json:"enable_run_mode_check,omitempty"` OptionEnablePrivilegedContainer bool `json:"enable_privileged_container,omitempty"` OptionForceCGroup bool `json:"force_cgroup,omitempty"` } @@ -174,6 +187,10 @@ func (c *clusterConfig) EnableDebugModeForComputeNodes() bool { return c.OptionEnableDebugModeForComputeNodes } +func (c *clusterConfig) EnableRunModeCheck() bool { + return c.OptionEnableRunModeCheck +} + func (c *clusterConfig) ContainerPrivileged() bool { return c.OptionEnablePrivilegedContainer } @@ -186,6 +203,16 @@ type storeConfig struct { EnablePrivilegedContainer bool `json:"enable_privileged_container,omitempty"` HostPaths map[string]string `json:"host_paths,omitempty"` HpfsEndpoint string `json:"hpfs_endpoint,omitempty"` + FsEndpoint string `json:"fs_endpoint,omitempty"` + MaxAutoRebuildingCount string `json:"max_auto_rebuilding_count,omitempty"` +} + +func (c *storeConfig) GetMaxAutoRebuildingCount() int { + val, err := strconv.Atoi(defaults.NonEmptyStrOrDefault(c.MaxAutoRebuildingCount, "1")) + if err != nil { + panic(err) + } + return val } func (c *storeConfig) ContainerPrivileged() bool { @@ -212,6 +239,10 @@ func (c *storeConfig) HostPathFileServiceEndpoint() string { return c.HpfsEndpoint } +func (c *storeConfig) FilestreamServiceEndpoint() string { + return c.FsEndpoint +} + type securityConfig struct { EncodeKey string `json:"encode_key,omitempty"` } @@ -255,3 +286,23 @@ func (c *nfsConfig) Path() string { func (c *nfsConfig) Server() string { return c.NfsServer } + +type backupConfig struct { + CheckBinlogExpiredInterval string `json:"check_binlog_expired_interval,omitempty"` + HeartbeatJobNamePrefix string `json:"heartbeat_job_name_prefix,omitempty"` + HeartbeatInterval string `json:"heartbeat_interval,omitempty"` +} + +func (b *backupConfig) CheckBinlogExpiredFileInterval() (time.Duration, error) { + interval := defaults.NonEmptyStrOrDefault(b.CheckBinlogExpiredInterval, "3600s") + return time.ParseDuration(interval) +} + +func (b *backupConfig) GetHeartbeatJobNamePrefix() string { + return defaults.NonEmptyStrOrDefault(b.HeartbeatJobNamePrefix, "heartbeat-") +} + 
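+// GetHeartbeatInterval returns the configured heartbeat period, defaulting to 1s when unset. 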
+func (b *backupConfig) GetHeartbeatInterval() (time.Duration, error) { + interval := defaults.NonEmptyStrOrDefault(b.HeartbeatInterval, "1s") + return time.ParseDuration(interval) +} diff --git a/pkg/operator/v1/config/interface.go b/pkg/operator/v1/config/interface.go index df24a14..b981b6d 100644 --- a/pkg/operator/v1/config/interface.go +++ b/pkg/operator/v1/config/interface.go @@ -16,9 +16,12 @@ limitations under the License. package config +import "time" + type Config interface { Images() ImagesConfig Cluster() ClusterConfig + Backup() BackupConfig Store() StoreConfig Scheduler() SchedulerConfig Security() SecurityConfig @@ -36,6 +39,7 @@ type SchedulerConfig interface { type ImagesConfig interface { DefaultImageRepo() string + DefaultJobImage() string DefaultImageForCluster(role string, container string, version string) string DefaultImageForStore(engine string, container string, version string) string } @@ -44,6 +48,7 @@ type ClusterConfig interface { EnableExporters() bool EnableAliyunAckResourceController() bool EnableDebugModeForComputeNodes() bool + EnableRunModeCheck() bool ContainerPrivileged() bool ForceCGroup() bool } @@ -57,6 +62,8 @@ type StoreConfig interface { HostPathFilestreamVolumeRoot() string HostPathFileServiceEndpoint() string + FilestreamServiceEndpoint() string + GetMaxAutoRebuildingCount() int } type OssConfig interface { @@ -70,3 +77,9 @@ type NfsConfig interface { Path() string Server() string } + +type BackupConfig interface { + CheckBinlogExpiredFileInterval() (time.Duration, error) + GetHeartbeatJobNamePrefix() string + GetHeartbeatInterval() (time.Duration, error) +} diff --git a/pkg/operator/v1/operator.go b/pkg/operator/v1/operator.go index 2182285..716ce55 100644 --- a/pkg/operator/v1/operator.go +++ b/pkg/operator/v1/operator.go @@ -18,6 +18,7 @@ package v1 import ( "context" + systemtaskv1controllers "github.com/alibaba/polardbx-operator/pkg/operator/v1/systemtask/controllers" "os" promv1 "github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1" @@ -162,6 +163,36 @@ func setupPolarDBXControllers(opts controllerOptions) error { return err } + systemTaskReconciler := systemtaskv1controllers.SystemTaskReconciler{ + BaseRc: opts.BaseReconcileContext, + LoaderFactory: opts.LoaderFactory, + Logger: ctrl.Log.WithName("controller").WithName("polardbxsystemtask"), + MaxConcurrency: opts.opts.MaxConcurrentReconciles, + } + if err := systemTaskReconciler.SetupWithManager(opts.Manager); err != nil { + return err + } + + polardbxBackupBinlogReconciler := polardbxv1controllers.PolarDBXBackupBinlogReconciler{ + BaseRc: opts.BaseReconcileContext, + LoaderFactory: opts.LoaderFactory, + Logger: ctrl.Log.WithName("controller").WithName("polardbxbackupbinlog"), + MaxConcurrency: opts.opts.MaxConcurrentReconciles, + } + if err := polardbxBackupBinlogReconciler.SetupWithManager(opts.Manager); err != nil { + return err + } + + backupScheduleReconciler := polardbxv1controllers.PolarDBXBackupScheduleReconciler{ + BaseRc: opts.BaseReconcileContext, + LoaderFactory: opts.LoaderFactory, + Logger: ctrl.Log.WithName("controller").WithName("polardbxbackupschedule"), + MaxConcurrency: opts.opts.MaxConcurrentReconciles, + } + if err := backupScheduleReconciler.SetupWithManager(opts.Manager); err != nil { + return err + } + return nil } @@ -211,12 +242,12 @@ func setupXStoreBackupControllers(opts controllerOptions) error { // to handle signals correctly. The second parameter opts defines the configurable options of controllers. 
// // Currently, these controllers are included: -// 1. Controller for PolarDBXCluster (v1) -// 2. Controller for XStore (v1) -// 3. Controllers for PolarDBXBackup, PolarDBXBinlogBackup (v1) -// 4. Controllers for XStoreBackup, XStoreBinlogBackup (v1) -// 5. Controllers for PolarDBXBackupSchedule, PolarDBXBinlogBackupSchedule (v1) -// 6. Controllers for PolarDBXParameter (v1) +// 1. Controller for PolarDBXCluster (v1) +// 2. Controller for XStore (v1) +// 3. Controllers for PolarDBXBackup, PolarDBXBinlogBackup (v1) +// 4. Controllers for XStoreBackup, XStoreBinlogBackup (v1) +// 5. Controllers for PolarDBXBackupSchedule, PolarDBXBinlogBackupSchedule (v1) +// 6. Controllers for PolarDBXParameter (v1) func Start(ctx context.Context, opts Options) { // Start instruction loader. hint.StartLoader(ctx) @@ -317,7 +348,7 @@ func Start(ctx context.Context, opts Options) { } } else { // Defaults to setup on manager. - err = webhook.SetupWebhooks(ctx, mgr, opts.ConfigPath) + err = webhook.SetupWebhooks(ctx, mgr, opts.ConfigPath, configLoaderFactory()) if err != nil { setupLog.Error(err, "Unable to setup webhooks...") os.Exit(1) diff --git a/pkg/operator/v1/polardbx/controllers/polardbxbackup_controller.go b/pkg/operator/v1/polardbx/controllers/polardbxbackup_controller.go index ab6e946..59a0a00 100644 --- a/pkg/operator/v1/polardbx/controllers/polardbxbackup_controller.go +++ b/pkg/operator/v1/polardbx/controllers/polardbxbackup_controller.go @@ -22,6 +22,7 @@ import ( "github.com/alibaba/polardbx-operator/pkg/k8s/control" "github.com/alibaba/polardbx-operator/pkg/operator/hint" "github.com/alibaba/polardbx-operator/pkg/operator/v1/config" + "github.com/alibaba/polardbx-operator/pkg/operator/v1/polardbx/meta" polardbxreconcile "github.com/alibaba/polardbx-operator/pkg/operator/v1/polardbx/reconcile" commonsteps "github.com/alibaba/polardbx-operator/pkg/operator/v1/polardbx/steps/backup/common" "github.com/go-logr/logr" @@ -45,7 +46,7 @@ type PolarDBXBackupReconciler struct { } func (r *PolarDBXBackupReconciler) Reconcile(ctx context.Context, request reconcile.Request) (reconcile.Result, error) { - log := r.Logger.WithValues("namespace", request.Namespace, "polardbxcluster", request.Name) + log := r.Logger.WithValues("namespace", request.Namespace, "polardbxbackup", request.Name) if hint.IsNamespacePaused(request.Namespace) { log.Info("Reconciling is paused, skip") @@ -74,6 +75,12 @@ func (r *PolarDBXBackupReconciler) Reconcile(ctx context.Context, request reconc Name: polardbxBackup.Spec.Cluster.Name, }) + // Check whether the backup is a dummy one (created during restore). + if polardbxBackup.Annotations[meta.AnnotationDummyBackup] == "true" { + log.Info("Dummy polardbx backup, skip") + return reconcile.Result{}, nil + } + return r.reconcile(rc, polardbxBackup, log) } @@ -114,6 +121,12 @@ func (r *PolarDBXBackupReconciler) newReconcileTask(rc *polardbxreconcile.Contex case polardbxv1.BinlogBackuping: commonsteps.WaitAllBinlogJobFinished(task) commonsteps.SavePXCSecrets(task) + commonsteps.TransferPhaseTo(polardbxv1.MetadataBackuping, false)(task) + case polardbxv1.MetadataBackuping: + // Reschedule after a delay to mitigate cache effects and avoid duplicate uploads + defer control.ScheduleAfter(10*time.Second)(task, true) + + commonsteps.UploadClusterMetadata(task) commonsteps.TransferPhaseTo(polardbxv1.BackupFinished, false)(task) case polardbxv1.BackupFinished: commonsteps.UnLockXStoreBinlogPurge(task) @@ -121,6 +134,7 @@ func (r *PolarDBXBackupReconciler) newReconcileTask(rc *polardbxreconcile.Contex commonsteps.RemoveBackupOverRetention(task) 
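+ // The retention step above prunes backup sets that have outlived their configured retention time. 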
log.Info("Finished phase.") case polardbxv1.BackupFailed: + commonsteps.UnLockXStoreBinlogPurge(task) commonsteps.DeleteBackupJobsOnFailure(task) log.Info("Failed phase.") default: @@ -142,8 +156,8 @@ func (r *PolarDBXBackupReconciler) SetupWithManager(mgr ctrl.Manager) error { MaxConcurrentReconciles: r.MaxConcurrency, RateLimiter: workqueue.NewMaxOfRateLimiter( workqueue.NewItemExponentialFailureRateLimiter(5*time.Millisecond, 300*time.Second), - // 10 qps, 100 bucket size. This is only for retry speed. It's only the overall factor (not per item). - &workqueue.BucketRateLimiter{Limiter: rate.NewLimiter(rate.Limit(10), 100)}, + // 60 qps, 10 bucket size. This is only for retry speed. It's only the overall factor (not per item). + &workqueue.BucketRateLimiter{Limiter: rate.NewLimiter(rate.Limit(60), 10)}, ), }). For(&polardbxv1.PolarDBXBackup{}). diff --git a/pkg/operator/v1/polardbx/controllers/polardbxbackupbinlog_controller.go b/pkg/operator/v1/polardbx/controllers/polardbxbackupbinlog_controller.go new file mode 100644 index 0000000..eed79c8 --- /dev/null +++ b/pkg/operator/v1/polardbx/controllers/polardbxbackupbinlog_controller.go @@ -0,0 +1,117 @@ +package controllers + +import ( + "context" + "errors" + polardbxv1 "github.com/alibaba/polardbx-operator/api/v1" + "github.com/alibaba/polardbx-operator/pkg/k8s/control" + "github.com/alibaba/polardbx-operator/pkg/operator/hint" + "github.com/alibaba/polardbx-operator/pkg/operator/v1/config" + polardbxreconcile "github.com/alibaba/polardbx-operator/pkg/operator/v1/polardbx/reconcile" + backupbinlog "github.com/alibaba/polardbx-operator/pkg/operator/v1/polardbx/steps/backupbinlog" + polarxJson "github.com/alibaba/polardbx-operator/pkg/util/json" + "github.com/go-logr/logr" + "golang.org/x/time/rate" + "k8s.io/apimachinery/pkg/types" + "k8s.io/client-go/util/workqueue" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/controller" + "sigs.k8s.io/controller-runtime/pkg/reconcile" + "time" +) + +type PolarDBXBackupBinlogReconciler struct { + BaseRc *control.BaseReconcileContext + Logger logr.Logger + config.LoaderFactory + + MaxConcurrency int +} + +func (r *PolarDBXBackupBinlogReconciler) Reconcile(ctx context.Context, request reconcile.Request) (reconcile.Result, error) { + log := r.Logger.WithValues("namespace", request.Namespace, "polardbxcluster", request.Name) + defer func() { + err := recover() + if err != nil { + log.Error(errors.New(polarxJson.Convert2JsonString(err)), "") + } + }() + if hint.IsNamespacePaused(request.Namespace) { + log.Info("Reconciling is paused, skip") + return reconcile.Result{}, nil + } + + rc := polardbxreconcile.NewContext( + control.NewBaseReconcileContextFrom(r.BaseRc, ctx, request), + r.LoaderFactory(), + ) + rc.SetBackupBinlogKey(request.NamespacedName) + defer rc.Close() + + backupBinlog := rc.MustGetPolarDBXBackupBinlog() + + rc.SetPolarDBXKey(types.NamespacedName{ + Namespace: request.Namespace, + Name: backupBinlog.Spec.PxcName, + }) + return r.reconcile(rc, backupBinlog, log) +} + +func (r *PolarDBXBackupBinlogReconciler) reconcile(rc *polardbxreconcile.Context, backupBinlog *polardbxv1.PolarDBXBackupBinlog, log logr.Logger) (reconcile.Result, error) { + log = log.WithValues("phase", backupBinlog.Status.Phase) + + task := r.newReconcileTask(rc, backupBinlog, log) + return control.NewExecutor(log).Execute(rc, task) +} + +func (r *PolarDBXBackupBinlogReconciler) newReconcileTask(rc *polardbxreconcile.Context, backup *polardbxv1.PolarDBXBackupBinlog, log logr.Logger) 
*control.Task { + log = log.WithValues("phase", backup.Status.Phase) + task := control.NewTask() + defer backupbinlog.PersistentBackupBinlog(task, true) + + backupbinlog.WhenDeleting( + backupbinlog.TransferPhaseTo(polardbxv1.BackupBinlogPhaseDeleting, true), + )(task) + + switch backup.Status.Phase { + case polardbxv1.BackupBinlogPhaseNew: + backupbinlog.InitFromPxc(task) + backupbinlog.AddFinalizer(task) + backupbinlog.TransferPhaseTo(polardbxv1.BackupBinlogPhaseRunning)(task) + case polardbxv1.BackupBinlogPhaseRunning: + backupbinlog.WhenPxcExist( + backupbinlog.SyncInfo, + backupbinlog.ReconcileHeartbeatJob, + backupbinlog.UpdateObservedGeneration, + backupbinlog.RunningRoute, + )(task) + case polardbxv1.BackupBinlogPhaseCheckExpiredFile: + backupbinlog.TryDeleteExpiredFiles(task) + backupbinlog.TransferPhaseTo(polardbxv1.BackupBinlogPhaseRunning)(task) + case polardbxv1.BackupBinlogPhaseDeleting: + backupbinlog.WhenPxcExist( + backupbinlog.CleanFromPxc, + backupbinlog.TryDeleteHeartbeatJob, + backupbinlog.CloseBackupBinlog, + )(task) + backupbinlog.TryDeleteExpiredFiles(task) + backupbinlog.ConfirmRemoteEmptyFiles(task) + backupbinlog.RemoveFinalizer(task) + } + + return task +} + +func (r *PolarDBXBackupBinlogReconciler) SetupWithManager(mgr ctrl.Manager) error { + return ctrl.NewControllerManagedBy(mgr). + WithOptions(controller.Options{ + MaxConcurrentReconciles: r.MaxConcurrency, + RateLimiter: workqueue.NewMaxOfRateLimiter( + workqueue.NewItemExponentialFailureRateLimiter(5*time.Millisecond, 300*time.Second), + // 60 qps, 10 bucket size. This is only for retry speed. It's only the overall factor (not per item). + &workqueue.BucketRateLimiter{Limiter: rate.NewLimiter(rate.Limit(60), 10)}, + ), + }). + For(&polardbxv1.PolarDBXBackupBinlog{}). + Complete(r) +} diff --git a/pkg/operator/v1/polardbx/controllers/polardbxbackupschedule_controller.go b/pkg/operator/v1/polardbx/controllers/polardbxbackupschedule_controller.go new file mode 100644 index 0000000..d107dd2 --- /dev/null +++ b/pkg/operator/v1/polardbx/controllers/polardbxbackupschedule_controller.go @@ -0,0 +1,109 @@ +/* +Copyright 2021 Alibaba Group Holding Limited. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package controllers + +import ( + "context" + polardbxv1 "github.com/alibaba/polardbx-operator/api/v1" + "github.com/alibaba/polardbx-operator/pkg/k8s/control" + "github.com/alibaba/polardbx-operator/pkg/operator/hint" + "github.com/alibaba/polardbx-operator/pkg/operator/v1/config" + polardbxreconcile "github.com/alibaba/polardbx-operator/pkg/operator/v1/polardbx/reconcile" + "github.com/alibaba/polardbx-operator/pkg/operator/v1/polardbx/steps/backup/schedule" + "github.com/go-logr/logr" + "golang.org/x/time/rate" + apierrors "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/client-go/util/workqueue" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/controller" + "sigs.k8s.io/controller-runtime/pkg/reconcile" + "time" +) + +type PolarDBXBackupScheduleReconciler struct { + BaseRc *control.BaseReconcileContext + Logger logr.Logger + config.LoaderFactory + MaxConcurrency int +} + +func (r *PolarDBXBackupScheduleReconciler) Reconcile(ctx context.Context, request reconcile.Request) (reconcile.Result, error) { + log := r.Logger.WithValues("namespace", request.Namespace, "polardbxbackupschedule", request.Name) + + if hint.IsNamespacePaused(request.Namespace) { + log.Info("Reconcile is paused, skip") + return reconcile.Result{}, nil + } + rc := polardbxreconcile.NewContext( + control.NewBaseReconcileContextFrom(r.BaseRc, ctx, request), + r.LoaderFactory(), + ) + rc.SetPolarDBXBackupScheduleKey(request.NamespacedName) + defer rc.Close() + + backupSchedule, err := rc.GetPolarDBXBackupSchedule() + if err != nil { + if apierrors.IsNotFound(err) { + log.Info("Object not found, might be deleted, just ignore.") + return reconcile.Result{}, nil + } + log.Error(err, "Unable to get polardbx backup schedule object.") + return reconcile.Result{}, err + } + + if backupSchedule.Spec.Suspend { + log.Info("Backup schedule suspended, just skip.") + return reconcile.Result{}, nil + } + + return r.reconcile(rc, backupSchedule, log) +} + +func (r *PolarDBXBackupScheduleReconciler) reconcile(rc *polardbxreconcile.Context, backupSchedule *polardbxv1.PolarDBXBackupSchedule, log logr.Logger) (reconcile.Result, error) { + task := r.newReconcileTask(rc, backupSchedule, log) + return control.NewExecutor(log).Execute(rc, task) +} + +func (r *PolarDBXBackupScheduleReconciler) newReconcileTask(rc *polardbxreconcile.Context, backupSchedule *polardbxv1.PolarDBXBackupSchedule, log logr.Logger) *control.Task { + task := control.NewTask() + + defer schedule.PersistPolarDBXBackupScheduleStatus(task, true) + + schedule.CleanOutdatedBackupSet(task) + + schedule.CheckNextScheduleTime(task) + + schedule.CheckUnderwayBackup(task) + + schedule.DispatchBackupTask(task) + + return task +} + +func (r *PolarDBXBackupScheduleReconciler) SetupWithManager(mgr ctrl.Manager) error { + return ctrl.NewControllerManagedBy(mgr). + WithOptions(controller.Options{ + MaxConcurrentReconciles: r.MaxConcurrency, + RateLimiter: workqueue.NewMaxOfRateLimiter( + workqueue.NewItemExponentialFailureRateLimiter(5*time.Millisecond, 300*time.Second), + // 60 qps, 10 bucket size. This is only for retry speed. It's only the overall factor (not per item). + &workqueue.BucketRateLimiter{Limiter: rate.NewLimiter(rate.Limit(60), 10)}, + ), + }). + For(&polardbxv1.PolarDBXBackupSchedule{}). 
+ Complete(r) +} diff --git a/pkg/operator/v1/polardbx/controllers/polardbxcluster_controller.go b/pkg/operator/v1/polardbx/controllers/polardbxcluster_controller.go index d949de4..12a09c1 100644 --- a/pkg/operator/v1/polardbx/controllers/polardbxcluster_controller.go +++ b/pkg/operator/v1/polardbx/controllers/polardbxcluster_controller.go @@ -18,6 +18,7 @@ package controllers import ( "context" + "github.com/alibaba/polardbx-operator/pkg/operator/v1/polardbx/steps/instance/pitr" "time" "github.com/alibaba/polardbx-operator/pkg/operator/hint" @@ -118,6 +119,9 @@ func (r *PolarDBXReconciler) newReconcileTask(rc *polardbxreconcile.Context, pol commonsteps.CheckDNs(task) } + //try set runmode + instancesteps.TrySetRunMode(task) + // Let's construct the complex state machine. switch polardbx.Status.Phase { case polardbxv1polardbx.PhaseNew: @@ -126,11 +130,13 @@ func (r *PolarDBXReconciler) newReconcileTask(rc *polardbxreconcile.Context, pol commonsteps.InitializePolardbxLabel(task) commonsteps.GenerateRandInStatus(task) commonsteps.InitializeServiceName(task) - control.When(polardbx.Spec.Restore != nil, - commonsteps.SyncSpecFromBackupSet)(task) commonsteps.TransferPhaseTo(polardbxv1polardbx.PhasePending, true)(task) case polardbxv1polardbx.PhasePending: + control.When(polardbx.Spec.Restore != nil, + commonsteps.CreateDummyBackupObject, + pitr.LoadLatestBackupSetByTime, + commonsteps.SyncSpecFromBackupSet)(task) checksteps.CheckStorageEngines(task) commonsteps.UpdateSnapshotAndObservedGeneration(task) instancesteps.CreateSecretsIfNotFound(task) @@ -151,6 +157,10 @@ func (r *PolarDBXReconciler) newReconcileTask(rc *polardbxreconcile.Context, pol // Create readonly polardbx in InitReadonly list instancesteps.CreateOrReconcileReadonlyPolardbx(task) + control.When(pitr.IsPitrRestore(polardbx), + pitr.PreparePitrBinlogs, + pitr.WaitPreparePitrBinlogs)(task) + // Create GMS and DNs. instancesteps.CreateOrReconcileGMS(task) instancesteps.CreateOrReconcileDNs(task) @@ -189,12 +199,15 @@ func (r *PolarDBXReconciler) newReconcileTask(rc *polardbxreconcile.Context, pol instancesteps.CreateOrReconcileCDCs, instancesteps.WaitUntilCNDeploymentsRolledOut, instancesteps.WaitUntilCDCDeploymentsRolledOut, + instancesteps.CreateFileStorage, )(task) // Go to clean works. control.Block( control.When(!debug.IsDebugEnabled(), commonsteps.UpdateDisplayDetailedVersion), + control.When(polardbx.Status.Phase == polardbxv1polardbx.PhaseRestoring, commonsteps.CleanDummyBackupObject), commonsteps.UpdateDisplayStorageSize, + control.When(pitr.IsPitrRestore(polardbx), pitr.CleanPreparePitrBinlogJob), )(task) commonsteps.TransferPhaseTo(polardbxv1polardbx.PhaseRunning, true)(task) @@ -235,7 +248,6 @@ func (r *PolarDBXReconciler) newReconcileTask(rc *polardbxreconcile.Context, pol // Always reconcile the stateless components (mainly for rebuilt). instancesteps.CreateOrReconcileCNs(task) instancesteps.CreateOrReconcileCDCs(task) - instancesteps.CreateFileStorage(task) //sync cn label to pod without rebuild pod instancesteps.TrySyncCnLabelToPodsDirectly(task) @@ -277,6 +289,7 @@ func (r *PolarDBXReconciler) newReconcileTask(rc *polardbxreconcile.Context, pol instancesteps.WaitUntilDNsReady, instancesteps.WaitUntilCNDeploymentsRolledOut, instancesteps.WaitUntilCDCDeploymentsRolledOut, + instancesteps.CreateFileStorage, )(task) // Prepare to rebalance data after DN stores are reconciled if necessary. 
@@ -390,8 +403,8 @@ func (r *PolarDBXReconciler) SetupWithManager(mgr ctrl.Manager) error { MaxConcurrentReconciles: r.MaxConcurrency, RateLimiter: workqueue.NewMaxOfRateLimiter( workqueue.NewItemExponentialFailureRateLimiter(5*time.Millisecond, 300*time.Second), - // 10 qps, 100 bucket size. This is only for retry speed. It's only the overall factor (not per item). - &workqueue.BucketRateLimiter{Limiter: rate.NewLimiter(rate.Limit(10), 100)}, + // 60 qps, 10 bucket size. This is only for retry speed. It's only the overall factor (not per item). + &workqueue.BucketRateLimiter{Limiter: rate.NewLimiter(rate.Limit(60), 10)}, ), }). For(&polardbxv1.PolarDBXCluster{}). diff --git a/pkg/operator/v1/polardbx/controllers/polardbxclusterknobs_controller.go b/pkg/operator/v1/polardbx/controllers/polardbxclusterknobs_controller.go index fc51863..e9a81ed 100644 --- a/pkg/operator/v1/polardbx/controllers/polardbxclusterknobs_controller.go +++ b/pkg/operator/v1/polardbx/controllers/polardbxclusterknobs_controller.go @@ -82,8 +82,8 @@ func (r *PolarDBXClusterKnobsReconciler) SetupWithManager(mgr ctrl.Manager) erro MaxConcurrentReconciles: r.MaxConcurrency, RateLimiter: workqueue.NewMaxOfRateLimiter( workqueue.NewItemExponentialFailureRateLimiter(5*time.Millisecond, 300*time.Second), - // 10 qps, 100 bucket size. This is only for retry speed. It's only the overall factor (not per item). - &workqueue.BucketRateLimiter{Limiter: rate.NewLimiter(rate.Limit(10), 100)}, + // 60 qps, 10 bucket size. This is only for retry speed. It's only the overall factor (not per item). + &workqueue.BucketRateLimiter{Limiter: rate.NewLimiter(rate.Limit(60), 10)}, ), }). For(&polardbxv1.PolarDBXClusterKnobs{}). diff --git a/pkg/operator/v1/polardbx/controllers/polardbxmonitor_controller.go b/pkg/operator/v1/polardbx/controllers/polardbxmonitor_controller.go index 19ce14e..4cbdea5 100644 --- a/pkg/operator/v1/polardbx/controllers/polardbxmonitor_controller.go +++ b/pkg/operator/v1/polardbx/controllers/polardbxmonitor_controller.go @@ -121,8 +121,8 @@ func (r *PolarDBXMonitorReconciler) SetupWithManager(mgr ctrl.Manager) error { MaxConcurrentReconciles: r.MaxConcurrency, RateLimiter: workqueue.NewMaxOfRateLimiter( workqueue.NewItemExponentialFailureRateLimiter(5*time.Millisecond, 300*time.Second), - // 10 qps, 100 bucket size. This is only for retry speed. It's only the overall factor (not per item). - &workqueue.BucketRateLimiter{Limiter: rate.NewLimiter(rate.Limit(10), 100)}, + // 60 qps, 10 bucket size. This is only for retry speed. It's only the overall factor (not per item). + &workqueue.BucketRateLimiter{Limiter: rate.NewLimiter(rate.Limit(60), 10)}, ), }). For(&polardbxv1.PolarDBXMonitor{}). diff --git a/pkg/operator/v1/polardbx/controllers/polardbxparameter_controller.go b/pkg/operator/v1/polardbx/controllers/polardbxparameter_controller.go index 41ababe..8173909 100644 --- a/pkg/operator/v1/polardbx/controllers/polardbxparameter_controller.go +++ b/pkg/operator/v1/polardbx/controllers/polardbxparameter_controller.go @@ -159,8 +159,8 @@ func (r *PolarDBXParameterReconciler) SetupWithManager(mgr ctrl.Manager) error { MaxConcurrentReconciles: r.MaxConcurrency, RateLimiter: workqueue.NewMaxOfRateLimiter( workqueue.NewItemExponentialFailureRateLimiter(5*time.Millisecond, 300*time.Second), - // 10 qps, 100 bucket size. This is only for retry speed. It's only the overall factor (not per item). - &workqueue.BucketRateLimiter{Limiter: rate.NewLimiter(rate.Limit(10), 100)}, + // 60 qps, 10 bucket size. 
This is only for retry speed. It's only the overall factor (not per item). + &workqueue.BucketRateLimiter{Limiter: rate.NewLimiter(rate.Limit(60), 10)}, ), }). For(&polardbxv1.PolarDBXParameter{}). diff --git a/pkg/operator/v1/polardbx/factory/backup.go b/pkg/operator/v1/polardbx/factory/backup.go new file mode 100644 index 0000000..1c4bffa --- /dev/null +++ b/pkg/operator/v1/polardbx/factory/backup.go @@ -0,0 +1,249 @@ +package factory + +import ( + "errors" + polardbxv1 "github.com/alibaba/polardbx-operator/api/v1" + polardbxv1polardbx "github.com/alibaba/polardbx-operator/api/v1/polardbx" + "github.com/alibaba/polardbx-operator/pkg/operator/v1/polardbx/convention" + "github.com/alibaba/polardbx-operator/pkg/operator/v1/polardbx/meta" + "github.com/alibaba/polardbx-operator/pkg/util/name" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" +) + +type PolarDBXClusterMetadata struct { + // Name records name of original pxc + Name string `json:"name,omitempty"` + + // UID records uid of original pxc + UID types.UID `json:"uid,omitempty"` + + // Spec records the topology from original pxc + Spec *polardbxv1.PolarDBXClusterSpec `json:"spec,omitempty"` + + // Secrets records account and password for pxc + Secrets []polardbxv1polardbx.PrivilegeItem `json:"secrets,omitempty"` +} + +type XstoreMetadata struct { + // Name records name of original xstore + Name string `json:"name,omitempty"` + + // UID records uid of original xstore + UID types.UID `json:"uid,omitempty"` + + // BackupName records name of xstore backup of original xstore + BackupName string `json:"backupName,omitempty"` + + // LastCommitIndex records the last binlog index during full backup + LastCommitIndex int64 `json:"lastCommitIndex,omitempty"` + + // Secrets records account and password for xstore + Secrets []polardbxv1polardbx.PrivilegeItem `json:"secrets,omitempty"` +} + +// MetadataBackup defines metadata to be uploaded during backup +type MetadataBackup struct { + + // PolarDBXClusterMetadata records metadata of pxc which backed up + PolarDBXClusterMetadata PolarDBXClusterMetadata `json:"polarDBXClusterMetadata,omitempty"` + + // XstoreMetadataList records metadata of each xstore which backed up + XstoreMetadataList []XstoreMetadata `json:"xstoreMetadataList,omitempty"` + + // BackupSetName records name of pxb + BackupSetName string `json:"backupSetName,omitempty"` + + // BackupRootPath records the root path for pxb + BackupRootPath string `json:"backupRootPath,omitempty"` + + // StartTime records start time of backup + StartTime *metav1.Time `json:"startTime,omitempty"` + + // EndTime records end time of backup + EndTime *metav1.Time `json:"endTime,omitempty"` + + // LatestRecoverableTimestamp records the latest timestamp that can recover from current backup set + LatestRecoverableTimestamp *metav1.Time `json:"latestRecoverableTimestamp,omitempty"` +} + +func (m *MetadataBackup) GetXstoreNameList() []string { + xstoreNameList := make([]string, len(m.XstoreMetadataList)) + for i, xstoreMetadata := range m.XstoreMetadataList { + xstoreNameList[i] = xstoreMetadata.Name + } + return xstoreNameList +} + +func (m *MetadataBackup) GetXstoreMetadataByName(xstoreName string) (*XstoreMetadata, error) { + for i := range m.XstoreMetadataList { + if m.XstoreMetadataList[i].Name == xstoreName { + return &m.XstoreMetadataList[i], nil + } + } + return nil, errors.New("no such metadata related to xstore " + xstoreName) +} + +func (f *objectFactory) NewPolarDBXBackupBySchedule() 
(*polardbxv1.PolarDBXBackup, error) { + backupSchedule := f.rc.MustGetPolarDBXBackupSchedule() + backupName := name.NewSplicedName( + name.WithTokens(backupSchedule.Spec.BackupSpec.Cluster.Name, "backup", + backupSchedule.Status.NextBackupTime.Format("200601021504")), + name.WithPrefix("scheduled-backup"), + ) + backup := &polardbxv1.PolarDBXBackup{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: backupSchedule.Namespace, + Name: backupName, + Labels: map[string]string{ + meta.LabelBackupSchedule: backupSchedule.Name, + }, + }, + Spec: *backupSchedule.Spec.BackupSpec.DeepCopy(), + } + return backup, nil +} + +func (f *objectFactory) NewXStoreBackup( + xstore *polardbxv1.XStore) (*polardbxv1.XStoreBackup, error) { + backup := f.rc.MustGetPolarDBXBackup() + xstoreBackup := &polardbxv1.XStoreBackup{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: backup.Namespace, + Name: name.NewSplicedName( + name.WithTokens(backup.Name, xstore.Name), + name.WithPrefix("xstore-backup"), + ), + Labels: map[string]string{ + meta.LabelName: backup.Spec.Cluster.Name, + meta.LabelTopBackup: backup.Name, + meta.LabelBackupXStore: xstore.Name, + meta.LabelBackupXStoreUID: string(xstore.UID), + }, + }, + Spec: polardbxv1.XStoreBackupSpec{ + XStore: polardbxv1.XStoreReference{ + Name: xstore.Name, + UID: xstore.UID, + }, + RetentionTime: backup.Spec.RetentionTime, + StorageProvider: backup.Spec.StorageProvider, + Engine: xstore.Spec.Engine, + PreferredBackupRole: backup.Spec.PreferredBackupRole, + }, + } + + return xstoreBackup, nil +} + +func (f *objectFactory) newDummyAnnotation() map[string]string { + return map[string]string{ + meta.AnnotationDummyBackup: "true", + } +} + +func (f *objectFactory) NewDummyPolarDBXBackup(metadata *MetadataBackup) (*polardbxv1.PolarDBXBackup, error) { + if metadata == nil { + return nil, errors.New("not enough information to create dummy polardbx backup") + } + polardbx := f.rc.MustGetPolarDBX() + polardbxBackup := &polardbxv1.PolarDBXBackup{ + ObjectMeta: metav1.ObjectMeta{ + Name: name.NewSplicedName( + name.WithTokens(metadata.BackupSetName, "dummy"), + name.WithPrefix("dummy-backup"), + ), + Namespace: polardbx.Namespace, + Annotations: f.newDummyAnnotation(), + }, + Spec: polardbxv1.PolarDBXBackupSpec{ + Cluster: polardbxv1.PolarDBXClusterReference{ + Name: metadata.PolarDBXClusterMetadata.Name, + UID: metadata.PolarDBXClusterMetadata.UID, + }, + StorageProvider: *polardbx.Spec.Restore.StorageProvider, + }, + Status: polardbxv1.PolarDBXBackupStatus{ + Phase: polardbxv1.BackupDummy, + BackupRootPath: polardbx.Spec.Restore.From.BackupSetPath, + ClusterSpecSnapshot: metadata.PolarDBXClusterMetadata.Spec, + XStores: metadata.GetXstoreNameList(), + Backups: make(map[string]string), + }, + } + return polardbxBackup, nil +} + +func (f *objectFactory) NewDummyXstoreBackup(xstoreName string, polardbxBackup *polardbxv1.PolarDBXBackup, + metadata *MetadataBackup) (*polardbxv1.XStoreBackup, error) { + if metadata == nil { + return nil, errors.New("not enough information to create dummy xstore backup") + } + xstoreMetadata, err := metadata.GetXstoreMetadataByName(xstoreName) + if err != nil { + return nil, err + } + xstoreBackup := &polardbxv1.XStoreBackup{ + ObjectMeta: metav1.ObjectMeta{ + Name: name.NewSplicedName( + name.WithTokens(polardbxBackup.Name, xstoreName), + name.WithPrefix("dummy-xstore-backup"), + ), + Namespace: polardbxBackup.Namespace, + Annotations: f.newDummyAnnotation(), + }, + Spec: polardbxv1.XStoreBackupSpec{ + XStore: polardbxv1.XStoreReference{ + Name: xstoreName, + 
UID: xstoreMetadata.UID, + }, + StorageProvider: polardbxBackup.Spec.StorageProvider, + }, + Status: polardbxv1.XStoreBackupStatus{ + Phase: polardbxv1.XStoreBackupDummy, + CommitIndex: xstoreMetadata.LastCommitIndex, + BackupRootPath: metadata.BackupRootPath, + }, + } + return xstoreBackup, nil +} + +func (f *objectFactory) NewDummySecretBackup(sourceSecretName string, metadata *MetadataBackup) (*corev1.Secret, error) { + polardbx := f.rc.MustGetPolarDBX() + + var dummySecretName string + var sourceSecrets *[]polardbxv1polardbx.PrivilegeItem + accounts := make(map[string]string) + + if sourceSecretName == metadata.PolarDBXClusterMetadata.Name { + // secret of cn + dummySecretName = name.NewSplicedName( + name.WithTokens(metadata.BackupSetName, "dummy"), + name.WithPrefix("dummy-secret"), + ) + sourceSecrets = &metadata.PolarDBXClusterMetadata.Secrets + } else { + // secret of dn + dummySecretName = name.NewSplicedName( + name.WithTokens(metadata.BackupSetName, "dummy", sourceSecretName), + name.WithPrefix("dummy-xstore-secret"), + ) + xstoreMetadata, err := metadata.GetXstoreMetadataByName(sourceSecretName) + if err != nil { + return nil, err + } + sourceSecrets = &xstoreMetadata.Secrets + } + for _, item := range *sourceSecrets { + accounts[item.Username] = item.Password + } + return &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: dummySecretName, + Namespace: polardbx.Namespace, + Labels: convention.ConstLabels(polardbx), + }, + StringData: accounts, + }, nil +} diff --git a/pkg/operator/v1/polardbx/factory/deployment.go b/pkg/operator/v1/polardbx/factory/deployment.go index 592aedf..b34b753 100644 --- a/pkg/operator/v1/polardbx/factory/deployment.go +++ b/pkg/operator/v1/polardbx/factory/deployment.go @@ -292,11 +292,32 @@ if [[ -d /home/admin/drds-worker ]]; then fi ` + cnStartCmd = ` +if [ -f /home/admin/entrypoint.sh ]; then + sh /home/admin/entrypoint.sh +fi +if [ -f /home/admin/app.sh ]; then + sh /home/admin/app.sh +fi +while [ "debug" == $(cat /etc/podinfo/runmode) ] +do + echo "debug mode" + sleep 3600 +done +` cdcServerPostStartScript = ` if [[ -d /home/admin/drds-worker ]]; then # Remove the global schedule script echo '' > /home/admin/drds-worker/bin/globalSchedule.sh fi +` + cdcStartCmd = ` +sh /home/admin/app.sh +while [ "debug" == $(cat /etc/podinfo/runmode) ] +do + echo "debug mode" + sleep 3600 +done ` ) @@ -385,6 +406,8 @@ func (f *objectFactory) newDeployment4CN(group string, mr *matchingRule, mustSta {Protocol: corev1.ProtocolTCP, Name: "htap", ContainerPort: int32(ports.HtapPort)}, {Protocol: corev1.ProtocolTCP, Name: "log", ContainerPort: int32(ports.LogPort)}, }, + Command: []string{"/bin/bash", "-c"}, + Args: []string{cnStartCmd}, VolumeMounts: volumeFactory.NewVolumeMountsForCNEngine(), Lifecycle: &corev1.Lifecycle{ PostStart: &corev1.Handler{ @@ -627,6 +650,8 @@ func (f *objectFactory) newDeployment4CDC(group string, mr *matchingRule, mustSt template.Image, config.Images().DefaultImageForCluster(polardbxmeta.RoleCDC, convention.ContainerEngine, topology.Version), ), + Command: []string{"/bin/bash", "-c"}, + Args: []string{cdcStartCmd}, ImagePullPolicy: template.ImagePullPolicy, Env: envVars, Resources: *template.Resources.DeepCopy(), @@ -644,7 +669,43 @@ func (f *objectFactory) newDeployment4CDC(group string, mr *matchingRule, mustSt SecurityContext: k8shelper.NewSecurityContext(config.Cluster().ContainerPrivileged()), } probeConfigure.ConfigureForCDCEngine(&engineContainer, ports) - containers := []corev1.Container{engineContainer} + proberContainer 
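// Sketch of the read side of NewDummySecretBackup above. StringData is a
// write-only convenience that the API server merges into Data, so a consumer
// recovers the account map from Data instead:
func accountsFromSecret(secret *corev1.Secret) map[string]string {
	accounts := make(map[string]string, len(secret.Data))
	for user, passwd := range secret.Data {
		accounts[user] = string(passwd)
	}
	return accounts
}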
:= corev1.Container{ + Name: convention.ContainerProber, + Image: config.Images().DefaultImageForCluster(polardbxmeta.RoleCDC, convention.ContainerProber, topology.Version), + Env: []corev1.EnvVar{ + {Name: "GOMAXPROCS", Value: "1"}, + }, + Args: []string{ + "--listen-port", fmt.Sprintf("%d", ports.GetProbePort()), + }, + Ports: []corev1.ContainerPort{ + {Protocol: corev1.ProtocolTCP, Name: "probe", ContainerPort: int32(ports.GetProbePort())}, + }, + LivenessProbe: &corev1.Probe{ + Handler: corev1.Handler{ + HTTPGet: &corev1.HTTPGetAction{ + Path: "/liveness", + Port: intstr.FromString("probe"), + HTTPHeaders: []corev1.HTTPHeader{ + {Name: "Probe-Target", Value: probe.TypeSelf}, + }, + }, + }, + }, + VolumeMounts: volumeFactory.NewSystemVolumeMounts(), + } + if k8shelper.IsContainerQoSGuaranteed(&engineContainer) { + if featuregate.EnforceQoSGuaranteed.Enabled() { + proberContainer.Resources = corev1.ResourceRequirements{ + Limits: corev1.ResourceList{ + corev1.ResourceCPU: resource.MustParse("200m"), + corev1.ResourceMemory: resource.MustParse("100Mi"), + }, + } + } + } + + containers := []corev1.Container{engineContainer, proberContainer} dnsPolicy := corev1.DNSClusterFirst if template.HostNetwork { diff --git a/pkg/operator/v1/polardbx/factory/env_factory.go b/pkg/operator/v1/polardbx/factory/env_factory.go index 2d5d78a..abdf873 100644 --- a/pkg/operator/v1/polardbx/factory/env_factory.go +++ b/pkg/operator/v1/polardbx/factory/env_factory.go @@ -352,7 +352,7 @@ func (e *envFactory) newBasicEnvVarsForCDCEngine(gmsConn *StorageConnection) []c clusterId := e.polardbx.Name + "-" + clusterType // FIXME CDC currently doesn't support host network, so ports are hard coded. - return []corev1.EnvVar{ + envs := []corev1.EnvVar{ {Name: "switchCloud", Value: "aliyun"}, {Name: "cluster_id", Value: clusterId}, {Name: "cluster_type", Value: clusterType}, @@ -372,6 +372,16 @@ func (e *envFactory) newBasicEnvVarsForCDCEngine(gmsConn *StorageConnection) []c {Name: "polarx_password", ValueFrom: e.newValueFromSecretKey(e.polardbx.Name, "polardbx_root")}, {Name: "dnPasswordKey", Value: e.cipher.Key()}, } + configEnvs := e.polardbx.Spec.Config.CDC.Envs + if configEnvs != nil { + for k, v := range configEnvs { + envs = append(envs, corev1.EnvVar{ + Name: k, + Value: v.String(), + }) + } + } + return envs } func (e *envFactory) NewEnvVarsForCDCEngine(gmsConn StorageConnection) []corev1.EnvVar { diff --git a/pkg/operator/v1/polardbx/factory/object_factory.go b/pkg/operator/v1/polardbx/factory/object_factory.go index 2472d36..b07a137 100644 --- a/pkg/operator/v1/polardbx/factory/object_factory.go +++ b/pkg/operator/v1/polardbx/factory/object_factory.go @@ -19,7 +19,6 @@ package factory import ( "github.com/alibaba/polardbx-operator/api/v1/polardbx" promv1 "github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1" - appsv1 "k8s.io/api/apps/v1" corev1 "k8s.io/api/core/v1" @@ -41,13 +40,19 @@ type ObjectFactory interface { NewXStoreGMS() (*polardbxv1.XStore, error) NewXStoreDN(idx int) (*polardbxv1.XStore, error) NewSecret() (*corev1.Secret, error) - NewSecretFromPolarDBX(*corev1.Secret) (*corev1.Secret, error) + NewSecretForRestore() (*corev1.Secret, error) NewSecuritySecret() (*corev1.Secret, error) NewConfigMap(cmType convention.ConfigMapType) (*corev1.ConfigMap, error) NewServiceMonitors() (map[string]promv1.ServiceMonitor, error) NewReadonlyPolardbx(*polardbx.ReadonlyParam) (*polardbxv1.PolarDBXCluster, error) + + NewPolarDBXBackupBySchedule() (*polardbxv1.PolarDBXBackup, error) + 
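// Sketch of what the Spec.Config.CDC.Envs loop above produces; the map value
// type is assumed (it is declared outside this patch) to expose an
// intstr.IntOrString-style String() method, so both numbers and strings
// flatten to env-var literals:
func appendConfigEnvs(envs []corev1.EnvVar, config map[string]intstr.IntOrString) []corev1.EnvVar {
	for k, v := range config {
		// e.g. {"consumerMemory": 2048} => EnvVar{Name: "consumerMemory", Value: "2048"}
		envs = append(envs, corev1.EnvVar{Name: k, Value: v.String()})
	}
	return envs
}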
NewXStoreBackup(xstore *polardbxv1.XStore) (*polardbxv1.XStoreBackup, error) + NewDummyPolarDBXBackup(metadata *MetadataBackup) (*polardbxv1.PolarDBXBackup, error) + NewDummyXstoreBackup(xstoreName string, polardbxBackup *polardbxv1.PolarDBXBackup, metadata *MetadataBackup) (*polardbxv1.XStoreBackup, error) + NewDummySecretBackup(sourceSecretName string, metadata *MetadataBackup) (*corev1.Secret, error) } type Context struct { diff --git a/pkg/operator/v1/polardbx/factory/ports_factory.go b/pkg/operator/v1/polardbx/factory/ports_factory.go index bac2025..f919065 100644 --- a/pkg/operator/v1/polardbx/factory/ports_factory.go +++ b/pkg/operator/v1/polardbx/factory/ports_factory.go @@ -22,6 +22,10 @@ import ( "k8s.io/apimachinery/pkg/util/rand" ) +type ProberPort interface { + GetAccessPort() int + GetProbePort() int +} type CNPorts struct { AccessPort int MgrPort int @@ -33,9 +37,26 @@ type CNPorts struct { ProbePort int } +func (p *CNPorts) GetAccessPort() int { + return p.AccessPort +} + +func (p *CNPorts) GetProbePort() int { + return p.ProbePort +} + type CDCPorts struct { DaemonPort int MetricsPort int + ProbePort int +} + +func (p *CDCPorts) GetAccessPort() int { + return p.DaemonPort +} + +func (p *CDCPorts) GetProbePort() int { + return p.ProbePort } type PortsFactory interface { @@ -83,6 +104,7 @@ func (f *portsFactory) NewPortsForCDCEngine() CDCPorts { return CDCPorts{ DaemonPort: 3007, MetricsPort: 8081, + ProbePort: 9999, } } diff --git a/pkg/operator/v1/polardbx/factory/probe_configure.go b/pkg/operator/v1/polardbx/factory/probe_configure.go index 26a9e63..14d3e12 100644 --- a/pkg/operator/v1/polardbx/factory/probe_configure.go +++ b/pkg/operator/v1/polardbx/factory/probe_configure.go @@ -39,14 +39,14 @@ type probeConfigure struct { polardbx *polardbxv1.PolarDBXCluster } -func (p *probeConfigure) newProbeWithProber(endpoint string, ports CNPorts) corev1.Handler { +func (p *probeConfigure) newProbeWithProber(endpoint string, probeTarget string, ports ProberPort) corev1.Handler { return corev1.Handler{ HTTPGet: &corev1.HTTPGetAction{ Path: endpoint, - Port: intstr.FromInt(ports.ProbePort), + Port: intstr.FromInt(ports.GetProbePort()), HTTPHeaders: []corev1.HTTPHeader{ - {Name: "Probe-Target", Value: probe.TypePolarDBX}, - {Name: "Probe-Port", Value: strconv.Itoa(ports.AccessPort)}, + {Name: "Probe-Target", Value: probeTarget}, + {Name: "Probe-Port", Value: strconv.Itoa(ports.GetAccessPort())}, {Name: "Probe-Timeout", Value: "10s"}, }, }, @@ -59,17 +59,17 @@ func (p *probeConfigure) ConfigureForCNEngine(container *corev1.Container, ports TimeoutSeconds: 10, PeriodSeconds: 10, FailureThreshold: 6, - Handler: p.newProbeWithProber("/liveness", ports), + Handler: p.newProbeWithProber("/liveness", probe.TypePolarDBX, &ports), } container.LivenessProbe = &corev1.Probe{ TimeoutSeconds: 10, PeriodSeconds: 10, - Handler: p.newProbeWithProber("/liveness", ports), + Handler: p.newProbeWithProber("/liveness", probe.TypePolarDBX, &ports), } container.ReadinessProbe = &corev1.Probe{ TimeoutSeconds: 10, PeriodSeconds: 10, - Handler: p.newProbeWithProber("/readiness", ports), + Handler: p.newProbeWithProber("/readiness", probe.TypePolarDBX, &ports), } } @@ -97,23 +97,17 @@ func (p *probeConfigure) ConfigureForCNExporter(container *corev1.Container, por func (p *probeConfigure) ConfigureForCDCEngine(container *corev1.Container, ports CDCPorts) { container.StartupProbe = &corev1.Probe{ - TimeoutSeconds: 10, - PeriodSeconds: 10, - FailureThreshold: 30, - Handler: corev1.Handler{ - TCPSocket: 
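// Compile-time assertions (a standard Go idiom, safe to keep or drop) that
// both port structs defined above satisfy the new ProberPort interface, which
// is what lets the probe configuration below treat CN and CDC nodes
// uniformly:
var (
	_ ProberPort = (*CNPorts)(nil)
	_ ProberPort = (*CDCPorts)(nil)
)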
&corev1.TCPSocketAction{ - Port: intstr.FromInt(ports.DaemonPort), - }, - }, + InitialDelaySeconds: 10, + TimeoutSeconds: 10, + PeriodSeconds: 10, + FailureThreshold: 18, + Handler: p.newProbeWithProber("/liveness", probe.TypeCdc, &ports), } - container.LivenessProbe = &corev1.Probe{ - PeriodSeconds: 20, - Handler: corev1.Handler{ - TCPSocket: &corev1.TCPSocketAction{ - Port: intstr.FromInt(ports.DaemonPort), - }, - }, + TimeoutSeconds: 10, + PeriodSeconds: 10, + FailureThreshold: 5, + Handler: p.newProbeWithProber("/liveness", probe.TypeCdc, &ports), } } diff --git a/pkg/operator/v1/polardbx/factory/secret.go b/pkg/operator/v1/polardbx/factory/secret.go index e83b6d6..858ab4d 100644 --- a/pkg/operator/v1/polardbx/factory/secret.go +++ b/pkg/operator/v1/polardbx/factory/secret.go @@ -17,31 +17,36 @@ limitations under the License. package factory import ( - corev1 "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/util/rand" - "github.com/alibaba/polardbx-operator/pkg/operator/v1/polardbx/convention" "github.com/alibaba/polardbx-operator/pkg/operator/v1/polardbx/helper" "github.com/alibaba/polardbx-operator/pkg/util/defaults" "github.com/alibaba/polardbx-operator/pkg/util/ssl" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/util/rand" ) -func (f *objectFactory) NewSecretFromPolarDBX(secret *corev1.Secret) (*corev1.Secret, error) { +func (f *objectFactory) NewSecretForRestore() (*corev1.Secret, error) { polardbx := f.rc.MustGetPolarDBX() - data := make(map[string][]byte) - for user, passwd := range secret.Data { - data[user] = passwd + originalSecret, err := f.rc.GetPolarDBXSecretForRestore() + if err != nil || originalSecret == nil { + return nil, err } - return &corev1.Secret{ + secret := &corev1.Secret{ ObjectMeta: metav1.ObjectMeta{ Name: convention.NewSecretName(polardbx, convention.SecretTypeAccount), Namespace: polardbx.Namespace, Labels: convention.ConstLabels(polardbx), }, - Data: data, - }, nil + } + + data := make(map[string][]byte) + for user, passwd := range originalSecret.Data { + data[user] = passwd + } + secret.Data = data + return secret, nil } func (f *objectFactory) NewSecret() (*corev1.Secret, error) { diff --git a/pkg/operator/v1/polardbx/factory/storage.go b/pkg/operator/v1/polardbx/factory/storage.go index 5f0dbdb..87f7f48 100644 --- a/pkg/operator/v1/polardbx/factory/storage.go +++ b/pkg/operator/v1/polardbx/factory/storage.go @@ -478,6 +478,7 @@ func (f *objectFactory) newXStore( Value: &mycnfOverlay, }, }, + Envs: polardbx.Spec.Config.DN.Envs, }, Topology: polardbxv1xstore.Topology{ NodeSets: nodeSets, @@ -502,11 +503,16 @@ func (f *objectFactory) newXStore( if err != nil { return nil, err } + var pitrEndpoint string + if polardbx.Status.PitrStatus != nil { + pitrEndpoint = polardbx.Status.PitrStatus.PrepareJobEndpoint + } xstore.Spec.Restore = &polardbxv1.XStoreRestoreSpec{ BackupSet: backupSet, From: polardbxv1.XStoreRestoreFrom{ XStoreName: restoreName, }, + PitrEndpoint: pitrEndpoint, } } @@ -515,22 +521,23 @@ func (f *objectFactory) newXStore( return xstore, nil } -func (f *objectFactory) GetXStoreName(polardbx polardbxv1.PolarDBXCluster, name string) (string, error) { +func (f *objectFactory) GetOriginalXstoreNameForRestore(polardbx polardbxv1.PolarDBXCluster, name string) (string, error) { backup := &polardbxv1.PolarDBXBackup{} + var err error if polardbx.Spec.Restore.BackupSet == "" && len(polardbx.Spec.Restore.BackupSet) == 0 { - backup, _ = 
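// The prober binary itself is outside this patch; the following is a sketch,
// assuming its handler honours exactly the headers set by newProbeWithProber
// above, of what the receiving side could look like:
//
// import ("fmt"; "net"; "net/http"; "time")
func livenessHandler(w http.ResponseWriter, r *http.Request) {
	target := r.Header.Get("Probe-Target") // probe.TypePolarDBX, probe.TypeCdc, ...
	port := r.Header.Get("Probe-Port")     // engine access port to dial
	timeout, err := time.ParseDuration(r.Header.Get("Probe-Timeout"))
	if err != nil {
		timeout = 10 * time.Second
	}
	conn, err := net.DialTimeout("tcp", net.JoinHostPort("127.0.0.1", port), timeout)
	if err != nil {
		http.Error(w, fmt.Sprintf("probe %s failed: %v", target, err), http.StatusServiceUnavailable)
		return
	}
	_ = conn.Close()
	w.WriteHeader(http.StatusOK)
}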
f.rc.GetCompletedPXCBackup(map[string]string{polardbxmeta.LabelName: polardbx.Spec.Restore.From.PolarBDXName}) + backup, err = f.rc.GetCompletedPXCBackup(map[string]string{polardbxmeta.LabelName: polardbx.Spec.Restore.From.PolarBDXName}) } else { - backup, _ = f.rc.GetPXCBackupByName(polardbx.Spec.Restore.BackupSet) + backup, err = f.rc.GetPXCBackupByName(polardbx.Spec.Restore.BackupSet) } - if backup == nil { - return "", nil + if err != nil { + return "", err } for _, xstoreName := range backup.Status.XStores { - if xstoreName[len(xstoreName)-4:] == name[len(name)-4:] { + if xstoreName[len(xstoreName)-4:] == name[len(name)-4:] { // safe when quantity of dn less than 10000 return xstoreName, nil } } - return "", nil + return "", errors.New("failed to find matched xstore") } func (f *objectFactory) GetXStoreBackupName(backupName, xstoreName string) (string, error) { @@ -538,11 +545,13 @@ func (f *objectFactory) GetXStoreBackupName(backupName, xstoreName string) (stri if err != nil { return "", err } - xstoreBackupName, ok := backup.Status.Backups[xstoreName] - if ok { - return xstoreBackupName, nil + if backup != nil { + xstoreBackupName, ok := backup.Status.Backups[xstoreName] + if ok { + return xstoreBackupName, nil + } } - return "", errors.New("not found xstoreBackup") + return "", errors.New("failed to get xstore backup name") } func (f *objectFactory) newMycnfOverlayInfFile(polardbxstore *polardbxv1.PolarDBXCluster, enforceTso bool) (*ini.File, error) { @@ -561,14 +570,6 @@ func (f *objectFactory) newMycnfOverlayInfFile(polardbxstore *polardbxv1.PolarDB return nil, err } - file.Section("").Key("loose_query_cache_type").SetValue("OFF") - file.Section("").Key("loose_query_cache_size").SetValue("0") - file.Section("").Key("loose_innodb_thread_concurrency").SetValue("0") - file.Section("").Key("loose_concurrent_insert").SetValue("0") - file.Section("").Key("loose_gts_lease").SetValue("2000") - file.Section("").Key("loose_log_bin_use_v1_row_events").SetValue("off") - file.Section("").Key("loose_binlog_checksum").SetValue("crc32") - if enforceTso { file.Section("").Key("loose_enable_gts").SetValue("1") } @@ -619,7 +620,7 @@ func (f *objectFactory) NewXStoreGMS() (*polardbxv1.XStore, error) { restoreName := "" if polardbx.Status.Phase == polardbxv1polardbx.PhaseRestoring { - restoreName, err = f.GetXStoreName(*polardbx, xstoreName) + restoreName, err = f.GetOriginalXstoreNameForRestore(*polardbx, xstoreName) if err != nil { return nil, err } @@ -663,7 +664,7 @@ func (f *objectFactory) NewXStoreDN(idx int) (*polardbxv1.XStore, error) { xstoreName := convention.NewDNName(polardbx, idx) restoreName := "" if polardbx.Status.Phase == polardbxv1polardbx.PhaseRestoring { - restoreName, err = f.GetXStoreName(*polardbx, xstoreName) + restoreName, err = f.GetOriginalXstoreNameForRestore(*polardbx, xstoreName) if err != nil { return nil, err } diff --git a/pkg/operator/v1/polardbx/meta/annotation.go b/pkg/operator/v1/polardbx/meta/annotation.go index a3245b3..229e49a 100644 --- a/pkg/operator/v1/polardbx/meta/annotation.go +++ b/pkg/operator/v1/polardbx/meta/annotation.go @@ -33,3 +33,17 @@ const ( AnnotationTopologyModeGuide = "polardbx/topology-mode-guide" AnnotationTopologyRuleGuide = "polardbx/topology-rule-guide" ) + +// Backup annotations +const ( + AnnotationDummyBackup = "polardbx/dummy-backup" + AnnotationBackupBinlog = "polardbx/backupbinlog" +) + +const ( + AnnotationStorageType = "polardbx/storage-type" +) + +const ( + AnnotationPitrConfig = "polardbx/pitr-config" +) diff --git 
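// The lookup in GetOriginalXstoreNameForRestore above matches old and new
// xstore names by their last four characters (e.g. the "dn-0" tail), hence
// the "safe when quantity of dn less than 10000" remark. Extracted as a
// sketch:
func sameNodeSuffix(oldName, newName string) bool {
	return len(oldName) >= 4 && len(newName) >= 4 &&
		oldName[len(oldName)-4:] == newName[len(newName)-4:]
}
// sameNodeSuffix("old-pxc-dn-0", "new-pxc-dn-0") == true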
a/pkg/operator/v1/polardbx/meta/label.go b/pkg/operator/v1/polardbx/meta/label.go index 51b0f65..99ca635 100644 --- a/pkg/operator/v1/polardbx/meta/label.go +++ b/pkg/operator/v1/polardbx/meta/label.go @@ -17,28 +17,34 @@ limitations under the License. package meta const ( - LabelName = "polardbx/name" - LabelRand = "polardbx/rand" - LabelRole = "polardbx/role" - LabelCNType = "polardbx/cn-type" - LabelDNIndex = "polardbx/dn-index" - LabelTopologyRule = "polardbx/topology-rule" - LabelGeneration = "polardbx/generation" - LabelPortLock = "polardbx/port-lock" - LabelGroup = "polardbx/group" - LabelHash = "polardbx/hash" - LabelTopBackup = "polardbx/top-backup" - LabelBackupXStore = "polardbx/xstore" - LabelBackupXStoreUID = "polardbx/xstore-uid" - LabelPreferredBackupNode = "polardbx/preferred-backup-node" - LabelBinlogPurgeLock = "polardbx/binlogpurge-lock" - LabelPrimaryName = "polardbx/primary-name" - LabelType = "polardbx/type" - LabelAuditLog = "polardbx/enableAuditLog" + LabelName = "polardbx/name" + LabelUid = "polardbx/uid" + LabelRand = "polardbx/rand" + LabelRole = "polardbx/role" + LabelCNType = "polardbx/cn-type" + LabelDNIndex = "polardbx/dn-index" + LabelTopologyRule = "polardbx/topology-rule" + LabelGeneration = "polardbx/generation" + LabelPortLock = "polardbx/port-lock" + LabelGroup = "polardbx/group" + LabelHash = "polardbx/hash" + LabelTopBackup = "polardbx/top-backup" + LabelBackupXStore = "polardbx/xstore" + LabelBackupXStoreUID = "polardbx/xstore-uid" + LabelBinlogPurgeLock = "polardbx/binlogpurge-lock" + LabelPrimaryName = "polardbx/primary-name" + LabelType = "polardbx/type" + LabelAuditLog = "polardbx/enableAuditLog" + LabelBackupSchedule = "polardbx/backup-schedule" + LabelBackupBinlog = "polardbx/backupBinlog" + LabelJobType = "polardbx/jobType" ) + const ( SeekCpJobLabelPXCName = "seekcp-job/pxc" SeekCpJobLabelBackupName = "seekcp-job/backup" + // SeekCpJobLabelPodName denotes the pod on which seekcp job performed + SeekCpJobLabelPodName = "seekcp-job/pod" ) const ( @@ -76,3 +82,10 @@ const ( TypeMaster = "master" TypeReadonly = "readonly" ) + +type PxcJobType string + +const ( + HeartbeatJobType PxcJobType = "PitrHeartbeat" + PitrPrepareBinlogJobType PxcJobType = "PitrPrepareBinlogs" +) diff --git a/pkg/operator/v1/polardbx/reconcile/context.go b/pkg/operator/v1/polardbx/reconcile/context.go index 530cf7d..6975b58 100644 --- a/pkg/operator/v1/polardbx/reconcile/context.go +++ b/pkg/operator/v1/polardbx/reconcile/context.go @@ -20,10 +20,10 @@ import ( "encoding/json" "errors" "fmt" - "github.com/alibaba/polardbx-operator/pkg/debug" + "github.com/alibaba/polardbx-operator/pkg/hpfs/filestream" xstoreconvention "github.com/alibaba/polardbx-operator/pkg/operator/v1/xstore/convention" xstoremeta "github.com/alibaba/polardbx-operator/pkg/operator/v1/xstore/meta" - "github.com/alibaba/polardbx-operator/pkg/util" + "github.com/alibaba/polardbx-operator/pkg/util/name" appsv1 "k8s.io/api/apps/v1" batchv1 "k8s.io/api/batch/v1" corev1 "k8s.io/api/core/v1" @@ -79,10 +79,15 @@ type Context struct { polardbxBackupKey types.NamespacedName polardbxBackupStatusSnapshot *polardbxv1.PolarDBXBackupStatus polardbxSeekCpJob *batchv1.Job - polardbxParameterTemplate *polardbxv1.PolarDBXParameterTemplate - polardbxTemplateParams map[string]map[string]polardbxv1.TemplateParams - polardbxParamsRoleMap map[string]map[string]polardbxv1.Params - roleToRestart map[string]bool + + polardbxBackupScheduleKey types.NamespacedName + polardbxBackupSchedule *polardbxv1.PolarDBXBackupSchedule + 
polardbxBackupScheduleStatus *polardbxv1.PolarDBXBackupScheduleStatus + + polardbxParameterTemplate *polardbxv1.PolarDBXParameterTemplate + polardbxTemplateParams map[string]map[string]polardbxv1.TemplateParams + polardbxParamsRoleMap map[string]map[string]polardbxv1.Params + roleToRestart map[string]bool polardbxParameter *polardbxv1.PolarDBXParameter polardbxParameterKey types.NamespacedName @@ -95,11 +100,19 @@ type Context struct { configLoader func() config.Config // Managers - gmsManager gms.Manager - groupManager group.GroupManager - xstoreManager group.GroupManager + gmsManager gms.Manager + groupManager group.GroupManager + // xstoreManagerMap records xstore's pod name and its related group manager + xstoreManagerMap map[string]group.GroupManager taskConfigMap *corev1.ConfigMap + + // Filestream client + filestreamClient *filestream.FileClient + + //backup binlog + backupBinlog *polardbxv1.PolarDBXBackupBinlog + backupBinlogKey types.NamespacedName } func (rc *Context) Debug() bool { @@ -196,31 +209,6 @@ func (rc *Context) MustGetPolarDBX() *polardbxv1.PolarDBXCluster { return polardbx } -func (rc *Context) GetXStoreNameForOldPXC() (string, string, error) { - polardbx := rc.MustGetPolarDBX() - backup := &polardbxv1.PolarDBXBackup{} - if polardbx.Spec.Restore.BackupSet == "" && len(polardbx.Spec.Restore.BackupSet) == 0 { - backup, _ = rc.GetCompletedPXCBackup(map[string]string{polardbxmeta.LabelName: polardbx.Spec.Restore.From.PolarBDXName}) - } else { - backup, _ = rc.GetPXCBackupByName(polardbx.Spec.Restore.BackupSet) - } - if backup == nil { - return "", "", errors.New("backup not found") - } - pxcName := backup.Spec.Cluster.Name - - for _, xstoreName := range backup.Status.XStores { - splitXStoreName := strings.Split(xstoreName, "-") - if splitXStoreName[len(splitXStoreName)-2] == "dn" { - return splitXStoreName[len(splitXStoreName)-3], pxcName, nil - } else { - continue - } - } - - return "", "", errors.New("Not found DN") -} - func (rc *Context) GetPrimaryPolarDBX() (*polardbxv1.PolarDBXCluster, error) { polardbx := rc.MustGetPolarDBX() @@ -360,6 +348,13 @@ func (rc *Context) SetControllerRefAndUpdate(obj client.Object) error { return rc.Client().Update(rc.Context(), obj) } +func (rc *Context) SetControllerToOwnerAndCreate(owner, obj client.Object) error { + if err := ctrl.SetControllerReference(owner, obj, rc.Scheme()); err != nil { + return err + } + return rc.Client().Create(rc.Context(), obj) +} + func (rc *Context) IsPolarDBXStatusChanged() bool { if rc.polardbxStatus == nil { return false @@ -492,12 +487,12 @@ func (rc *Context) ParseRestoreTime() (time.Time, error) { return time.Time{}, nil } - location, err := time.LoadLocation(gms.StrOrDefault(polarDBX.Spec.Restore.TimeZone, "Asia/Shanghai")) + location, err := time.LoadLocation(gms.StrOrDefault(polarDBX.Spec.Restore.TimeZone, "UTC")) if err != nil { return time.Time{}, nil } - return time.ParseInLocation("2006-01-02 15:04:05", polarDBX.Spec.Restore.Time, location) + return time.ParseInLocation("2006-01-02T15:04:05Z", polarDBX.Spec.Restore.Time, location) } func (rc *Context) MustParseRestoreTime() time.Time { @@ -764,14 +759,24 @@ func (rc *Context) GetDN(i int) (*polardbxv1.XStore, error) { func (rc *Context) GetLeaderOfDN(xstore *polardbxv1.XStore) (*corev1.Pod, error) { var leaderPod corev1.Pod - leadrPodName := types.NamespacedName{Namespace: rc.Namespace(), Name: xstore.Status.LeaderPod} - err := rc.Client().Get(rc.Context(), leadrPodName, &leaderPod) + leaderPodName := types.NamespacedName{Namespace: 
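// ParseRestoreTime above now defaults to UTC with a layout carrying a literal
// trailing 'Z'; a self-contained check of that exact call (the timestamp
// value is illustrative):
//
// import "time"
func parseRestoreTimeExample() (time.Time, error) {
	loc, _ := time.LoadLocation("UTC")
	// The lone 'Z' in the layout is matched as a literal character, so the
	// zone comes from the location argument: this parses to 08:00 UTC.
	return time.ParseInLocation("2006-01-02T15:04:05Z", "2023-03-17T08:00:00Z", loc)
}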
rc.Namespace(), Name: xstore.Status.LeaderPod} + err := rc.Client().Get(rc.Context(), leaderPodName, &leaderPod) if err != nil { return nil, err } return &leaderPod, nil } +func (rc *Context) GetXstoreByPod(pod *corev1.Pod) (*polardbxv1.XStore, error) { + var xstore polardbxv1.XStore + xstoreName := types.NamespacedName{Namespace: pod.Namespace, Name: pod.Labels[xstoremeta.LabelName]} + err := rc.Client().Get(rc.Context(), xstoreName, &xstore) + if err != nil { + return nil, err + } + return &xstore, nil +} + func (rc *Context) getDeploymentMap(polardbx *polardbxv1.PolarDBXCluster, role string) (map[string]*appsv1.Deployment, error) { var deploymentList appsv1.DeploymentList err := rc.Client().List(rc.Context(), &deploymentList, @@ -983,7 +988,8 @@ func (rc *Context) GetPolarDBXGMSManager() (gms.Manager, error) { } gmsEngine := gmsStore.Spec.Engine - storageType, err := gms.GetStorageType(gmsEngine, gmsStore.Status.EngineVersion) + annoStorageType, _ := polardbx.Annotations[polardbxmeta.AnnotationStorageType] + storageType, err := gms.GetStorageType(gmsEngine, gmsStore.Status.EngineVersion, annoStorageType) if err != nil { return nil, err } @@ -1063,7 +1069,7 @@ func (rc *Context) GetPolarDBXGroupManager() (group.GroupManager, error) { return rc.groupManager, nil } -func (rc *Context) GetPolarDBXCNGroupManager(backup *polardbxv1.PolarDBXBackup) (group.GroupManager, error) { +func (rc *Context) GetPolarDBXGroupManagerByBackup(backup *polardbxv1.PolarDBXBackup) (group.GroupManager, error) { serviceKey := types.NamespacedName{Namespace: backup.Namespace, Name: backup.Spec.Cluster.Name} service := corev1.Service{} err := rc.Client().Get(rc.Context(), serviceKey, &service) @@ -1090,41 +1096,44 @@ func (rc *Context) GetPolarDBXCNGroupManager(backup *polardbxv1.PolarDBXBackup) return rc.groupManager, nil } -func (rc *Context) GetPolarDBXGroupManagerByXStorePod(pod corev1.Pod) (group.GroupManager, *polardbxv1.XStore, error) { - var xstore polardbxv1.XStore - var serviceList corev1.ServiceList - err := rc.Client().List(rc.Context(), &serviceList, client.InNamespace(rc.Namespace()), client.MatchingLabels{ - xstoremeta.LabelPod: pod.Name, - }) - if err != nil || len(serviceList.Items) == 0 { - return nil, nil, err +func (rc *Context) GetXstoreGroupManagerByPod(pod *corev1.Pod) (group.GroupManager, error) { + if rc.xstoreManagerMap != nil { + if mgr, ok := rc.xstoreManagerMap[pod.Name]; ok { + return mgr, nil + } } - host := serviceList.Items[0].Name + "." 
+ pod.Namespace - xstoreSpec := types.NamespacedName{Namespace: pod.Namespace, Name: pod.Labels[xstoremeta.LabelName]} - err = rc.Client().Get(rc.Context(), xstoreSpec, &xstore) + + podService, err := rc.GetService(xstoreconvention.NewXstorePodServiceName(pod)) if err != nil { - return nil, nil, err + return nil, err } - passwd, err := rc.GetXStoreAccountPassword(xstoreconvention.SuperAccount, xstore) + host, port, err := k8shelper.GetClusterIpPortFromService(podService, convention.PortAccess) if err != nil { - return nil, nil, err + return nil, err + } + xstore, err := rc.GetXstoreByPod(pod) + if err != nil { + return nil, err } - port, err := strconv.Atoi(pod.Labels[xstoremeta.LabelPortLock]) + passwd, err := rc.GetXStoreAccountPassword(xstoreconvention.SuperAccount, xstore) if err != nil { - return nil, nil, err + return nil, err } - rc.groupManager = group.NewGroupManager( + if rc.xstoreManagerMap == nil { + rc.xstoreManagerMap = make(map[string]group.GroupManager) + } + rc.xstoreManagerMap[pod.Name] = group.NewGroupManager( rc.Context(), dbutil.MySQLDataSource{ Host: host, - Port: port, + Port: int(port), Username: xstoreconvention.SuperAccount, Password: passwd, }, true, ) - return rc.groupManager, &xstore, nil + return rc.xstoreManagerMap[pod.Name], nil } func (rc *Context) Close() error { @@ -1144,6 +1153,15 @@ func (rc *Context) Close() error { } } + if rc.xstoreManagerMap != nil { + for _, mgr := range rc.xstoreManagerMap { + err := mgr.Close() + if err != nil { + errs = append(errs, err) + } + } + } + if err := rc.BaseReconcileContext.Close(); err != nil { errs = append(errs, err) } @@ -1222,11 +1240,8 @@ func (rc *Context) UpdatePolarDBXBackup() error { } func (rc *Context) UpdatePolarDBXBackupStatus() error { - if debug.IsDebugEnabled() { - } else { - if rc.polardbxBackupStatusSnapshot == nil { - return nil - } + if rc.polardbxBackupStatusSnapshot == nil { + return nil } err := rc.Client().Status().Update(rc.Context(), rc.polardbxBackup) if err != nil { @@ -1257,6 +1272,19 @@ func (rc *Context) GetXStoreBackups() (*polardbxv1.XStoreBackupList, error) { return &xstoreBackups, nil } +func (rc *Context) GetXstoreBackupByName(xstoreBackupName string) (*polardbxv1.XStoreBackup, error) { + var xstoreBackup polardbxv1.XStoreBackup + xstoreBackupKey := types.NamespacedName{ + Namespace: rc.Namespace(), + Name: xstoreBackupName, + } + err := rc.Client().Get(rc.Context(), xstoreBackupKey, &xstoreBackup) + if err != nil { + return nil, err + } + return &xstoreBackup, nil +} + func (rc *Context) GetXStoreBackupPods() ([]corev1.Pod, error) { xstoreBackups, err := rc.GetXStoreBackups() if err != nil { @@ -1275,7 +1303,7 @@ func (rc *Context) GetXStoreBackupPods() ([]corev1.Pod, error) { return pods, nil } -func (rc *Context) GetXStoreAccountPassword(user string, xstore polardbxv1.XStore) (string, error) { +func (rc *Context) GetXStoreAccountPassword(user string, xstore *polardbxv1.XStore) (string, error) { secret, err := rc.GetXStoreSecret(xstore) if err != nil { return "", err @@ -1287,13 +1315,13 @@ func (rc *Context) GetXStoreAccountPassword(user string, xstore polardbxv1.XStor return string(passwd), nil } -func (rc *Context) GetXStoreSecret(xstore polardbxv1.XStore) (*corev1.Secret, error) { +func (rc *Context) GetXStoreSecret(xstore *polardbxv1.XStore) (*corev1.Secret, error) { secretKey := types.NamespacedName{Namespace: xstore.Namespace, Name: xstore.Name} secret, err := rc.objectCache.GetObject(rc.Context(), secretKey, &corev1.Secret{}) if err != nil { return nil, err } - if err := 
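// Usage sketch of the per-pod manager cache introduced above (package-local
// pseudocode, assuming access to the reconcile Context): repeated calls for
// one pod now reuse a single connection, and Context.Close below is the only
// place that tears the cached managers down.
func binlogOffsetOf(rc *Context, pod *corev1.Pod) (string, error) {
	mgr, err := rc.GetXstoreGroupManagerByPod(pod)
	if err != nil {
		return "", err
	}
	// No rc.Close() between uses anymore; the manager stays in xstoreManagerMap.
	return mgr.GetBinlogOffset()
}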
k8shelper.CheckControllerReference(secret, &xstore); err != nil { + if err := k8shelper.CheckControllerReference(secret, xstore); err != nil { return nil, err } return secret.(*corev1.Secret), nil @@ -1355,18 +1383,18 @@ func (rc *Context) GetLastCompletedPXCBackup(matchLabels map[string]string, befo return nil, err } var lastBackup *polardbxv1.PolarDBXBackup = nil - var lastBackupStartTime *time.Time = nil + var lastBackupRestoreTime *time.Time = nil for i := range polardbxBackupList.Items { backup := &polardbxBackupList.Items[i] if backup.Status.Phase != polardbxv1.BackupFinished { continue } - if backup.Status.EndTime.After(beforeTime) { + if backup.Status.LatestRecoverableTimestamp.After(beforeTime) { continue } - if lastBackupStartTime == nil || - lastBackupStartTime.Before(backup.Status.StartTime.Time) { - lastBackupStartTime = &backup.Status.StartTime.Time + if lastBackupRestoreTime == nil || + lastBackupRestoreTime.Before(backup.Status.LatestRecoverableTimestamp.Time) { + lastBackupRestoreTime = &backup.Status.LatestRecoverableTimestamp.Time lastBackup = backup } } @@ -1417,7 +1445,7 @@ func (rc *Context) GetOrCreatePolarDBXBackupTaskConfigMap() (*corev1.ConfigMap, backup := rc.MustGetPolarDBXBackup() var cm corev1.ConfigMap - err := rc.Client().Get(rc.Context(), types.NamespacedName{Namespace: rc.Namespace(), Name: util.PolarDBXBackupStableName(backup, "seekcp")}, &cm) + err := rc.Client().Get(rc.Context(), types.NamespacedName{Namespace: rc.Namespace(), Name: name.PolarDBXBackupStableName(backup, "seekcp")}, &cm) if err != nil { if apierrors.IsNotFound(err) { rc.taskConfigMap = NewTaskConfigMap(backup) @@ -1666,3 +1694,112 @@ func (rc *Context) NewSecretFromPolarDBX(secret *corev1.Secret) (*corev1.Secret, Data: data, }, nil } + +func (rc *Context) GetFilestreamClient() (*filestream.FileClient, error) { + if rc.filestreamClient == nil { + hostPort := strings.SplitN(rc.Config().Store().FilestreamServiceEndpoint(), ":", 2) + if len(hostPort) < 2 { + return nil, errors.New("invalid filestream endpoint: " + rc.Config().Store().FilestreamServiceEndpoint()) + } + port, err := strconv.Atoi(hostPort[1]) + if err != nil { + return nil, errors.New("invalid filestream port: " + hostPort[1]) + } + rc.filestreamClient = filestream.NewFileClient(hostPort[0], port, nil) + } + return rc.filestreamClient, nil +} + +func (rc *Context) SetPolarDBXBackupScheduleKey(key types.NamespacedName) { + rc.polardbxBackupScheduleKey = key +} + +func (rc *Context) GetPolarDBXBackupSchedule() (*polardbxv1.PolarDBXBackupSchedule, error) { + if rc.polardbxBackupSchedule == nil { + schedule, err := rc.objectCache.GetObject( + rc.Context(), + rc.polardbxBackupScheduleKey, + &polardbxv1.PolarDBXBackupSchedule{}, + ) + if err != nil { + return nil, err + } + rc.polardbxBackupSchedule = schedule.(*polardbxv1.PolarDBXBackupSchedule) + rc.polardbxBackupScheduleStatus = rc.polardbxBackupSchedule.Status.DeepCopy() + } + return rc.polardbxBackupSchedule, nil +} + +func (rc *Context) MustGetPolarDBXBackupSchedule() *polardbxv1.PolarDBXBackupSchedule { + schedule, err := rc.GetPolarDBXBackupSchedule() + if err != nil { + panic(err) + } + return schedule +} + +func (rc *Context) IsPolarDBXBackupScheduleStatusChanged() bool { + if rc.polardbxBackupScheduleStatus == nil { + return false + } + return !equality.Semantic.DeepEqual(rc.polardbxBackupScheduleStatus, &rc.polardbxBackupSchedule.Status) +} + +func (rc *Context) UpdatePolarDBXBackupScheduleStatus() error { + if rc.polardbxBackupScheduleStatus == nil { + return nil + } + 
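// GetFilestreamClient above splits "host:port" by hand; an equivalent,
// slightly stricter parse using the standard library (a sketch of an
// alternative, not what the patch ships):
//
// import ("net"; "strconv")
func parseEndpoint(endpoint string) (host string, port int, err error) {
	host, portStr, err := net.SplitHostPort(endpoint)
	if err != nil {
		return "", 0, err
	}
	port, err = strconv.Atoi(portStr)
	return host, port, err
}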
err := rc.Client().Status().Update(rc.Context(), rc.polardbxBackupSchedule) + if err != nil { + return err + } + rc.polardbxBackupScheduleStatus = rc.polardbxBackupSchedule.Status.DeepCopy() + return nil +} + +func (rc *Context) GetPolarDBXBackupListByPolarDBXName(polardbxName string) (*polardbxv1.PolarDBXBackupList, error) { + return rc.GetPolarDBXBackupListByLabels(map[string]string{polardbxmeta.LabelName: polardbxName}) +} + +func (rc *Context) GetPolarDBXBackupListByScheduleName(scheduleName string) (*polardbxv1.PolarDBXBackupList, error) { + return rc.GetPolarDBXBackupListByLabels(map[string]string{polardbxmeta.LabelBackupSchedule: scheduleName}) +} + +func (rc *Context) GetPolarDBXBackupListByLabels(labels map[string]string) (*polardbxv1.PolarDBXBackupList, error) { + var backupList polardbxv1.PolarDBXBackupList + err := rc.Client().List(rc.Context(), &backupList, client.InNamespace(rc.Namespace()), client.MatchingLabels(labels)) + if err != nil { + return nil, err + } + return &backupList, nil +} + +func (rc *Context) SetBackupBinlogKey(key types.NamespacedName) { + rc.backupBinlogKey = key +} + +func (rc *Context) GetPolarDBXBackupBinlog() (*polardbxv1.PolarDBXBackupBinlog, error) { + if rc.backupBinlog == nil { + var backupBinlog polardbxv1.PolarDBXBackupBinlog + err := rc.Client().Get(rc.Context(), rc.backupBinlogKey, &backupBinlog) + if err != nil { + return nil, err + } + rc.backupBinlog = &backupBinlog + } + return rc.backupBinlog, nil +} + +func (rc *Context) MustGetPolarDBXBackupBinlog() *polardbxv1.PolarDBXBackupBinlog { + backupBinlog, err := rc.GetPolarDBXBackupBinlog() + if err != nil { + panic(err) + } + return backupBinlog +} + +func (rc *Context) UpdatePolarDbXBackupBinlog() error { + backupBinlog := rc.MustGetPolarDBXBackupBinlog() + err := rc.Client().Update(rc.Context(), backupBinlog) + return err +} diff --git a/pkg/operator/v1/polardbx/steps/backup/common/object.go b/pkg/operator/v1/polardbx/steps/backup/common/object.go index 5b0b17a..db84b17 100644 --- a/pkg/operator/v1/polardbx/steps/backup/common/object.go +++ b/pkg/operator/v1/polardbx/steps/backup/common/object.go @@ -18,20 +18,23 @@ package common import ( "bytes" + "encoding/json" "errors" "fmt" polardbxv1 "github.com/alibaba/polardbx-operator/api/v1" xstorev1 "github.com/alibaba/polardbx-operator/api/v1" - "github.com/alibaba/polardbx-operator/pkg/debug" + polardbxv1polardbx "github.com/alibaba/polardbx-operator/api/v1/polardbx" + "github.com/alibaba/polardbx-operator/pkg/hpfs/filestream" "github.com/alibaba/polardbx-operator/pkg/k8s/control" k8shelper "github.com/alibaba/polardbx-operator/pkg/k8s/helper" + "github.com/alibaba/polardbx-operator/pkg/operator/v1/polardbx/factory" polardbxmeta "github.com/alibaba/polardbx-operator/pkg/operator/v1/polardbx/meta" polardbxv1reconcile "github.com/alibaba/polardbx-operator/pkg/operator/v1/polardbx/reconcile" - backupbuilder "github.com/alibaba/polardbx-operator/pkg/operator/v1/polardbx/steps/backup/xstorejobbuilder" "github.com/alibaba/polardbx-operator/pkg/operator/v1/xstore/command" xstoremeta "github.com/alibaba/polardbx-operator/pkg/operator/v1/xstore/meta" - "github.com/alibaba/polardbx-operator/pkg/util" xstorectrlerrors "github.com/alibaba/polardbx-operator/pkg/util/error" + "github.com/alibaba/polardbx-operator/pkg/util/path" + "github.com/google/uuid" corev1 "k8s.io/api/core/v1" apierrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -64,14 +67,19 @@ var UpdateBackupStartInfo = 
polardbxv1reconcile.NewStepBinder("UpdateBackupStart backup.Labels = make(map[string]string) } backup.Labels[polardbxmeta.LabelName] = backup.Spec.Cluster.Name - backup.Status.BackupRootPath = util.BackupRootPath(backup) - - // record topology of original pxc - pxc, err := rc.GetPolarDBX() + backup.Status.BackupRootPath = path.NewPathFromStringSequence( + polardbxmeta.BackupPath, + backup.Labels[polardbxmeta.LabelName], + fmt.Sprintf("%s-%s", backup.Name, backup.Status.StartTime.Format("20060102150405")), + ) + + // record topology of original polardbx + polardbx, err := rc.GetPolarDBX() if err != nil { - return flow.Error(err, "Unable to get original pxc") + return flow.Error(err, "Unable to get original polardbx") } - backup.Status.ClusterSpecSnapshot = pxc.Spec.DeepCopy() + backup.Spec.Cluster.UID = polardbx.UID + backup.Status.ClusterSpecSnapshot = polardbx.Spec.DeepCopy() if err := rc.UpdatePolarDBXBackup(); err != nil { return flow.Error(err, "Unable to update PXC backup.") @@ -115,12 +123,14 @@ var CreateBackupJobsForXStore = polardbxv1reconcile.NewStepBinder("CreateBackups continue } - xstoreBackup, err := backupbuilder.NewXStoreBackup(rc.Scheme(), backup, &xstore) + objectFactory := factory.NewObjectFactory(rc) + xstoreBackup, err := objectFactory.NewXStoreBackup(&xstore) if err != nil { return flow.Error(err, "Unable to build new physical backup for xstore", "xstore", xstore.Name) } - if err = rc.Client().Create(rc.Context(), xstoreBackup); err != nil { + err = rc.SetControllerRefAndCreateToBackup(xstoreBackup) + if err != nil { return flow.Error(err, "Unable to create physical backup for xstore", "xstore", xstore.Name) } backup.Status.XStores = append(backup.Status.XStores, xstoreBackup.Spec.XStore.Name) @@ -195,19 +205,13 @@ var DeleteBackupJobsOnFailure = polardbxv1reconcile.NewStepBinder("DeleteBackupJ var PersistentStatusChanges = polardbxv1reconcile.NewStepBinder("PersistentStatusChanges", func(rc *polardbxv1reconcile.Context, flow control.Flow) (reconcile.Result, error) { - if debug.IsDebugEnabled() { - if err := rc.UpdatePolarDBXBackupStatus(); err != nil { - return flow.Error(err, "Unable to update status for xstore backup.") - } - return flow.Continue("PXC Backup status updated!") - } if rc.IsPXCBackupStatusChanged() { if err := rc.UpdatePolarDBXBackupStatus(); err != nil { - return flow.Error(err, "Unable to update status for xstore backup.") + return flow.Error(err, "Unable to update status for PolarDBX backup.") } - return flow.Continue("PXC Backup status updated!") + return flow.Continue("PolarDBX backup status updated!") } - return flow.Continue("PXC Backup status not changed!") + return flow.Continue("PolarDBX backup status not changed!") }) var LockXStoreBinlogPurge = polardbxv1reconcile.NewStepBinder("LockBinlogPurge", @@ -258,24 +262,24 @@ var CollectBinlogStartIndex = polardbxv1reconcile.NewStepBinder("CollectBinlogSt return flow.Error(err, "Unable to get XStore list") } for _, backupPod := range backupPodList { - groupManager, xstore, err := rc.GetPolarDBXGroupManagerByXStorePod(backupPod) + groupManager, err := rc.GetXstoreGroupManagerByPod(&backupPod) if err != nil { - return flow.Error(err, "get DataSource Failed") + return flow.Error(err, "get datasource failed", "pod", backupPod.Name) } - - binlogOffset, err := groupManager.GetBinlogOffset() - if err != nil { - return flow.Error(err, "get binlogoffset Failed") + if groupManager == nil { + return flow.Error(errors.New("group manager is nil"), + "get group manager failed", "pod", backupPod.Name) } - err = 
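// Sketch of the backup root path assembled by UpdateBackupStartInfo above,
// under the assumption that path.NewPathFromStringSequence joins its segments
// with "/" (its body, and the value of the BackupPath constant, are outside
// this patch):
//
// import ("fmt"; "strings"; "time")
func backupRootPath(pxcName, backupName string, start time.Time) string {
	return strings.Join([]string{
		polardbxmeta.BackupPath, // root prefix constant
		pxcName,
		fmt.Sprintf("%s-%s", backupName, start.Format("20060102150405")),
	}, "/")
}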
rc.Close() + binlogOffset, err := groupManager.GetBinlogOffset() if err != nil { - return flow.Error(err, "Close Database Failed") + return flow.Error(err, "get binlogoffset failed", "pod", backupPod.Name) } - binlogOffset = fmt.Sprintf("%s\ntimestamp:%s", binlogOffset, time.Now().Format("2006-01-02 15:04:05")) backupRootPath := pxcBackup.Status.BackupRootPath - remotePath := fmt.Sprintf("%s/%s/%s-start", backupRootPath, polardbxmeta.BinlogOffsetPath, xstore.Name) + xstoreName := backupPod.Labels[xstoremeta.LabelName] + binlogOffset = fmt.Sprintf("%s\ntimestamp:%s", binlogOffset, time.Now().Format("2006-01-02 15:04:05")) + remotePath := fmt.Sprintf("%s/%s/%s-start", backupRootPath, polardbxmeta.BinlogOffsetPath, xstoreName) command := command.NewCanonicalCommandBuilder().Collect(). UploadOffset(binlogOffset, remotePath, string(pxcBackup.Spec.StorageProvider.StorageName), pxcBackup.Spec.StorageProvider.Sink).Build() stdout := &bytes.Buffer{} @@ -303,9 +307,7 @@ var CollectBinlogStartIndex = polardbxv1reconcile.NewStepBinder("CollectBinlogSt var DrainCommittingTrans = polardbxv1reconcile.NewStepBinder("DrainCommittingTrans", func(rc *polardbxv1reconcile.Context, flow control.Flow) (reconcile.Result, error) { backup := rc.MustGetPolarDBXBackup() - - cnManager, err := rc.GetPolarDBXCNGroupManager(backup) - defer rc.Close() + cnManager, err := rc.GetPolarDBXGroupManagerByBackup(backup) if err != nil { return flow.Error(err, "get CN DataSource Failed") } @@ -320,11 +322,9 @@ var DrainCommittingTrans = polardbxv1reconcile.NewStepBinder("DrainCommittingTra var SendHeartBeat = polardbxv1reconcile.NewStepBinder("SendHeartBeat", func(rc *polardbxv1reconcile.Context, flow control.Flow) (reconcile.Result, error) { backup := rc.MustGetPolarDBXBackup() - - cnManager, err := rc.GetPolarDBXCNGroupManager(backup) - defer rc.Close() + cnManager, err := rc.GetPolarDBXGroupManagerByBackup(backup) if err != nil { - return flow.Error(err, "get CN DataSource Failed") + return flow.Error(err, "Get CN dataSource failed") } // In case that there is no cdc in the cluster @@ -336,17 +336,17 @@ var SendHeartBeat = polardbxv1reconcile.NewStepBinder("SendHeartBeat", " PRIMARY KEY (`id`)\n) ENGINE = InnoDB AUTO_INCREMENT = 1666172319 DEFAULT CHARSET = utf8mb4 DEFAULT COLLATE = utf8mb4_0900_ai_ci broadcast" err = cnManager.CreateTable("__cdc__", heartbeatTableDDL) if err != nil { - return flow.Error(err, "Create Heartbeat table failed: "+err.Error()) + return flow.Error(err, "Create heartbeat table failed: "+err.Error()) } sname := strconv.FormatInt(time.Now().Unix(), 10) err = cnManager.SendHeartBeat(sname) if err != nil { - return flow.Error(err, "Send HeartBeat Failed") + return flow.Error(err, "Send heartBeat failed") } backup.Status.HeartBeatName = sname - return flow.Continue("HeartBeat Send!") + return flow.Continue("HeartBeat send!") }) var WaitHeartbeatSentToFollower = polardbxv1reconcile.NewStepBinder("WaitHeartbeatSentToFollower", @@ -396,16 +396,17 @@ var WaitHeartbeatSentToFollower = polardbxv1reconcile.NewStepBinder("WaitHeartbe return flow.Error(errors.New("timeout"), "waiting heartbeat sync timeout") case <-tick: for waitingPod, leaderPod := range waitingPodMap { - manager, _, err := rc.GetPolarDBXGroupManagerByXStorePod(*leaderPod) + manager, err := rc.GetXstoreGroupManagerByPod(leaderPod) if err != nil || manager == nil { return flow.Error(err, "unable to connect to leader", "leader pod", leaderPod.Name) } + clusterStatusList, err := manager.ShowClusterStatus() if err != nil { return flow.Error(err, 
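// The start/end offset files uploaded by the collect steps above are plain
// text: the offset string returned by GetBinlogOffset plus a trailing
// timestamp line. Extracted as a sketch (the offset literal is illustrative):
//
// import ("fmt"; "time")
func offsetPayload(binlogOffset string) string {
	// e.g. "mysql-bin.000003:194\ntimestamp:2023-03-17 16:25:41"
	return fmt.Sprintf("%s\ntimestamp:%s", binlogOffset, time.Now().Format("2006-01-02 15:04:05"))
}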
"unable to get cluster status", "leader pod", leaderPod.Name) } - // leader may hava changed, just abort the backup + // leader may have changed, just abort the backup if len(clusterStatusList) != 3 { return flow.Error(errors.New("global cluster status incorrect, leader may have changed"), "leader pod", leaderPod.Name) @@ -445,22 +446,24 @@ var CollectBinlogEndIndex = polardbxv1reconcile.NewStepBinder("CollectBinlogEndI return flow.Error(err, "Unable to get XStore list") } for _, backupPod := range backupPodList { - groupManager, xstore, err := rc.GetPolarDBXGroupManagerByXStorePod(backupPod) + groupManager, err := rc.GetXstoreGroupManagerByPod(&backupPod) if err != nil { - return flow.Error(err, "get DN DataSource Failed", "podName:", backupPod) + return flow.Error(err, "get dn datasource failed", "pod:", backupPod) } - binlogOffset, err := groupManager.GetBinlogOffset() - if err != nil { - return flow.Error(err, "get binlogoffset Failed") + if groupManager == nil { + return flow.Error(errors.New("group manager is nil"), + "get group manager failed", "pod", backupPod.Name) } - err = rc.Close() + + binlogOffset, err := groupManager.GetBinlogOffset() if err != nil { - return flow.Error(err, "Close Database Failed") + return flow.Error(err, "get binlogoffset failed", "pod", backupPod.Name) } - binlogOffset = fmt.Sprintf("%s\ntimestamp:%s", binlogOffset, time.Now().Format("2006-01-02 15:04:05")) backupRootPath := pxcBackup.Status.BackupRootPath - remotePath := fmt.Sprintf("%s/%s/%s-end", backupRootPath, polardbxmeta.BinlogOffsetPath, xstore.Name) + xstoreName := backupPod.Labels[xstoremeta.LabelName] + binlogOffset = fmt.Sprintf("%s\ntimestamp:%s", binlogOffset, time.Now().Format("2006-01-02 15:04:05")) + remotePath := fmt.Sprintf("%s/%s/%s-end", backupRootPath, polardbxmeta.BinlogOffsetPath, xstoreName) command := command.NewCanonicalCommandBuilder().Collect(). 
UploadOffset(binlogOffset, remotePath, string(pxcBackup.Spec.StorageProvider.StorageName), pxcBackup.Spec.StorageProvider.Sink).Build() stdout := &bytes.Buffer{} @@ -553,7 +556,6 @@ var CreateSeekCpJob = polardbxv1reconcile.NewStepBinder("CreateSeekCpJob", return flow.Continue("SeekCp job already started!", "job-name", job.Name) } - jobName := GenerateJobName(polardbxBackup, "seekcp") xstoreBackupList, err := rc.GetXStoreBackups() if err != nil { return flow.Error(err, "Unable to get XStoreBackupList!") @@ -575,7 +577,7 @@ var CreateSeekCpJob = polardbxv1reconcile.NewStepBinder("CreateSeekCpJob", if len(targetPod.Name) == 0 { return flow.Error(err, "Unable to get targetPod!") } - job, err = newSeekCpJob(polardbxBackup, &targetPod, jobName) + job, err = newSeekCpJob(polardbxBackup, &targetPod) if err != nil { return flow.Error(err, "Unable to create SeekCpJob") } @@ -617,7 +619,8 @@ var WaitAllBinlogJobFinished = polardbxv1reconcile.NewStepBinder("WaitAllBinlogJ for _, xstoreBackup := range xstoreBackupList.Items { if xstoreBackup.Status.Phase != xstorev1.XStoreBinlogWaiting { - flow.Wait("xstorebackup is still backup binlog", "xstoreBackupName", xstoreBackup.Name) + return flow.RetryAfter(1*time.Minute, "XstoreBackup is still performing binlog backup", + "XstoreBackup name", xstoreBackup.Name) } } @@ -709,5 +712,107 @@ var SavePXCSecrets = polardbxv1reconcile.NewStepBinder("SavePXCSecrets", if err != nil { return flow.Error(err, "Unable to create account secret while backuping") } - return flow.Continue("PXC Secrets Saved!") + return flow.Continue("PolarDBX secret saved!") + }) + +var UploadClusterMetadata = polardbxv1reconcile.NewStepBinder("UploadClusterMetadata", + func(rc *polardbxv1reconcile.Context, flow control.Flow) (reconcile.Result, error) { + // collect all the metadata and formatted them into a single json file + pxcBackup := rc.MustGetPolarDBXBackup() + polardbx, err := rc.GetPolarDBX() + if err != nil { + return flow.Error(err, "Unable to get original polardbx") + } + metadata := factory.MetadataBackup{ + PolarDBXClusterMetadata: factory.PolarDBXClusterMetadata{ + Name: polardbx.Name, + UID: polardbx.UID, + Spec: pxcBackup.Status.ClusterSpecSnapshot.DeepCopy(), + }, + XstoreMetadataList: make([]factory.XstoreMetadata, 0, len(pxcBackup.Status.Backups)), + BackupSetName: pxcBackup.Name, + BackupRootPath: pxcBackup.Status.BackupRootPath, + StartTime: pxcBackup.Status.StartTime, + EndTime: pxcBackup.Status.EndTime, + LatestRecoverableTimestamp: pxcBackup.Status.LatestRecoverableTimestamp, + } + + // xstore metadata and secrets + pxcSecret, err := rc.GetSecret(pxcBackup.Name) + if err != nil || pxcSecret == nil { + return flow.Error(err, "Unable to get secret for pxc", "pxc name", pxcBackup.Name) + } + metadata.PolarDBXClusterMetadata.Secrets = make([]polardbxv1polardbx.PrivilegeItem, 0, len(pxcSecret.Data)) + for user, passwd := range pxcSecret.Data { + metadata.PolarDBXClusterMetadata.Secrets = append( + metadata.PolarDBXClusterMetadata.Secrets, + polardbxv1polardbx.PrivilegeItem{ + Username: user, + Password: string(passwd), + }) + } + for xstoreName, xstoreBackupName := range pxcBackup.Status.Backups { + var xstore xstorev1.XStore + err := rc.Client().Get(rc.Context(), types.NamespacedName{Namespace: rc.Namespace(), Name: xstoreName}, &xstore) + if err != nil { + return flow.Error(err, "Unable to get xstore by name", "xstore name", xstoreName) + } + xstoreSecret, err := rc.GetSecret(xstoreBackupName) + if client.IgnoreNotFound(err) != nil { + return flow.Error(err, "Unable to 
get secret for xstore", "xstore name", xstoreName)
+		} else if xstoreSecret == nil {
+			return flow.RetryAfter(5*time.Second, "Wait for the creation of xstore secret backup",
+				"xstore name", xstoreName)
+		}
+		xstoreBackup, err := rc.GetXstoreBackupByName(xstoreBackupName)
+		if err != nil || xstoreBackup == nil {
+			return flow.Error(err, "Unable to get backup for xstore", "xstore name", xstoreName)
+		}
+
+		xstoreMetadata := factory.XstoreMetadata{
+			Name:            xstoreName,
+			UID:             xstore.UID,
+			BackupName:      xstoreBackupName,
+			LastCommitIndex: xstoreBackup.Status.CommitIndex,
+			Secrets:         make([]polardbxv1polardbx.PrivilegeItem, 0, len(xstoreSecret.Data)),
+		}
+		for user, passwd := range xstoreSecret.Data {
+			xstoreMetadata.Secrets = append(
+				xstoreMetadata.Secrets,
+				polardbxv1polardbx.PrivilegeItem{
+					Username: user,
+					Password: string(passwd),
+				})
+		}
+		metadata.XstoreMetadataList = append(metadata.XstoreMetadataList, xstoreMetadata)
+	}
+
+	// parse metadata to json string
+	jsonString, err := json.Marshal(metadata)
+	if err != nil {
+		return flow.RetryErr(err, "Failed to marshal metadata, retry to upload metadata")
+	}
+
+	// init filestream client and upload formatted metadata
+	filestreamClient, err := rc.GetFilestreamClient()
+	metadataBackupPath := fmt.Sprintf("%s/metadata", metadata.BackupRootPath)
+	if err != nil {
+		return flow.RetryAfter(10*time.Second, "Failed to get filestream client, error: "+err.Error())
+	}
+	filestreamAction, err := polardbxv1polardbx.NewBackupStorageFilestreamAction(pxcBackup.Spec.StorageProvider.StorageName)
+	if err != nil {
+		return flow.RetryAfter(10*time.Second, "Unsupported storage provider")
+	}
+	actionMetadata := filestream.ActionMetadata{
+		Action:    filestreamAction.Upload,
+		Sink:      pxcBackup.Spec.StorageProvider.Sink,
+		RequestId: uuid.New().String(),
+		Filename:  metadataBackupPath,
+	}
+	sendBytes, err := filestreamClient.Upload(bytes.NewReader(jsonString), actionMetadata)
+	if err != nil {
+		return flow.RetryAfter(10*time.Second, "Upload metadata failed, error: "+err.Error())
+	}
+	flow.Logger().Info("Uploading metadata finished", "sent bytes", sendBytes)
+	return flow.Continue("Metadata uploaded.")
+	})
diff --git a/pkg/operator/v1/polardbx/steps/backup/common/seekcpjob.go b/pkg/operator/v1/polardbx/steps/backup/common/seekcp_job.go
similarity index 89%
rename from pkg/operator/v1/polardbx/steps/backup/common/seekcpjob.go
rename to pkg/operator/v1/polardbx/steps/backup/common/seekcp_job.go
index a2e81e0..92b3ae7 100644
--- a/pkg/operator/v1/polardbx/steps/backup/common/seekcpjob.go
+++ b/pkg/operator/v1/polardbx/steps/backup/common/seekcp_job.go
@@ -21,10 +21,11 @@ import (
 	k8shelper "github.com/alibaba/polardbx-operator/pkg/k8s/helper"
 	"github.com/alibaba/polardbx-operator/pkg/operator/v1/polardbx/meta"
 	"github.com/alibaba/polardbx-operator/pkg/operator/v1/xstore/command"
-	"github.com/alibaba/polardbx-operator/pkg/util"
+	"github.com/alibaba/polardbx-operator/pkg/util/name"
 	batchv1 "k8s.io/api/batch/v1"
 	corev1 "k8s.io/api/core/v1"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/util/rand"
 	"k8s.io/utils/pointer"
 )
@@ -59,7 +60,7 @@ func patchTaskConfigMapVolumeAndVolumeMounts(polardbxBackup *polardbxv1.PolarDBX
 	VolumeSource: corev1.VolumeSource{
 		ConfigMap: &corev1.ConfigMapVolumeSource{
 			LocalObjectReference: corev1.LocalObjectReference{
-				Name: util.PolarDBXBackupStableName(polardbxBackup, "seekcp"),
+				Name: name.PolarDBXBackupStableName(polardbxBackup, "seekcp"),
 			},
 		},
 	},
@@ -78,7 +79,7 @@ func 
patchTaskConfigMapVolumeAndVolumeMounts(polardbxBackup *polardbxv1.PolarDBX } } -func newSeekCpJob(pxcBackup *polardbxv1.PolarDBXBackup, targetPod *corev1.Pod, jobName string) (*batchv1.Job, error) { +func newSeekCpJob(pxcBackup *polardbxv1.PolarDBXBackup, targetPod *corev1.Pod) (*batchv1.Job, error) { podSpec := targetPod.Spec.DeepCopy() podSpec.InitContainers = nil podSpec.RestartPolicy = corev1.RestartPolicyNever @@ -99,6 +100,10 @@ func newSeekCpJob(pxcBackup *polardbxv1.PolarDBXBackup, targetPod *corev1.Pod, j replaceSystemEnvs(podSpec, targetPod) patchTaskConfigMapVolumeAndVolumeMounts(pxcBackup, podSpec) + jobName := name.NewSplicedName( + name.WithTokens("seekcp", "job", pxcBackup.Name, rand.String(4)), + name.WithPrefix("seekcp-job"), + ) job := &batchv1.Job{ ObjectMeta: metav1.ObjectMeta{ Name: jobName, @@ -106,6 +111,7 @@ func newSeekCpJob(pxcBackup *polardbxv1.PolarDBXBackup, targetPod *corev1.Pod, j Labels: map[string]string{ meta.SeekCpJobLabelBackupName: pxcBackup.Name, meta.SeekCpJobLabelPXCName: pxcBackup.Spec.Cluster.Name, + meta.SeekCpJobLabelPodName: targetPod.Name, }, }, Spec: batchv1.JobSpec{ @@ -115,6 +121,7 @@ func newSeekCpJob(pxcBackup *polardbxv1.PolarDBXBackup, targetPod *corev1.Pod, j Labels: map[string]string{ meta.SeekCpJobLabelBackupName: pxcBackup.Name, meta.SeekCpJobLabelPXCName: pxcBackup.Spec.Cluster.Name, + meta.SeekCpJobLabelPodName: targetPod.Name, }, }, Spec: *podSpec, diff --git a/pkg/operator/v1/polardbx/steps/backup/schedule/schedule.go b/pkg/operator/v1/polardbx/steps/backup/schedule/schedule.go new file mode 100644 index 0000000..ff5ea36 --- /dev/null +++ b/pkg/operator/v1/polardbx/steps/backup/schedule/schedule.go @@ -0,0 +1,127 @@ +package schedule + +import ( + "github.com/alibaba/polardbx-operator/api/v1" + "github.com/alibaba/polardbx-operator/pkg/k8s/control" + "github.com/alibaba/polardbx-operator/pkg/operator/v1/polardbx/factory" + polardbxv1reconcile "github.com/alibaba/polardbx-operator/pkg/operator/v1/polardbx/reconcile" + "github.com/robfig/cron" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/reconcile" + "sort" + "time" +) + +var PersistPolarDBXBackupScheduleStatus = polardbxv1reconcile.NewStepBinder("PersistPolarDBXBackupScheduleStatus", + func(rc *polardbxv1reconcile.Context, flow control.Flow) (reconcile.Result, error) { + if rc.IsPolarDBXBackupScheduleStatusChanged() { + err := rc.UpdatePolarDBXBackupScheduleStatus() + if err != nil { + return flow.Error(err, "Unable to update PolarDBX backup schedule status.") + } + return flow.Continue("PolarDBX backup schedule status updated.") + } + return flow.Continue("PolarDBX backup schedule status has not been changed.") + }) + +var CleanOutdatedBackupSet = polardbxv1reconcile.NewStepBinder("CleanOutdatedBackupSet", + func(rc *polardbxv1reconcile.Context, flow control.Flow) (reconcile.Result, error) { + backupSchedule := rc.MustGetPolarDBXBackupSchedule() + if backupSchedule.Spec.MaxBackupCount == 0 { + return flow.Continue("No limit on the count of backup set.") + } + + backupList, err := rc.GetPolarDBXBackupListByScheduleName(backupSchedule.Name) + if err != nil { + return flow.Error(err, "Failed to get backup list.", "schedule name", backupSchedule.Name) + } + if backupSchedule.Spec.MaxBackupCount > len(backupList.Items) { + return flow.Continue("No outdated backup set needs to be cleaned.") + } + + backupItems := backupList.Items + sort.Slice(backupItems, func(i, j int) bool { + a, b := 
backupItems[i], backupItems[j]
+			return a.CreationTimestamp.Before(&b.CreationTimestamp)
+		})
+
+		for i := 0; i < len(backupItems)-backupSchedule.Spec.MaxBackupCount; i++ {
+			flow.Logger().Info("Delete outdated backup", "backup name", backupItems[i].Name)
+			err := rc.Client().Delete(rc.Context(), &backupItems[i])
+			if client.IgnoreNotFound(err) != nil {
+				return flow.Error(err, "Failed to delete backup.", "backup name", backupItems[i].Name)
+			}
+		}
+
+		return flow.Continue("Outdated backup set cleaned.")
+	})
+
+var CheckNextScheduleTime = polardbxv1reconcile.NewStepBinder("CheckNextScheduleTime",
+	func(rc *polardbxv1reconcile.Context, flow control.Flow) (reconcile.Result, error) {
+		backupSchedule := rc.MustGetPolarDBXBackupSchedule()
+
+		// Parse schedule
+		schedule, err := cron.ParseStandard(backupSchedule.Spec.Schedule)
+		if err != nil {
+			return flow.Error(err, "Parse schedule string failed.")
+		}
+
+		// Check whether it is time to perform backup
+		var lastTime time.Time
+		if backupSchedule.Status.LastBackupTime != nil {
+			lastTime = backupSchedule.Status.LastBackupTime.Time
+		} else {
+			lastTime = backupSchedule.CreationTimestamp.Time
+		}
+		nextTime := schedule.Next(lastTime)
+		currentTime := time.Now()
+
+		// Ready for backup
+		if nextTime.Before(currentTime) {
+			return flow.Continue("It is time for backup.")
+		}
+
+		// Wait until next backup
+		backupSchedule.Status.NextBackupTime = &metav1.Time{Time: nextTime}
+		return flow.RetryAfter(nextTime.Sub(currentTime), "It is not yet time for backup.", "next backup time", nextTime)
+	})
+
+var CheckUnderwayBackup = polardbxv1reconcile.NewStepBinder("CheckUnderwayBackup",
+	func(rc *polardbxv1reconcile.Context, flow control.Flow) (reconcile.Result, error) {
+		backupSchedule := rc.MustGetPolarDBXBackupSchedule()
+		polardbxName := backupSchedule.Spec.BackupSpec.Cluster.Name
+		backupList, err := rc.GetPolarDBXBackupListByPolarDBXName(polardbxName)
+		if err != nil {
+			return flow.Error(err, "Failed to get backup list", "PolarDBX name", polardbxName)
+		}
+		for _, backup := range backupList.Items {
+			if backup.Status.Phase != v1.BackupFinished && backup.Status.Phase != v1.BackupFailed && backup.Status.Phase != v1.BackupDummy {
+				return flow.RetryAfter(1*time.Minute, "Backup is still underway", "backup name", backup.Name)
+			}
+		}
+		return flow.Continue("No backup is underway.")
+	})
+
+var DispatchBackupTask = polardbxv1reconcile.NewStepBinder("DispatchBackupTask",
+	func(rc *polardbxv1reconcile.Context, flow control.Flow) (reconcile.Result, error) {
+		backupSchedule := rc.MustGetPolarDBXBackupSchedule()
+
+		// Perform backup
+		objectFactory := factory.NewObjectFactory(rc)
+		polardbxBackup, err := objectFactory.NewPolarDBXBackupBySchedule()
+		if err != nil {
+			return flow.RetryErr(err, "Failed to build new backup.")
+		}
+		err = rc.Client().Create(rc.Context(), polardbxBackup)
+		if err != nil {
+			return flow.RetryErr(err, "Failed to create backup.")
+		}
+		flow.Logger().Info("New backup created", "backup", polardbxBackup.Name)
+
+		// Record backup info
+		backupSchedule.Status.LastBackupTime = &metav1.Time{Time: time.Now()}
+		backupSchedule.Status.LastBackup = polardbxBackup.Name
+
+		return flow.Continue("Backup task dispatched.")
+	})
diff --git a/pkg/operator/v1/polardbx/steps/backup/xstorejobbuilder/xstore_backup_builder.go b/pkg/operator/v1/polardbx/steps/backup/xstorejobbuilder/xstore_backup_builder.go
deleted file mode 100644
index 876b787..0000000
--- a/pkg/operator/v1/polardbx/steps/backup/xstorejobbuilder/xstore_backup_builder.go
+++
/dev/null
@@ -1,58 +0,0 @@
-/*
-Copyright 2021 Alibaba Group Holding Limited.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package xstorebackup
-
-import (
-	polardbxv1 "github.com/alibaba/polardbx-operator/api/v1"
-	"github.com/alibaba/polardbx-operator/pkg/operator/v1/polardbx/meta"
-	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-	"k8s.io/apimachinery/pkg/runtime"
-	ctrl "sigs.k8s.io/controller-runtime"
-)
-
-func NewXStoreBackup(scheme *runtime.Scheme, backup *polardbxv1.PolarDBXBackup, xstore *polardbxv1.XStore) (*polardbxv1.XStoreBackup, error) {
-
-	xstoreBackup := &polardbxv1.XStoreBackup{
-		ObjectMeta: metav1.ObjectMeta{
-			Namespace: backup.Namespace,
-			Name:      backup.Name + "-" + xstore.Name,
-			Labels: map[string]string{
-				meta.LabelName:            backup.Spec.Cluster.Name,
-				meta.LabelTopBackup:       backup.Name,
-				meta.LabelBackupXStore:    xstore.Name,
-				meta.LabelBackupXStoreUID: string(xstore.UID),
-			},
-		},
-		Spec: polardbxv1.XStoreBackupSpec{
-			XStore: polardbxv1.XStoreReference{
-				Name: xstore.Name,
-			},
-			RetentionTime:   backup.Spec.RetentionTime,
-			StorageProvider: backup.Spec.StorageProvider,
-		},
-	}
-
-	// set preferred backup node
-	if node, ok := backup.Labels[meta.LabelPreferredBackupNode]; ok {
-		xstoreBackup.Labels[meta.LabelPreferredBackupNode] = node
-	}
-
-	if err := ctrl.SetControllerReference(backup, xstoreBackup, scheme); err != nil {
-		return nil, err
-	}
-	return xstoreBackup, nil
-}
diff --git a/pkg/operator/v1/polardbx/steps/backupbinlog/expire_file.go b/pkg/operator/v1/polardbx/steps/backupbinlog/expire_file.go
new file mode 100644
index 0000000..85e14e4
--- /dev/null
+++ b/pkg/operator/v1/polardbx/steps/backupbinlog/expire_file.go
@@ -0,0 +1,63 @@
+package backupbinlog
+
+import (
+	"errors"
+	hpfs "github.com/alibaba/polardbx-operator/pkg/hpfs/proto"
+	"github.com/alibaba/polardbx-operator/pkg/k8s/control"
+	polardbxv1reconcile "github.com/alibaba/polardbx-operator/pkg/operator/v1/polardbx/reconcile"
+	"sigs.k8s.io/controller-runtime/pkg/reconcile"
+	"time"
+)
+
+var TryDeleteExpiredFiles = polardbxv1reconcile.NewStepBinder("TryDeleteExpiredFiles", func(rc *polardbxv1reconcile.Context, flow control.Flow) (reconcile.Result, error) {
+	hpfsClient, err := GetHpfsClient(rc)
+	if err != nil {
+		return flow.RetryErr(err, "failed to get hpfs client")
+	}
+	now := time.Now()
+	backupBinlog := rc.MustGetPolarDBXBackupBinlog()
+	seconds := backupBinlog.Spec.RemoteExpireLogHours.IntValue() * 3600
+	if !backupBinlog.DeletionTimestamp.IsZero() {
+		seconds = -3600
+	}
+	rep, err := hpfsClient.DeleteBinlogFilesBefore(rc.Context(), &hpfs.DeleteBinlogFilesBeforeRequest{
+		Namespace: backupBinlog.Namespace,
+		PxcName:   backupBinlog.Spec.PxcName,
+		PxcUid:    backupBinlog.Spec.PxcUid,
+		SinkName:  backupBinlog.Spec.StorageProvider.Sink,
+		SinkType:  string(backupBinlog.Spec.StorageProvider.StorageName),
+		UnixTime:  now.Unix() - int64(seconds),
+	})
+	// rep is nil when the call itself failed, so it must not be dereferenced in that case
+	if err != nil {
+		flow.Logger().Error(err, "failed to DeleteBinlogFilesBefore")
+	} else if rep.Status.Code != hpfs.Status_OK {
+		flow.Logger().Error(errors.New(rep.GetStatus().String()), "failed response")
+	} else {
+		flow.Logger().Info("delete files", "files", rep.GetDeletedFiles())
+		backupBinlog.Status.LastDeletedFiles = rep.GetDeletedFiles()
+	}
+	backupBinlog.Status.CheckExpireFileLastTime = uint64(now.Unix())
+
+	return flow.Continue("TryDeleteExpiredFiles.")
+})
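TryDeleteExpiredFiles drives expiry purely through the UnixTime cutoff it sends to hpfs: files stamped before now minus the retention window are deleted, and the negative retention used while the object is being deleted pushes the cutoff an hour into the future so that every remote file qualifies. A minimal, self-contained sketch of that arithmetic, with hypothetical values mirroring the computation above:

// Sketch: the cutoff arithmetic behind DeleteBinlogFilesBefore. Values are hypothetical.
package main

import (
	"fmt"
	"time"
)

// cutoff returns the unix timestamp before which files count as expired.
func cutoff(now time.Time, retentionSeconds int64) int64 {
	return now.Unix() - retentionSeconds
}

func main() {
	now := time.Now()
	fmt.Println(cutoff(now, 7*24*3600)) // keep one week of remote binlog files
	fmt.Println(cutoff(now, -3600))     // deleting: cutoff lies in the future, everything expires
}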
+
+var ConfirmRemoteEmptyFiles = polardbxv1reconcile.NewStepBinder("ConfirmRemoteEmptyFiles", func(rc *polardbxv1reconcile.Context, flow control.Flow) (reconcile.Result, error) {
+	hpfsClient, err := GetHpfsClient(rc)
+	if err != nil {
+		return flow.RetryErr(err, "failed to get hpfs client")
+	}
+	backupBinlog := rc.MustGetPolarDBXBackupBinlog()
+	rep, err := hpfsClient.ListRemoteBinlogList(rc.Context(), &hpfs.ListRemoteBinlogListRequest{
+		Namespace: backupBinlog.Namespace,
+		PxcName:   backupBinlog.Spec.PxcName,
+		PxcUid:    backupBinlog.Spec.PxcUid,
+		SinkName:  backupBinlog.Spec.StorageProvider.Sink,
+		SinkType:  string(backupBinlog.Spec.StorageProvider.StorageName),
+	})
+	if err != nil {
+		return flow.RetryErr(err, "failed to list remote binlog files")
+	}
+	if len(rep.Files) > 0 {
+		return flow.RetryErr(errors.New("remote binlog files still exist"), "file exists")
+	}
+	return flow.Continue("ConfirmRemoteEmptyFiles.")
+})
diff --git a/pkg/operator/v1/polardbx/steps/backupbinlog/finalizer.go b/pkg/operator/v1/polardbx/steps/backupbinlog/finalizer.go
new file mode 100644
index 0000000..330f77a
--- /dev/null
+++ b/pkg/operator/v1/polardbx/steps/backupbinlog/finalizer.go
@@ -0,0 +1,29 @@
+package backupbinlog
+
+import (
+	"github.com/alibaba/polardbx-operator/pkg/k8s/control"
+	"github.com/alibaba/polardbx-operator/pkg/operator/v1/polardbx/meta"
+	polardbxv1reconcile "github.com/alibaba/polardbx-operator/pkg/operator/v1/polardbx/reconcile"
+	"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
+	"sigs.k8s.io/controller-runtime/pkg/reconcile"
+)
+
+var AddFinalizer = polardbxv1reconcile.NewStepBinder("AddFinalizer", func(rc *polardbxv1reconcile.Context, flow control.Flow) (reconcile.Result, error) {
+	backupBinlog := rc.MustGetPolarDBXBackupBinlog()
+	if controllerutil.ContainsFinalizer(backupBinlog, meta.Finalizer) {
+		return flow.Pass()
+	}
+	controllerutil.AddFinalizer(backupBinlog, meta.Finalizer)
+	rc.MarkPolarDBXChanged()
+	return flow.Continue("Add finalizer.")
+})
+
+var RemoveFinalizer = polardbxv1reconcile.NewStepBinder("RemoveFinalizer", func(rc *polardbxv1reconcile.Context, flow control.Flow) (reconcile.Result, error) {
+	backupBinlog := rc.MustGetPolarDBXBackupBinlog()
+	if !controllerutil.ContainsFinalizer(backupBinlog, meta.Finalizer) {
+		return flow.Pass()
+	}
+	controllerutil.RemoveFinalizer(backupBinlog, meta.Finalizer)
+	rc.MarkPolarDBXChanged()
+	return flow.Continue("Remove finalizer.")
+})
diff --git a/pkg/operator/v1/polardbx/steps/backupbinlog/heartbeat.go b/pkg/operator/v1/polardbx/steps/backupbinlog/heartbeat.go
new file mode 100644
index 0000000..21160fa
--- /dev/null
+++ b/pkg/operator/v1/polardbx/steps/backupbinlog/heartbeat.go
@@ -0,0 +1,152 @@
+package backupbinlog
+
+import (
+	"fmt"
+	polardbxv1 "github.com/alibaba/polardbx-operator/api/v1"
+	"github.com/alibaba/polardbx-operator/pkg/hpfs/backupbinlog"
+	"github.com/alibaba/polardbx-operator/pkg/k8s/control"
+	"github.com/alibaba/polardbx-operator/pkg/operator/v1/polardbx/meta"
+	polardbxv1reconcile "github.com/alibaba/polardbx-operator/pkg/operator/v1/polardbx/reconcile"
+	appsv1 "k8s.io/api/apps/v1"
+	corev1 "k8s.io/api/core/v1"
+	apierrors "k8s.io/apimachinery/pkg/api/errors"
+	"k8s.io/apimachinery/pkg/api/resource"
+	metav1
"k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" + "sigs.k8s.io/controller-runtime/pkg/reconcile" +) + +var ReconcileHeartbeatJob = polardbxv1reconcile.NewStepBinder("ReconcileHeartbeatJob", func(rc *polardbxv1reconcile.Context, flow control.Flow) (reconcile.Result, error) { + backupBinlog := rc.MustGetPolarDBXBackupBinlog() + if backupBinlog.Status.ObservedGeneration < backupBinlog.Generation { + polardbx := rc.MustGetPolarDBX() + deployment, err := createHeartbeatJob(rc, backupBinlog, polardbx) + if err != nil { + flow.Logger().Error(err, "failed to create heartbeat job") + return flow.Continue("ReconcileHeartbeatJob") + } + + exist := true + findDeployment := &appsv1.Deployment{} + err = rc.Client().Get(rc.Context(), types.NamespacedName{ + Namespace: deployment.Namespace, + Name: deployment.Name, + }, findDeployment) + if err != nil { + if apierrors.IsNotFound(err) { + exist = false + } else { + return flow.RetryErr(err, "failed to find heartbeat job", "name", deployment.Name) + } + } + if backupBinlog.Spec.PointInTimeRecover && !exist { + err := rc.SetControllerRefAndCreate(deployment) + if err != nil { + return flow.RetryErr(err, "failed to create heartbeat job") + } + } else if !backupBinlog.Spec.PointInTimeRecover && exist { + err := rc.Client().Delete(rc.Context(), deployment) + if err != nil { + return flow.RetryErr(err, "failed to delete heartbeat job") + } + } + } + return flow.Continue("ReconcileHeartbeatJob") +}) + +var TryDeleteHeartbeatJob = polardbxv1reconcile.NewStepBinder("TryDeleteHeartbeatJob", func(rc *polardbxv1reconcile.Context, flow control.Flow) (reconcile.Result, error) { + backupBinlog := rc.MustGetPolarDBXBackupBinlog() + polardbx := rc.MustGetPolarDBX() + deployment, err := createHeartbeatJob(rc, backupBinlog, polardbx) + if err != nil { + return flow.RetryErr(err, "ReconcileHeartbeatJob") + } + err = rc.Client().Delete(rc.Context(), deployment) + if err != nil && !apierrors.IsNotFound(err) { + return flow.RetryErr(err, "failed to delete heartbeat job", "name", deployment.Name) + } + return flow.Continue("DeleteHeartbeatJob") +}) + +func createHeartbeatJob(rc *polardbxv1reconcile.Context, backupBinlog *polardbxv1.PolarDBXBackupBinlog, polardbx *polardbxv1.PolarDBXCluster) (*appsv1.Deployment, error) { + labels := map[string]string{ + meta.LabelJobType: string(meta.HeartbeatJobType), + meta.LabelName: polardbx.Name, + meta.LabelBackupBinlog: backupBinlog.Name, + } + heartbeatInterval, err := rc.Config().Backup().GetHeartbeatInterval() + if err != nil { + return nil, err + } + pod, err := rc.GetCNPod(polardbx) + if err != nil { + return nil, err + } + container := corev1.Container{ + Name: "job", + Image: rc.Config().Images().DefaultJobImage(), + Env: []corev1.EnvVar{ + { + Name: backupbinlog.EnvHeartbeatHost, + Value: polardbx.Name, + }, + { + Name: backupbinlog.EnvHeartbeatPort, + Value: "3306", + }, + { + Name: backupbinlog.EnvHeartbeatUser, + Value: "polardbx_root", + }, + { + Name: backupbinlog.EnvHeartbeatInterval, + Value: heartbeatInterval.String(), + }, + { + Name: backupbinlog.EnvMaxRetryCount, + Value: "10", + }, + { + Name: backupbinlog.EnvHeartbeatPassword, + ValueFrom: &corev1.EnvVarSource{ + SecretKeyRef: &corev1.SecretKeySelector{ + LocalObjectReference: corev1.LocalObjectReference{ + Name: polardbx.Name, + }, + Key: "polardbx_root", + }, + }, + }, + }, + Resources: corev1.ResourceRequirements{ + Limits: corev1.ResourceList{ + corev1.ResourceCPU: resource.MustParse("50m"), + corev1.ResourceMemory: 
resource.MustParse("30Mi"), + }, + }, + } + var replicas int32 = 1 + deployment := &appsv1.Deployment{ + ObjectMeta: metav1.ObjectMeta{ + Name: fmt.Sprintf("%s%s", rc.Config().Backup().GetHeartbeatJobNamePrefix(), polardbx.Name), + Namespace: polardbx.Namespace, + Labels: labels, + }, + Spec: appsv1.DeploymentSpec{ + Replicas: &replicas, + Selector: &metav1.LabelSelector{ + MatchLabels: labels, + }, + Template: corev1.PodTemplateSpec{ + ObjectMeta: metav1.ObjectMeta{ + Labels: labels, + }, + Spec: corev1.PodSpec{ + ImagePullSecrets: pod.Spec.ImagePullSecrets, + Containers: []corev1.Container{container}, + }, + }, + }, + } + return deployment, nil +} diff --git a/pkg/operator/v1/polardbx/steps/backupbinlog/pxc.go b/pkg/operator/v1/polardbx/steps/backupbinlog/pxc.go new file mode 100644 index 0000000..78c02e2 --- /dev/null +++ b/pkg/operator/v1/polardbx/steps/backupbinlog/pxc.go @@ -0,0 +1,65 @@ +package backupbinlog + +import ( + "github.com/alibaba/polardbx-operator/pkg/k8s/control" + k8shelper "github.com/alibaba/polardbx-operator/pkg/k8s/helper" + "github.com/alibaba/polardbx-operator/pkg/operator/v1/polardbx/meta" + polardbxv1reconcile "github.com/alibaba/polardbx-operator/pkg/operator/v1/polardbx/reconcile" + "github.com/go-logr/logr" + apierrors "k8s.io/apimachinery/pkg/api/errors" + "sigs.k8s.io/controller-runtime/pkg/reconcile" +) + +var InitFromPxc = polardbxv1reconcile.NewStepBinder("InitFromPxc", func(rc *polardbxv1reconcile.Context, flow control.Flow) (reconcile.Result, error) { + backupBinlog := rc.MustGetPolarDBXBackupBinlog() + pxc := rc.MustGetPolarDBX() + pxc.SetAnnotations(k8shelper.PatchAnnotations(pxc.GetAnnotations(), map[string]string{ + meta.AnnotationBackupBinlog: "true", + })) + err := rc.Client().Update(rc.Context(), pxc) + if err != nil { + return flow.RetryErr(err, "failed to update pxc ", "pxc name", pxc.Name) + } + backupBinlog.Spec.PxcUid = string(pxc.UID) + labels := backupBinlog.GetLabels() + if labels == nil { + labels = map[string]string{} + } + labels = k8shelper.PatchLabels(labels, map[string]string{ + meta.LabelName: backupBinlog.Spec.PxcName, + meta.LabelUid: backupBinlog.Spec.PxcUid, + }) + backupBinlog.SetLabels(labels) + rc.MarkPolarDBXChanged() + return flow.Continue("InitFromPxc.") +}) + +var CleanFromPxc = polardbxv1reconcile.NewStepBinder("CleanFromPxc", func(rc *polardbxv1reconcile.Context, flow control.Flow) (reconcile.Result, error) { + pxc := rc.MustGetPolarDBX() + annotation := pxc.GetAnnotations() + if _, ok := annotation[meta.AnnotationBackupBinlog]; !ok { + return flow.Pass() + } + delete(annotation, meta.AnnotationBackupBinlog) + pxc.SetAnnotations(annotation) + err := rc.Client().Update(rc.Context(), pxc) + if err != nil { + return flow.RetryErr(err, "failed to update pxc ", "pxc name", pxc.Name) + } + rc.MarkPolarDBXChanged() + return flow.Continue("CleanFromPxc.") +}) + +func WhenPxcExist(binders ...control.BindFunc) control.BindFunc { + return polardbxv1reconcile.NewStepIfBinder("PxcExist", + func(rc *polardbxv1reconcile.Context, log logr.Logger) (bool, error) { + backupBinlog := rc.MustGetPolarDBXBackupBinlog() + polardbx, err := rc.GetPolarDBX() + if apierrors.IsNotFound(err) || string(polardbx.UID) != backupBinlog.Spec.PxcUid { + return false, nil + } + return true, nil + }, + binders..., + ) +} diff --git a/pkg/operator/v1/polardbx/steps/backupbinlog/status.go b/pkg/operator/v1/polardbx/steps/backupbinlog/status.go new file mode 100644 index 0000000..c47312a --- /dev/null +++ 
b/pkg/operator/v1/polardbx/steps/backupbinlog/status.go
@@ -0,0 +1,75 @@
+/*
+Copyright 2021 Alibaba Group Holding Limited.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package backupbinlog
+
+import (
+	polardbxv1 "github.com/alibaba/polardbx-operator/api/v1"
+	"github.com/alibaba/polardbx-operator/pkg/k8s/control"
+	polardbxv1reconcile "github.com/alibaba/polardbx-operator/pkg/operator/v1/polardbx/reconcile"
+	"github.com/go-logr/logr"
+	"sigs.k8s.io/controller-runtime/pkg/reconcile"
+)
+
+func TransferPhaseTo(phase polardbxv1.BackupBinlogPhase, requeue ...bool) control.BindFunc {
+	return polardbxv1reconcile.NewStepBinder("TransferPhaseTo"+string(phase),
+		func(rc *polardbxv1reconcile.Context, flow control.Flow) (reconcile.Result, error) {
+			backupBinlog := rc.MustGetPolarDBXBackupBinlog()
+			backupBinlog.Status.Phase = phase
+			rc.MarkPolarDBXChanged()
+			if len(requeue) == 0 || !requeue[0] {
+				return flow.Pass()
+			} else {
+				return flow.Retry("Phase updated!", "target-phase", phase)
+			}
+		},
+	)
+}
+
+var PersistentBackupBinlog = polardbxv1reconcile.NewStepBinder("PersistentBackupBinlog",
+	func(rc *polardbxv1reconcile.Context, flow control.Flow) (reconcile.Result, error) {
+		if rc.IsPolarDBXChanged() {
+			if err := rc.UpdatePolarDbXBackupBinlog(); err != nil {
+				return flow.Error(err, "Unable to persist polardbx backup binlog.")
+			}
+			return flow.Continue("Succeeded to persist polardbx backup binlog.")
+		}
+		return flow.Continue("Object not changed.")
+	})
+
+func WhenDeleting(binders ...control.BindFunc) control.BindFunc {
+	return polardbxv1reconcile.NewStepIfBinder("Deleted",
+		func(rc *polardbxv1reconcile.Context, log logr.Logger) (bool, error) {
+			backupBinlog := rc.MustGetPolarDBXBackupBinlog()
+			if backupBinlog.Status.Phase == polardbxv1.BackupBinlogPhaseDeleting {
+				return false, nil
+			}
+			deleting := !backupBinlog.DeletionTimestamp.IsZero()
+			return deleting, nil
+		},
+		binders...,
+	)
+}
+
+var UpdateObservedGeneration = polardbxv1reconcile.NewStepBinder("UpdateObservedGeneration",
+	func(rc *polardbxv1reconcile.Context, flow control.Flow) (reconcile.Result, error) {
+		backupBinlog := rc.MustGetPolarDBXBackupBinlog()
+		prevGen := backupBinlog.Status.ObservedGeneration
+		backupBinlog.Status.ObservedGeneration = backupBinlog.Generation
+		return flow.Continue("Update observed generation.", "previous-generation", prevGen,
+			"current-generation", backupBinlog.Generation)
+	},
+)
diff --git a/pkg/operator/v1/polardbx/steps/backupbinlog/sync_info.go b/pkg/operator/v1/polardbx/steps/backupbinlog/sync_info.go
new file mode 100644
index 0000000..f593434
--- /dev/null
+++ b/pkg/operator/v1/polardbx/steps/backupbinlog/sync_info.go
@@ -0,0 +1,248 @@
+package backupbinlog
+
+import (
+	"bytes"
+	"crypto/md5"
+	"encoding/json"
+	"errors"
+	"fmt"
+	v1 "github.com/alibaba/polardbx-operator/api/v1"
+	"github.com/alibaba/polardbx-operator/pkg/hpfs/backupbinlog"
+	"github.com/alibaba/polardbx-operator/pkg/hpfs/config"
+	hpfs "github.com/alibaba/polardbx-operator/pkg/hpfs/proto"
"github.com/alibaba/polardbx-operator/pkg/k8s/control" + polardbxv1reconcile "github.com/alibaba/polardbx-operator/pkg/operator/v1/polardbx/reconcile" + xstoremeta "github.com/alibaba/polardbx-operator/pkg/operator/v1/xstore/meta" + "google.golang.org/grpc" + "path/filepath" + "sigs.k8s.io/controller-runtime/pkg/reconcile" + "strconv" + "strings" + "time" +) + +func GetHpfsClient(rc *polardbxv1reconcile.Context) (hpfs.HpfsServiceClient, error) { + hpfsConn, err := grpc.Dial(rc.Config().Store().HostPathFileServiceEndpoint(), grpc.WithInsecure()) + if err != nil { + return nil, err + } + return hpfs.NewHpfsServiceClient(hpfsConn), nil +} + +func SyncInfoToXStore(rc *polardbxv1reconcile.Context, xstore *v1.XStore, info *backupbinlog.Info, infoHash string, hpfsClient hpfs.HpfsServiceClient) error { + if xstore.Status.BoundVolumes != nil { + for _, boundVolume := range xstore.Status.BoundVolumes { + workDir := filepath.Join(boundVolume.HostPath, "log") + if xstore.Spec.Config.Dynamic.LogDataSeparation { + workDir = filepath.Join(boundVolume.LogHostPath, "log") + } + resp, err := hpfsClient.GetWatcherInfoHash(rc.Context(), &hpfs.GetWatcherInfoHashRequest{ + Host: &hpfs.Host{NodeName: boundVolume.Host}, + LogDir: workDir, + }) + if err != nil { + return err + } + if resp.Status.Code != hpfs.Status_OK { + return errors.New("GetWatcherInfoHash status not ok: " + resp.Status.Code.String()) + } + if infoHash != resp.GetHash() { + podInfo := *info + podInfo.PodName = boundVolume.Pod + //check if logger by pod name + splitPodNames := strings.Split(podInfo.PodName, "-") + if len(splitPodNames) >= 2 { + if splitPodNames[len(splitPodNames)-2] == "log" { + podInfo.SinkType = config.SinkTypeNone + } + } + infoContent := getInfoContent(podInfo) + _, err := hpfsClient.OpenBackupBinlog(rc.Context(), &hpfs.OpenBackupBinlogRequest{ + Host: &hpfs.Host{NodeName: boundVolume.Host}, + LogDir: workDir, + Content: infoContent, + }) + if err != nil { + return err + } + if resp.Status.Code != hpfs.Status_OK { + return errors.New("OpenBackupBinlog status not ok: " + resp.Status.Code.String()) + } + } + } + + } + return nil +} + +func getInfoContent(info backupbinlog.Info) string { + buf := &bytes.Buffer{} + writeInfoField(buf, backupbinlog.InfoNamespace, info.Namespace) + writeInfoField(buf, backupbinlog.InfoPxcName, info.PxcName) + writeInfoField(buf, backupbinlog.InfoPxcUid, info.PxcUid) + writeInfoField(buf, backupbinlog.InfoXStoreName, info.XStoreName) + writeInfoField(buf, backupbinlog.InfoXStoreUid, info.XStoreUid) + writeInfoField(buf, backupbinlog.InfoPodName, info.PodName) + writeInfoField(buf, backupbinlog.InfoBinlogChecksum, info.BinlogChecksum) + writeInfoField(buf, backupbinlog.InfoSinkType, info.SinkType) + writeInfoField(buf, backupbinlog.InfoSinkName, info.SinkName) + writeInfoField(buf, backupbinlog.InfoXStoreUid, info.XStoreUid) + writeInfoField(buf, backupbinlog.InfoLocalExpireLogSeconds, strconv.FormatInt(info.LocalExpireLogSeconds, 10)) + writeInfoField(buf, backupbinlog.InfoMaxLocalBinlogCount, strconv.FormatInt(info.MaxLocalBinlogCount, 10)) + writeInfoField(buf, backupbinlog.InfoForbidPurge, strconv.FormatBool(info.ForbidPurge)) + return buf.String() +} + +func writeInfoField(buf *bytes.Buffer, fieldName string, value string) { + buf.Write([]byte(fieldName)) + buf.Write([]byte("=")) + buf.Write([]byte(value)) + buf.Write([]byte("\n")) +} + +func generateInfo(backupBinlog *v1.PolarDBXBackupBinlog, xStores []*v1.XStore) (map[string]*backupbinlog.Info, map[string]string, error) { + expireLogHours := 
+
+func generateInfo(backupBinlog *v1.PolarDBXBackupBinlog, xStores []*v1.XStore) (map[string]*backupbinlog.Info, map[string]string, error) {
+	expireLogHours := backupBinlog.Spec.LocalExpireLogHours
+	expireLogSeconds := int64(expireLogHours.IntValue()) * 3600
+	if expireLogSeconds == 0 {
+		parsedVal, err := strconv.ParseFloat(expireLogHours.String(), 64)
+		if err != nil {
+			return nil, nil, err
+		}
+		expireLogSeconds = int64(parsedVal * 3600)
+	}
+	dnPodInfoMap := map[string]*backupbinlog.Info{}
+	dnPodInfoHashMap := map[string]string{}
+
+	sinkName := config.SinkTypeNone
+	if backupBinlog.Spec.StorageProvider.Sink != "" {
+		sinkName = backupBinlog.Spec.StorageProvider.Sink
+	}
+	sinkType := config.SinkTypeNone
+	if string(backupBinlog.Spec.StorageProvider.StorageName) != "" {
+		sinkType = string(backupBinlog.Spec.StorageProvider.StorageName)
+	}
+
+	if xStores != nil {
+		for _, xstore := range xStores {
+			forbidPurge := xstore.Labels[xstoremeta.LabelBinlogPurgeLock] == xstoremeta.BinlogPurgeLock
+			info := &backupbinlog.Info{
+				Namespace:             backupBinlog.Namespace,
+				XStoreName:            xstore.Name,
+				XStoreUid:             string(xstore.UID),
+				SinkName:              sinkName,
+				SinkType:              sinkType,
+				BinlogChecksum:        backupBinlog.Spec.BinlogChecksum,
+				PxcName:               backupBinlog.Spec.PxcName,
+				PxcUid:                backupBinlog.Spec.PxcUid,
+				LocalExpireLogSeconds: expireLogSeconds,
+				MaxLocalBinlogCount:   int64(backupBinlog.Spec.MaxLocalBinlogCount),
+				ForbidPurge:           forbidPurge,
+			}
+			dnPodInfoMap[xstore.Name] = info
+			infoJson, _ := json.Marshal(info)
+			dnPodInfoHashMap[xstore.Name] = fmt.Sprintf("%x", md5.Sum(infoJson))
+		}
+	}
+	return dnPodInfoMap, dnPodInfoHashMap, nil
+}
+
+var SyncInfo = polardbxv1reconcile.NewStepBinder("SyncInfo", func(rc *polardbxv1reconcile.Context, flow control.Flow) (reconcile.Result, error) {
+	hpfsClient, err := GetHpfsClient(rc)
+	if err != nil {
+		return flow.RetryErr(err, "failed to get hpfs client")
+	}
+	dnMap, err := rc.GetDNMap()
+	if err != nil {
+		return flow.RetryErr(err, "failed to get dn map")
+	}
+	gms, err := rc.GetGMS()
+	if err != nil {
+		return flow.RetryErr(err, "failed to get gms")
+	}
+	xstores := make([]*v1.XStore, 0, len(dnMap)+1)
+	if gms.GetDeletionTimestamp().IsZero() {
+		xstores = append(xstores, gms)
+	}
+	for _, v := range dnMap {
+		if v.GetDeletionTimestamp().IsZero() {
+			xstores = append(xstores, v)
+		}
+	}
+	backupBinlog := rc.MustGetPolarDBXBackupBinlog()
+	xstoreInfos, xstoreInfoHashes, err := generateInfo(backupBinlog, xstores)
+	if err != nil {
+		return flow.RetryErr(err, "failed to generateInfo")
+	}
+	for _, xstore := range xstores {
+		xstoreInfo, ok := xstoreInfos[xstore.Name]
+		if !ok {
+			continue
+		}
+		err := SyncInfoToXStore(rc, xstore, xstoreInfo, xstoreInfoHashes[xstore.Name], hpfsClient)
+		if err != nil {
+			flow.Logger().Error(err, "failed to sync info to xstore", "xstoreName", xstore.Name)
+		}
+	}
+	return flow.Continue("SyncInfo.")
+})
+
+var RunningRoute = polardbxv1reconcile.NewStepBinder("RunningRoute", func(rc *polardbxv1reconcile.Context, flow control.Flow) (reconcile.Result, error) {
+	backupBinlog := rc.MustGetPolarDBXBackupBinlog()
+	now := time.Now()
+	checkInterval, err := rc.Config().Backup().CheckBinlogExpiredFileInterval()
+	if err != nil {
+		flow.Logger().Error(err, "failed to get check backup binlog interval")
+		return flow.Pass()
+	}
+
+	if now.Unix()-int64(backupBinlog.Status.CheckExpireFileLastTime) > int64(checkInterval.Seconds()) {
+		backupBinlog.Status.Phase = v1.BackupBinlogPhaseCheckExpiredFile
+		rc.MarkPolarDBXChanged()
+	}
+
+	return flow.RetryAfter(5*time.Second, "RunningRoute.")
+})
+
+var CloseBackupBinlog = polardbxv1reconcile.NewStepBinder("CloseBackupBinlog", func(rc
*polardbxv1reconcile.Context, flow control.Flow) (reconcile.Result, error) {
+	hpfsClient, err := GetHpfsClient(rc)
+	if err != nil {
+		return flow.RetryErr(err, "failed to get hpfs client")
+	}
+	dnMap, err := rc.GetDNMap()
+	if err != nil {
+		return flow.RetryErr(err, "failed to get dn map")
+	}
+	gms, err := rc.GetGMS()
+	if err != nil {
+		return flow.RetryErr(err, "failed to get gms")
+	}
+	xstores := make([]*v1.XStore, 0, len(dnMap)+1)
+	if gms.GetDeletionTimestamp().IsZero() {
+		xstores = append(xstores, gms)
+	}
+	for _, v := range dnMap {
+		if v.GetDeletionTimestamp().IsZero() {
+			xstores = append(xstores, v)
+		}
+	}
+	for _, xstore := range xstores {
+		for _, boundVolume := range xstore.Status.BoundVolumes {
+			workDir := filepath.Join(boundVolume.HostPath, "log")
+			if xstore.Spec.Config.Dynamic.LogDataSeparation {
+				workDir = filepath.Join(boundVolume.LogHostPath, "log")
+			}
+			resp, err := hpfsClient.CloseBackupBinlog(rc.Context(), &hpfs.CloseBackupBinlogRequest{
+				Host:   &hpfs.Host{NodeName: boundVolume.Host},
+				LogDir: workDir,
+			})
+			if err != nil {
+				return flow.RetryErr(err, "failed to close backup binlog", "logDir", workDir, "nodeName", boundVolume.Host)
+			}
+			if resp.Status.Code != hpfs.Status_OK {
+				return flow.RetryErr(errors.New("CloseBackupBinlogRequest status not ok: "+resp.Status.Code.String()), "failed to close backup binlog", "logDir", workDir, "nodeName", boundVolume.Host)
+			}
+		}
+	}
+	return flow.Continue("CloseBackupBinlog.")
+})
diff --git a/pkg/operator/v1/polardbx/steps/instance/annotation.go b/pkg/operator/v1/polardbx/steps/instance/annotation.go
index b3cfc47..e67b6cb 100644
--- a/pkg/operator/v1/polardbx/steps/instance/annotation.go
+++ b/pkg/operator/v1/polardbx/steps/instance/annotation.go
@@ -17,6 +17,9 @@ limitations under the License.
 package instance
 import (
+	polardbxmeta "github.com/alibaba/polardbx-operator/pkg/operator/v1/polardbx/meta"
+	v1 "k8s.io/api/core/v1"
+	"sigs.k8s.io/controller-runtime/pkg/client"
 	"sigs.k8s.io/controller-runtime/pkg/reconcile"
 	"github.com/alibaba/polardbx-operator/pkg/k8s/control"
@@ -33,3 +36,43 @@ func RemoveAnnotation(key string) control.BindFunc {
 		return flow.Pass()
 	})
 }
+
+var TrySetRunMode = polardbxreconcile.NewStepBinder("TrySetRunMode",
+	func(rc *polardbxreconcile.Context, flow control.Flow) (reconcile.Result, error) {
+		polardbx := rc.MustGetPolarDBX()
+		if !rc.Config().Cluster().EnableRunModeCheck() {
+			return flow.Pass()
+		}
+		if polardbx.Annotations == nil {
+			polardbx.SetAnnotations(map[string]string{})
+		}
+		runmode, ok := polardbx.Annotations["runmode"]
+		if !ok {
+			runmode = "none"
+		}
+		var podList v1.PodList
+		err := rc.Client().List(rc.Context(), &podList, client.InNamespace(polardbx.Namespace), client.MatchingLabels{
+			polardbxmeta.LabelName: polardbx.Name,
+		})
+		if err != nil {
+			return flow.RetryErr(err, "failed to get podlist")
+		}
+
+		for _, pod := range podList.Items {
+			annotations := pod.GetAnnotations()
+			if annotations == nil {
+				annotations = map[string]string{}
+			}
+			mode, ok := annotations["runmode"]
+			if !ok || mode != runmode {
+				annotations["runmode"] = runmode
+				pod.SetAnnotations(annotations)
+				err := rc.Client().Update(rc.Context(), &pod)
+				if err != nil {
+					return flow.RetryErr(err, "failed to update pod runmode annotation", "PodName", pod.GetName())
+				}
+			}
+		}
+		return flow.Pass()
+	},
+)
diff --git a/pkg/operator/v1/polardbx/steps/instance/common/object.go b/pkg/operator/v1/polardbx/steps/instance/common/object.go
index eb2f10d..3e6eef6 100644
--- a/pkg/operator/v1/polardbx/steps/instance/common/object.go
+++ b/pkg/operator/v1/polardbx/steps/instance/common/object.go
@@ -17,14 +17,21 @@ limitations under the License.
 package common
 import (
-	k8shelper "github.com/alibaba/polardbx-operator/pkg/k8s/helper"
-	polardbxmeta "github.com/alibaba/polardbx-operator/pkg/operator/v1/polardbx/meta"
-	"sigs.k8s.io/controller-runtime/pkg/reconcile"
-
+	"bytes"
+	"encoding/json"
+	"errors"
 	polardbxv1polardbx "github.com/alibaba/polardbx-operator/api/v1/polardbx"
+	"github.com/alibaba/polardbx-operator/pkg/hpfs/filestream"
 	"github.com/alibaba/polardbx-operator/pkg/k8s/control"
+	k8shelper "github.com/alibaba/polardbx-operator/pkg/k8s/helper"
+	"github.com/alibaba/polardbx-operator/pkg/operator/v1/polardbx/factory"
 	"github.com/alibaba/polardbx-operator/pkg/operator/v1/polardbx/helper"
+	polardbxmeta "github.com/alibaba/polardbx-operator/pkg/operator/v1/polardbx/meta"
 	polardbxv1reconcile "github.com/alibaba/polardbx-operator/pkg/operator/v1/polardbx/reconcile"
+	polarxPath "github.com/alibaba/polardbx-operator/pkg/util/path"
+	"github.com/google/uuid"
+	"sigs.k8s.io/controller-runtime/pkg/client"
+	"sigs.k8s.io/controller-runtime/pkg/reconcile"
 )
 var PersistentStatus = polardbxv1reconcile.NewStepBinder("PersistentStatus",
@@ -123,26 +130,168 @@ var CheckDNs = polardbxv1reconcile.NewStepBinder("CheckDNs",
 	},
 )
+
+// helper function to download metadata backup from remote storage
+func downloadMetadataBackup(rc *polardbxv1reconcile.Context) (*factory.MetadataBackup, error) {
+	polardbx := rc.MustGetPolarDBX()
+	filestreamClient, err := rc.GetFilestreamClient()
+	if err != nil {
+		return nil, errors.New("failed to get filestream client, error: " + err.Error())
+	}
+	filestreamAction, err := polardbxv1polardbx.NewBackupStorageFilestreamAction(polardbx.Spec.Restore.StorageProvider.StorageName)
+	if err != nil {
+		return nil, errors.New("unsupported storage provider, error: " + err.Error())
+	}
+
+	downloadActionMetadata := filestream.ActionMetadata{
+		Action:    filestreamAction.Download,
+		Sink:      polardbx.Spec.Restore.StorageProvider.Sink,
+		RequestId: uuid.New().String(),
+		Filename:  polarxPath.NewPathFromStringSequence(polardbx.Spec.Restore.From.BackupSetPath, "metadata"),
+	}
+	var downloadBuffer bytes.Buffer
+	recvBytes, err := filestreamClient.Download(&downloadBuffer, downloadActionMetadata)
+	if err != nil {
+		return nil, errors.New("download metadata failed, error: " + err.Error())
+	}
+	if recvBytes == 0 {
+		return nil, errors.New("no byte received, please check storage config and target path")
+	}
+	metadata := &factory.MetadataBackup{}
+	err = json.Unmarshal(downloadBuffer.Bytes(), &metadata)
+	if err != nil {
+		return nil, errors.New("failed to parse metadata, error: " + err.Error())
+	}
+	return metadata, nil
+}
+
+// CreateDummyBackupObject creates a dummy polardbx backup when BackupSetPath is provided.
+// The dummy backup object has only the necessary information for restore.
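For reference, the document decoded here is the same JSON the backup flow uploads earlier in this patch: a MetadataBackup carrying the backup root path, cluster metadata, and one XstoreMetadata entry (name, UID, backup name, last commit index, secrets) per xstore. A rough sketch of decoding it with local mirror types, assuming Go's default field-name JSON marshaling (the authoritative structs live in the factory package):

// Sketch: decoding the backup metadata with local mirror types. Field set and
// sample values are illustrative; see pkg/operator/v1/polardbx/factory for the real types.
package main

import (
	"encoding/json"
	"fmt"
)

type xstoreMetadata struct {
	Name            string
	BackupName      string
	LastCommitIndex int64
}

type metadataBackup struct {
	BackupRootPath     string
	XstoreMetadataList []xstoreMetadata
}

func main() {
	raw := []byte(`{"BackupRootPath":"remote/backups/pxc","XstoreMetadataList":[{"Name":"pxc-dn-0","BackupName":"backup-pxc-dn-0","LastCommitIndex":42}]}`)
	var m metadataBackup
	if err := json.Unmarshal(raw, &m); err != nil {
		panic(err)
	}
	fmt.Println(m.BackupRootPath, len(m.XstoreMetadataList))
}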
+var CreateDummyBackupObject = polardbxv1reconcile.NewStepBinder("CreateDummyBackupObject", + func(rc *polardbxv1reconcile.Context, flow control.Flow) (reconcile.Result, error) { + polardbx := rc.MustGetPolarDBX() + if polardbx.Spec.Restore.BackupSet != "" { + return flow.Continue("Backup set is specified, no need to create dummy backup object") + } + if polardbx.Spec.Restore.From.BackupSetPath == "" { + return flow.Continue("BackupSetPath is not specified, no need to create dummy backup object") + } + + metadata, err := downloadMetadataBackup(rc) + if err != nil { + helper.TransferPhase(polardbx, polardbxv1polardbx.PhaseFailed) + return flow.Error(err, "Failed to download metadata from backup set path", + "path", polardbx.Spec.Restore.From.BackupSetPath) + } + + // Create dummy polardbx backup + objectFactory := factory.NewObjectFactory(rc) + polardbxBackup, err := objectFactory.NewDummyPolarDBXBackup(metadata) + if err != nil { + return flow.Error(err, "Failed to new dummy polardbx backup") + } + err = rc.SetControllerRefAndCreate(polardbxBackup) + if err != nil { + return flow.Error(err, "Failed to create dummy polardbx backup") + } + polardbxSecretBackup, err := objectFactory.NewDummySecretBackup(metadata.PolarDBXClusterMetadata.Name, metadata) + if err != nil { + return flow.Error(err, "Failed to new dummy polardbx secret backup") + } + err = rc.SetControllerToOwnerAndCreate(polardbxBackup, polardbxSecretBackup) + if err != nil { + return flow.Error(err, "Failed to create dummy polardbx secret backup") + } + + // Create dummy xstore backup and update its status + for _, xstoreName := range metadata.GetXstoreNameList() { + xstoreBackup, err := objectFactory.NewDummyXstoreBackup(xstoreName, polardbxBackup, metadata) + if err != nil { + return flow.Error(err, "Failed to new dummy xstore backup", "xstore", xstoreName) + } + err = rc.SetControllerToOwnerAndCreate(polardbxBackup, xstoreBackup) + if err != nil { + return flow.Error(err, "Failed to create dummy xstore backup", "xstore", xstoreName) + } + err = rc.Client().Status().Update(rc.Context(), xstoreBackup) + if err != nil { + return flow.Error(err, "Failed to update dummy xstore backup status", "xstore", xstoreName) + } + + xstoreSecretBackup, err := objectFactory.NewDummySecretBackup(xstoreName, metadata) + if err != nil { + return flow.Error(err, "Failed to new dummy xstore secret backup", "xstore", xstoreName) + } + err = rc.SetControllerToOwnerAndCreate(polardbxBackup, xstoreSecretBackup) + if err != nil { + return flow.Error(err, "Failed to create dummy xstore secret backup", "xstore", xstoreName) + } + + // record xstore and its backup for restore + polardbxBackup.Status.Backups[xstoreName] = xstoreBackup.Name + } + + // Update status of polardbx backup + err = rc.Client().Status().Update(rc.Context(), polardbxBackup) + if err != nil { + return flow.Error(err, "Failed to update dummy polardbx backup status") + } + + // The dummy backup object will be used in the later restore by setting it as backup set + polardbx.Spec.Restore.BackupSet = polardbxBackup.Name + err = rc.Client().Update(rc.Context(), polardbx) + if err != nil { + return flow.Error(err, "Failed to update backup set of restore spec") + } + + return flow.Continue("Dummy backup object created!") + }) + // SyncSpecFromBackupSet aims to sync spec with original pxc cluster from backup set, -// currently restore does not support change DN replicas +// if `SyncSpecWithOriginalCluster` is true, all the spec from original pxc will be applied to new pxc, +// otherwise only 
original DN replicas will be applied; currently restore does not support changing DN replicas.
 var SyncSpecFromBackupSet = polardbxv1reconcile.NewStepBinder("SyncSpecFromBackupSet",
 	func(rc *polardbxv1reconcile.Context, flow control.Flow) (reconcile.Result, error) {
 		polardbx := rc.MustGetPolarDBX()
 		pxcBackup, err := rc.GetPXCBackupByName(polardbx.Spec.Restore.BackupSet)
-		if err != nil {
-			return flow.Error(err, "Unable to get polardbx backup {}", pxcBackup.Name)
+
+		// just let the creation fail if pxb not found
+		if err != nil || pxcBackup == nil {
+			helper.TransferPhase(polardbx, polardbxv1polardbx.PhaseFailed)
+			return flow.Error(errors.New("sync spec failed"), "Unable to get polardbx backup in current namespace",
+				"pxb", polardbx.Spec.Restore.BackupSet, "error", err)
 		}
-		// TODO(dengli): load spec from remote backup set
 		if polardbx.Spec.Restore.SyncSpecWithOriginalCluster {
+			restoreSpec := polardbx.Spec.Restore.DeepCopy()
+			serviceName := polardbx.Spec.ServiceName
 			polardbx.Spec = *pxcBackup.Status.ClusterSpecSnapshot
-		} else { // ensure that restored cluster have the same dn replicas with original cluster
+			// ensure the operator can enter the restoring phase
+			polardbx.Spec.Restore = restoreSpec
+			// avoid using the service name of the original cluster
+			polardbx.Spec.ServiceName = serviceName
+		} else {
+			// ensure that the restored cluster has the same dn replicas as the original cluster
 			polardbx.Spec.Topology.Nodes.DN.Replicas = pxcBackup.Status.ClusterSpecSnapshot.Topology.Nodes.DN.Replicas
 		}
+
 		err = rc.Client().Update(rc.Context(), polardbx)
 		if err != nil {
 			return flow.Error(err, "Failed to sync topology from backup set")
 		}
-		return flow.Pass()
+		return flow.Continue("Spec synced!")
 	},
 )
+
+var CleanDummyBackupObject = polardbxv1reconcile.NewStepBinder("CleanDummyBackupObject",
+	func(rc *polardbxv1reconcile.Context, flow control.Flow) (reconcile.Result, error) {
+		polardbx := rc.MustGetPolarDBX()
+		pxcBackup, err := rc.GetPXCBackupByName(polardbx.Spec.Restore.BackupSet)
+		if client.IgnoreNotFound(err) != nil {
+			// do not dereference pxcBackup here: it is nil on error
+			return flow.Error(err, "Failed to get polardbx backup", "backup set", polardbx.Spec.Restore.BackupSet)
+		}
+		if pxcBackup == nil || pxcBackup.Annotations[polardbxmeta.AnnotationDummyBackup] != "true" {
+			return flow.Continue("Dummy backup object does not exist, just skip")
+		}
+		if err := rc.Client().Delete(rc.Context(), pxcBackup); err != nil {
+			return flow.Error(err, "Failed to delete dummy backup object")
+		}
+		return flow.Continue("Dummy backup object cleaned!")
+	},
+)
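The gms.go changes below recover the original cluster's hash by parsing DN xstore names, relying on the convention that such names end in ...-{hash}-dn-{index}. A guarded sketch of that parse (the sample name is made up):

// Sketch: extracting the pxc hash from an xstore name of the assumed
// form {pxcName}-{hash}-dn-{index}. The sample name is hypothetical.
package main

import (
	"fmt"
	"strings"
)

func pxcHash(xstoreName string) (string, bool) {
	parts := strings.Split(xstoreName, "-")
	if len(parts) >= 3 && parts[len(parts)-2] == "dn" {
		return parts[len(parts)-3], true
	}
	return "", false
}

func main() {
	fmt.Println(pxcHash("mycluster-ab12cd34-dn-0")) // ab12cd34 true
}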
diff --git a/pkg/operator/v1/polardbx/steps/instance/gms/gms.go b/pkg/operator/v1/polardbx/steps/instance/gms/gms.go
index 1383c91..bb436e1 100644
--- a/pkg/operator/v1/polardbx/steps/instance/gms/gms.go
+++ b/pkg/operator/v1/polardbx/steps/instance/gms/gms.go
@@ -17,9 +17,11 @@ limitations under the License.
 package gms
 import (
+	"errors"
 	"fmt"
 	"github.com/alibaba/polardbx-operator/pkg/util/network"
 	corev1 "k8s.io/api/core/v1"
+	"strings"
 	"time"
 	"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
@@ -76,10 +78,58 @@ var InitializeSchemas = polardbxreconcile.NewStepBinder("InitializeSchemas",
 	},
 )
+
+func getHashFromXstoreName(xstoreNames interface{}) (string, error) {
+	switch names := xstoreNames.(type) {
+	case []string:
+		for _, xstoreName := range names {
+			splitXStoreName := strings.Split(xstoreName, "-")
+			// hack way to get pxc hash from a name like {pxc}-{hash}-dn-{index}
+			if len(splitXStoreName) >= 3 && splitXStoreName[len(splitXStoreName)-2] == "dn" {
+				return splitXStoreName[len(splitXStoreName)-3], nil
+			}
+		}
+	case map[string]string:
+		for xstoreName := range names {
+			splitXStoreName := strings.Split(xstoreName, "-")
+			if len(splitXStoreName) >= 3 && splitXStoreName[len(splitXStoreName)-2] == "dn" {
+				return splitXStoreName[len(splitXStoreName)-3], nil
+			}
+		}
+	}
+	return "", errors.New("failed to get hash from name of xstore")
+}
+
+// getOriginalPxcInfo is a helper function to extract hash and name of original pxc during restore
+func getOriginalPxcInfo(rc *polardbxreconcile.Context) (string, string, error) {
+	polardbx := rc.MustGetPolarDBX()
+	var backup *polardbxv1.PolarDBXBackup
+	var err error
+	if polardbx.Spec.Restore.BackupSet == "" {
+		backup, err = rc.GetCompletedPXCBackup(map[string]string{polardbxmeta.LabelName: polardbx.Spec.Restore.From.PolarBDXName})
+	} else {
+		backup, err = rc.GetPXCBackupByName(polardbx.Spec.Restore.BackupSet)
+	}
+	if err != nil {
+		return "", "", err
+	}
+
+	var pxcHash, pxcName string
+	if backup != nil {
+		pxcName = backup.Spec.Cluster.Name
+		for _, xstoreName := range backup.Status.XStores {
+			splitXStoreName := strings.Split(xstoreName, "-")
+			// hack way to get pxc hash
+			if len(splitXStoreName) >= 3 && splitXStoreName[len(splitXStoreName)-2] == "dn" {
+				pxcHash = splitXStoreName[len(splitXStoreName)-3]
+			}
+		}
+		return pxcHash, pxcName, nil
+	}
+	return "", "", errors.New("failed to get original pxc info: backup not found")
+}
+
 var RestoreSchemas = polardbxreconcile.NewStepBinder("RestoreSchemas",
 	func(rc *polardbxreconcile.Context, flow control.Flow) (reconcile.Result, error) {
 		polarDBX := rc.MustGetPolarDBX()
-		oldPXCHash, oldPXCName, err := rc.GetXStoreNameForOldPXC()
+		originalPXCHash, originalPXCName, err := getOriginalPxcInfo(rc)
 		if err != nil {
 			return flow.Error(err, "Get oldXStoreName Failed")
 		}
@@ -94,7 +144,7 @@
 		}
 		if !restored {
-			err = mgr.RestoreSchemas(oldPXCName, oldPXCHash, polarDBX.Status.Rand)
+			err = mgr.RestoreSchemas(originalPXCName, originalPXCHash, polarDBX.Status.Rand)
 			if err != nil {
 				return flow.Error(err, "Unable to restore GMS schemas.")
 			}
@@ -268,7 +318,8 @@ func transformIntoStorageInfos(rc *polardbxreconcile.Context, polardbx *polardbx
 		xProtocolPort = privateServicePort.Port
 	}
-	storageType, err := gms.GetStorageType(xstore.Spec.Engine, xstore.Status.EngineVersion)
+	annoStorageType, _ := polardbx.Annotations[polardbxmeta.AnnotationStorageType]
+	storageType, err := gms.GetStorageType(xstore.Spec.Engine, xstore.Status.EngineVersion, annoStorageType)
 	if err != nil {
 		return nil, err
 	}
diff --git a/pkg/operator/v1/polardbx/steps/instance/object.go b/pkg/operator/v1/polardbx/steps/instance/object.go
index 4d657e8..b43a8ce 100644
--- a/pkg/operator/v1/polardbx/steps/instance/object.go
+++ b/pkg/operator/v1/polardbx/steps/instance/object.go
@@ -57,30 +57,25 @@
var CreateSecretsIfNotFound = polardbxv1reconcile.NewStepBinder("CreateSecretsIf } if accountSecret == nil { if polarDBX.Spec.Restore != nil { - secret, err := rc.GetPolarDBXSecretForRestore() - if err != nil { - return flow.Error(err, "Unable to get old secret for restore") - } - accountSecret, err = factory.NewObjectFactory(rc).NewSecretFromPolarDBX(secret) - if err != nil { - return flow.Error(err, "Unable to new account secret while restoring.") + if client.IgnoreNotFound(err) != nil { + return flow.Error(err, "Unable to get original secret for restore") } - err = rc.SetControllerRefAndCreate(accountSecret) + accountSecret, err = factory.NewObjectFactory(rc).NewSecretForRestore() if err != nil { - return flow.Error(err, "Unable to create account secret while restoring.") + return flow.Error(err, "Unable to new account secret during restoring.") } } else { accountSecret, err = factory.NewObjectFactory(rc).NewSecret() if err != nil { return flow.Error(err, "Unable to new account secret.") } - err = rc.SetControllerRefAndCreate(accountSecret) - if err != nil { - return flow.Error(err, "Unable to create account secret.") - } - + } + err = rc.SetControllerRefAndCreate(accountSecret) + if err != nil { + return flow.Error(err, "Unable to create account secret.") } } + keySecret, err := rc.GetPolarDBXSecret(convention.SecretTypeSecurity) if client.IgnoreNotFound(err) != nil { return flow.Error(err, "Unable to get encode key secret.") @@ -652,6 +647,14 @@ var CreateFileStorage = polardbxv1reconcile.NewStepBinder("CreateFileStorage", return flow.Error(err, "Failed to get CN group manager.") } + supportFileStorage, err := groupManager.CheckFileStorageCompatibility() + if err != nil { + return flow.Error(err, "Failed to check compatibility of file storage.") + } + if !supportFileStorage { + return flow.Continue("Current pxc does not support file storage.") + } + fileStorageInfoList, err := groupManager.ListFileStorage() if err != nil { return flow.Error(err, "Failed to get file storage list") diff --git a/pkg/operator/v1/polardbx/steps/instance/pitr/job.go b/pkg/operator/v1/polardbx/steps/instance/pitr/job.go new file mode 100644 index 0000000..d94433d --- /dev/null +++ b/pkg/operator/v1/polardbx/steps/instance/pitr/job.go @@ -0,0 +1,301 @@ +package pitr + +import ( + "fmt" + polarxv1 "github.com/alibaba/polardbx-operator/api/v1" + "github.com/alibaba/polardbx-operator/pkg/hpfs/backupbinlog" + k8shelper "github.com/alibaba/polardbx-operator/pkg/k8s/helper" + "github.com/alibaba/polardbx-operator/pkg/meta/core/gms/security" + "github.com/alibaba/polardbx-operator/pkg/operator/v1/polardbx/meta" + polardbxv1reconcile "github.com/alibaba/polardbx-operator/pkg/operator/v1/polardbx/reconcile" + "github.com/alibaba/polardbx-operator/pkg/pitr" + "github.com/pkg/errors" + batchv1 "k8s.io/api/batch/v1" + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/resource" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" + "k8s.io/apimachinery/pkg/util/intstr" + "path/filepath" + "sigs.k8s.io/controller-runtime/pkg/client" + "strings" +) + +const ( + HttpServerPort = 13333 + PortName = "server" + ContainerName = "job" +) + +func NewJobName(name string) string { + result := fmt.Sprintf("sys-pitr-%s", name) + if len(result) > 63 { + result = security.MustSha1Hash(result) + } + return result +} + +func CreateTaskConfig(rc *polardbxv1reconcile.Context, pxcBackup *polarxv1.PolarDBXBackup) (*pitr.TaskConfig, error) { + polardbx := rc.MustGetPolarDBX() + var namespace string + var 
sinkName string + var sinkType string + var binlogChecksum string + binlogSource := polardbx.Spec.Restore.BinlogSource + if binlogSource != nil { + namespace = binlogSource.Namespace + if binlogSource.StorageProvider != nil { + sinkType = string(binlogSource.StorageProvider.StorageName) + sinkName = binlogSource.StorageProvider.Sink + } + binlogChecksum = binlogSource.Checksum + } + if namespace == "" { + namespace = polardbx.Namespace + } + // try get from PolarDBXBackupBinlog object + var backupBinlogs polarxv1.PolarDBXBackupBinlogList + err := rc.Client().List(rc.Context(), &backupBinlogs, client.InNamespace(namespace), client.MatchingLabels{ + meta.LabelName: pxcBackup.Spec.Cluster.Name, + meta.LabelUid: string(pxcBackup.Spec.Cluster.UID), + }) + if err != nil { + return nil, err + } + //pxcBackup.Spec.Cluster.Name + if sinkName == "" { + if len(backupBinlogs.Items) > 0 { + backupBinlog := backupBinlogs.Items[0] + sinkName = backupBinlog.Spec.StorageProvider.Sink + sinkType = string(backupBinlog.Spec.StorageProvider.StorageName) + } + } + if sinkName == "" { + if polardbx.Spec.Restore.StorageProvider != nil { + sinkName = polardbx.Spec.Restore.StorageProvider.Sink + sinkType = string(polardbx.Spec.Restore.StorageProvider.StorageName) + } + } + if binlogChecksum == "" { + if len(backupBinlogs.Items) > 0 { + binlogChecksum = backupBinlogs.Items[0].Spec.BinlogChecksum + } + } + if binlogChecksum == "" { + binlogChecksum = "crc32" + } + + var xstoreBackups polarxv1.XStoreBackupList + err = rc.Client().List(rc.Context(), &xstoreBackups, client.InNamespace(rc.Namespace()), client.MatchingLabels{ + meta.LabelName: pxcBackup.Spec.Cluster.Name, + meta.LabelTopBackup: pxcBackup.Name, + }) + if err != nil { + return nil, errors.Wrap(err, fmt.Sprintf("failed to get xstore backup list, polardbx name = %s, backup name = %s", pxcBackup.Spec.Cluster.Name, pxcBackup.Name)) + } + xstoreConfigs := generateXStoreTaskConfigs(xstoreBackups.Items) + + var currentXStores polarxv1.XStoreList + err = rc.Client().List(rc.Context(), ¤tXStores, client.InNamespace(pxcBackup.Namespace), client.MatchingLabels{ + meta.LabelName: pxcBackup.Spec.Cluster.Name, + }) + if err != nil { + return nil, errors.Wrap(err, fmt.Sprintf("failed to list xstores of namespace = %s , polardbx = %s", pxcBackup.Namespace, pxcBackup.Spec.Cluster.Name)) + } + for _, xstore := range currentXStores.Items { + xStoreConfig, ok := xstoreConfigs[xstore.Name] + if ok && xStoreConfig.XStoreUid == string(xstore.UID) { + for podName, podVolume := range xstore.Status.BoundVolumes { + pod, ok := xStoreConfig.Pods[podName] + if ok { + pod.Host = podVolume.Host + pod.LogDir = filepath.Join(podVolume.HostPath, "log") + if xstore.Spec.Config.Dynamic.LogDataSeparation { + pod.LogDir = filepath.Join(podVolume.LogHostPath, "log") + } + } + } + } + } + + taskConfig := pitr.TaskConfig{ + Namespace: namespace, + PxcName: pxcBackup.Spec.Cluster.Name, + PxcUid: string(pxcBackup.Spec.Cluster.UID), + SinkName: sinkName, + SinkType: sinkType, + HpfsEndpoint: rc.Config().Store().HostPathFileServiceEndpoint(), + FsEndpoint: rc.Config().Store().FilestreamServiceEndpoint(), + BinlogChecksum: binlogChecksum, + HttpServerPort: HttpServerPort, + XStores: xstoreConfigs, + Timestamp: uint64(rc.MustParseRestoreTime().Unix()), + } + return &taskConfig, nil +} + +func generateXStoreTaskConfigs(dnBackups []polarxv1.XStoreBackup) map[string]*pitr.XStoreConfig { + dnConfigs := make(map[string]*pitr.XStoreConfig, len(dnBackups)) + for _, backup := range dnBackups { + xstoreName := 
backup.Spec.XStore.Name + globalConsistent := true + if strings.HasSuffix(xstoreName, "gms") { + globalConsistent = false + } + dnConfig := pitr.XStoreConfig{ + GlobalConsistent: globalConsistent, + XStoreName: xstoreName, + XStoreUid: string(backup.Spec.XStore.UID), + BackupSetStartIndex: uint64(backup.Status.CommitIndex), + HeartbeatSname: backupbinlog.Sname, + Pods: map[string]*pitr.PodConfig{ + backup.Status.TargetPod: { + PodName: backup.Status.TargetPod, + }, + }, + } + dnConfigs[dnConfig.XStoreName] = &dnConfig + } + return dnConfigs +} + +func newJobLabels(polardbx *polarxv1.PolarDBXCluster) map[string]string { + return map[string]string{ + meta.LabelName: polardbx.Name, + meta.LabelJobType: string(meta.PitrPrepareBinlogJobType), + } +} + +func GetJobAndPod(rc *polardbxv1reconcile.Context) (*batchv1.Job, *corev1.Pod, error) { + polardbx := rc.MustGetPolarDBX() + var job batchv1.Job + jobName := NewJobName(polardbx.Name) + err := rc.Client().Get(rc.Context(), types.NamespacedName{Namespace: rc.Namespace(), Name: jobName}, &job) + if err != nil { + return nil, nil, errors.Wrap(err, fmt.Sprintf("failed to get job, jobName = %s", jobName)) + } + var podList corev1.PodList + labels := newJobLabels(polardbx) + err = rc.Client().List(rc.Context(), &podList, client.InNamespace(rc.Namespace()), client.MatchingLabels(labels)) + if err != nil { + return nil, nil, errors.Wrap(err, fmt.Sprintf("failed to get job pod jobName = %s", jobName)) + } + if len(podList.Items) > 0 { + pod := podList.Items[0] + err := k8shelper.CheckControllerReference(&pod, &job) + if err != nil { + return nil, nil, errors.Wrap(err, fmt.Sprintf("failed to check pod owner jobName = %s", jobName)) + } + return &job, &pod, nil + } + return &job, nil, errors.New("job pod is not found") +} + +func CreatePrepareBinlogJob(rc *polardbxv1reconcile.Context, config *pitr.TaskConfig) *batchv1.Job { + polardbx := rc.MustGetPolarDBX() + pitrAnnotationConfig := pitr.MustMarshalJSON(config) + labels := newJobLabels(polardbx) + var jobTTL int32 = 30 + var jobParallelism int32 = 1 + var completions int32 = 1 + var backOffLimit int32 = 0 + volumes := []corev1.Volume{ + { + Name: "spill", + VolumeSource: corev1.VolumeSource{ + EmptyDir: &corev1.EmptyDirVolumeSource{}, + }, + }, + { + Name: "config", + VolumeSource: corev1.VolumeSource{ + DownwardAPI: &corev1.DownwardAPIVolumeSource{ + Items: []corev1.DownwardAPIVolumeFile{ + { + Path: "config.json", + FieldRef: &corev1.ObjectFieldSelector{ + FieldPath: fmt.Sprintf("metadata.annotations['%s']", meta.AnnotationPitrConfig), + }, + }, + }, + }, + }, + }, + } + mountMode := corev1.MountPropagationHostToContainer + container := corev1.Container{ + Name: ContainerName, + Image: rc.Config().Images().DefaultJobImage(), + Command: []string{"/polardbx-job"}, + Args: []string{"-job-type=" + string(meta.PitrPrepareBinlogJobType)}, + VolumeMounts: []corev1.VolumeMount{ + { + Name: "config", + MountPath: "/workspace/conf/", + ReadOnly: true, + }, + { + Name: "spill", + MountPath: "/workspace/spill", + MountPropagation: &mountMode, + }, + }, + Ports: []corev1.ContainerPort{ + { + Name: PortName, + ContainerPort: HttpServerPort, + }, + }, + ReadinessProbe: &corev1.Probe{ + InitialDelaySeconds: 20, + TimeoutSeconds: 5, + Handler: corev1.Handler{ + HTTPGet: &corev1.HTTPGetAction{ + Port: intstr.FromString(PortName), + Path: "/status", + }, + }, + }, + Resources: corev1.ResourceRequirements{ + Limits: corev1.ResourceList{ + corev1.ResourceCPU: resource.MustParse("1"), + corev1.ResourceMemory: 
resource.MustParse("1Gi"), + }, + Requests: corev1.ResourceList{ + corev1.ResourceCPU: resource.MustParse("200m"), + corev1.ResourceMemory: resource.MustParse("100Mi"), + }, + }, + } + job := batchv1.Job{ + ObjectMeta: metav1.ObjectMeta{ + Name: NewJobName(polardbx.Name), + Namespace: rc.Namespace(), + Annotations: map[string]string{ + meta.AnnotationPitrConfig: pitrAnnotationConfig, + }, + Labels: labels, + }, + Spec: batchv1.JobSpec{ + TTLSecondsAfterFinished: &jobTTL, + Parallelism: &jobParallelism, + Completions: &completions, + BackoffLimit: &backOffLimit, + Template: corev1.PodTemplateSpec{ + ObjectMeta: metav1.ObjectMeta{ + Annotations: map[string]string{ + meta.AnnotationPitrConfig: pitrAnnotationConfig, + }, + Labels: labels, + }, + Spec: corev1.PodSpec{ + Volumes: volumes, + Containers: []corev1.Container{container}, + RestartPolicy: corev1.RestartPolicyNever, + ImagePullSecrets: polardbx.Spec.Topology.Nodes.DN.Template.ImagePullSecrets, + }, + }, + }, + } + return &job +} diff --git a/pkg/operator/v1/polardbx/steps/instance/pitr/pitr.go b/pkg/operator/v1/polardbx/steps/instance/pitr/pitr.go new file mode 100644 index 0000000..d6ab508 --- /dev/null +++ b/pkg/operator/v1/polardbx/steps/instance/pitr/pitr.go @@ -0,0 +1,138 @@ +package pitr + +import ( + "encoding/json" + "errors" + "fmt" + polardbxv1 "github.com/alibaba/polardbx-operator/api/v1" + polarxv1polarx "github.com/alibaba/polardbx-operator/api/v1/polardbx" + "github.com/alibaba/polardbx-operator/pkg/k8s/control" + k8shelper "github.com/alibaba/polardbx-operator/pkg/k8s/helper" + "github.com/alibaba/polardbx-operator/pkg/operator/v1/polardbx/helper" + polardbxmeta "github.com/alibaba/polardbx-operator/pkg/operator/v1/polardbx/meta" + polardbxv1reconcile "github.com/alibaba/polardbx-operator/pkg/operator/v1/polardbx/reconcile" + "net/http" + "time" + + //"github.com/alibaba/polardbx-operator/pkg/pitr" + batchv1 "k8s.io/api/batch/v1" + apierrors "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/types" + "sigs.k8s.io/controller-runtime/pkg/reconcile" +) + +func IsPitrRestore(polardbx *polardbxv1.PolarDBXCluster) bool { + if helper.IsPhaseIn(polardbx, polarxv1polarx.PhaseRestoring, polarxv1polarx.PhasePending) && polardbx.Spec.Restore != nil && polardbx.Spec.Restore.Time != "" { + return true + } + return false +} + +var LoadLatestBackupSetByTime = polardbxv1reconcile.NewStepBinder("LoadLatestBackupSetByTime", + func(rc *polardbxv1reconcile.Context, flow control.Flow) (reconcile.Result, error) { + polardbx := rc.MustGetPolarDBX() + if polardbx.Spec.Restore.BackupSet == "" || len(polardbx.Spec.Restore.BackupSet) == 0 { + backup, err := rc.GetLastCompletedPXCBackup(map[string]string{polardbxmeta.LabelName: polardbx.Spec.Restore.From.PolarBDXName}, rc.MustParseRestoreTime()) + if err != nil { + return flow.Error(err, "failed to get last completed pxc backup") + } + polardbx.Spec.Restore.BackupSet = backup.Name + rc.MarkPolarDBXChanged() + } + return flow.Continue("LoadLatestBackupSetByTime continue") + }, +) + +var PreparePitrBinlogs = polardbxv1reconcile.NewStepBinder("PreparePitrBinlogs", + func(rc *polardbxv1reconcile.Context, flow control.Flow) (reconcile.Result, error) { + polardbx := rc.MustGetPolarDBX() + backup, err := rc.GetPXCBackupByName(polardbx.Spec.Restore.BackupSet) + if err != nil { + return flow.Error(err, "failed to get pxc backup by name", "pxc backup name", polardbx.Spec.Restore.BackupSet, "", backup) + } + var job batchv1.Job + jobName := NewJobName(polardbx.Name) + err = 
+		if err != nil {
+			if apierrors.IsNotFound(err) {
+				flow.Logger().Info("about to create pitr job")
+				taskConfig, err := CreateTaskConfig(rc, backup)
+				if err != nil {
+					return flow.RetryErr(err, "failed to create task config")
+				}
+				bytes, err := json.Marshal(taskConfig)
+				if err != nil {
+					return flow.Error(err, "failed to marshal task config to json")
+				}
+				flow.Logger().Info(fmt.Sprintf("pitr task config %s", string(bytes)))
+				// The config reaches the job pod via the pitr-config annotation,
+				// projected into config.json by the job's DownwardAPI volume.
+				job := CreatePrepareBinlogJob(rc, taskConfig)
+				if err := rc.SetControllerRefAndCreate(job); err != nil {
+					return flow.RetryErr(err, "failed to create job", "jobName", jobName)
+				}
+			} else {
+				return flow.Error(err, "failed to get job", "jobName", jobName)
+			}
+		}
+		return flow.Continue("PreparePitrBinlogs continue")
+	},
+)
+
+var WaitPreparePitrBinlogs = polardbxv1reconcile.NewStepBinder("WaitPreparePitrBinlogs",
+	func(rc *polardbxv1reconcile.Context, flow control.Flow) (reconcile.Result, error) {
+		job, pod, err := GetJobAndPod(rc)
+		if job != nil && job.Status.Failed > 0 {
+			// The prepare job failed; move the cluster to the failed phase instead of retrying forever.
+			polardbxObj := rc.MustGetPolarDBX()
+			polardbxObj.Status.Phase = polarxv1polarx.PhaseFailed
+			rc.UpdatePolarDBXStatus()
+			return flow.Error(errors.New("prepare binlog job failed"), "transferred to failed phase", "jobName", job.Name)
+		}
+		if err != nil {
+			return flow.RetryErr(err, "failed to get job and pod")
+		}
+		if len(pod.Status.ContainerStatuses) > 0 {
+			ready := true
+			for _, containerStatus := range pod.Status.ContainerStatuses {
+				ready = ready && containerStatus.Ready
+			}
+			if ready {
+				polardbxObj := rc.MustGetPolarDBX()
+				port := k8shelper.MustGetPortFromContainer(
+					k8shelper.MustGetContainerFromPod(pod, ContainerName),
+					PortName,
+				).ContainerPort
+				polardbxObj.Status.PitrStatus = &polarxv1polarx.PitrStatus{
+					PrepareJobEndpoint: fmt.Sprintf("http://%s:%d", pod.Status.PodIP, port),
+					Job:                job.Name,
+				}
+				return flow.Continue("The container is ready")
+			}
+		}
+		return flow.Retry("The container is not ready")
+	},
+)
+
+var CleanPreparePitrBinlogJob = polardbxv1reconcile.NewStepBinder("CleanPreparePitrBinlogJob",
+	func(rc *polardbxv1reconcile.Context, flow control.Flow) (reconcile.Result, error) {
+		polardbx := rc.MustGetPolarDBX()
+		if polardbx.Status.PitrStatus != nil {
+			pitrEndpoint := polardbx.Status.PitrStatus.PrepareJobEndpoint
+			if pitrEndpoint != "" {
+				exitUrl := pitrEndpoint + "/exit"
+				httpClient := http.Client{
+					Timeout: 2 * time.Second,
+				}
+				resp, err := httpClient.Get(exitUrl)
+				if err != nil {
+					flow.Logger().Error(err, fmt.Sprintf("failed to send exit request, url = %s", exitUrl))
+					// Best-effort retry in the background; the job carries a TTL, so a miss here is not fatal.
+					go func() {
+						time.Sleep(3 * time.Second)
+						if resp, err := http.Get(exitUrl); err == nil {
+							resp.Body.Close()
+						}
+					}()
+				} else {
+					resp.Body.Close()
+				}
+			}
+		}
+		return flow.Continue("CleanPreparePitrBinlogJob continue")
+	},
+)
diff --git a/pkg/operator/v1/systemtask/common/adaptor.go b/pkg/operator/v1/systemtask/common/adaptor.go
new file mode 100644
index 0000000..c1ef6a2
--- /dev/null
+++ b/pkg/operator/v1/systemtask/common/adaptor.go
@@ -0,0 +1,35 @@
+package common
+
+import (
+	"github.com/alibaba/polardbx-operator/pkg/k8s/control"
+	"github.com/go-logr/logr"
+	"sigs.k8s.io/controller-runtime/pkg/reconcile"
+)
+
+type ConditionFunc func(rc *Context, log logr.Logger) (bool, error)
+type StepFunc func(rc *Context, flow control.Flow) (reconcile.Result, error)
+
+func NewStepBinder(name string, f StepFunc) control.BindFunc {
+	return control.NewStepBinder(
+		control.NewStep(
+			name, func(rc control.ReconcileContext, flow control.Flow) (reconcile.Result, error) {
+				return f(rc.(*Context), flow)
+			},
+		),
+	)
+}
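For orientation, a step bound through this adaptor reads like the sketch below. The step name and body are illustrative only, not part of this patch; they merely exercise the Context and Flow APIs used throughout this package:

// Hypothetical step, shown only to illustrate NewStepBinder usage.
var logTaskPhase = NewStepBinder("LogTaskPhase",
	func(rc *Context, flow control.Flow) (reconcile.Result, error) {
		task := rc.MustGetSystemTask()
		flow.Logger().Info("observed system task", "phase", task.Status.Phase)
		return flow.Continue("Phase logged.")
	},
)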
+
+func NewStepIfBinder(conditionName string, condFunc ConditionFunc, binders ...control.BindFunc) control.BindFunc {
+	condition := control.NewCachedCondition(
+		control.NewCondition(conditionName, func(rc control.ReconcileContext, log logr.Logger) (bool, error) {
+			return condFunc(rc.(*Context), log)
+		}),
+	)
+
+	ifBinders := make([]control.BindFunc, len(binders))
+	for i := range binders {
+		ifBinders[i] = control.NewStepIfBinder(condition, control.ExtractStepsFromBindFunc(binders[i])[0])
+	}
+
+	return control.CombineBinders(ifBinders...)
+}
diff --git a/pkg/operator/v1/systemtask/common/base_reconciler.go b/pkg/operator/v1/systemtask/common/base_reconciler.go
new file mode 100644
index 0000000..4769007
--- /dev/null
+++ b/pkg/operator/v1/systemtask/common/base_reconciler.go
@@ -0,0 +1,10 @@
+package common
+
+import (
+	"github.com/go-logr/logr"
+	"sigs.k8s.io/controller-runtime/pkg/reconcile"
+)
+
+type Reconciler interface {
+	Reconcile(rc *Context, log logr.Logger, request reconcile.Request) (reconcile.Result, error)
+}
diff --git a/pkg/operator/v1/systemtask/common/common.go b/pkg/operator/v1/systemtask/common/common.go
new file mode 100644
index 0000000..7db9744
--- /dev/null
+++ b/pkg/operator/v1/systemtask/common/common.go
@@ -0,0 +1,21 @@
+package common
+
+import "github.com/alibaba/polardbx-operator/api/v1/systemtask"
+
+// Registry of system task reconcilers, keyed by task type.
+
+var (
+	registeredMap = make(map[systemtask.Type]Reconciler)
+)
+
+func Register(taskType systemtask.Type, reconciler Reconciler) {
+	registeredMap[taskType] = reconciler
+}
+
+func MustFindReconciler(taskType systemtask.Type) Reconciler {
+	reconciler, ok := registeredMap[taskType]
+	if !ok {
+		panic("failed to find system task reconciler, task type: " + string(taskType))
+	}
+	return reconciler
+}
diff --git a/pkg/operator/v1/systemtask/common/context.go b/pkg/operator/v1/systemtask/common/context.go
new file mode 100644
index 0000000..1485c34
--- /dev/null
+++ b/pkg/operator/v1/systemtask/common/context.go
@@ -0,0 +1,198 @@
+package common
+
+import (
+	"fmt"
+	v1 "github.com/alibaba/polardbx-operator/api/v1"
+	"github.com/alibaba/polardbx-operator/api/v1/systemtask"
+	"github.com/alibaba/polardbx-operator/pkg/k8s/cache"
+	"github.com/alibaba/polardbx-operator/pkg/k8s/control"
+	"github.com/alibaba/polardbx-operator/pkg/operator/v1/config"
+	polardbxmeta "github.com/alibaba/polardbx-operator/pkg/operator/v1/polardbx/meta"
+	xstoremeta "github.com/alibaba/polardbx-operator/pkg/operator/v1/xstore/meta"
+	corev1 "k8s.io/api/core/v1"
+	"k8s.io/apimachinery/pkg/types"
+	ctrl "sigs.k8s.io/controller-runtime"
+	"sigs.k8s.io/controller-runtime/pkg/client"
+)
+
+type Context struct {
+	*control.BaseReconcileContext
+
+	// Caches
+	taskKey     types.NamespacedName
+	taskChanged bool
+	systemTask  *v1.SystemTask
+	objectCache cache.ObjectLoadingCache
+
+	// Hint cache
+	controllerHints []string
+	// Config
+	configLoader func() config.Config
+
+	// balance resource
+	brTargetPod      *corev1.Pod
+	brTargetNodeName string
+}
+
+func (ctx *Context) ConfigLoader() func() config.Config {
+	return ctx.configLoader
+}
+
+func (ctx *Context) BrTargetPod() *corev1.Pod {
+	return ctx.brTargetPod
+}
+
+func (ctx *Context) SetBrTargetPod(brTargetPod *corev1.Pod) {
+	ctx.brTargetPod = brTargetPod
+}
+
+func (ctx *Context) BrTargetNodeName() string {
+	return ctx.brTargetNodeName
+}
+
+func (ctx *Context) SetBrTargetNodeName(brTargetNodeName string) {
+	ctx.brTargetNodeName = brTargetNodeName
+}
+
+func NewContext(base *control.BaseReconcileContext, configLoader func()
config.Config) *Context { + return &Context{ + BaseReconcileContext: base, + objectCache: cache.NewObjectCache(base.Client(), base.Scheme()), + configLoader: configLoader, + } +} + +func (ctx *Context) SetKey(taskKey types.NamespacedName) { + ctx.taskKey = taskKey +} + +func (ctx *Context) MustGetSystemTask() *v1.SystemTask { + if ctx.systemTask == nil { + var systemTask v1.SystemTask + err := ctx.Client().Get(ctx.Context(), ctx.taskKey, &systemTask) + if err != nil { + panic(err) + } + if systemTask.Status.StBalanceResourceStatus == nil { + systemTask.Status.StBalanceResourceStatus = &systemtask.StBalanceResourceStatus{} + } + ctx.systemTask = &systemTask + } + return ctx.systemTask +} + +func (ctx *Context) IsSystemTaskChanged() bool { + return ctx.taskChanged +} + +func (ctx *Context) MarkSystemTaskChanged() { + ctx.taskChanged = true + return +} + +func (ctx *Context) UpdateSystemTask() error { + err := ctx.Client().Update(ctx.Context(), ctx.systemTask) + return err +} + +func (ctx *Context) GetAllXStores() ([]v1.XStore, error) { + var xstoreList v1.XStoreList + err := ctx.Client().List(ctx.Context(), &xstoreList, client.InNamespace(ctx.Namespace())) + return xstoreList.Items, err +} + +func (ctx *Context) GetXStoreByName(name string) (*v1.XStore, error) { + var xstore v1.XStore + objKey := types.NamespacedName{ + Name: name, + Namespace: ctx.Namespace(), + } + err := ctx.Client().Get(ctx.Context(), objKey, &xstore) + if err != nil { + return nil, err + } + return &xstore, nil +} + +func (ctx *Context) GetAllXStorePods() ([]corev1.Pod, error) { + pods := make([]corev1.Pod, 0) + for _, role := range []string{polardbxmeta.RoleDN, polardbxmeta.RoleGMS} { + var podList corev1.PodList + err := ctx.Client().List(ctx.Context(), &podList, client.InNamespace(ctx.Namespace()), client.MatchingLabels(map[string]string{ + polardbxmeta.LabelRole: role, + })) + if err != nil { + return nil, err + } + pods = append(pods, podList.Items...) 
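+		// Note: pods are listed once per role label (DN, then GMS) and merged,
+		// so the balance steps treat every consensus pod uniformly.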
+ } + return pods, nil +} + +func (ctx *Context) GetNodeXStorePodMap(separateRole bool, logger bool) (map[string][]corev1.Pod, error) { + pods, err := ctx.GetAllXStorePods() + if err != nil { + return nil, err + } + newPods := make([]corev1.Pod, 0) + if separateRole { + for _, pod := range pods { + if logger { + if xstoremeta.IsPodRoleVoter(&pod) { + newPods = append(newPods, pod) + } + } else { + if !xstoremeta.IsPodRoleVoter(&pod) { + newPods = append(newPods, pod) + } + } + } + } else { + newPods = pods + } + + result := make(map[string][]corev1.Pod) + for _, pod := range newPods { + nodeName := pod.Spec.NodeName + if nodeName == "" { + return nil, fmt.Errorf("node name not found, pod name: %s", pod.Name) + } + nodePods, ok := result[nodeName] + if !ok { + nodePods = make([]corev1.Pod, 0) + } + result[nodeName] = append(nodePods, pod) + } + nodes, err := ctx.GetAllNodes() + if err != nil { + return nil, err + } + for _, node := range nodes { + _, ok := result[node.Name] + if !ok { + result[node.Name] = make([]corev1.Pod, 0) + } + } + return result, nil +} + +func (ctx *Context) GetAllNodes() ([]corev1.Node, error) { + var nodeList corev1.NodeList + err := ctx.Client().List(ctx.Context(), &nodeList) + return nodeList.Items, err +} + +func (rc *Context) SetControllerRef(obj client.Object) error { + if obj == nil { + return nil + } + systemTask := rc.MustGetSystemTask() + return ctrl.SetControllerReference(systemTask, obj, rc.Scheme()) +} + +func (rc *Context) SetControllerRefAndCreate(obj client.Object) error { + if err := rc.SetControllerRef(obj); err != nil { + return err + } + return rc.Client().Create(rc.Context(), obj) +} diff --git a/pkg/operator/v1/systemtask/common/label.go b/pkg/operator/v1/systemtask/common/label.go new file mode 100644 index 0000000..b9a6e2a --- /dev/null +++ b/pkg/operator/v1/systemtask/common/label.go @@ -0,0 +1,5 @@ +package common + +const ( + LabelBalanceResource = "systemtask/balance-resource" +) diff --git a/pkg/operator/v1/systemtask/controllers/systemtask_controller.go b/pkg/operator/v1/systemtask/controllers/systemtask_controller.go new file mode 100644 index 0000000..aa9f932 --- /dev/null +++ b/pkg/operator/v1/systemtask/controllers/systemtask_controller.go @@ -0,0 +1,64 @@ +package controllers + +import ( + "context" + polardbxv1 "github.com/alibaba/polardbx-operator/api/v1" + "github.com/alibaba/polardbx-operator/api/v1/systemtask" + "github.com/alibaba/polardbx-operator/pkg/k8s/control" + "github.com/alibaba/polardbx-operator/pkg/operator/hint" + "github.com/alibaba/polardbx-operator/pkg/operator/v1/config" + "github.com/alibaba/polardbx-operator/pkg/operator/v1/systemtask/common" + resource_balance "github.com/alibaba/polardbx-operator/pkg/operator/v1/systemtask/reconcile" + "github.com/go-logr/logr" + "golang.org/x/time/rate" + "k8s.io/client-go/util/workqueue" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/controller" + "sigs.k8s.io/controller-runtime/pkg/reconcile" + "time" +) + +func init() { + common.Register(systemtask.BalanceResource, &resource_balance.ResourceBalanceReconciler{}) +} + +type SystemTaskReconciler struct { + BaseRc *control.BaseReconcileContext + Logger logr.Logger + config.LoaderFactory + + MaxConcurrency int +} + +func (r *SystemTaskReconciler) Reconcile(ctx context.Context, request reconcile.Request) (reconcile.Result, error) { + log := r.Logger.WithValues("namespace", request.Namespace, "systemtask", request.Name) + + if hint.IsNamespacePaused(request.Namespace) { + log.Info("Reconciling is 
paused, skip") + return reconcile.Result{}, nil + } + rc := common.NewContext( + control.NewBaseReconcileContextFrom(r.BaseRc, ctx, request), + r.LoaderFactory(), + ) + rc.SetKey(request.NamespacedName) + defer rc.Close() + + systemTask := rc.MustGetSystemTask() + reconciler := common.MusterFindReconciler(systemTask.Spec.TaskType) + return reconciler.Reconcile(rc, log.WithValues("SystemTaskType", systemTask.Spec.TaskType), request) +} + +func (r *SystemTaskReconciler) SetupWithManager(mgr ctrl.Manager) error { + return ctrl.NewControllerManagedBy(mgr). + WithOptions(controller.Options{ + MaxConcurrentReconciles: r.MaxConcurrency, + RateLimiter: workqueue.NewMaxOfRateLimiter( + workqueue.NewItemExponentialFailureRateLimiter(5*time.Millisecond, 10*time.Second), + // 60 qps, 10 bucket size. This is only for retry speed. It's only the overall factor (not per item). + &workqueue.BucketRateLimiter{Limiter: rate.NewLimiter(rate.Limit(60), 10)}, + ), + }). + For(&polardbxv1.SystemTask{}). + Complete(r) +} diff --git a/pkg/operator/v1/systemtask/reconcile/resource_balance_reconciler.go b/pkg/operator/v1/systemtask/reconcile/resource_balance_reconciler.go new file mode 100644 index 0000000..6a31bd6 --- /dev/null +++ b/pkg/operator/v1/systemtask/reconcile/resource_balance_reconciler.go @@ -0,0 +1,40 @@ +package resource_balance + +import ( + v1 "github.com/alibaba/polardbx-operator/api/v1" + "github.com/alibaba/polardbx-operator/api/v1/systemtask" + "github.com/alibaba/polardbx-operator/pkg/k8s/control" + "github.com/alibaba/polardbx-operator/pkg/operator/v1/systemtask/common" + "github.com/alibaba/polardbx-operator/pkg/operator/v1/systemtask/steps" + "github.com/go-logr/logr" + "sigs.k8s.io/controller-runtime/pkg/reconcile" +) + +type ResourceBalanceReconciler struct { +} + +func (r *ResourceBalanceReconciler) Reconcile(rc *common.Context, log logr.Logger, request reconcile.Request) (reconcile.Result, error) { + systemTask := rc.MustGetSystemTask() + log = log.WithValues("phase", systemTask.Status.Phase) + task := r.newReconcileTask(rc, systemTask, log) + return control.NewExecutor(log).Execute(rc, task) +} + +func (r *ResourceBalanceReconciler) newReconcileTask(rc *common.Context, systemTask *v1.SystemTask, log logr.Logger) *control.Task { + task := control.NewTask() + // Deferred steps, will always be executed in the deferred sequence. 
+ defer steps.PersistentSystemTask(task, true) + switch systemTask.Status.Phase { + case systemtask.InitPhase: + steps.CheckAllXStoreHealth(task) + steps.TransferPhaseTo(systemtask.RebuildTaskPhase, true)(task) + case systemtask.RebuildTaskPhase: + steps.CreateBalanceTaskIfNeed(task) + control.When(steps.IsRebuildFinish(rc), steps.TransferPhaseTo(systemtask.BalanceRolePhase, true))(task) + case systemtask.BalanceRolePhase: + steps.BalanceRole(task) + case systemtask.SuccessPhase: + + } + return task +} diff --git a/pkg/operator/v1/systemtask/steps/balance_resource.go b/pkg/operator/v1/systemtask/steps/balance_resource.go new file mode 100644 index 0000000..ad35299 --- /dev/null +++ b/pkg/operator/v1/systemtask/steps/balance_resource.go @@ -0,0 +1,437 @@ +package steps + +import ( + "bytes" + "errors" + v1 "github.com/alibaba/polardbx-operator/api/v1" + "github.com/alibaba/polardbx-operator/api/v1/systemtask" + "github.com/alibaba/polardbx-operator/api/v1/xstore" + "github.com/alibaba/polardbx-operator/pkg/k8s/control" + "github.com/alibaba/polardbx-operator/pkg/operator/v1/systemtask/common" + "github.com/alibaba/polardbx-operator/pkg/operator/v1/xstore/command" + "github.com/alibaba/polardbx-operator/pkg/operator/v1/xstore/convention" + xstoremeta "github.com/alibaba/polardbx-operator/pkg/operator/v1/xstore/meta" + xstorev1reconcile "github.com/alibaba/polardbx-operator/pkg/operator/v1/xstore/reconcile" + xstoreinstance "github.com/alibaba/polardbx-operator/pkg/operator/v1/xstore/steps/instance" + "github.com/go-logr/logr" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "math" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/reconcile" + "time" +) + +const RebuildTaskName = "res-balance-rebuild" + +func getMinCountNodePods(nodePodMap map[string][]corev1.Pod) (string, []corev1.Pod) { + var nodeName string + var pods []corev1.Pod + podLen := math.MaxInt + for k, v := range nodePodMap { + if len(v) < podLen { + nodeName = k + pods = v + podLen = len(pods) + } + } + return nodeName, pods +} + +func getMaxCountNodePods(nodePodMap map[string][]corev1.Pod) (string, []corev1.Pod) { + var nodeName string + var pods []corev1.Pod + podLen := math.MinInt + for k, v := range nodePodMap { + if len(v) > podLen { + nodeName = k + pods = v + podLen = len(pods) + } + } + return nodeName, pods +} + +func getTargetXStorePod(rc *common.Context, logger bool) (*corev1.Pod, string, error) { + nodePodMap, err := rc.GetNodeXStorePodMap(true, logger) + if err != nil { + return nil, "", err + } + //not logger + _, maxCountPods := getMaxCountNodePods(nodePodMap) + minCountNodeName, minCountPods := getMinCountNodePods(nodePodMap) + var targetPod *corev1.Pod + if len(maxCountPods)-len(minCountPods) > 1 { + xstoreNameMap := make(map[string]bool) + for _, pod := range minCountPods { + xstoreNameMap[pod.Labels[xstoremeta.LabelName]] = true + } + for _, pod := range maxCountPods { + _, ok := xstoreNameMap[pod.Labels[xstoremeta.LabelName]] + if !ok { + targetPod = &pod + break + } + } + } + return targetPod, minCountNodeName, nil +} + +func getTargetXStorePodAllRole(rc *common.Context) (*corev1.Pod, string, error) { + nodePodMap, err := rc.GetNodeXStorePodMap(false, false) + if err != nil { + return nil, "", err + } + _, maxCountPods := getMaxCountNodePods(nodePodMap) + minCountNodeName, minCountPods := getMinCountNodePods(nodePodMap) + var targetPod *corev1.Pod + if len(maxCountPods)-len(minCountPods) > 1 { + xstoreNameMap := make(map[string]bool) + for _, 
pod := range minCountPods { + xstoreNameMap[pod.Labels[xstoremeta.LabelName]] = true + } + for _, pod := range maxCountPods { + _, ok := xstoreNameMap[pod.Labels[xstoremeta.LabelName]] + if !ok { + // check if logger + if xstoremeta.IsPodRoleVoter(&pod) { + targetPod = &pod + break + } + } + } + } + return targetPod, minCountNodeName, nil +} + +func newXStoreFollower(rc *common.Context, targetPod *corev1.Pod, targetNodeName string) (*v1.XStoreFollower, error) { + xstoreName := targetPod.Labels[xstoremeta.LabelName] + xstore, err := rc.GetXStoreByName(xstoreName) + if err != nil { + return nil, err + } + if xstore.Spec.PrimaryXStore != "" { + xstoreName = xstore.Spec.PrimaryXStore + } + rebuildTask := v1.XStoreFollower{ + ObjectMeta: metav1.ObjectMeta{ + Name: RebuildTaskName, + Namespace: rc.Namespace(), + Labels: map[string]string{ + common.LabelBalanceResource: "true", + }, + }, + Spec: v1.XStoreFollowerSpec{ + Local: false, + NodeName: targetNodeName, + TargetPodName: targetPod.Name, + XStoreName: xstoreName, + }, + } + return &rebuildTask, nil +} + +var CreateBalanceTaskIfNeed = common.NewStepBinder("CreateBalanceTaskIfNeed", + func(rc *common.Context, flow control.Flow) (reconcile.Result, error) { + systemTask := rc.MustGetSystemTask() + + var xstoreFollowerList v1.XStoreFollowerList + err := rc.Client().List(rc.Context(), &xstoreFollowerList, client.InNamespace(rc.Namespace()), client.MatchingLabels(map[string]string{ + common.LabelBalanceResource: "true", + })) + if err != nil { + return flow.RetryErr(err, "failed to get xstore follower list") + } + if len(xstoreFollowerList.Items) > 0 { + for _, xf := range xstoreFollowerList.Items { + if xf.Status.Phase == xstore.FollowerPhaseSuccess { + rc.Client().Delete(rc.Context(), &xf) + } + } + return flow.RetryAfter(time.Second, "try for xstore follower status") + } + if systemTask.Status.StBalanceResourceStatus.RebuildFinish { + return flow.Pass() + } + var targetPod *corev1.Pod + var targetNode string + targetPod, targetNode, err = getTargetXStorePod(rc, false) + if err != nil { + return flow.RetryErr(err, "failed to get xstore pod", "logger", false) + } + if targetPod == nil { + targetPod, targetNode, err = getTargetXStorePod(rc, true) + if err != nil { + return flow.RetryErr(err, "failed to get xstore pod", "logger", false) + } + } + + if targetPod == nil { + targetPod, targetNode, err = getTargetXStorePodAllRole(rc) + if err != nil { + return flow.RetryErr(err, "failed to get xstore pod", "all role", true) + } + } + + rc.MarkSystemTaskChanged() + if targetPod == nil { + systemTask.Status.StBalanceResourceStatus.RebuildFinish = true + return flow.Pass() + } + systemTask.Status.StBalanceResourceStatus.RebuildTaskName = RebuildTaskName + //kill mysqld in the target pod, ensure not leader + cmd := command.NewCanonicalCommandBuilder().Process().KillAllMyProcess().Build() + buf := &bytes.Buffer{} + err = rc.ExecuteCommandOn(targetPod, convention.ContainerEngine, cmd, control.ExecOptions{ + Logger: flow.Logger(), + Stdout: buf, + Timeout: 8 * time.Second, + }) + rebuildTask, err := newXStoreFollower(rc, targetPod, targetNode) + if err != nil { + return flow.RetryErr(err, "Failed to create xstore follower task") + } + err = rc.SetControllerRefAndCreate(rebuildTask) + if err != nil { + return flow.RetryErr(err, "Failed to Create rebuild task", "task name", RebuildTaskName) + } + return flow.Retry("CreateBalanceTaskIfNeed Success") + }) + +func IsRebuildFinish(rc *common.Context) bool { + systemTask := rc.MustGetSystemTask() + return 
systemTask.Status.StBalanceResourceStatus.RebuildFinish +} + +var CheckAllXStoreHealth = common.NewStepBinder("CheckAllXStoreHealth", + func(rc *common.Context, flow control.Flow) (reconcile.Result, error) { + //check if xstore has leader follower logger + var xstores v1.XStoreList + err := rc.Client().List(rc.Context(), &xstores, client.InNamespace(rc.Namespace())) + if err != nil { + return flow.RetryErr(err, "Failed to") + } + systemTask := rc.MustGetSystemTask() + if len(xstores.Items) == 0 { + systemTask.Status.Phase = systemtask.SuccessPhase + rc.MarkSystemTaskChanged() + return flow.Pass() + } + for _, xstore := range xstores.Items { + //fetch pods of this xstore + var podList corev1.PodList + err := rc.Client().List(rc.Context(), &podList, client.InNamespace(rc.Namespace()), client.MatchingLabels{ + xstoremeta.LabelName: xstore.Name, + }) + if err != nil { + return flow.RetryErr(err, "failed to list pods", "xstore", xstore.Name) + } + markMap := make(map[string]bool) + for _, pod := range podList.Items { + markMap[pod.Name] = true + } + if len(markMap) < 3 { + return flow.RetryErr(errors.New("UnhealthXstore"), "unhealth xstore", "xstore name", xstore.Name) + } + } + return flow.Pass() + }) + +type MyNode struct { + Name string + CandNum int + LeaderNum int + Neighbors map[string]*MyNode + Pods []corev1.Pod +} + +func isLeaderPod(rc *common.Context, pod corev1.Pod, logger logr.Logger) (bool, error) { + xstoreRequest := rc.Request() + xstoreRequest.Name = pod.Labels[xstoremeta.LabelName] + xstoreContext := xstorev1reconcile.NewContext( + control.NewBaseReconcileContextFrom(rc.BaseReconcileContext, rc.Context(), xstoreRequest), + rc.ConfigLoader(), + ) + role, _, err := xstoreinstance.ReportRoleAndCurrentLeader(xstoreContext, &pod, logger) + if err != nil { + return false, err + } + return role == xstoremeta.RoleLeader, nil +} + +func BuildMyNodeFromPods(rc *common.Context, pods []corev1.Pod, logger logr.Logger) (map[string]*MyNode, error) { + xstoreToPods := make(map[string][]corev1.Pod) + nodes := make(map[string]*MyNode) + for _, pod := range pods { + if !xstoremeta.IsPodRoleCandidate(&pod) { + continue + } + xstoreName := pod.Labels[xstoremeta.LabelName] + xstorePods, ok := xstoreToPods[xstoreName] + if !ok { + xstorePods = make([]corev1.Pod, 0) + } + xstorePods = append(xstorePods, pod) + xstoreToPods[xstoreName] = xstorePods + nodeName := pod.Spec.NodeName + _, ok = nodes[nodeName] + if !ok { + nodes[nodeName] = &MyNode{ + Name: nodeName, + CandNum: 0, + LeaderNum: 0, + Neighbors: make(map[string]*MyNode), + Pods: make([]corev1.Pod, 0), + } + } + nodes[nodeName].Pods = append(nodes[nodeName].Pods, pod) + nodes[nodeName].CandNum = nodes[nodeName].CandNum + 1 + isLeader, err := isLeaderPod(rc, pod, logger) + if err != nil { + return nil, err + } + if isLeader { + nodes[nodeName].LeaderNum = nodes[nodeName].LeaderNum + 1 + } + } + for _, v := range xstoreToPods { + neighborNodes := make([]*MyNode, 0) + for _, pod := range v { + nodeName := pod.Spec.NodeName + node := nodes[nodeName] + neighborNodes = append(neighborNodes, node) + } + for i := 0; i < len(neighborNodes)-1; i++ { + node := neighborNodes[i] + for j := i + 1; j < len(neighborNodes); j++ { + nodeInner := neighborNodes[j] + node.Neighbors[nodeInner.Name] = nodeInner + nodeInner.Neighbors[node.Name] = node + } + } + } + return nodes, nil +} + +func getMinCountNode(nodes map[string]*MyNode, cntFunc func(node *MyNode) int) (map[string]*MyNode, int) { + var cnt int = math.MaxInt + for _, v := range nodes { + if cntFunc(v) < 
cnt { + cnt = cntFunc(v) + } + } + myNodes := map[string]*MyNode{} + for _, v := range nodes { + if cntFunc(v) == cnt { + myNodes[v.Name] = v + } + } + return myNodes, cnt +} + +func VisitNode(nodes map[string]*MyNode, minLeaderCount int, visitedNodes map[string]bool, leader bool, minCandPodCount int) (*MyNode, *MyNode, bool) { + for _, node := range nodes { + _, ok := visitedNodes[node.Name] + if ok { + continue + } + visitedNodes[node.Name] = true + if leader { + if node.LeaderNum-minLeaderCount >= 2 { + return node, nil, true + } + } else { + if minLeaderCount == -1 { + minLeaderCount = node.LeaderNum + } + if minCandPodCount == -1 { + minCandPodCount = node.CandNum + } + if (node.CandNum > minCandPodCount && node.LeaderNum > minLeaderCount) || node.LeaderNum-minLeaderCount >= 2 { + return node, nil, true + } + } + + fromNode, toNode, found := VisitNode(node.Neighbors, minLeaderCount, visitedNodes, leader, minCandPodCount) + if found { + if toNode == nil { + toNode = node + } + return fromNode, toNode, true + } + } + return nil, nil, false +} + +func changeLeader(rc *common.Context, leaderPod corev1.Pod, targetPod corev1.Pod, logger logr.Logger) { + cmd := command.NewCanonicalCommandBuilder().Consensus().SetLeader(targetPod.Name).Build() + rc.ExecuteCommandOn(&leaderPod, convention.ContainerEngine, cmd, control.ExecOptions{ + Logger: logger, + Timeout: 8 * time.Second, + }) +} + +var BalanceRole = common.NewStepBinder("BalanceRole", + func(rc *common.Context, flow control.Flow) (reconcile.Result, error) { + systemTask := rc.MustGetSystemTask() + xstorePods, err := rc.GetAllXStorePods() + if err != nil { + return flow.RetryErr(err, "Failed to get xstore pods") + } + myNodes, err := BuildMyNodeFromPods(rc, xstorePods, flow.Logger()) + if err != nil { + return flow.RetryErr(err, "Failed to BuildMyNodeFromPods") + } + minLeaderCountNodes, minLeaderCount := getMinCountNode(myNodes, func(node *MyNode) int { + return node.LeaderNum + }) + visitedNodes := map[string]bool{} + fromNode, toNode, found := VisitNode(minLeaderCountNodes, minLeaderCount, visitedNodes, true, -1) + if systemTask.Status.StBalanceResourceStatus.BalanceLeaderFinish || !found { + systemTask.Status.StBalanceResourceStatus.BalanceLeaderFinish = true + rc.MarkSystemTaskChanged() + minCandCountNodes, _ := getMinCountNode(myNodes, func(node *MyNode) int { + return node.CandNum + }) + allMinCountNodes := map[string]*MyNode{} + for k, v := range minCandCountNodes { + _, ok := minLeaderCountNodes[k] + if ok { + allMinCountNodes[k] = v + } + } + visitedNodes = map[string]bool{} + fromNode, toNode, found = VisitNode(allMinCountNodes, -1, visitedNodes, false, -1) + } + if !found { + systemTask.Status.Phase = systemtask.SuccessPhase + rc.MarkSystemTaskChanged() + return flow.Retry("BalanceRole Finishes") + } + // select xstore pod + xstoreLeaderPodMap := map[string]corev1.Pod{} + for _, fromNodePod := range fromNode.Pods { + leader, err := isLeaderPod(rc, fromNodePod, flow.Logger()) + if err != nil { + return flow.RetryErr(err, "Fail check if leader") + } + if leader { + xstoreLeaderPodMap[fromNodePod.Labels[xstoremeta.LabelName]] = fromNodePod + } + } + var leaderPod corev1.Pod + var targetNodePod corev1.Pod + var ok bool + for _, toNodePod := range toNode.Pods { + xstoreName := toNodePod.Labels[xstoremeta.LabelName] + leaderPod, ok = xstoreLeaderPodMap[xstoreName] + if ok { + targetNodePod = toNodePod + break + } + } + if targetNodePod.Name != "" { + changeLeader(rc, leaderPod, targetNodePod, flow.Logger()) + } + return 
flow.Retry("Retry BalanceRole") + }) diff --git a/pkg/operator/v1/systemtask/steps/status.go b/pkg/operator/v1/systemtask/steps/status.go new file mode 100644 index 0000000..b57511d --- /dev/null +++ b/pkg/operator/v1/systemtask/steps/status.go @@ -0,0 +1,33 @@ +package steps + +import ( + "github.com/alibaba/polardbx-operator/api/v1/systemtask" + "github.com/alibaba/polardbx-operator/pkg/k8s/control" + "github.com/alibaba/polardbx-operator/pkg/operator/v1/systemtask/common" + "sigs.k8s.io/controller-runtime/pkg/reconcile" +) + +var PersistentSystemTask = common.NewStepBinder("PersistentSystemTask", + func(rc *common.Context, flow control.Flow) (reconcile.Result, error) { + if rc.IsSystemTaskChanged() { + if err := rc.UpdateSystemTask(); err != nil { + return flow.Error(err, "Unable to persistent system task.") + } + return flow.Continue("Succeeds to persistent system task.") + } + return flow.Continue("Object not changed.") + }) + +func TransferPhaseTo(phase systemtask.Phase, requeue bool) control.BindFunc { + return common.NewStepBinder("TransferPhaseTo"+string(phase), + func(rc *common.Context, flow control.Flow) (reconcile.Result, error) { + systemTask := rc.MustGetSystemTask() + systemTask.Status.Phase = phase + rc.MarkSystemTaskChanged() + if requeue { + return flow.Retry("Retry immediately.") + } + return flow.Pass() + }, + ) +} diff --git a/pkg/operator/v1/xstore/change/driver/exec/update_executor.go b/pkg/operator/v1/xstore/change/driver/exec/update_executor.go index 89f718a..0fe4b22 100644 --- a/pkg/operator/v1/xstore/change/driver/exec/update_executor.go +++ b/pkg/operator/v1/xstore/change/driver/exec/update_executor.go @@ -88,7 +88,7 @@ func (exec *UpdateExec) Execute(rc *xstorev1reconcile.Context, flow control.Flow return flow.Error(err, "Failed to construct new pod", "pod", target) } pod.Labels[xstoremeta.LabelGeneration] = strconv.FormatInt(step.TargetGeneration, 10) - pod.Spec.NodeName = step.OriginHost + pod.Spec.NodeName = exec.ec.Volumes[pod.Name].Host if err := rc.SetControllerRefAndCreate(pod); err != nil { return flow.Error(err, "Failed to create pod", "pod", target) diff --git a/pkg/operator/v1/xstore/change/driver/planner/planner.go b/pkg/operator/v1/xstore/change/driver/planner/planner.go index f9365ae..a33f1ec 100644 --- a/pkg/operator/v1/xstore/change/driver/planner/planner.go +++ b/pkg/operator/v1/xstore/change/driver/planner/planner.go @@ -80,8 +80,10 @@ func (p *Planner) runningGenerations() (map[int64]int, error) { func (p *Planner) buildExpectedNodes() map[string]model.PaxosNode { topology := &p.xstore.Spec.Topology + generation := p.xstore.Generation if p.selfHeal { topology = p.xstore.Status.ObservedTopology + generation = p.xstore.Status.ObservedGeneration } nodes := make(map[string]model.PaxosNode) @@ -93,7 +95,7 @@ func (p *Planner) buildExpectedNodes() map[string]model.PaxosNode { PaxosInnerNode: model.PaxosInnerNode{ Pod: name, Role: strings.ToLower(string(ns.Role)), - Generation: p.xstore.Generation, + Generation: generation, Set: ns.Name, Index: i, }, diff --git a/pkg/operator/v1/xstore/command/commands.go b/pkg/operator/v1/xstore/command/commands.go index c617191..23e1552 100644 --- a/pkg/operator/v1/xstore/command/commands.go +++ b/pkg/operator/v1/xstore/command/commands.go @@ -143,7 +143,7 @@ func (b *commandConsensusBuilder) PurgeLogs(local, force bool) *CommandBuilder { } func (b *commandConsensusBuilder) SetLeader(pod string) *CommandBuilder { - b.args = append(b.args, "change-leader", "--node", pod) + b.args = append(b.args, "change-leader", 
pod) return b.end() } @@ -157,6 +157,31 @@ func (b *commandConsensusBuilder) DropLearner(pod string) *CommandBuilder { return b.end() } +func (b *commandConsensusBuilder) DisableElection() *CommandBuilder { + b.args = append(b.args, "disable-election") + return b.end() +} + +func (b *commandConsensusBuilder) EnableElection() *CommandBuilder { + b.args = append(b.args, "enable-election") + return b.end() +} + +func (b *commandConsensusBuilder) UpdateClusterInfo(clusterInfo string) *CommandBuilder { + b.args = append(b.args, "update-cluster-info", clusterInfo) + return b.end() +} + +func (b *commandConsensusBuilder) PrepareHandleIndicate(action string) *CommandBuilder { + b.args = append(b.args, "prepare-handle-indicate", action) + return b.end() +} + +func (b *commandConsensusBuilder) ChangeLearnerToFollower(node string) *CommandBuilder { + b.args = append(b.args, "learner-to-follower", node) + return b.end() +} + func (b *commandConsensusBuilder) ChangeRole(pod, from, to string) *CommandBuilder { b.args = append(b.args, "change", "--node", pod, "--from-role", from, "--to-role", to) return b.end() @@ -183,6 +208,11 @@ func (b *commandConsensusBuilder) ForceSingleMode() *CommandBuilder { return b.end() } +func (b *commandConsensusBuilder) SetReadonly() *CommandBuilder { + b.args = append(b.args, "set-readonly") + return b.end() +} + type commandAccountBuilder struct { *commandBuilder } @@ -304,6 +334,11 @@ func (b *commandEngineBuilder) Version() *CommandBuilder { return b.end() } +func (b *commandEngineBuilder) Shutdown() *CommandBuilder { + b.args = append(b.args, "shutdown") + return b.end() +} + type commandProcessBuilder struct { *commandBuilder } @@ -321,6 +356,11 @@ func (b *commandProcessBuilder) KillAllMyProcess() *CommandBuilder { return b.end() } +func (b *commandProcessBuilder) KillMySQLDProcess() *CommandBuilder { + b.args = append(b.args, "kill_mysqld") + return b.end() +} + type commandBackupBuilder struct { *commandBuilder } diff --git a/pkg/operator/v1/xstore/controllers/xstore_backup_controller.go b/pkg/operator/v1/xstore/controllers/xstore_backup_controller.go index 55a2c5a..8a35887 100644 --- a/pkg/operator/v1/xstore/controllers/xstore_backup_controller.go +++ b/pkg/operator/v1/xstore/controllers/xstore_backup_controller.go @@ -22,6 +22,7 @@ import ( "github.com/alibaba/polardbx-operator/pkg/k8s/control" "github.com/alibaba/polardbx-operator/pkg/operator/hint" "github.com/alibaba/polardbx-operator/pkg/operator/v1/config" + "github.com/alibaba/polardbx-operator/pkg/operator/v1/polardbx/meta" "github.com/alibaba/polardbx-operator/pkg/operator/v1/xstore/plugin" xstorev1reconcile "github.com/alibaba/polardbx-operator/pkg/operator/v1/xstore/reconcile" "github.com/go-logr/logr" @@ -45,7 +46,7 @@ type XStoreBackupReconciler struct { } func (r *XStoreBackupReconciler) Reconcile(ctx context.Context, request reconcile.Request) (reconcile.Result, error) { - log := r.Logger.WithValues("namespace", request.Namespace, "xstore", request.Name) + log := r.Logger.WithValues("namespace", request.Namespace, "xstore-backup", request.Name) if hint.IsNamespacePaused(request.Namespace) { log.Info("Reconciling is paused, skip") @@ -91,6 +92,12 @@ func (r *XStoreBackupReconciler) Reconcile(ctx context.Context, request reconcil return reconcile.Result{}, nil } + // check whether backup is dummy + if xstoreBackup.Annotations[meta.AnnotationDummyBackup] == "true" { + log.Info("Dummy xstore backup, skip") + return reconcile.Result{}, nil + } + return reconciler.Reconcile(rc, log.WithValues("engine", 
engine), request) } @@ -100,8 +107,8 @@ func (r *XStoreBackupReconciler) SetupWithManager(mgr ctrl.Manager) error { MaxConcurrentReconciles: r.MaxConcurrency, RateLimiter: workqueue.NewMaxOfRateLimiter( workqueue.NewItemExponentialFailureRateLimiter(5*time.Millisecond, 300*time.Second), - // 10 qps, 100 bucket size. This is only for retry speed. It's only the overall factor (not per item). - &workqueue.BucketRateLimiter{Limiter: rate.NewLimiter(rate.Limit(10), 100)}, + // 60 qps, 10 bucket size. This is only for retry speed. It's only the overall factor (not per item). + &workqueue.BucketRateLimiter{Limiter: rate.NewLimiter(rate.Limit(60), 10)}, ), }). For(&xstorev1.XStoreBackup{}). diff --git a/pkg/operator/v1/xstore/controllers/xstore_controller.go b/pkg/operator/v1/xstore/controllers/xstore_controller.go index 005bf37..370ce23 100644 --- a/pkg/operator/v1/xstore/controllers/xstore_controller.go +++ b/pkg/operator/v1/xstore/controllers/xstore_controller.go @@ -91,8 +91,8 @@ func (r *XStoreReconciler) SetupWithManager(mgr ctrl.Manager) error { MaxConcurrentReconciles: r.MaxConcurrency, RateLimiter: workqueue.NewMaxOfRateLimiter( workqueue.NewItemExponentialFailureRateLimiter(5*time.Millisecond, 60*time.Second), - // 60 qps, 100 bucket size. This is only for retry speed. It's only the overall factor (not per item). - &workqueue.BucketRateLimiter{Limiter: rate.NewLimiter(rate.Limit(60), 100)}, + // 60 qps, 10 bucket size. This is only for retry speed. It's only the overall factor (not per item). + &workqueue.BucketRateLimiter{Limiter: rate.NewLimiter(rate.Limit(60), 10)}, ), }). For(&polardbxv1.XStore{}). diff --git a/pkg/operator/v1/xstore/controllers/xstore_follower_controller.go b/pkg/operator/v1/xstore/controllers/xstore_follower_controller.go index 8f650cf..afe59ee 100644 --- a/pkg/operator/v1/xstore/controllers/xstore_follower_controller.go +++ b/pkg/operator/v1/xstore/controllers/xstore_follower_controller.go @@ -174,8 +174,8 @@ func (r *XStoreFollowerReconciler) SetupWithManager(mgr ctrl.Manager) error { MaxConcurrentReconciles: r.MaxConcurrency, RateLimiter: workqueue.NewMaxOfRateLimiter( workqueue.NewItemExponentialFailureRateLimiter(5*time.Millisecond, 30*time.Second), - // 10 qps, 100 bucket size. This is only for retry speed. It's only the overall factor (not per item). - &workqueue.BucketRateLimiter{Limiter: rate.NewLimiter(rate.Limit(60), 100)}, + // 60 qps, 10 bucket size. This is only for retry speed. It's only the overall factor (not per item). + &workqueue.BucketRateLimiter{Limiter: rate.NewLimiter(rate.Limit(60), 10)}, ), }). For(&polardbxv1.XStoreFollower{}). diff --git a/pkg/operator/v1/xstore/convention/convention.go b/pkg/operator/v1/xstore/convention/convention.go index 84bca90..f43eb53 100644 --- a/pkg/operator/v1/xstore/convention/convention.go +++ b/pkg/operator/v1/xstore/convention/convention.go @@ -19,6 +19,9 @@ package convention import ( "errors" "fmt" + "github.com/alibaba/polardbx-operator/pkg/util/name" + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/util/rand" "strconv" "strings" @@ -68,6 +71,10 @@ func NewServiceName(xstore *polardbxv1.XStore, serviceType ServiceType) string { panic("invalid service type: " + serviceType) } +func NewXstorePodServiceName(pod *corev1.Pod) string { + return pod.Name + "-service" +} + // Conventions for port names. 
const ( @@ -223,8 +230,8 @@ const ( FileStreamRootDir = "/filestream" XClusterBackupBinFilepath = "/u01/xcluster_xtrabackup/bin/innobackupex" XClusterBackupSetPrepareArg = "--apply-log" - GalaxyEngineBackupSlaveInfoArgs = "--slave-info --lock-ddl " - XClusterBackupSlaveInfoArgs = "" + GalaxyEngineBackupExtraArgs = " --slave-info --lock-ddl " + XClusterBackupExtraArgs = " --rds-execute-backup-lock-timeout=120 " GalaxyEngineBackupStreamArgs = "--stream=xbstream" XClusterBackupStreamArgs = "--stream=tar" GalaxyEngineTargetDirArgs = "--target-dir=" @@ -257,3 +264,22 @@ func GetParameterLabel() map[string]string { ParameterName: ParameterType, } } + +const AutoRebuildConfigMapName = "auto-build" + +// Conventions for backup + +type BackupJobType string + +const ( + BackupJobTypeFullBackup = "backup" + BackupJobTypeBinlogBackup = "binlog" + BackupJobTypeCollect = "collect" +) + +func NewBackupJobName(targetPod *corev1.Pod, jobType BackupJobType) string { + return name.NewSplicedName( + name.WithTokens(string(jobType), "job", targetPod.Name, rand.String(4)), + name.WithPrefix(fmt.Sprintf("%s-job", jobType)), + ) +} diff --git a/pkg/operator/v1/xstore/factory/pod_extra.go b/pkg/operator/v1/xstore/factory/pod_extra.go index d59ddfc..babe4f2 100644 --- a/pkg/operator/v1/xstore/factory/pod_extra.go +++ b/pkg/operator/v1/xstore/factory/pod_extra.go @@ -324,6 +324,17 @@ func (f *DefaultExtraPodFactory) NewEnvs(ctx *PodFactoryContext) (map[string][]c template := ctx.template resources := template.Spec.Resources + engineEnvs := make([]corev1.EnvVar, 0) + configEnvs := ctx.xstore.Spec.Config.Envs + if configEnvs != nil { + for k, v := range configEnvs { + engineEnvs = append(engineEnvs, corev1.EnvVar{ + Name: k, + Value: v.String(), + }) + } + } + return map[string][]corev1.EnvVar{ convention.ContainerEngine: k8shelper.PatchEnvs( []corev1.EnvVar{ @@ -334,6 +345,7 @@ func (f *DefaultExtraPodFactory) NewEnvs(ctx *PodFactoryContext) (map[string][]c {Name: "LOG_DATA_SEPARATION", Value: strconv.FormatBool(ctx.xstore.Spec.Config.Dynamic.LogDataSeparation)}, }, f.newEnvsForEnginePorts(ctx), + engineEnvs, ), }, nil } diff --git a/pkg/operator/v1/xstore/factory/secret.go b/pkg/operator/v1/xstore/factory/secret.go index fe4bc1e..a74ff4d 100644 --- a/pkg/operator/v1/xstore/factory/secret.go +++ b/pkg/operator/v1/xstore/factory/secret.go @@ -17,13 +17,14 @@ limitations under the License. 
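As a brief aside on the NewEnvs change above: entries under the xstore's spec.config.envs now flow into the engine container verbatim. A minimal sketch of the effect, assuming a hypothetical MY_FLAG entry:

// Given spec.config.envs = {"MY_FLAG": "on"}, NewEnvs now also emits
//     corev1.EnvVar{Name: "MY_FLAG", Value: "on"}
// for the engine container, merged by k8shelper.PatchEnvs after built-in
// entries such as LOG_DATA_SEPARATION and the engine port variables.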
package factory import ( + polardbxv1 "github.com/alibaba/polardbx-operator/api/v1" + "github.com/alibaba/polardbx-operator/pkg/operator/v1/xstore/convention" + xstoremeta "github.com/alibaba/polardbx-operator/pkg/operator/v1/xstore/meta" + "github.com/alibaba/polardbx-operator/pkg/operator/v1/xstore/reconcile" corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/util/rand" "k8s.io/utils/pointer" - - polardbxv1 "github.com/alibaba/polardbx-operator/api/v1" - "github.com/alibaba/polardbx-operator/pkg/operator/v1/xstore/convention" ) func NewSecret(xstore *polardbxv1.XStore) *corev1.Secret { @@ -58,3 +59,34 @@ func NewSecret(xstore *polardbxv1.XStore) *corev1.Secret { StringData: data, } } + +func NewSecretForRestore(rc *reconcile.Context, xstore *polardbxv1.XStore) (*corev1.Secret, error) { + secret := &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: convention.NewSecretName(xstore), + Namespace: xstore.Namespace, + Labels: convention.ConstLabels(xstore), + }, + Immutable: pointer.Bool(true), + Type: corev1.SecretTypeOpaque, + } + // try to get secret from pxb first + var secretName string + if xstore.Spec.Restore.BackupSet == "" || len(xstore.Spec.Restore.BackupSet) == 0 { + backup, err := rc.GetLastCompletedXStoreBackup(map[string]string{ + xstoremeta.LabelName: xstore.Spec.Restore.From.XStoreName, + }, rc.MustParseRestoreTime()) + if err != nil { + return nil, err + } + secretName = backup.Name + } else { + secretName = xstore.Spec.Restore.BackupSet + } + xsbSecret, err := rc.GetSecretByName(secretName) + if err != nil || xsbSecret == nil { + return nil, err + } + secret.Data = xsbSecret.Data + return secret, nil +} diff --git a/pkg/operator/v1/xstore/meta/annotations.go b/pkg/operator/v1/xstore/meta/annotations.go index eb56102..26fc4d2 100644 --- a/pkg/operator/v1/xstore/meta/annotations.go +++ b/pkg/operator/v1/xstore/meta/annotations.go @@ -16,6 +16,8 @@ limitations under the License. 
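One note on NewSecretForRestore above: when spec.restore.backupSet is empty, the last completed XStoreBackup before the restore time supplies the secret name, so the restored instance keeps the account passwords saved with the backup. A hedged sketch of a call site follows; the surrounding step and error handling are assumed, not taken from this patch:

// Illustrative only; the actual wiring lives in the xstore restore steps.
secret, err := factory.NewSecretForRestore(rc, xstore)
if err != nil {
	return flow.Error(err, "failed to build secret for restore")
}
if err := rc.SetControllerRefAndCreate(secret); err != nil {
	return flow.Error(err, "failed to create restore secret")
}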
package meta +import "strings" + const AnnotationControllerHints = "xstore/controller.hints" const ( @@ -40,3 +42,22 @@ const ( const ( AnnotationRebuildFromPod = "xstore/rebuild_from_pod" ) + +const ( + AnnotationAdapting = "xstore/adapting" +) + +func IsAdaptingTrue(val string) bool { + val = strings.ToLower(val) + return val == "1" || val == "on" || val == "true" +} + +// Annotations for backup for xstore +const ( + // AnnotationCollectJobProbeLimit denotes retry limit of getting collect job when waiting collect job finished + AnnotationCollectJobProbeLimit = "xstore-backup/collect-job-probe-limit" +) + +const ( + AnnotationAutoRebuild = "xstore/auto-rebuild" +) diff --git a/pkg/operator/v1/xstore/meta/labels.go b/pkg/operator/v1/xstore/meta/labels.go index 9d9e69a..87dfbd4 100644 --- a/pkg/operator/v1/xstore/meta/labels.go +++ b/pkg/operator/v1/xstore/meta/labels.go @@ -25,21 +25,23 @@ import ( ) const ( - LabelName = "xstore/name" - LabelRand = "xstore/rand" - LabelRole = "xstore/role" - LabelPod = "xstore/pod" - LabelNodeRole = "xstore/node-role" - LabelServiceType = "xstore/service" - LabelNodeSet = "xstore/node-set" - LabelGeneration = "xstore/generation" - LabelPortLock = "xstore/port-lock" - LabelHash = "xstore/hash" - LabelConfigHash = "xstore/config-hash" - LabelPrimaryName = "xstore/primary-name" - LabelRebuildTask = "xstore/rebuild-task" - LabelOriginName = "xstore/origin-name" - LabelTmp = "xstore/tmp" + LabelName = "xstore/name" + LabelRand = "xstore/rand" + LabelRole = "xstore/role" + LabelPod = "xstore/pod" + LabelNodeRole = "xstore/node-role" + LabelServiceType = "xstore/service" + LabelNodeSet = "xstore/node-set" + LabelGeneration = "xstore/generation" + LabelPortLock = "xstore/port-lock" + LabelHash = "xstore/hash" + LabelConfigHash = "xstore/config-hash" + LabelPrimaryName = "xstore/primary-name" + LabelRebuildTask = "xstore/rebuild-task" + LabelOriginName = "xstore/origin-name" + LabelTargetXStore = "xstore/target-name" + LabelTmp = "xstore/tmp" + LabelAutoRebuild = "xstore/auto-rebuild" ) const ( diff --git a/pkg/operator/v1/xstore/plugin/common/steps/common.go b/pkg/operator/v1/xstore/plugin/common/steps/common.go index 325881f..9eeffcc 100644 --- a/pkg/operator/v1/xstore/plugin/common/steps/common.go +++ b/pkg/operator/v1/xstore/plugin/common/steps/common.go @@ -17,7 +17,6 @@ limitations under the License. 
package steps import ( - "github.com/alibaba/polardbx-operator/pkg/operator/v1/xstore/plugin/galaxy/galaxy" "strings" "time" @@ -59,9 +58,7 @@ func transformPodsIntoNodesWithServices(rc *xstorev1reconcile.Context, pods []co if nodes[i].Role == strings.ToLower(xstoremeta.RoleLearner) { nodes[i].Host = service.Spec.ClusterIP } - if rc.MustGetXStore().Spec.Engine == galaxy.Engine { - nodes[i].Host = service.Spec.ClusterIP - } + nodes[i].Host = service.Spec.ClusterIP } return nodes } diff --git a/pkg/operator/v1/xstore/plugin/galaxy/reconcilers/galaxy_backup_reconciler.go b/pkg/operator/v1/xstore/plugin/galaxy/reconcilers/galaxy_backup_reconciler.go index e5e01be..49a3d05 100644 --- a/pkg/operator/v1/xstore/plugin/galaxy/reconcilers/galaxy_backup_reconciler.go +++ b/pkg/operator/v1/xstore/plugin/galaxy/reconcilers/galaxy_backup_reconciler.go @@ -67,7 +67,7 @@ func (r *GalaxyBackupReconciler) newReconcileTask(rc *xstorev1reconcile.BackupCo backupsteps.ExtractLastEventTimestamp(task) backupsteps.UpdatePhaseTemplate(xstorev1.XStoreBinlogWaiting)(task) case xstorev1.XStoreBinlogWaiting: - backupsteps.WaitPXCBackupFinished(task) + backupsteps.WaitPXCBinlogBackupFinished(task) backupsteps.SaveXStoreSecrets(task) backupsteps.UpdatePhaseTemplate(xstorev1.XStoreBackupFinished)(task) case xstorev1.XStoreBackupFinished: diff --git a/pkg/operator/v1/xstore/plugin/galaxy/reconcilers/galaxy_reconciler.go b/pkg/operator/v1/xstore/plugin/galaxy/reconcilers/galaxy_reconciler.go index 80aec5f..22142ad 100644 --- a/pkg/operator/v1/xstore/plugin/galaxy/reconcilers/galaxy_reconciler.go +++ b/pkg/operator/v1/xstore/plugin/galaxy/reconcilers/galaxy_reconciler.go @@ -229,6 +229,10 @@ func (r *GalaxyReconciler) newReconcileTask(rc *xstorev1reconcile.Context, xstor )(task) instancesteps.WaitUntilLeaderElected(task) + if featuregate.EnableAutoRebuildFollower.Enabled() { + instancesteps.CheckFollowerStatus(task) + } + // Purge logs with interval specified (but not less than 2 minutes). logPurgeInterval := 2 * time.Minute if xstore.Spec.Config.Dynamic.LogPurgeInterval != nil { @@ -253,9 +257,6 @@ func (r *GalaxyReconciler) newReconcileTask(rc *xstorev1reconcile.Context, xstor instancesteps.SyncEngineConfigMap, )(task) - // Sync my.cnf from my.cnf.override - instancesteps.UpdateMycnfParameters(task) - // Goto upgrading if topology changed. (not breaking the task flow) instancesteps.WhenTopologyChanged( instancesteps.UpdatePhaseTemplate(polardbxv1xstore.PhaseUpgrading), @@ -267,6 +268,11 @@ func (r *GalaxyReconciler) newReconcileTask(rc *xstorev1reconcile.Context, xstor control.Retry("Start PhaseUpgrading..."), )(task) + // get to adapt phase if the xstore needs adapt + instancesteps.WhenNeedAdapt(instancesteps.UpdatePhaseTemplate(polardbxv1xstore.PhaseAdapting), + control.Retry("Start PhaseAdapting..."), + )(task) + // Update the observed generation at the end of running phase. 
instancesteps.UpdateObservedGeneration(task)
+	instancesteps.UpdateObservedTopologyAndConfig(task)
@@ -296,18 +302,23 @@ func (r *GalaxyReconciler) newReconcileTask(rc *xstorev1reconcile.Context, xstor
 		control.RetryAfter(30*time.Second, "Check every 30 seconds...")(task)
 	case polardbxv1xstore.PhaseUpgrading, polardbxv1xstore.PhaseRepairing:
 		selfHeal := xstore.Status.Phase == polardbxv1xstore.PhaseRepairing
-
+		instancesteps.PrepareHostPathVolumes(task)
 		switch xstore.Status.Stage {
 		case polardbxv1xstore.StageEmpty:
 			ec, err := instancesteps.LoadExecutionContext(rc)
 			if err != nil {
 				return nil, err
 			}
+			newEc, err := instancesteps.NewExecutionContext(rc, xstore, selfHeal)
+			if err != nil {
+				return nil, err
+			}
 			if ec == nil {
-				ec, err = instancesteps.NewExecutionContext(rc, xstore, selfHeal)
+				ec = newEc
 			}
-
-			defer instancesteps.TrackAndLazyUpdateExecuteContext(ec)(task, true)
+			trackAndLazyUpdateExecuteContext := instancesteps.TrackAndLazyUpdateExecuteContext(ec)
+			defer trackAndLazyUpdateExecuteContext(task, true)
+			ec.Volumes = newEc.Volumes
 			if featuregate.EnableGalaxyClusterMode.Enabled() {
 				instancesteps.ReconcileConsensusRoleLabels(task)
 			} else {
@@ -348,6 +356,8 @@ func (r *GalaxyReconciler) newReconcileTask(rc *xstorev1reconcile.Context, xstor
 			instancesteps.GetParametersRoleMap(task)
 			instancesteps.UpdateXStoreConfigMap(task)
 			instancesteps.SetGlobalVariables(task)
+			// Sync my.cnf from my.cnf.override
+			instancesteps.UpdateMycnfParameters(task)
 			instancesteps.CloseXStoreUpdatePhase(task)
 			instancesteps.UpdatePhaseTemplate(polardbxv1xstore.PhaseRunning, true)(task)
 		}
@@ -386,6 +396,29 @@ func (r *GalaxyReconciler) newReconcileTask(rc *xstorev1reconcile.Context, xstor
 		)(task)
 		instancesteps.CloseXStoreRestartPhase(task)
 		instancesteps.UpdatePhaseTemplate(polardbxv1xstore.PhaseRunning, true)(task)
+	case polardbxv1xstore.PhaseAdapting:
+		switch xstore.Status.Stage {
+		case polardbxv1xstore.StageEmpty:
+			instancesteps.PrepareHostPathVolumes(task)
+			galaxyinstancesteps.CreatePodsAndServices(task)
+			instancesteps.UpdateSharedConfigMap(task)
+			instancesteps.DisableElection(task)
+			// First flush the logger and follower nodes down to learners, then bring the leader back up as a single node.
+			instancesteps.UpdateStageTemplate(polardbxv1xstore.StageFlushMetadata, true)(task)
+		case polardbxv1xstore.StageFlushMetadata:
+			instancesteps.FlushClusterMetadata(task)
+			instancesteps.UpdateStageTemplate(polardbxv1xstore.StageAdapting, true)(task)
+		case polardbxv1xstore.StageAdapting:
+			instancesteps.ReconcileConsensusRoleLabels(task)
+			instancesteps.WaitUntilLeaderElected(task)
+			instancesteps.ReAddFollower(task)
+			xstoreplugincommonsteps.SetVoterElectionWeightToOne(task)
+			instancesteps.WaitUntilCandidatesAndVotersReady(task)
+			instancesteps.UpdateStageTemplate(polardbxv1xstore.StageBeforeSuccess, true)(task)
+		case polardbxv1xstore.StageBeforeSuccess:
+			instancesteps.EnableElection(task)
+			instancesteps.UpdatePhaseTemplate(polardbxv1xstore.PhaseRunning, true)(task)
+		}
 	case polardbxv1xstore.PhaseFailed:
 		log.Info("Failed.")
 	case polardbxv1xstore.PhaseUnknown:
diff --git a/pkg/operator/v1/xstore/plugin/galaxy/steps/instance/log.go b/pkg/operator/v1/xstore/plugin/galaxy/steps/instance/log.go
index f0f3cc1..5d111ad 100644
--- a/pkg/operator/v1/xstore/plugin/galaxy/steps/instance/log.go
+++ b/pkg/operator/v1/xstore/plugin/galaxy/steps/instance/log.go
@@ -17,6 +17,7 @@ limitations under the License.
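The adapting flow added above is easiest to read as a four-stage ladder; informally, matching the stages in the switch:

// StageEmpty:         recreate pods/services, disable election, and flush
//                     logger/follower nodes down to learners.
// StageFlushMetadata: flush the consensus cluster metadata.
// StageAdapting:      reconcile role labels, wait for a leader, re-add
//                     followers, restore voter election weights to one.
// StageBeforeSuccess: re-enable election and return to PhaseRunning.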
package instance import ( + v1 "github.com/alibaba/polardbx-operator/api/v1" xstoremeta "github.com/alibaba/polardbx-operator/pkg/operator/v1/xstore/meta" "time" @@ -39,6 +40,16 @@ func PurgeLogsTemplate(d time.Duration) control.BindFunc { return plugin.NewStepBinder(galaxy.Engine, "PurgeLogs", func(rc *xstorev1reconcile.Context, flow control.Flow) (reconcile.Result, error) { + + backupBinlogList := v1.PolarDBXBackupBinlogList{} + err := rc.Client().List(rc.Context(), &backupBinlogList) + if err != nil { + return flow.RetryErr(err, "failed to get backup binlog list") + } + if len(backupBinlogList.Items) > 0 { + return flow.Pass() + } + xstore := rc.MustGetXStore() // if Purge binlog Locked diff --git a/pkg/operator/v1/xstore/reconcile/backup_context.go b/pkg/operator/v1/xstore/reconcile/backup_context.go index 3abc104..da24484 100644 --- a/pkg/operator/v1/xstore/reconcile/backup_context.go +++ b/pkg/operator/v1/xstore/reconcile/backup_context.go @@ -27,8 +27,8 @@ import ( "github.com/alibaba/polardbx-operator/pkg/operator/v1/xstore/convention" xstoreconvention "github.com/alibaba/polardbx-operator/pkg/operator/v1/xstore/convention" xstoremeta "github.com/alibaba/polardbx-operator/pkg/operator/v1/xstore/meta" - "github.com/alibaba/polardbx-operator/pkg/util" dbutil "github.com/alibaba/polardbx-operator/pkg/util/database" + "github.com/alibaba/polardbx-operator/pkg/util/name" batchv1 "k8s.io/api/batch/v1" corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/equality" @@ -38,7 +38,6 @@ import ( "k8s.io/utils/pointer" ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/client" - "strconv" ) type BackupContext struct { @@ -262,40 +261,42 @@ func (rc *BackupContext) GetXStoreTargetPod() (*corev1.Pod, error) { rolePodMap := make(map[string]*corev1.Pod) for i := range pods { - p := &pods[i] - rolePodMap[p.Labels[xstoremeta.LabelRole]] = p + pod := &pods[i] + rolePodMap[pod.Labels[xstoremeta.LabelRole]] = pod } - preferred, _ := xstoreBackup.Labels[meta.LabelPreferredBackupNode] - if preferred == xstoremeta.RoleLeader { // preferred backup node is leader, just set it - p, ok := rolePodMap[xstoremeta.RoleLeader] + if xstoreBackup.Spec.PreferredBackupRole == xstoremeta.RoleLeader { // preferred backup node is leader, just set it + pod, ok := rolePodMap[xstoremeta.RoleLeader] if !ok { return nil, errors.New("target pod is leader, but leader not found") } - rc.xstoreTargetPod = p - return p, nil + rc.xstoreTargetPod = pod + return pod, nil } - // if `PreferredBackupNode` has not been set or set to something other than leader, then we pick follower as backup pod - p, ok := rolePodMap[xstoremeta.RoleFollower] + // if `PreferredBackupRole` has not been set or set to something other than leader, then we pick follower as backup pod + pod, ok := rolePodMap[xstoremeta.RoleFollower] if !ok { return nil, errors.New("target pod is follower, but follower not found") } - manager, err := rc.GetXstoreGroupManagerByPod(p) + manager, err := rc.GetXstoreGroupManagerByPod(pod) if err != nil { return nil, err } if manager == nil { return nil, errors.New("fail to connect to follower") } + defer manager.Close() + status, err := manager.ShowSlaveStatus() if err != nil { return nil, err } if status.SlaveSQLRunning == "No" || status.LastError != "" { - return nil, errors.New("follower status abnormal") + return nil, errors.New("follower status abnormal, SlaveSQLRunning: " + status.SlaveSQLRunning + + ", LastError: " + status.LastError) } - rc.xstoreTargetPod = p + rc.xstoreTargetPod = pod } return 
rc.xstoreTargetPod, nil } @@ -401,7 +402,7 @@ func (rc *BackupContext) GetOrCreateXStoreBackupTaskConfigMap() (*corev1.ConfigM xstorebackup := rc.MustGetXStoreBackup() var cm corev1.ConfigMap - err := rc.Client().Get(rc.Context(), types.NamespacedName{Namespace: rc.Namespace(), Name: util.XStoreBackupStableName(xstorebackup, "backup")}, &cm) + err := rc.Client().Get(rc.Context(), types.NamespacedName{Namespace: rc.Namespace(), Name: name.XStoreBackupStableName(xstorebackup, "backup")}, &cm) if err != nil { if apierrors.IsNotFound(err) { rc.taskConfigMap = NewBackupTaskConfigMap(xstorebackup) @@ -501,34 +502,30 @@ func (rc *BackupContext) NewSecretFromXStore(secret *corev1.Secret) (*corev1.Sec } func (rc *BackupContext) GetXstoreGroupManagerByPod(pod *corev1.Pod) (group.GroupManager, error) { - var serviceList corev1.ServiceList - err := rc.Client().List(rc.Context(), &serviceList, client.InNamespace(rc.Namespace()), client.MatchingLabels{ - xstoremeta.LabelPod: pod.Name, - }) + podService, err := rc.xStoreContext.GetXStoreServiceForPod(pod.Name) if err != nil { return nil, err } - if len(serviceList.Items) == 0 { - return nil, errors.New("no service found related to xstore " + rc.xstore.Name) + host, port, err := k8shelper.GetClusterIpPortFromService(podService, convention.PortAccess) + if err != nil { + return nil, err } - host := serviceList.Items[0].Name + "." + pod.Namespace - port, err := strconv.Atoi(pod.Labels[xstoremeta.LabelPortLock]) secret, err := rc.GetSecret(pod.Labels[xstoremeta.LabelName]) if err != nil { return nil, err } user := xstoreconvention.SuperAccount - passwd, ok := secret.Data[user] - if !ok { - return nil, errors.New("can not get passwd for xsotre " + rc.xstore.Name) + passwd, err := rc.xStoreContext.GetXstoreAccountPasswordFromSecret(user, secret) + if err != nil { + return nil, err } return group.NewGroupManager( rc.Context(), dbutil.MySQLDataSource{ Host: host, - Port: port, - Username: user, - Password: string(passwd), + Port: int(port), + Username: xstoreconvention.SuperAccount, + Password: passwd, }, true, ), nil diff --git a/pkg/operator/v1/xstore/reconcile/context.go b/pkg/operator/v1/xstore/reconcile/context.go index 4a8b784..1492c0a 100644 --- a/pkg/operator/v1/xstore/reconcile/context.go +++ b/pkg/operator/v1/xstore/reconcile/context.go @@ -21,7 +21,7 @@ import ( "errors" "fmt" "github.com/alibaba/polardbx-operator/pkg/meta/core/gms" - "github.com/alibaba/polardbx-operator/pkg/util" + "github.com/alibaba/polardbx-operator/pkg/util/name" "k8s.io/utils/pointer" "strings" "time" @@ -324,6 +324,18 @@ func (rc *Context) GetXStoreClusterAddr(serviceType convention.ServiceType, port return k8shelper.GetClusterAddrFromService(svc, port) } +func (rc *Context) GetSecretByName(name string) (*corev1.Secret, error) { + secretKey := types.NamespacedName{ + Namespace: rc.xstoreKey.Namespace, + Name: name, + } + secret, err := rc.objectCache.GetObject(rc.Context(), secretKey, &corev1.Secret{}) + if err != nil { + return nil, err + } + return secret.(*corev1.Secret), nil +} + func (rc *Context) GetXStoreSecret() (*corev1.Secret, error) { xstore, err := rc.GetXStore() if err != nil { @@ -385,9 +397,13 @@ func (rc *Context) GetXStoreAccountPassword(user string) (string, error) { if err != nil { return "", err } + return rc.GetXstoreAccountPasswordFromSecret(user, secret) +} + +func (rc *Context) GetXstoreAccountPasswordFromSecret(user string, secret *corev1.Secret) (string, error) { passwd, ok := secret.Data[user] if !ok { - return "", errors.New("not found") + return 
"", errors.New("account " + user + " not found") } return string(passwd), nil } @@ -731,7 +747,7 @@ func (rc *Context) GetOrCreateXStoreTaskConfigMap() (*corev1.ConfigMap, error) { xstore := rc.MustGetXStore() var cm corev1.ConfigMap - err := rc.Client().Get(rc.Context(), types.NamespacedName{Namespace: rc.Namespace(), Name: util.StableName(xstore, "restore")}, &cm) + err := rc.Client().Get(rc.Context(), types.NamespacedName{Namespace: rc.Namespace(), Name: name.StableName(xstore, "restore")}, &cm) if err != nil { if apierrors.IsNotFound(err) { rc.taskConfigMap = NewTaskConfigMap(xstore) diff --git a/pkg/operator/v1/xstore/steps/backup/backupjob.go b/pkg/operator/v1/xstore/steps/backup/backup_job.go similarity index 97% rename from pkg/operator/v1/xstore/steps/backup/backupjob.go rename to pkg/operator/v1/xstore/steps/backup/backup_job.go index feeb22d..4a5c10b 100644 --- a/pkg/operator/v1/xstore/steps/backup/backupjob.go +++ b/pkg/operator/v1/xstore/steps/backup/backup_job.go @@ -21,7 +21,7 @@ import ( k8shelper "github.com/alibaba/polardbx-operator/pkg/k8s/helper" "github.com/alibaba/polardbx-operator/pkg/operator/v1/xstore/command" xstoremeta "github.com/alibaba/polardbx-operator/pkg/operator/v1/xstore/meta" - "github.com/alibaba/polardbx-operator/pkg/util" + "github.com/alibaba/polardbx-operator/pkg/util/name" batchv1 "k8s.io/api/batch/v1" corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -59,7 +59,7 @@ func patchTaskConfigMapVolumeAndVolumeMounts(xstoreBackup *xstorev1.XStoreBackup VolumeSource: corev1.VolumeSource{ ConfigMap: &corev1.ConfigMapVolumeSource{ LocalObjectReference: corev1.LocalObjectReference{ - Name: util.XStoreBackupStableName(xstoreBackup, "backup"), + Name: name.XStoreBackupStableName(xstoreBackup, "backup"), }, }, }, diff --git a/pkg/operator/v1/xstore/steps/backup/binlogbackupJob.go b/pkg/operator/v1/xstore/steps/backup/binlogbackup_job.go similarity index 100% rename from pkg/operator/v1/xstore/steps/backup/binlogbackupJob.go rename to pkg/operator/v1/xstore/steps/backup/binlogbackup_job.go diff --git a/pkg/operator/v1/xstore/steps/backup/collectjob.go b/pkg/operator/v1/xstore/steps/backup/collect_job.go similarity index 100% rename from pkg/operator/v1/xstore/steps/backup/collectjob.go rename to pkg/operator/v1/xstore/steps/backup/collect_job.go diff --git a/pkg/operator/v1/xstore/steps/backup/status.go b/pkg/operator/v1/xstore/steps/backup/status.go index c5d713c..a657d0b 100644 --- a/pkg/operator/v1/xstore/steps/backup/status.go +++ b/pkg/operator/v1/xstore/steps/backup/status.go @@ -18,6 +18,7 @@ package backup import ( "bytes" + "errors" "fmt" polardbxv1 "github.com/alibaba/polardbx-operator/api/v1" xstorev1 "github.com/alibaba/polardbx-operator/api/v1" @@ -25,17 +26,15 @@ import ( "github.com/alibaba/polardbx-operator/pkg/k8s/control" k8shelper "github.com/alibaba/polardbx-operator/pkg/k8s/helper" polardbxmeta "github.com/alibaba/polardbx-operator/pkg/operator/v1/polardbx/meta" + xstoreconvention "github.com/alibaba/polardbx-operator/pkg/operator/v1/xstore/convention" xstoremeta "github.com/alibaba/polardbx-operator/pkg/operator/v1/xstore/meta" xstorev1reconcile "github.com/alibaba/polardbx-operator/pkg/operator/v1/xstore/reconcile" xstorectrlerrors "github.com/alibaba/polardbx-operator/pkg/util/error" - corev1 "k8s.io/api/core/v1" apierrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/util/rand" "sigs.k8s.io/controller-runtime/pkg/client" 
"sigs.k8s.io/controller-runtime/pkg/reconcile" "strconv" - "strings" "time" ) @@ -60,16 +59,6 @@ func UpdatePhaseTemplate(phase xstorev1.XStoreBackupPhase, requeue ...bool) cont }) } -func GenerateJobName(targetPod *corev1.Pod, JobLabel string) string { - // 理论情况下, jobName不应该超过63位, 并且在每次job完成后,我们会将job删除,所以这里应该不会出现同时job名称冲突的情况. - jobName := JobLabel + "-job-" + targetPod.Name + "-" + rand.String(4) - if len(jobName) >= 60 { - jobName = jobName[0:59] - jobName = strings.TrimRight(jobName, "-") - } - return jobName -} - var PersistentStatusChanges = NewStepBinder("PersistentStatusChanges", func(rc *xstorev1reconcile.BackupContext, flow control.Flow) (reconcile.Result, error) { if debug.IsDebugEnabled() { @@ -165,14 +154,16 @@ var StartXStoreFullBackupJob = NewStepBinder("StartXStoreFullBackupJob", return flow.Error(err, "Unable to get task context for backup") } + // retry until target pod found, ops allowed here xstoreBackup := rc.MustGetXStoreBackup() targetPod, err := rc.GetXStoreTargetPod() if err != nil { - return flow.Error(err, "Unable to find target pod!") + return flow.RetryAfter(5*time.Second, "Unable to find target pod, error: "+err.Error()) } if targetPod == nil { - return flow.Wait("Unable to find target pod!") + return flow.RetryAfter(5*time.Second, "Unable to find target pod, error: target pod status abnormal") } + if targetPod.Labels[xstoremeta.LabelRole] == xstoremeta.RoleLeader { // warning when backup on leader pod flow.Logger().Info("Warning: performing backup on leader", "leader pod", targetPod.Name) } @@ -185,7 +176,7 @@ var StartXStoreFullBackupJob = NewStepBinder("StartXStoreFullBackupJob", return flow.Continue("Full Backup job already started!", "job-name", job.Name) } - jobName := GenerateJobName(targetPod, "backup") + jobName := xstoreconvention.NewBackupJobName(targetPod, xstoreconvention.BackupJobTypeFullBackup) xstoreBackup.Status.TargetPod = targetPod.Name job, e := newBackupJob(xstoreBackup, targetPod, jobName) @@ -312,7 +303,7 @@ var StartCollectBinlogJob = NewStepBinder("StartCollectBinlogJob", if err != nil { return flow.Error(err, "Unable to get pxcBackup!") } - jobName := GenerateJobName(targetPod, "collect") + jobName := xstoreconvention.NewBackupJobName(targetPod, xstoreconvention.BackupJobTypeCollect) job, err = newCollectJob(xstoreBackup, targetPod, *polardbxBackup, jobName) if err != nil { @@ -323,10 +314,11 @@ var StartCollectBinlogJob = NewStepBinder("StartCollectBinlogJob", return flow.Error(err, "Unable to create job to initialize data") } - return flow.Continue("collect binlog job started!", "job-name", jobName) + // wait 10 seconds to ensure that job has been created + return flow.RetryAfter(10*time.Second, "collect binlog job started!", "job-name", jobName) }) -var WaitCollectBinlogJobFinished = NewStepBinder("WaitBackupJobFinished", +var WaitCollectBinlogJobFinished = NewStepBinder("WaitCollectBinlogJobFinished", func(rc *xstorev1reconcile.BackupContext, flow control.Flow) (reconcile.Result, error) { xstore, err := rc.GetXStore() if err != nil { @@ -336,12 +328,29 @@ var WaitCollectBinlogJobFinished = NewStepBinder("WaitBackupJobFinished", return flow.Continue("GMS don't need to collect binlog job!", "xstore-name:", xstore.Name) } + // in case that collect job not found, allow retry ${probeLimit} times, by default the limit is 5 + probeLimit := 5 + xstoreBackup := rc.MustGetXStoreBackup() + if limitAnnotation, ok := xstoreBackup.Annotations[xstoremeta.AnnotationCollectJobProbeLimit]; ok { + if tempLimit, err := strconv.Atoi(limitAnnotation); 
err == nil { + probeLimit = tempLimit // only update when valid annotation parsed + } + } + flow.Logger().Info("fetch collect job probe limit from annotation", "limit", probeLimit) + job, err := rc.GetCollectBinlogJob() if client.IgnoreNotFound(err) != nil { return flow.Error(err, "Unable to get collect binlog job!") } if job == nil { - return flow.Continue("Collect binlog job removed!") + if probeLimit--; probeLimit >= 0 { // update probe limit and record into xsb + xstoreBackup.Annotations[xstoremeta.AnnotationCollectJobProbeLimit] = strconv.Itoa(probeLimit) + if err := rc.UpdateXStoreBackup(); err != nil { + return flow.Error(err, "Unable to update collect job probe limit") + } + return flow.Retry("Retry to get collect binlog job") + } + return flow.Error(errors.New("collect binlog job abnormal"), "Collect binlog job not found, retry limit reached!") } if !k8shelper.IsJobCompleted(job) { @@ -411,7 +420,7 @@ var StartBinlogBackupJob = NewStepBinder("StartBinlogBackupJob", return flow.Continue("Collect job already started!", "job-name", job.Name) } - jobName := GenerateJobName(targetPod, "binlog") + jobName := xstoreconvention.NewBackupJobName(targetPod, xstoreconvention.BackupJobTypeBinlogBackup) if targetPod.Labels[polardbxmeta.LabelRole] == polardbxmeta.RoleGMS { job, err = newBinlogBackupJob(xstoreBackup, targetPod, jobName, true) @@ -532,16 +541,16 @@ var RemoveXSBackupOverRetention = NewStepBinder("RemoveXSBackupOverRetention", return flow.Continue("PolarDBX backup deleted!", "XSBackup-name", backup.Name) }) -var WaitPXCBackupFinished = NewStepBinder("WaitPXCBackupFinished", +var WaitPXCBinlogBackupFinished = NewStepBinder("WaitPXCBinlogBackupFinished", func(rc *xstorev1reconcile.BackupContext, flow control.Flow) (reconcile.Result, error) { polardbxBackup, err := rc.GetPolarDBXBackup() if err != nil { - flow.Error(err, "Unable to find polardbxBackup") + flow.Error(err, "Unable to get PolarDBX backup") } - if polardbxBackup.Status.Phase != polardbxv1.BackupFinished { - return flow.RetryAfter(5*time.Second, "Wait polardbx backup Finished", "pxcBackup", polardbxBackup.Name) + if polardbxBackup.Status.Phase != polardbxv1.MetadataBackuping { + return flow.RetryAfter(5*time.Second, "Wait until PolarDBX binlog backup finished", "pxc backup", polardbxBackup.Name) } - return flow.Continue("Backup Finished!") + return flow.Continue("PolarDBX binlog backup finished.") }) var SaveXStoreSecrets = NewStepBinder("SaveXStoreSecrets", @@ -564,5 +573,5 @@ var SaveXStoreSecrets = NewStepBinder("SaveXStoreSecrets", if err != nil { return flow.Error(err, "Unable to create account secret while backuping") } - return flow.Continue("XStore Secrets Saved!") + return flow.Continue("XStore Secret Saved!") }) diff --git a/pkg/operator/v1/xstore/steps/follower/check.go b/pkg/operator/v1/xstore/steps/follower/check.go index fbbac51..e546486 100644 --- a/pkg/operator/v1/xstore/steps/follower/check.go +++ b/pkg/operator/v1/xstore/steps/follower/check.go @@ -21,6 +21,7 @@ import ( polarxv1 "github.com/alibaba/polardbx-operator/api/v1" polarxv1xstore "github.com/alibaba/polardbx-operator/api/v1/xstore" "github.com/alibaba/polardbx-operator/pkg/k8s/control" + k8shelper "github.com/alibaba/polardbx-operator/pkg/k8s/helper" xstoremeta "github.com/alibaba/polardbx-operator/pkg/operator/v1/xstore/meta" xstorev1reconcile "github.com/alibaba/polardbx-operator/pkg/operator/v1/xstore/reconcile" "sigs.k8s.io/controller-runtime/pkg/reconcile" @@ -42,8 +43,11 @@ var CheckXStore = NewStepBinder("CheckXStore", 
rc.MarkChanged() flow.Wait("WaitUntil xstore is being") } - xStoreFollower.SetLabels(map[string]string{ + if xStoreFollower.GetLabels() == nil { + xStoreFollower.SetLabels(map[string]string{}) + } + xStoreFollower.SetLabels(k8shelper.PatchLabels(xStoreFollower.GetLabels(), map[string]string{ xstoremeta.LabelName: xStoreFollower.Spec.XStoreName, - }) + })) return flow.Continue("CheckXStore success.") }) diff --git a/pkg/operator/v1/xstore/steps/follower/job.go b/pkg/operator/v1/xstore/steps/follower/job.go index f361cb1..edd0806 100644 --- a/pkg/operator/v1/xstore/steps/follower/job.go +++ b/pkg/operator/v1/xstore/steps/follower/job.go @@ -40,6 +40,7 @@ import ( "sigs.k8s.io/controller-runtime/pkg/reconcile" "strconv" "strings" + "time" ) type JobContext struct { @@ -74,23 +75,18 @@ var ( JobTaskInitLogger: JobArgInitLoggerFunc, } BackupToolBinFilePaths = map[string]string{ - "xcluster": XClusterBackupBinFilepath, galaxy.Engine: GalaxyEngineBackupBinFilepath, } BackupSetPrepareArgs = map[string]string{ - "xcluster": XClusterBackupSetPrepareArg, galaxy.Engine: GalaxyEngineBackupSetPrepareArg, } - BackupSlaveInfoArgs = map[string]string{ - "xcluster": XClusterBackupSlaveInfoArgs, - galaxy.Engine: GalaxyEngineBackupSlaveInfoArgs, + BackupExtraArgs = map[string]string{ + galaxy.Engine: GalaxyEngineBackupExtraArgs, } BackupStreamTypeArgs = map[string]string{ - "xcluster": XClusterBackupStreamArgs, galaxy.Engine: GalaxyEngineBackupStreamArgs, } TargetDirArgs = map[string]string{ - "xcluster": XClusterTargetDirArgs, galaxy.Engine: GalaxyEngineTargetDirArgs, } ) @@ -130,7 +126,7 @@ func JobCommandInitLoggerFunc(ctx JobContext) []string { func JobArgBackupFunc(ctx JobContext) []string { return []string{ "-c", - "touch /tmp/rebuild.log && tail -f /tmp/rebuild.log & " + BackupToolBinFilePaths[ctx.engine] + " --defaults-file=/data/mysql/conf/my.cnf --backup " + BackupSlaveInfoArgs[ctx.engine] + " --user=root --socket='/data/mysql/run/mysql.sock' " + BackupStreamTypeArgs[ctx.engine] + " " + TargetDirArgs[ctx.engine] + "/tmp/backup 2>/tmp/rebuild.log " + + "touch /tmp/rebuild.log && tail -f /tmp/rebuild.log & " + BackupToolBinFilePaths[ctx.engine] + " --defaults-file=/data/mysql/conf/my.cnf --backup " + BackupExtraArgs[ctx.engine] + " --user=root --socket='/data/mysql/run/mysql.sock' " + BackupStreamTypeArgs[ctx.engine] + " " + TargetDirArgs[ctx.engine] + "/tmp/backup 2>/tmp/rebuild.log " + "| /tools/xstore/current/bin/polardbx-filestream-client " + BackupStreamTypeArgs[ctx.engine] + " --meta.action=uploadRemote " + fmt.Sprintf(" --meta.instanceId='%s' ", GetFileStreamInstanceId(ctx.otherPod)) + fmt.Sprintf(" --meta.filename='%s' ", FileStreamBackupFilename) + fmt.Sprintf(" --destNodeName='%s' ", ctx.otherPod.Spec.NodeName) + " --hostInfoFilePath=/tools/xstore/hdfs-nodes.json && /tools/xstore/current/venv/bin/python3 /tools/xstore/current/cli.py process check_std_err_complete --filepath=/tmp/rebuild.log ", } @@ -205,7 +201,7 @@ func newJobName(task JobTask, targetPod *corev1.Pod) string { if val, ok := targetPod.Labels[polarxmeta.LabelDNIndex]; ok { suffix += suffix + "-" + val } - return fmt.Sprintf("job-%s-%s-%s%s", string(task), hashStr, targetPod.Labels[xstoremeta.LabelNodeSet], suffix) + return fmt.Sprintf("job-%s-%s-%d-%s%s", string(task), hashStr, time.Now().Unix(), targetPod.Labels[xstoremeta.LabelNodeSet], suffix) } func newJob(ctx JobContext) *batchv1.Job { diff --git a/pkg/operator/v1/xstore/steps/follower/pod.go b/pkg/operator/v1/xstore/steps/follower/pod.go index d2900c5..c3de0d1 100644 --- 
a/pkg/operator/v1/xstore/steps/follower/pod.go +++ b/pkg/operator/v1/xstore/steps/follower/pod.go @@ -159,6 +159,10 @@ var TryLoadTargetPod = NewStepBinder("TryLoadTargetPod", func(rc *xstorev1reconc xstoreFollower.Status.TargetPodName = targetPodName xstoreFollower.Status.RebuildPodName = targetPodName xstoreFollower.Status.RebuildNodeName = pod.Spec.NodeName + xstoreFollower.SetLabels(k8shelper.PatchLabels(xstoreFollower.GetLabels(), map[string]string{ + xstoremeta.LabelTargetXStore: pod.Labels[xstoremeta.LabelName], + xstoremeta.LabelPod: pod.Name, + })) rc.MarkChanged() return flow.Continue("TryLoadTargetPod Success.") }) @@ -303,7 +307,8 @@ var ClearAndMarkElectionWeight = NewStepBinder("ClearAndMarkElectionWeight", fun } oldWeights, err := xstoreinstance.SetPodElectionWeight(xstoreContext, leaderPod, flow.Logger(), 0, []string{targetPodName}) if err != nil { - return flow.RetryErr(err, "") + flow.Logger().Error(err, "ClearAndMarkElectionWeight skip") + return flow.Continue("ClearAndMarkElectionWeight Skip.") } xStoreFollower.Status.ElectionWeight = oldWeights[0] rc.MarkChanged() diff --git a/pkg/operator/v1/xstore/steps/instance/common.go b/pkg/operator/v1/xstore/steps/instance/common.go index d9f9a26..3fd6959 100644 --- a/pkg/operator/v1/xstore/steps/instance/common.go +++ b/pkg/operator/v1/xstore/steps/instance/common.go @@ -563,6 +563,18 @@ var UpdateXStoreConfigMap = xstorev1reconcile.NewStepBinder("UpdateXStoreConfigM templateCm.Data[convention.ConfigMyCnfOverride] = iniutil.ToString(mycnfOverrride) + mycnfVersion := templateCm.Data["my.cnf.override.version"] + if mycnfVersion == "" { + templateCm.Data["my.cnf.override.version"] = "1" + } else { + version, err := strconv.Atoi(mycnfVersion) + if err != nil { + return flow.Error(err, "Unable to get version of my.cnf.override.") + } + version = version + 1 + templateCm.Data["my.cnf.override.version"] = strconv.Itoa(version) + } + // Update config map. 
err = rc.Client().Update(rc.Context(), templateCm) if err != nil { diff --git a/pkg/operator/v1/xstore/steps/instance/consensus.go b/pkg/operator/v1/xstore/steps/instance/consensus.go index 1f64d28..c99049d 100644 --- a/pkg/operator/v1/xstore/steps/instance/consensus.go +++ b/pkg/operator/v1/xstore/steps/instance/consensus.go @@ -53,6 +53,7 @@ type ShowSlaveStatusResult struct { SlaveIORunning string SlaveSQLRunning string SalveSqlRunningState string + LastSqlError string SecondsBehindMaster float64 } @@ -80,9 +81,10 @@ func ShowSlaveStatus(rc *xstorev1reconcile.Context, pod *corev1.Pod, logger logr if err != nil { return nil, err } - secondsBehindMaster, err := strconv.ParseFloat(strings.TrimSpace(oneParsedResult["seconds_behind_master"].(string)), 64) + var secondsBehindMaster float64 + secondsBehindMaster, err = strconv.ParseFloat(strings.TrimSpace(oneParsedResult["seconds_behind_master"].(string)), 64) if err != nil { - return nil, err + secondsBehindMaster = -1 } showSlaveStatusResult := ShowSlaveStatusResult{ RelayLogFile: strings.TrimSpace(oneParsedResult["relay_log_file"].(string)), @@ -90,6 +92,7 @@ func ShowSlaveStatus(rc *xstorev1reconcile.Context, pod *corev1.Pod, logger logr SlaveIORunning: strings.TrimSpace(oneParsedResult["slave_io_running"].(string)), SlaveSQLRunning: strings.TrimSpace(oneParsedResult["slave_sql_running"].(string)), SalveSqlRunningState: strings.TrimSpace(oneParsedResult["slave_sql_running_state"].(string)), + LastSqlError: strings.TrimSpace(oneParsedResult["last_sql_error"].(string)), SecondsBehindMaster: secondsBehindMaster, } return &showSlaveStatusResult, nil @@ -546,3 +549,80 @@ var DropLearnerOnLeader = xstorev1reconcile.NewStepBinder("DropLearnerOnLeader", return flow.Continue("Learner nodes deleted.") }, ) + +func DisableElectionByPod(rc *xstorev1reconcile.Context, pod *corev1.Pod, logger logr.Logger) error { + cmd := xstoreexec.NewCanonicalCommandBuilder().Consensus().DisableElection().Build() + err := rc.ExecuteCommandOn(pod, "engine", cmd, control.ExecOptions{ + Logger: logger, + }) + if err != nil { + return err + } + return nil +} + +func EnableElectionByPod(rc *xstorev1reconcile.Context, pod *corev1.Pod, logger logr.Logger) error { + cmd := xstoreexec.NewCanonicalCommandBuilder().Consensus().EnableElection().Build() + err := rc.ExecuteCommandOn(pod, "engine", cmd, control.ExecOptions{ + Logger: logger, + }) + if err != nil { + return err + } + return nil +} + +var DisableElection = xstorev1reconcile.NewStepBinder("DisableElection", + func(rc *xstorev1reconcile.Context, flow control.Flow) (reconcile.Result, error) { + + pods, err := rc.GetXStorePods() + if err != nil { + return flow.RetryErr(err, "Failed to get pods") + } + leaderPod, err := GetLeaderPod(rc, flow.Logger(), true) + if err != nil || leaderPod == nil { + return flow.RetryErr(err, "Failed to get leader pod") + } + var execErr error + var errPodName string + for _, pod := range pods { + if pod.Name == leaderPod.Name { + continue + } + currentExecErr := DisableElectionByPod(rc, &pod, flow.Logger()) + if currentExecErr != nil { + execErr = currentExecErr + errPodName = pod.Name + } + } + if execErr != nil { + return flow.RetryErr(execErr, "Failed to disable election", "pod", errPodName) + } + + return flow.Continue("Disable Election Success.") + }, +) + +var EnableElection = xstorev1reconcile.NewStepBinder("EnableElection", + func(rc *xstorev1reconcile.Context, flow control.Flow) (reconcile.Result, error) { + + pods, err := rc.GetXStorePods() + if err != nil { + return 
flow.RetryErr(err, "Failed to get pods") + } + var execErr error + var errPodName string + for _, pod := range pods { + currentExecErr := EnableElectionByPod(rc, &pod, flow.Logger()) + if currentExecErr != nil { + execErr = currentExecErr + errPodName = pod.Name + } + } + if execErr != nil { + return flow.RetryErr(execErr, "Failed to enable election", "pod", errPodName) + } + + return flow.Continue("Enable Election Success.") + }, +) diff --git a/pkg/operator/v1/xstore/steps/instance/objects.go b/pkg/operator/v1/xstore/steps/instance/objects.go index 5fba410..5edfe26 100644 --- a/pkg/operator/v1/xstore/steps/instance/objects.go +++ b/pkg/operator/v1/xstore/steps/instance/objects.go @@ -17,6 +17,7 @@ limitations under the License. package instance import ( + "context" "strings" "github.com/go-logr/logr" @@ -44,7 +45,7 @@ var CreateSecret = xstorev1reconcile.NewStepBinder("CreateSecret", } if secret == nil { if xstore.Spec.Restore != nil { - secret, err := rc.CreateSecretByXStore(xstore) + secret, err := factory.NewSecretForRestore(rc, xstore) if err != nil { return flow.Error(err, "unable to get secret while restoring") } @@ -127,6 +128,9 @@ func CreatePodsAndPodServicesWithExtraFactory(extraPodFactory factory.ExtraPodFa newCnt++ } else { + if xstore.Status.Phase == polardbxv1xstore.PhaseAdapting { + continue + } // update if generation is too old. observedGeneration, _ := convention.GetGenerationLabelValue(pod) @@ -157,7 +161,7 @@ func CreatePodsAndPodServicesWithExtraFactory(extraPodFactory factory.ExtraPodFa // Get current pod services. podServices, err := rc.GetXStorePodServices() if err != nil { - return flow.Error(err, "Unable to get pod services.") + return flow.RetryErr(err, "Unable to get pod services.") } // For each pod, create a pod service. @@ -169,7 +173,7 @@ func CreatePodsAndPodServicesWithExtraFactory(extraPodFactory factory.ExtraPodFa svc := factory.NewClusterIpService(xstore, podMap[podName]) err := rc.SetControllerRefAndCreate(svc) if err != nil { - return flow.Error(err, "Unable to create service for pod.", "pod", podName) + return flow.RetryErr(err, "Unable to create service for pod.", "pod", podName) } } } @@ -325,7 +329,7 @@ var UpdateMycnfParameters = xstorev1reconcile.NewStepBinder("UpdateMycnfParamete // update my.cnf locally. 
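
The DisableElection/EnableElection steps added to consensus.go above fan a command out to every pod, keep going on individual failures, and remember only the last error together with the failing pod's name for the retry message. A sketch of that aggregation pattern under illustrative names (forEachPod is not part of the operator's API):

package main

import (
	"errors"
	"fmt"
)

// forEachPod runs fn against every pod, continues past failures, and reports
// the last error with the pod it came from, as the election steps above do.
func forEachPod(pods []string, fn func(pod string) error) error {
	var lastErr error
	var failedPod string
	for _, pod := range pods {
		if err := fn(pod); err != nil {
			lastErr, failedPod = err, pod
		}
	}
	if lastErr != nil {
		return fmt.Errorf("pod %s: %w", failedPod, lastErr)
	}
	return nil
}

func main() {
	err := forEachPod([]string{"dn-0", "dn-1", "dn-2"}, func(pod string) error {
		if pod == "dn-1" {
			return errors.New("exec failed")
		}
		return nil
	})
	fmt.Println(err) // pod dn-1: exec failed
}
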
err := xstoreexec.UpdateMycnfParameters(rc, &pod, "engine", flow.Logger()) - if err != nil { + if err != nil && err != context.DeadlineExceeded { return flow.Error(err, "Failed to update my.cnf locally.", "pod", pod.Name) } } diff --git a/pkg/operator/v1/xstore/steps/instance/rebuild.go b/pkg/operator/v1/xstore/steps/instance/rebuild.go index ef3941c..39b7cfd 100644 --- a/pkg/operator/v1/xstore/steps/instance/rebuild.go +++ b/pkg/operator/v1/xstore/steps/instance/rebuild.go @@ -1,12 +1,22 @@ package instance import ( + "fmt" polarxv1 "github.com/alibaba/polardbx-operator/api/v1" + polardbxv1xstore "github.com/alibaba/polardbx-operator/api/v1/xstore" "github.com/alibaba/polardbx-operator/pkg/k8s/control" + "github.com/alibaba/polardbx-operator/pkg/operator/v1/xstore/convention" xstoremeta "github.com/alibaba/polardbx-operator/pkg/operator/v1/xstore/meta" xstorev1reconcile "github.com/alibaba/polardbx-operator/pkg/operator/v1/xstore/reconcile" + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/reconcile" + "strconv" + "strings" + "time" ) var CleanRebuildJob = xstorev1reconcile.NewStepBinder("CleanRebuildJob", @@ -35,3 +45,175 @@ var CleanRebuildJob = xstorev1reconcile.NewStepBinder("CleanRebuildJob", } return flow.Retry("CleanRebuildJob retry.") }) + +func newXStoreFollower(rc *xstorev1reconcile.Context, targetPod *corev1.Pod) *polarxv1.XStoreFollower { + xstore := rc.MustGetXStore() + xstoreName := xstore.Name + if xstore.Spec.PrimaryXStore != "" { + xstoreName = xstore.Spec.PrimaryXStore + } + rebuildTask := polarxv1.XStoreFollower{ + ObjectMeta: metav1.ObjectMeta{ + Name: fmt.Sprintf("auto-ts%d", time.Now().UnixMilli()), + Namespace: xstore.Namespace, + Labels: map[string]string{ + xstoremeta.LabelAutoRebuild: "true", + }, + }, + Spec: polarxv1.XStoreFollowerSpec{ + Local: false, + TargetPodName: targetPod.Name, + XStoreName: xstoreName, + }, + } + return &rebuildTask +} + +func isHaOff(val string) bool { + lowerCaseHaVal := strings.ToLower(val) + if lowerCaseHaVal == "off" || lowerCaseHaVal == "0" || lowerCaseHaVal == "false" { + return true + } + return false +} + +func isPodInDebugRunmode(pod *corev1.Pod) bool { + if val, ok := pod.Annotations["runmode"]; ok { + val = strings.ToLower(val) + if val == "debug" { + return true + } + } + return false +} + +func TryGetAutoRebuildToken(rc *xstorev1reconcile.Context, rebuildTaskName string) error { + var configMap corev1.ConfigMap + err := rc.Client().Get(rc.Context(), types.NamespacedName{ + Namespace: rc.Namespace(), + Name: convention.AutoRebuildConfigMapName, + }, &configMap) + if err != nil { + if errors.IsNotFound(err) { + // create auto-rebuild config map + configMap = corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: convention.AutoRebuildConfigMapName, + Namespace: rc.Namespace(), + }, + Data: map[string]string{}, + } + err := rc.Client().Create(rc.Context(), &configMap) + if err != nil { + return err + } + } else { + return err + } + } + data := configMap.Data + if data == nil { + data = map[string]string{} + } + newMap := map[string]string{} + if len(data) >= rc.Config().Store().GetMaxAutoRebuildingCount() { + // try release + for k, v := range data { + ts, _ := strconv.ParseInt(v, 10, 64) + // check if the token has been held more than 10 seconds + // < 10, hold the token + // >= 10, check the status of rebuild task + if 
time.Now().Unix()-ts < 10 { + newMap[k] = v + } else { + var xf polarxv1.XStoreFollower + err := rc.Client().Get(rc.Context(), types.NamespacedName{ + Namespace: rc.Namespace(), + Name: k, + }, &xf) + release := false + if err != nil { + if errors.IsNotFound(err) { + release = true + } + } else { + if polardbxv1xstore.IsEndPhase(xf.Status.Phase) { + release = true + } + } + if !release { + newMap[k] = v + } + } + } + if len(newMap) >= rc.Config().Store().GetMaxAutoRebuildingCount() { + return fmt.Errorf("no rebuild task token could be released") + } + data = newMap + } + data[rebuildTaskName] = strconv.FormatInt(time.Now().Unix(), 10) + configMap.Data = data + err = rc.Client().Update(rc.Context(), &configMap) + if err != nil { + return err + } + return nil +} + +var CheckFollowerStatus = xstorev1reconcile.NewStepBinder("CheckFollowerStatus", + func(rc *xstorev1reconcile.Context, flow control.Flow) (reconcile.Result, error) { + xstore := rc.MustGetXStore() + if annoVal, ok := xstore.Annotations[xstoremeta.AnnotationAutoRebuild]; ok && isHaOff(annoVal) { + return flow.Pass() + } + pods, err := rc.GetXStorePods() + if err != nil { + return flow.RetryErr(err, "Failed to get xstore pods") + } + for _, pod := range pods { + if xstoremeta.IsPodRoleVoter(&pod) || xstoremeta.IsRoleLeader(&pod) || isPodInDebugRunmode(&pod) { + continue + } + showSlaveStatusResult, err := ShowSlaveStatus(rc, &pod, flow.Logger()) + if err != nil { + // ignore now + flow.Logger().Error(err, "failed to show slave status", "podname", pod.Name) + continue + } + if showSlaveStatusResult.LastSqlError != "" && strings.ToLower(showSlaveStatusResult.SlaveSQLRunning) == "no" { + // create rebuild task. check if a rebuild task exists + var xstoreFollowerList polarxv1.XStoreFollowerList + err := rc.Client().List(rc.Context(), &xstoreFollowerList, client.InNamespace(rc.Namespace()), client.MatchingLabels(map[string]string{ + xstoremeta.LabelTargetXStore: pod.Labels[xstoremeta.LabelName], + xstoremeta.LabelPod: pod.Name, + })) + if err != nil { + flow.Logger().Error(err, "failed to list rebuild task", "podname", pod.Name) + continue + } + var exist bool + if len(xstoreFollowerList.Items) > 0 { + for _, xstoreFollower := range xstoreFollowerList.Items { + if !polardbxv1xstore.IsEndPhase(xstoreFollower.Status.Phase) { + exist = true + break + } + } + } + if !exist { + xstoreFollower := newXStoreFollower(rc, &pod) + err := TryGetAutoRebuildToken(rc, xstoreFollower.Name) + if err != nil { + flow.Logger().Error(err, "Failed to TryGetAutoRebuildToken") + } + if err == nil { + err := rc.Client().Create(rc.Context(), xstoreFollower) + if err != nil { + flow.Logger().Error(err, "Failed to create xstore follower") + } + } + } + } + } + return flow.Pass() + }) diff --git a/pkg/operator/v1/xstore/steps/instance/recoverjob.go b/pkg/operator/v1/xstore/steps/instance/recoverjob.go index 31421de..b43c2d5 100644 --- a/pkg/operator/v1/xstore/steps/instance/recoverjob.go +++ b/pkg/operator/v1/xstore/steps/instance/recoverjob.go @@ -21,7 +21,7 @@ import ( k8shelper "github.com/alibaba/polardbx-operator/pkg/k8s/helper" "github.com/alibaba/polardbx-operator/pkg/operator/v1/xstore/command" xstoremeta "github.com/alibaba/polardbx-operator/pkg/operator/v1/xstore/meta" - "github.com/alibaba/polardbx-operator/pkg/util" + "github.com/alibaba/polardbx-operator/pkg/util/name" batchv1 "k8s.io/api/batch/v1" corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -55,7 +55,7 @@ func newRecoverDataJob(xstore *xstorev1.XStore, targetPod 
*corev1.Pod, secret st return &batchv1.Job{ ObjectMeta: metav1.ObjectMeta{ - Name: util.StableName(xstore, util.GetStableNameSuffix(xstore, targetPod.Name)+"-recover"), + Name: name.StableName(xstore, name.GetStableNameSuffix(xstore, targetPod.Name)+"-recover"), Namespace: xstore.Namespace, Labels: map[string]string{ xstoremeta.LabelName: xstore.Name, diff --git a/pkg/operator/v1/xstore/steps/instance/restore.go b/pkg/operator/v1/xstore/steps/instance/restore.go index b87f31d..618c105 100644 --- a/pkg/operator/v1/xstore/steps/instance/restore.go +++ b/pkg/operator/v1/xstore/steps/instance/restore.go @@ -17,8 +17,10 @@ limitations under the License. package instance import ( + "errors" "fmt" polardbxv1 "github.com/alibaba/polardbx-operator/api/v1" + "github.com/alibaba/polardbx-operator/api/v1/polardbx" xstorev1 "github.com/alibaba/polardbx-operator/api/v1/xstore" "github.com/alibaba/polardbx-operator/pkg/k8s/control" k8shelper "github.com/alibaba/polardbx-operator/pkg/k8s/helper" @@ -27,8 +29,9 @@ import ( xstoremeta "github.com/alibaba/polardbx-operator/pkg/operator/v1/xstore/meta" "github.com/alibaba/polardbx-operator/pkg/operator/v1/xstore/plugin/common/channel" xstorev1reconcile "github.com/alibaba/polardbx-operator/pkg/operator/v1/xstore/reconcile" - "github.com/alibaba/polardbx-operator/pkg/util" + "github.com/alibaba/polardbx-operator/pkg/util/name" corev1 "k8s.io/api/core/v1" + apierrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/types" "sigs.k8s.io/controller-runtime/pkg/client" @@ -37,14 +40,16 @@ import ( ) type RestoreJobContext struct { - BackupFilePath string `json:"backupFilePath,omitempty"` - BackupCommitIndex *int64 `json:"backupCommitIndex,omitempty"` - BinlogDirPath string `json:"binlogDirPath,omitempty"` - BinlogEndOffsetPath string `json:"binlogEndOffsetPath,omitempty"` - IndexesPath string `json:"indexesPath,omitempty"` - CpFilePath string `json:"cpfilePath,omitempty"` - StorageName polardbxv1.BackupStorage `json:"storageName,omitempty"` - Sink string `json:"sink,omitempty"` + BackupFilePath string `json:"backupFilePath,omitempty"` + BackupCommitIndex *int64 `json:"backupCommitIndex,omitempty"` + BinlogDirPath string `json:"binlogDirPath,omitempty"` + BinlogEndOffsetPath string `json:"binlogEndOffsetPath,omitempty"` + IndexesPath string `json:"indexesPath,omitempty"` + CpFilePath string `json:"cpfilePath,omitempty"` + StorageName polardbx.BackupStorage `json:"storageName,omitempty"` + Sink string `json:"sink,omitempty"` + PitrEndpoint string `json:"pitrEndpoint,omitempty"` + PitrXStore string `json:"pitrXStore,omitempty"` } var CheckXStoreRestoreSpec = xstorev1reconcile.NewStepBinder("CheckXStoreRestoreSpec", @@ -91,7 +96,7 @@ var StartRestoreJob = xstorev1reconcile.NewStepBinder("StartRestoreJob", jobCreated := false for _, pod := range pods { - job, err := rc.GetXStoreJob(util.GetStableNameSuffix(xstore, pod.Name) + "-restore") + job, err := rc.GetXStoreJob(name.GetStableNameSuffix(xstore, pod.Name) + "-restore") if client.IgnoreNotFound(err) != nil { return flow.Error(err, "Unable to get restore data job.", "pod", pod.Name) } @@ -122,7 +127,7 @@ var WaitUntilRestoreJobFinished = xstorev1reconcile.NewStepBinder("WaitUntilRest return flow.Error(err, "Unable to get pods for xcluster.") } for _, pod := range pods { - job, err := rc.GetXStoreJob(util.GetStableNameSuffix(xstore, pod.Name) + "-restore") + job, err := rc.GetXStoreJob(name.GetStableNameSuffix(xstore, pod.Name) + "-restore") if err != nil { 
return flow.Error(err, "Unable to get xstore restore data job", "pod", pod.Name) } @@ -140,7 +145,6 @@ var PrepareRestoreJobContext = xstorev1reconcile.NewStepBinder("PrepareRestoreJo const restoreJobKey = "restore" // Check if exists, exit if true. - exists, err := rc.IsTaskContextExists(restoreJobKey) if err != nil { return flow.Error(err, "Unable to determine job context for restore!") @@ -151,10 +155,10 @@ var PrepareRestoreJobContext = xstorev1reconcile.NewStepBinder("PrepareRestoreJo // Prepare context. xstore := rc.MustGetXStore() - fromXStoreName := xstore.Spec.Restore.From.XStoreName backup := &polardbxv1.XStoreBackup{} if xstore.Spec.Restore.BackupSet == "" || len(xstore.Spec.Restore.BackupSet) == 0 { + // TODO(dengli): with metadata backup // Parse restore time. restoreTime := rc.MustParseRestoreTime() // Get last backup @@ -181,9 +185,15 @@ var PrepareRestoreJobContext = xstorev1reconcile.NewStepBinder("PrepareRestoreJo xstoreBackupKey := types.NamespacedName{Namespace: rc.Namespace(), Name: xstore.Spec.Restore.BackupSet} err := rc.Client().Get(rc.Context(), xstoreBackupKey, backup) if err != nil { - return flow.Error(err, "Unable to get xstoreBackup by BackupSet") + return flow.Error(err, "Can not get xstore backup set", "backup set key", xstoreBackupKey) } } + if backup == nil { + return flow.Error(errors.New("xstore backup obejct is null"), "Can not get xstore backup set") + } + backupRootPath := backup.Status.BackupRootPath + lastCommitIndex := backup.Status.CommitIndex + //Update sharedchannel sharedCm, err := rc.GetXStoreConfigMap(convention.ConfigMapTypeShared) if err != nil { @@ -195,14 +205,13 @@ var PrepareRestoreJobContext = xstorev1reconcile.NewStepBinder("PrepareRestoreJo return flow.Error(err, "Unable to parse shared channel from config map.") } - sharedChannel.UpdateLastBackupBinlogIndex(&backup.Status.CommitIndex) + sharedChannel.UpdateLastBackupBinlogIndex(&lastCommitIndex) sharedCm.Data[channel.SharedChannelKey] = sharedChannel.String() err = rc.Client().Update(rc.Context(), sharedCm) if err != nil { return flow.Error(err, "Unable to update shared config map.") } - backupRootPath := backup.Status.BackupRootPath fullBackupPath := fmt.Sprintf("%s/%s/%s.xbstream", backupRootPath, polardbxmeta.FullBackupPath, fromXStoreName) binlogEndOffsetPath := fmt.Sprintf("%s/%s/%s-end", @@ -216,13 +225,15 @@ var PrepareRestoreJobContext = xstorev1reconcile.NewStepBinder("PrepareRestoreJo // Save. 
if err := rc.SaveTaskContext(restoreJobKey, &RestoreJobContext{ BackupFilePath: fullBackupPath, - BackupCommitIndex: &backup.Status.CommitIndex, + BackupCommitIndex: &lastCommitIndex, BinlogDirPath: binlogBackupDir, BinlogEndOffsetPath: binlogEndOffsetPath, IndexesPath: indexesPath, CpFilePath: cpFilePath, StorageName: backup.Spec.StorageProvider.StorageName, Sink: backup.Spec.StorageProvider.Sink, + PitrEndpoint: xstore.Spec.Restore.PitrEndpoint, + PitrXStore: xstore.Spec.Restore.From.XStoreName, }); err != nil { return flow.Error(err, "Unable to save job context for restore!") } @@ -238,17 +249,15 @@ var RemoveRestoreJob = xstorev1reconcile.NewStepBinder("RemoveRestoreJob", return flow.Error(err, "Unable to get pods for xcluster.") } for _, pod := range pods { - job, err := rc.GetXStoreJob(util.GetStableNameSuffix(xstore, pod.Name) + "-restore") - if err != nil { + job, err := rc.GetXStoreJob(name.GetStableNameSuffix(xstore, pod.Name) + "-restore") + if err != nil && !apierrors.IsNotFound(err) { return flow.Error(err, "Unable to get xstore restore data job", "pod", pod.Name) } - if job == nil { - return flow.Continue("Restore job removed!") - } - - err = rc.Client().Delete(rc.Context(), job, client.PropagationPolicy(metav1.DeletePropagationBackground)) - if client.IgnoreNotFound(err) != nil { - return flow.Error(err, "Unable to remove restore job", "job-name", job.Name) + if job != nil { + err = rc.Client().Delete(rc.Context(), job, client.PropagationPolicy(metav1.DeletePropagationBackground)) + if client.IgnoreNotFound(err) != nil { + return flow.Error(err, "Unable to remove restore job", "job-name", job.Name) + } } } return flow.Continue("Restore job removed!") @@ -287,7 +296,7 @@ var StartRecoverJob = xstorev1reconcile.NewStepBinder("StartRecoverJob", } jobCreated := false - job, err := rc.GetXStoreJob(util.GetStableNameSuffix(xstore, leaderPod.Name) + "-recover") + job, err := rc.GetXStoreJob(name.GetStableNameSuffix(xstore, leaderPod.Name) + "-recover") if client.IgnoreNotFound(err) != nil { return flow.Error(err, "Unable to get recover data job.", "pod", leaderPod.Name) } @@ -325,7 +334,7 @@ var WaitUntilRecoverJobFinished = xstorev1reconcile.NewStepBinder("WaitUntilReco if leaderPod == nil { return flow.RetryAfter(5*time.Second, "Leader pod not found") } - job, err := rc.GetXStoreJob(util.GetStableNameSuffix(xstore, leaderPod.Name) + "-recover") + job, err := rc.GetXStoreJob(name.GetStableNameSuffix(xstore, leaderPod.Name) + "-recover") if err != nil { return flow.Error(err, "Unable to get xstore recover data job", "pod", leaderPod.Name) @@ -348,16 +357,15 @@ var RemoveRecoverJob = xstorev1reconcile.NewStepBinder("RemoveRecoverJob", if err != nil { return flow.Error(err, "Unable to get leaderPod for xcluster.") } - job, err := rc.GetXStoreJob(util.GetStableNameSuffix(xstore, leaderPod.Name) + "-recover") - if err != nil { + job, err := rc.GetXStoreJob(name.GetStableNameSuffix(xstore, leaderPod.Name) + "-recover") + if err != nil && !apierrors.IsNotFound(err) { return flow.Error(err, "Unable to get xstore recover data job", "pod", leaderPod.Name) } - if job == nil { - return flow.Continue("Recover job already removed!") - } - err = rc.Client().Delete(rc.Context(), job, client.PropagationPolicy(metav1.DeletePropagationBackground)) - if client.IgnoreNotFound(err) != nil { - return flow.Error(err, "Unable to remove recover job", "job-name", job.Name) + if job != nil { + err = rc.Client().Delete(rc.Context(), job, client.PropagationPolicy(metav1.DeletePropagationBackground)) + if 
client.IgnoreNotFound(err) != nil { + return flow.Error(err, "Unable to remove recover job", "job-name", job.Name) + } } return flow.Continue("Recover job removed!") }) diff --git a/pkg/operator/v1/xstore/steps/instance/restorejob.go b/pkg/operator/v1/xstore/steps/instance/restorejob.go index 7fbb857..15de5fa 100644 --- a/pkg/operator/v1/xstore/steps/instance/restorejob.go +++ b/pkg/operator/v1/xstore/steps/instance/restorejob.go @@ -21,7 +21,7 @@ import ( k8shelper "github.com/alibaba/polardbx-operator/pkg/k8s/helper" "github.com/alibaba/polardbx-operator/pkg/operator/v1/xstore/command" xstoremeta "github.com/alibaba/polardbx-operator/pkg/operator/v1/xstore/meta" - "github.com/alibaba/polardbx-operator/pkg/util" + "github.com/alibaba/polardbx-operator/pkg/util/name" batchv1 "k8s.io/api/batch/v1" corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -59,7 +59,7 @@ func patchTaskConfigMapVolumeAndVolumeMounts(xstore *xstorev1.XStore, podSpec *c VolumeSource: corev1.VolumeSource{ ConfigMap: &corev1.ConfigMapVolumeSource{ LocalObjectReference: corev1.LocalObjectReference{ - Name: util.StableName(xstore, "restore"), + Name: name.StableName(xstore, "restore"), }, }, }, @@ -105,7 +105,7 @@ func newRestoreDataJob(xstore *xstorev1.XStore, targetPod *corev1.Pod) *batchv1. return &batchv1.Job{ ObjectMeta: metav1.ObjectMeta{ - Name: util.StableName(xstore, util.GetStableNameSuffix(xstore, targetPod.Name)+"-restore"), + Name: name.StableName(xstore, name.GetStableNameSuffix(xstore, targetPod.Name)+"-restore"), Namespace: xstore.Namespace, Labels: map[string]string{ xstoremeta.LabelName: xstore.Name, diff --git a/pkg/operator/v1/xstore/steps/instance/support_legacy.go b/pkg/operator/v1/xstore/steps/instance/support_legacy.go new file mode 100644 index 0000000..72b2fe6 --- /dev/null +++ b/pkg/operator/v1/xstore/steps/instance/support_legacy.go @@ -0,0 +1,192 @@ +package instance + +import ( + "bytes" + "fmt" + "github.com/alibaba/polardbx-operator/pkg/featuregate" + "github.com/alibaba/polardbx-operator/pkg/k8s/control" + xstoreexec "github.com/alibaba/polardbx-operator/pkg/operator/v1/xstore/command" + "github.com/alibaba/polardbx-operator/pkg/operator/v1/xstore/convention" + xstoremeta "github.com/alibaba/polardbx-operator/pkg/operator/v1/xstore/meta" + "github.com/alibaba/polardbx-operator/pkg/operator/v1/xstore/plugin/common/channel" + xstorev1reconcile "github.com/alibaba/polardbx-operator/pkg/operator/v1/xstore/reconcile" + "github.com/go-logr/logr" + corev1 "k8s.io/api/core/v1" + "sigs.k8s.io/controller-runtime/pkg/reconcile" + "strconv" + "strings" + "time" +) + +func WhenNeedAdapt(binders ...control.BindFunc) control.BindFunc { + return xstorev1reconcile.NewStepIfBinder("WhenNeedAdapt", + func(rc *xstorev1reconcile.Context, log logr.Logger) (bool, error) { + if featuregate.EnforceClusterIpXStorePod.Enabled() { + annotations := rc.MustGetXStore().GetAnnotations() + if val, ok := annotations[xstoremeta.AnnotationAdapting]; ok && xstoremeta.IsAdaptingTrue(val) { + podServices, err := rc.GetXStorePodServices() + if podServices == nil || len(podServices) == 0 { + return true, nil + } + return false, err + } + } + return false, nil + }, + binders..., + ) +} + +var FlushClusterMetadata = xstorev1reconcile.NewStepBinder("FlushClusterMetadata", + func(rc *xstorev1reconcile.Context, flow control.Flow) (reconcile.Result, error) { + leaderPod, err := GetLeaderPod(rc, flow.Logger(), true) + if err != nil || leaderPod == nil { + return flow.RetryErr(err, "Failed to get leader pod") + } 
+ pods, err := rc.GetXStorePods() + if err != nil { + return flow.RetryErr(err, "Failed to get pods") + } + for _, pod := range pods { + action := "reset-cluster-info-to-learner" + if pod.Name == leaderPod.Name { + action = "reset-cluster-info-to-local" + } + cmd := xstoreexec.NewCanonicalCommandBuilder().Consensus().PrepareHandleIndicate(action).Build() + rc.ExecuteCommandOn(&pod, "engine", cmd, control.ExecOptions{ + Logger: flow.Logger(), + }) + } + for _, pod := range pods { + cmd := xstoreexec.NewCanonicalCommandBuilder().Consensus().SetReadonly().Build() + rc.ExecuteCommandOn(&pod, "engine", cmd, control.ExecOptions{ + Logger: flow.Logger(), + Timeout: time.Minute * 1, + }) + } + var shutdownFunc = func(pod *corev1.Pod) { + cmd := xstoreexec.NewCanonicalCommandBuilder().Engine().Shutdown().Build() + rc.ExecuteCommandOn(pod, "engine", cmd, control.ExecOptions{ + Logger: flow.Logger(), + }) + cmd = xstoreexec.NewCanonicalCommandBuilder().Process().KillAllMyProcess().Build() + rc.ExecuteCommandOn(pod, "engine", cmd, control.ExecOptions{ + Logger: flow.Logger(), + }) + } + for _, pod := range pods { + if pod.Name != leaderPod.Name { + shutdownFunc(&pod) + } + } + shutdownFunc(leaderPod) + return flow.Continue("FlushClusterMetadata Success.") + }, +) + +func GetLeaderPod(rc *xstorev1reconcile.Context, logger logr.Logger, force bool) (*corev1.Pod, error) { + pods, err := rc.GetXStorePods() + if err != nil { + return nil, err + } + var leaderPod *corev1.Pod + for _, pod := range pods { + role, _, err := ReportRoleAndCurrentLeader(rc, &pod, logger) + if err != nil { + continue + } + if role == xstoremeta.RoleLeader { + leaderPod = pod.DeepCopy() + break + } + } + if leaderPod == nil && force { + var maxAppliedIndex int64 = -1 + for _, pod := range pods { + if pod.Labels[xstoremeta.LabelNodeSet] == "log" { + continue + } + localInfo, err := ShowThis(rc, &pod, logger, true) + if err != nil { + return nil, err + } + appliedIndex, err := strconv.ParseInt(localInfo.AppliedIndex, 10, 64) + if appliedIndex > maxAppliedIndex { + maxAppliedIndex = appliedIndex + leaderPod = pod.DeepCopy() + } + } + } + return leaderPod, nil +} + +var UpdateSharedConfigMap = xstorev1reconcile.NewStepBinder("UpdateSharedConfigMap", + func(rc *xstorev1reconcile.Context, flow control.Flow) (reconcile.Result, error) { + cm, err := rc.GetXStoreConfigMap(convention.ConfigMapTypeShared) + if err != nil { + return flow.RetryErr(err, "failed to get shared configmap") + } + sharedChannel, err := parseChannelFromConfigMap(cm) + if err != nil { + return flow.Error(err, "Unable to parse shared channel from config map.") + } + for i, sharedChannelNode := range sharedChannel.Nodes { + podService, err := rc.GetXStoreServiceForPod(sharedChannelNode.Pod) + if err != nil || podService.Spec.ClusterIP == "None" { + return flow.RetryErr(err, "Failed to get pod service", "pod", sharedChannelNode.Pod) + } + newNode := sharedChannelNode + newNode.Host = podService.Spec.ClusterIP + sharedChannel.Nodes[i] = newNode + } + cm.Data[channel.SharedChannelKey] = sharedChannel.String() + err = rc.Client().Update(rc.Context(), cm) + if err != nil { + return flow.Error(err, "Unable to update shared config map.") + } + return flow.Pass() + }, +) + +var ReAddFollower = xstorev1reconcile.NewStepBinder("ReAddFollower", + func(rc *xstorev1reconcile.Context, flow control.Flow) (reconcile.Result, error) { + leaderPod, err := GetLeaderPod(rc, flow.Logger(), false) + if err != nil { + return flow.RetryErr(err, "Failed to get leader pod") + } + cm, err := 
rc.GetXStoreConfigMap(convention.ConfigMapTypeShared) + if err != nil { + return flow.RetryErr(err, "failed to get shared configmap") + } + sharedChannel, err := parseChannelFromConfigMap(cm) + if err != nil { + return flow.Error(err, "Unable to parse shared channel from config map.") + } + for _, sharedChannelNode := range sharedChannel.Nodes { + if sharedChannelNode.Pod == leaderPod.Name { + continue + } + nodeAddress := fmt.Sprintf("%s:%d", sharedChannelNode.Host, sharedChannelNode.Port) + cmd := xstoreexec.NewCanonicalCommandBuilder().Consensus().AddLearner(nodeAddress).Build() + var buffer bytes.Buffer + err = rc.ExecuteCommandOn(leaderPod, "engine", cmd, control.ExecOptions{ + Logger: flow.Logger(), + Stdout: &buffer, + }) + flow.Logger().Error(err, "Failed to UpdateClusterInfo", "pod", leaderPod.Name) + response := buffer.String() + if strings.Contains(response, "ERROR") && !strings.Contains(response, "Target node already exists") { + flow.Logger().Error(err, "Failed to UpdateClusterInfo", "pod", leaderPod.Name) + } + buffer.Reset() + cmd = xstoreexec.NewCanonicalCommandBuilder().Consensus().ChangeLearnerToFollower(nodeAddress).Build() + err = rc.ExecuteCommandOn(leaderPod, "engine", cmd, control.ExecOptions{ + Logger: flow.Logger(), + Stdout: &buffer, + }) + flow.Logger().Info("change learner to follower", "response", buffer.String(), "pod", leaderPod.Name) + flow.Logger().Error(err, "Failed to ChangeLearnerToFollower", "pod", leaderPod.Name) + } + return flow.Pass() + }, +) diff --git a/pkg/operator/v1/xstore/steps/instance/volumes.go b/pkg/operator/v1/xstore/steps/instance/volumes.go index e753259..cdffe89 100644 --- a/pkg/operator/v1/xstore/steps/instance/volumes.go +++ b/pkg/operator/v1/xstore/steps/instance/volumes.go @@ -80,7 +80,20 @@ var PrepareHostPathVolumes = xstorev1reconcile.NewStepBinder("PrepareHostPathVol Type: corev1.HostPathDirectory, } } - xstore.Status.BoundVolumes = volumes + if xstore.Status.BoundVolumes == nil { + xstore.Status.BoundVolumes = volumes + } else { + for pod, newVolume := range volumes { + volume, ok := xstore.Status.BoundVolumes[pod] + if !ok { + return flow.Error(errors.New("failed to find pod in BoundVolumes"), "", "") + } + volume.Pod = newVolume.Pod + volume.HostPath = newVolume.HostPath + volume.LogHostPath = newVolume.LogHostPath + volume.Type = newVolume.Type + } + } return flow.Continue("Host path volumes prepared.") }, diff --git a/pkg/pitr/context.go b/pkg/pitr/context.go new file mode 100644 index 0000000..5aa41d6 --- /dev/null +++ b/pkg/pitr/context.go @@ -0,0 +1,23 @@ +package pitr + +import ( + "github.com/alibaba/polardbx-operator/pkg/binlogtool/binlog" + "github.com/go-logr/logr" + "go.uber.org/atomic" +) + +type Context struct { + TaskConfig *TaskConfig + Logger logr.Logger + RestoreBinlogs []RestoreBinlog + ConsistentXStoreCount int + CpHeartbeatXid uint64 + Borders map[string]binlog.EventOffset + LastErr error + RecoverTxsBytes []byte + Closed atomic.Bool +} + +func (pCtx *Context) NeedConsistentPoint() bool { + return pCtx.ConsistentXStoreCount > 1 +} diff --git a/pkg/pitr/driver.go b/pkg/pitr/driver.go new file mode 100644 index 0000000..cc3864e --- /dev/null +++ b/pkg/pitr/driver.go @@ -0,0 +1,94 @@ +package pitr + +import ( + "encoding/json" + "fmt" + "github.com/alibaba/polardbx-operator/pkg/util/defaults" + "net/http" + "os" + "sigs.k8s.io/controller-runtime/pkg/log/zap" + "sync" + "sync/atomic" +) + +const ( + DefaultSpillOutDirectory = "/workspace/spill" + EnvSpillOutDirectory = "EnvSpillOutDirectory" + 
DefaultConfigFilepath = "/workspace/conf/config.json" + EnvConfigFilepath = "EnvConfigFilepath" +) + +var configValue atomic.Value + +func Run() error { + configFilepath := defaults.NonEmptyStrOrDefault(os.Getenv(EnvConfigFilepath), DefaultConfigFilepath) + spillOutDirectory := defaults.NonEmptyStrOrDefault(os.Getenv(EnvSpillOutDirectory), DefaultSpillOutDirectory) + logger := zap.New(zap.UseDevMode(true)).WithName("pitr") + config, err := os.ReadFile(configFilepath) + if err != nil { + logger.Error(err, fmt.Sprintf("failed to read filepath=%s", configFilepath)) + return err + } + logger.Info("config content", "config", string(config)) + var taskConfig TaskConfig + err = json.Unmarshal(config, &taskConfig) + if err != nil { + logger.Error(err, "failed to parse config file") + return err + } + taskConfig.SpillDirectory = spillOutDirectory + configValue.Store(taskConfig) + pCtx := &Context{ + TaskConfig: &taskConfig, + Logger: zap.New(zap.UseDevMode(true)).WithName("pitr"), + } + + var waitGroup sync.WaitGroup + waitGroup.Add(1) + go func() { + defer func() { + waitGroup.Done() + obj := recover() + if obj != nil { + pCtx.Logger.Info("panic", "obj", obj) + } + }() + steps := []Step{ + LoadAllBinlog, + PrepareBinlogMeta, + CollectInterestedTxEvents, + Checkpoint, + } + for _, step := range steps { + err := step(pCtx) + if err != nil { + pCtx.LastErr = err + break + } + } + }() + waitGroup.Wait() + err = FinishAndStartHttpServer(pCtx) + if err != nil { + pCtx.Logger.Error(err, "failed to start http server") + } + return nil +} + +func RunAsync() *sync.WaitGroup { + var waitGroup sync.WaitGroup + waitGroup.Add(1) + go func() { + defer waitGroup.Done() + Run() + }() + return &waitGroup +} + +func Exit() { + config := configValue.Load().(TaskConfig) + if config.HttpServerPort != 0 { + http.Get(fmt.Sprintf("http://127.0.0.1:%d/exit", config.HttpServerPort)) + //ignore err + } +} diff --git a/pkg/pitr/dto.go b/pkg/pitr/dto.go new file mode 100644 index 0000000..2c68dc2 --- /dev/null +++ b/pkg/pitr/dto.go @@ -0,0 +1,36 @@ +package pitr + +type TaskConfig struct { + Namespace string `json:"namespace,omitempty"` + PxcName string `json:"pxc_name,omitempty"` + PxcUid string `json:"pxc_uid,omitempty"` + SinkName string `json:"sinkName,omitempty"` + SinkType string `json:"sinkType,omitempty"` + SpillDirectory string `json:"spill_directory,omitempty"` + HpfsEndpoint string `json:"hpfs_endpoint,omitempty"` + FsEndpoint string `json:"fs_endpoint,omitempty"` + XStores map[string]*XStoreConfig `json:"xstores,omitempty"` + Timestamp uint64 `json:"timestamp,omitempty"` + BinlogChecksum string `json:"binlog_checksum,omitempty"` + HttpServerPort int `json:"http_server_port,omitempty"` +} + +type XStoreConfig struct { + GlobalConsistent bool `json:"global_consistent,omitempty"` + XStoreName string `json:"xstore_name,omitempty"` + XStoreUid string `json:"xstore_uid,omitempty"` + BackupSetStartIndex uint64 `json:"backupset_start_index,omitempty"` + HeartbeatSname string `json:"heartbeat_sname,omitempty"` + Pods map[string]*PodConfig `json:"pods,omitempty"` +} + +type PodConfig struct { + PodName string `json:"pod_name,omitempty"` + Host string `json:"host,omitempty"` + LogDir string `json:"log_dir,omitempty"` +} + +type HttpBinlogFileInfo struct { + Filename string `json:"filename,omitempty"` + Length *uint64 `json:"length,omitempty"` +} diff --git a/pkg/pitr/restore_binlog.go b/pkg/pitr/restore_binlog.go new file mode 100644 index 0000000..00623e1 --- /dev/null +++ b/pkg/pitr/restore_binlog.go @@ -0,0 +1,489 @@ 
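
pkg/pitr/driver.go above resolves its config file path from an environment variable with a default, parses the JSON TaskConfig, and then runs a fixed list of steps, recording the first error and stopping. A stripped-down sketch of that driver shape; the step bodies are stand-ins, only the control flow mirrors Run():

package main

import (
	"fmt"
	"os"
)

type pitrContext struct{ lastErr error }
type step func(*pitrContext) error

// envOrDefault plays the role of defaults.NonEmptyStrOrDefault in Run().
func envOrDefault(key, def string) string {
	if v := os.Getenv(key); v != "" {
		return v
	}
	return def
}

func main() {
	configPath := envOrDefault("EnvConfigFilepath", "/workspace/conf/config.json")
	fmt.Println("config:", configPath)

	steps := []step{
		func(c *pitrContext) error { return nil },                       // e.g. LoadAllBinlog
		func(c *pitrContext) error { return fmt.Errorf("step failed") }, // e.g. PrepareBinlogMeta
		func(c *pitrContext) error { return nil },                       // never reached
	}
	ctx := &pitrContext{}
	for _, s := range steps {
		if err := s(ctx); err != nil {
			ctx.lastErr = err // record the first failure and stop, as Run() does
			break
		}
	}
	fmt.Println("lastErr:", ctx.lastErr)
}
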
+package pitr + +import ( + "bufio" + "bytes" + "encoding/json" + "fmt" + "github.com/alibaba/polardbx-operator/pkg/binlogtool/algo" + "github.com/alibaba/polardbx-operator/pkg/binlogtool/binlog" + "github.com/alibaba/polardbx-operator/pkg/binlogtool/binlog/event" + "github.com/alibaba/polardbx-operator/pkg/binlogtool/tx" + "github.com/alibaba/polardbx-operator/pkg/hpfs/backupbinlog" + "github.com/alibaba/polardbx-operator/pkg/hpfs/common" + "github.com/alibaba/polardbx-operator/pkg/hpfs/config" + "github.com/alibaba/polardbx-operator/pkg/hpfs/filestream" + "github.com/google/uuid" + "github.com/pkg/errors" + "io" + "os" + "path/filepath" + "sort" + "strconv" + "strings" +) + +const CheckRangeSeconds = 5 + +var CutIgnoredEventType = map[string]bool{ + "Consensus": true, + "PreviousConsensusIndex": true, +} + +type LocalSource struct { + FsIp string `json:"fs_ip,omitempty"` + FsPort int `json:"fs_port,omitempty"` + NodeName string `json:"node_name,omitempty"` + DataFilepath string `json:"data_filepath,omitempty"` +} + +type RemoteSource struct { + FsIp string `json:"fs_ip,omitempty"` + FsPort int `json:"fs_port,omitempty"` + Sink *config.Sink `json:"Sink,omitempty"` + MetaFilepath string `json:"meta_filepath,omitempty"` + DataFilepath string `json:"data_filepath,omitempty"` +} + +type BinlogSource struct { + Filename string `json:"filename,omitempty"` + LSource *LocalSource `json:"local_source,omitempty"` + RSource *RemoteSource `json:"remote_source,omitempty"` + BinlogChecksum string `json:"binlog_checksum,omitempty"` + RangeStartOffset *uint32 `json:"range_start_offset,omitempty"` + RangeEndOffset *uint32 `json:"range_end_offset,omitempty"` + TruncateLength *uint64 `json:"truncate_length,omitempty"` + Length uint64 `json:"Length,omitempty"` + Version string `json:"version,omitempty"` + Hash string `json:"hash,omitempty"` + Timestamp uint64 `json:"timestamp,omitempty"` + StartIndex uint64 `json:"start_index,omitempty"` + HeartbeatTxEvents []tx.Event `json:"heartbeat_tx_events,omitempty"` + RangeTxEvents []tx.Event `json:"range_tx_events,omitempty"` +} + +func (b *BinlogSource) GetTrueLength() *uint64 { + if b.TruncateLength != nil { + return b.TruncateLength + } + if b.RangeEndOffset != nil { + offset := uint64(*b.RangeEndOffset) + return &offset + } + return &b.Length +} + +func (b *BinlogSource) Copy() *BinlogSource { + tmp := *b + return &tmp +} + +func (b *BinlogSource) String() string { + jsonContent, _ := json.Marshal(b) + return string(jsonContent) +} + +func (b *BinlogSource) OpenStream() (io.ReadCloser, error) { + reader, err := b.OpenRemoteStream() + if err != nil { + fmt.Println(err) + } + if reader == nil { + reader, err = b.OpenLocalStream() + } + return reader, err +} + +func (b *BinlogSource) OpenRemoteStream() (io.ReadCloser, error) { + if b.RSource != nil { + fileClient := filestream.NewFileClient(b.RSource.FsIp, b.RSource.FsPort, nil) + fileClient.InitWaitChan() + action := filestream.GetClientActionBySinkType(b.RSource.Sink.Type) + if action == filestream.InvalidAction { + panic(fmt.Sprintf("invalid sinkType %s", b.RSource.Sink.Type)) + } + metadata := filestream.ActionMetadata{ + Action: action, + Filepath: b.RSource.MetaFilepath, + RequestId: uuid.New().String(), + Sink: b.RSource.Sink.Name, + } + metaReader, metaWriter := io.Pipe() + go func() { + defer metaWriter.Close() + _, err := fileClient.Download(metaWriter, metadata) + if err != nil { + fmt.Println(err) + } + }() + err := fileClient.WaitForDownload() + if err != nil { + return nil, err + } + defer 
metaReader.Close() + metaBytes, err := io.ReadAll(metaReader) + if err != nil { + return nil, err + } + var bf backupbinlog.BinlogFile + err = json.Unmarshal(metaBytes, &bf) + if err != nil { + return nil, err + } + b.Timestamp = bf.EventTimestamp + b.Hash = bf.Sha256 + b.Version = bf.Version + b.StartIndex = bf.StartIndex + dataReader, dataWriter := io.Pipe() + go func() { + defer dataWriter.Close() + _, err = fileClient.Download(dataWriter, filestream.ActionMetadata{ + Action: action, + Filepath: b.RSource.DataFilepath, + RequestId: uuid.New().String(), + Sink: b.RSource.Sink.Name, + }) + if err != nil { + fmt.Println(err) + } + }() + err = fileClient.WaitForDownload() + if err != nil { + fmt.Println(err) + dataReader.Close() + return nil, err + } + b.Length = fileClient.GetLastLen() + return dataReader, nil + } + return nil, errors.New("invalid remote source") +} + +func (b *BinlogSource) OpenLocalStream() (io.ReadCloser, error) { + if b.LSource != nil { + fileClient := filestream.NewFileClient(b.LSource.FsIp, b.LSource.FsPort, nil) + fileClient.InitWaitChan() + + var err error + dataReader, dataWriter := io.Pipe() + metadata := filestream.ActionMetadata{ + Action: filestream.DownloadRemote, + Filepath: b.LSource.DataFilepath, + RequestId: uuid.New().String(), + RedirectAddr: filestream.GetRemoteAddrByNodeName(b.LSource.NodeName), + } + go func() { + defer dataWriter.Close() + _, err = fileClient.Download(dataWriter, metadata) + if err != nil { + fmt.Println(err) + } + }() + err = fileClient.WaitForDownload() + if err != nil { + dataReader.Close() + return nil, err + } + headBytes, err := common.ReadBytes(dataReader, backupbinlog.BufferSizeBytes) + if len(headBytes) > 0 { + startIndex, eventTimestamp, err := backupbinlog.GetBinlogFileBeginInfo(headBytes, filepath.Base(metadata.Filepath), b.BinlogChecksum) + if err != nil { + return nil, err + } + b.StartIndex = startIndex + b.Timestamp = eventTimestamp + } + dataReader.Close() + dataReaderAgain, dataWriterAgain := io.Pipe() + + go func() { + defer dataWriterAgain.Close() + _, err = fileClient.Download(dataWriterAgain, metadata) + if err != nil { + fmt.Println(err) + } + }() + err = fileClient.WaitForDownload() + if err != nil { + dataReaderAgain.Close() + return nil, err + } + b.Length = fileClient.GetLastLen() + return dataReaderAgain, nil + } + return nil, nil +} + +func (b *BinlogSource) getBinlogFilename() string { + myFilepath := "" + if b.LSource != nil { + myFilepath = b.LSource.DataFilepath + } + if b.RSource != nil { + myFilepath = b.RSource.DataFilepath + } + if myFilepath == "" { + panic(errors.New("empty binlog filename")) + } + return filepath.Base(myFilepath) +} + +func (b *BinlogSource) GetBinlogFileNum() int64 { + filename := b.getBinlogFilename() + splitStrs := strings.Split(filename, ".") + if len(splitStrs) != 2 { + panic(fmt.Sprintf("invalid binlog filename %s", filename)) + } + num, err := strconv.ParseInt(splitStrs[1], 10, 64) + if err != nil { + panic(errors.Wrap(err, fmt.Sprintf("failed to parse int, filename = %s ", filename))) + } + return num +} + +type RestoreBinlog struct { + GlobalConsistent bool `json:"global_consistent,omitempty"` + PxcName string `json:"pxc_name,omitempty"` + PxcUid string `json:"pxc_uid,omitempty"` + XStoreName string `json:"xstore_name,omitempty"` + XStoreUid string `json:"xstore_uid,omitempty"` + PodName string `json:"pod_name,omitempty"` + StartIndex uint64 `json:"start_index,omitempty"` + Timestamp uint64 `json:"timestamp,omitempty"` + Version uint64 `json:"version,omitempty"` + 
+    HeartbeatSname string         `json:"heartbeat_sname,omitempty"`
+    Sources        []BinlogSource `json:"sources,omitempty"`
+    ResultSources  []BinlogSource `json:"result_sources,omitempty"`
+    // spillFilepath is unexported and therefore never marshalled; it only
+    // tracks where ResultSources are spilled on the local disk.
+    spillFilepath string
+}
+
+func newRestoreBinlog(pxcName string, pxcUid string, xStoreName string, xStoreUid string, podName string, startIndex uint64, timestamp uint64, version uint64, heartbeatSname string, sources []BinlogSource) *RestoreBinlog {
+    if sources == nil {
+        sources = make([]BinlogSource, 0)
+    }
+    return &RestoreBinlog{
+        PxcName:        pxcName,
+        PxcUid:         pxcUid,
+        XStoreName:     xStoreName,
+        XStoreUid:      xStoreUid,
+        PodName:        podName,
+        StartIndex:     startIndex,
+        Timestamp:      timestamp,
+        Version:        version,
+        HeartbeatSname: heartbeatSname,
+        Sources:        sources,
+    }
+}
+
+func (r *RestoreBinlog) SetSpillFilepath(spillFilepath string) {
+    r.spillFilepath = spillFilepath
+}
+
+func (r *RestoreBinlog) GetSpillFilepath() string {
+    return r.spillFilepath
+}
+
+// SpillResultSources writes ResultSources to the spill file and drops them
+// from memory; LoadResultSources reads them back on demand.
+func (r *RestoreBinlog) SpillResultSources() error {
+    if r.ResultSources != nil {
+        data, err := json.Marshal(r.ResultSources)
+        if err != nil {
+            return err
+        }
+        if err := os.WriteFile(r.spillFilepath, data, 0644); err != nil {
+            return err
+        }
+        r.ResultSources = nil
+    }
+    return nil
+}
+
+func (r *RestoreBinlog) LoadResultSources() error {
+    if r.ResultSources == nil {
+        data, err := os.ReadFile(r.spillFilepath)
+        if err != nil {
+            return err
+        }
+        if err := json.Unmarshal(data, &r.ResultSources); err != nil {
+            return err
+        }
+    }
+    return nil
+}
+
+func (r *RestoreBinlog) SortSources() {
+    if len(r.Sources) > 0 {
+        sort.Slice(r.Sources, func(i, j int) bool {
+            return r.Sources[i].GetBinlogFileNum() < r.Sources[j].GetBinlogFileNum()
+        })
+    }
+}
+
+// SearchByTimestamp scans the sorted sources and keeps those that may hold
+// events between the backup set start index and the restore timestamp,
+// recording range offsets on the boundary files.
+func (r *RestoreBinlog) SearchByTimestamp() error {
+    r.SortSources()
+    result := make([]BinlogSource, 0)
+    var preBinlogSource *BinlogSource
+    for _, binlogSource := range r.Sources {
+        // Opening the stream has the side effect of filling in the source's
+        // StartIndex and Timestamp from the file header.
+        reader, err := binlogSource.OpenStream()
+        if reader == nil {
+            if err == nil {
+                err = errors.New("no readable local or remote stream")
+            }
+            return errors.Wrap(err, fmt.Sprintf("failed to get reader, binlog source=%s", binlogSource.String()))
+        }
+        reader.Close()
+        if r.StartIndex > binlogSource.StartIndex {
+            preBinlogSource = binlogSource.Copy()
+            continue
+        }
+
+        if r.Timestamp-CheckRangeSeconds <= binlogSource.Timestamp {
+            if len(result) > 0 {
+                r.searchRangeInfo(&result[len(result)-1], r.Timestamp-CheckRangeSeconds, r.Timestamp)
+            }
+            if r.Timestamp < binlogSource.Timestamp {
+                break
+            }
+        }
+
+        if r.StartIndex <= binlogSource.StartIndex {
+            if r.StartIndex < binlogSource.StartIndex && len(result) == 0 {
+                if preBinlogSource == nil {
+                    return errors.New("startIndex is smaller than binlog start index")
+                }
+                result = append(result, *preBinlogSource)
+            }
+            result = append(result, binlogSource)
+            preBinlogSource = nil
+        }
+    }
+    if len(result) == 0 && preBinlogSource != nil && preBinlogSource.StartIndex <= r.StartIndex {
+        result = append(result, *preBinlogSource)
+    }
+    if len(result) > 0 {
+        lastBinlogSource := result[len(result)-1]
+        if lastBinlogSource.RangeStartOffset == nil && lastBinlogSource.RangeEndOffset == nil {
+            beginTimestamp := r.Timestamp - CheckRangeSeconds
+            if !r.GlobalConsistent {
+                beginTimestamp = 0
+            }
+            r.searchRangeInfo(&result[len(result)-1], beginTimestamp, r.Timestamp)
+        }
+    }
+    r.ResultSources = result
+    return nil
+}
+
+// searchRangeInfo scans the events of one binlog between startTs and endTs,
+// recording the byte offsets of the range boundaries and collecting the
+// heartbeat and range transaction events needed for the consistent point.
+func (r *RestoreBinlog) searchRangeInfo(binlogSource *BinlogSource, startTs uint64, endTs uint64) {
+    reader, err := binlogSource.OpenStream()
+    if err != nil || reader == nil {
+        panic(fmt.Sprintf("failed to get reader stream, binlogSource=%s", binlogSource.String()))
+    }
+    defer reader.Close()
+    opts := []binlog.LogEventScannerOption{
+        binlog.WithBinlogFile(binlogSource.getBinlogFilename()),
+        binlog.WithChecksumAlgorithm(binlogSource.BinlogChecksum),
+        binlog.WithLogEventHeaderFilter(func(header event.LogEventHeader) bool {
+            headerTs := uint64(header.EventTimestamp())
+            if startTs <= headerTs && headerTs < endTs {
+                if binlogSource.RangeStartOffset == nil {
+                    offset := header.EventEndPosition() - header.TotalEventLength()
+                    binlogSource.RangeStartOffset = &offset
+                }
+                return true
+            }
+            if headerTs >= endTs {
+                if _, ok := CutIgnoredEventType[header.EventType()]; ok {
+                    if binlogSource.RangeEndOffset == nil {
+                        offset := header.EventEndPosition() - header.TotalEventLength()
+                        binlogSource.RangeEndOffset = &offset
+                    }
+                    // Closing the reader aborts the scan once the end offset
+                    // is fixed.
+                    reader.Close()
+                }
+            }
+            return false
+        }),
+    }
+    scanner, err := binlog.NewLogEventScanner(bufio.NewReader(reader), opts...)
+    if err != nil {
+        panic(fmt.Sprintf("failed to get binlog event scanner, binlogSource=%s", binlogSource.String()))
+    }
+
+    heartbeatTxEvents := make([]tx.Event, 0)
+    rangeTxEvents := make([]tx.Event, 0)
+    var latestHeartbeatPrepareTxEvent *tx.Event
+    uniqueMap := map[uint64]bool{}
+    eventHandler := func(txEvent *tx.Event) error {
+        if !r.GlobalConsistent {
+            return nil
+        }
+        copiedTxEvent := *txEvent
+        if copiedTxEvent.Type == tx.Prepare && copiedTxEvent.HeartbeatRowsLogEvents != nil {
+            for _, rowsEv := range copiedTxEvent.HeartbeatRowsLogEvents {
+                if updateRowsEv, ok := rowsEv.(*event.UpdateRowsEvent); ok {
+                    hrow, err := algo.ExtractHeartbeatFieldsFromRowsEvent(updateRowsEv)
+                    if err != nil {
+                        return fmt.Errorf("unable to extract heartbeat rows event: %w", err)
+                    }
+                    if bytes.Equal([]byte(r.HeartbeatSname), []byte(hrow.Sname)) {
+                        latestHeartbeatPrepareTxEvent = &copiedTxEvent
+                        break
+                    }
+                }
+            }
+        }
+
+        if copiedTxEvent.Type == tx.Prepare {
+            rangeTxEvents = append(rangeTxEvents, copiedTxEvent)
+        }
+
+        if copiedTxEvent.Type == tx.Commit {
+            _, exist := uniqueMap[copiedTxEvent.XID]
+            if latestHeartbeatPrepareTxEvent != nil && copiedTxEvent.XID == latestHeartbeatPrepareTxEvent.XID {
+                // The upper bits of the TSO carry a physical timestamp in
+                // milliseconds, so (Ts >> 22) / 1000 converts it to seconds.
+                if ((copiedTxEvent.Ts >> 22) / 1000) >= endTs {
+                    if binlogSource.RangeEndOffset == nil {
+                        header := copiedTxEvent.Raw.EventHeader()
+                        offset := header.EventEndPosition() - header.TotalEventLength()
+                        binlogSource.RangeEndOffset = &offset
+                    }
+                    return tx.StopParse
+                }
+                if !exist {
+                    heartbeatTxEvents = append(heartbeatTxEvents, *latestHeartbeatPrepareTxEvent, copiedTxEvent)
+                } else {
+                    heartbeatTxEvents[len(heartbeatTxEvents)-1].Ts = copiedTxEvent.Ts
+                }
+            }
+            if !exist {
+                rangeTxEvents = append(rangeTxEvents, copiedTxEvent)
+            }
+            uniqueMap[copiedTxEvent.XID] = true
+        }
+        return nil
+    }
+    err = tx.NewTransactionEventParser(scanner).Parse(eventHandler)
+    if err != nil {
+        fmt.Println("NewTransactionEventParser Parse err=", err)
+    }
+    if len(heartbeatTxEvents) > 0 {
+        binlogSource.HeartbeatTxEvents = heartbeatTxEvents
+    }
+    if len(rangeTxEvents) > 0 {
+        binlogSource.RangeTxEvents = rangeTxEvents
+    }
+}
+
+// CheckValid reports whether the selected sources pin down both a range
+// start and a range end offset.
+func (r *RestoreBinlog) CheckValid() bool {
+    var hasRangeStart bool
+    var hasRangeEnd bool
+    if r.ResultSources != nil {
+        for _, resultSource := range r.ResultSources {
+            if resultSource.RangeStartOffset != nil {
+                hasRangeStart = true
+            }
+            if resultSource.RangeEndOffset != nil {
+                hasRangeEnd = true
+            }
+        }
+    }
+    return hasRangeEnd && hasRangeStart
+}
diff --git a/pkg/pitr/restore_binlog_test.go b/pkg/pitr/restore_binlog_test.go
new file mode 100644
index 0000000..01cafb7
--- /dev/null
+++ b/pkg/pitr/restore_binlog_test.go
@@ -0,0 +1,92 @@
+package pitr
+
+import (
+    "context"
+    "fmt"
+    "strconv"
+    "testing"
+    "time"
+
+    "github.com/alibaba/polardbx-operator/pkg/hpfs"
+    "github.com/alibaba/polardbx-operator/pkg/hpfs/common"
+    "github.com/alibaba/polardbx-operator/pkg/hpfs/config"
+    "github.com/alibaba/polardbx-operator/pkg/hpfs/filestream"
+    "github.com/alibaba/polardbx-operator/pkg/hpfs/remote"
+)
+
+func startFileServer() *filestream.FileServer {
+    config.ConfigFilepath = "/Users/busu/tmp/filestream/config.yaml"
+    config.InitConfig()
+    flowControl := filestream.NewFlowControl(filestream.FlowControlConfig{
+        MaxFlow:    1 << 40, // effectively unthrottled
+        TotalFlow:  (1 << 40) * 10,
+        MinFlow:    1 << 40,
+        BufferSize: 1 << 10,
+    })
+    flowControl.Start()
+    fileServer := filestream.NewFileServer("", 9999, ".", flowControl)
+    go func() {
+        fileServer.Start()
+    }()
+    time.Sleep(1 * time.Second)
+    return fileServer
+}
+
+func TestSearchByTimestamp(t *testing.T) {
+    startFileServer()
+    pxcName := "polardb-x-2"
+    pxcUid := "16bd261e-ac47-42d7-bb2f-f2ff940d6780"
+    xStoreName := "polardb-x-2-wt9x-dn-1"
+    xStoreUid := "f38bea9c-2cac-4a27-ae21-997a7e30d737"
+    podName := "polardb-x-2-wt9x-dn-1-cand-1"
+    var startIndex uint64 = 1983339
+    var timestamp uint64 = 1678193950
+    var version uint64 = 1678182799
+    heartbeatSname := "pitr_sname"
+    // 1678193948
+    rb := newRestoreBinlog(pxcName, pxcUid, xStoreName, xStoreUid, podName, startIndex, timestamp, version, heartbeatSname, []BinlogSource{
+        {
+            BinlogChecksum: "crc32",
+            RSource: &RemoteSource{
+                FsIp:   "127.0.0.1",
+                FsPort: 9999,
+                Sink: &config.Sink{
+                    Name: "default",
+                    Type: "oss",
+                },
+                MetaFilepath: "binlogbackup/default/polardb-x-2/16bd261e-ac47-42d7-bb2f-f2ff940d6780/polardb-x-2-wt9x-dn-1/f38bea9c-2cac-4a27-ae21-997a7e30d737/polardb-x-2-wt9x-dn-1-cand-1/1678182799/0_1000/binlog-meta/mysql_bin.000005.txt",
+                DataFilepath: "binlogbackup/default/polardb-x-2/16bd261e-ac47-42d7-bb2f-f2ff940d6780/polardb-x-2-wt9x-dn-1/f38bea9c-2cac-4a27-ae21-997a7e30d737/polardb-x-2-wt9x-dn-1-cand-1/1678182799/0_1000/binlog-file/mysql_bin.000005",
+            },
+        },
+    })
+    if err := rb.SearchByTimestamp(); err != nil {
+        t.Fatal(err)
+    }
+    rb.SetSpillFilepath("./test.json")
+    if err := rb.SpillResultSources(); err != nil {
+        t.Fatal(err)
+    }
+    if err := rb.LoadResultSources(); err != nil {
+        t.Fatal(err)
+    }
+}
+
+func TestOssAliyunTest(t *testing.T) {
+    config.ConfigFilepath = "/Users/busu/tmp/filestream/config.yaml"
+    config.InitConfig()
+    _, params, auth, fileServiceName, _ := hpfs.GetFileServiceParam("default", "oss")
+    fileService, _ := remote.GetFileService(fileServiceName)
+    resultFiles := make([]string, 0)
+    resultFilesPtr := &resultFiles
+    ctx := context.WithValue(context.Background(), common.AffectedFiles, resultFilesPtr)
+    //xstoreBinlogDir := config.GetXStorePodBinlogStorageDirectory("default", "", request.GetPxcUid(), request.GetXStoreName(), request.GetXStoreUid(), request.GetPodName())
+    params["deadline"] = strconv.FormatInt(time.Now().Unix(), 10)
+    ft, err := fileService.ListAllFiles(ctx, "binlogbackup/default/polardb-x-2/16bd261e-ac47-42d7-bb2f-f2ff940d6780/polardb-x-2-wt9x-dn-1/f38bea9c-2cac-4a27-ae21-997a7e30d737/polardb-x-2-wt9x-dn-1-cand-0", auth, params)
+    if err == nil {
+        err = ft.Wait()
+    }
+    if err != nil {
+        t.Log(err)
+    }
+}
+
+func TestTimeZone(t *testing.T) {
+    location, _ := time.LoadLocation("UTC")
+
+    val, err := time.ParseInLocation("2006-01-02T15:04:05Z", "2023-03-14T14:50:27Z", location)
+    fmt.Println(val.Unix())
+    fmt.Println(time.Time{}.Unix())
+    fmt.Println(err)
+}
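+
+// TestGetTrueLengthPrecedence is a minimal editor's sketch (not part of the
+// original test plan) showing how BinlogSource.GetTrueLength resolves the
+// effective length: TruncateLength wins over RangeEndOffset, which wins over
+// the raw Length. All values below are hypothetical.
+func TestGetTrueLengthPrecedence(t *testing.T) {
+    var rangeEnd uint32 = 512
+    var truncateLen uint64 = 256
+    source := BinlogSource{Length: 1024}
+    if *source.GetTrueLength() != 1024 {
+        t.Fatal("expected the raw length when no range end or truncation is set")
+    }
+    source.RangeEndOffset = &rangeEnd
+    if *source.GetTrueLength() != 512 {
+        t.Fatal("expected the range end offset to override the raw length")
+    }
+    source.TruncateLength = &truncateLen
+    if *source.GetTrueLength() != 256 {
+        t.Fatal("expected the truncate length to take precedence")
+    }
+}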
diff --git a/pkg/pitr/workflow.go b/pkg/pitr/workflow.go
new file mode 100644
index 0000000..e1e1413
--- /dev/null
+++ b/pkg/pitr/workflow.go
@@ -0,0 +1,563 @@
+package pitr
+
+import (
+    "compress/gzip"
+    "context"
+    "encoding/json"
+    "errors"
+    "fmt"
+    "io"
+    "math"
+    "net/http"
+    "os"
+    "path/filepath"
+    "sort"
+    "strconv"
+    "strings"
+    "time"
+
+    "github.com/alibaba/polardbx-operator/pkg/binlogtool/algo"
+    "github.com/alibaba/polardbx-operator/pkg/binlogtool/tx"
+    "github.com/alibaba/polardbx-operator/pkg/hpfs/common"
+    "github.com/alibaba/polardbx-operator/pkg/hpfs/config"
+    hpfs "github.com/alibaba/polardbx-operator/pkg/hpfs/proto"
+    "github.com/go-logr/logr"
+    "github.com/google/uuid"
+    pkgErrors "github.com/pkg/errors"
+    "golang.org/x/exp/slices"
+    "google.golang.org/grpc"
+)
+
+const (
+    CopyBufferSize = 1 << 20
+)
+
+// Step is one stage of the PITR preparation workflow.
+type Step func(pCtx *Context) error
+
+func MustMarshalJSON(obj interface{}) string {
+    b, err := json.Marshal(obj)
+    if err != nil {
+        panic(err)
+    }
+    return string(b)
+}
+
+// LoadAllBinlog collects candidate binlog files for every pod of every
+// xstore, merging the copies found on local disks with those already
+// uploaded to remote storage.
+func LoadAllBinlog(pCtx *Context) error {
+    pCtx.Logger.Info("LoadAllBinlog...")
+    taskConfig := pCtx.TaskConfig
+    if taskConfig == nil {
+        return errors.New("taskConfig must not be nil")
+    }
+    hpfsClient, err := NewHpfsClient(taskConfig.HpfsEndpoint)
+    if err != nil {
+        return pkgErrors.Wrap(err, fmt.Sprintf("failed to get hpfs client, endpoint=%s", taskConfig.HpfsEndpoint))
+    }
+    filestreamIp, filestreamPort := common.ParseNetAddr(taskConfig.FsEndpoint)
+    restoreBinlogs := make([]RestoreBinlog, 0)
+    for _, xStore := range taskConfig.XStores {
+        if xStore.GlobalConsistent {
+            pCtx.ConsistentXStoreCount += 1
+        }
+        for _, pod := range xStore.Pods {
+            if pod.Host == "" {
+                continue
+            }
+            var binlogSources []BinlogSource
+            pCtx.Logger.Info("list local binlog list", "pod", pod.PodName, "host", pod.Host, "logDir", pod.LogDir)
+            resp, err := hpfsClient.ListLocalBinlogList(context.Background(), &hpfs.ListLocalBinlogListRequest{
+                Host:   &hpfs.Host{NodeName: pod.Host},
+                LogDir: pod.LogDir,
+            })
+            if err != nil {
+                pCtx.Logger.Error(err, "failed to list local binlog list", "pod", pod.PodName, "host", pod.Host, "logDir", pod.LogDir)
+                return err
+            }
+            if resp.Version != "" {
+                for _, binlogFile := range resp.GetBinlogFiles() {
+                    binlogFilename := filepath.Base(binlogFile)
+                    absoluteFilepath := filepath.Join(pod.LogDir, binlogFilename)
+                    binlogSources = append(binlogSources, BinlogSource{
+                        Filename: binlogFilename,
+                        LSource: &LocalSource{
+                            FsIp:         filestreamIp,
+                            FsPort:       filestreamPort,
+                            NodeName:     pod.Host,
+                            DataFilepath: absoluteFilepath,
+                        },
+                        BinlogChecksum: taskConfig.BinlogChecksum,
+                        Version:        resp.Version,
+                        Timestamp:      taskConfig.Timestamp,
+                        StartIndex:     xStore.BackupSetStartIndex,
+                    })
+                }
+            }
+
+            pCtx.Logger.Info("finish list local binlog list", "pod", pod.PodName, "host", pod.Host, "logDir", pod.LogDir, "response", MustMarshalJSON(resp))
+
+            pCtx.Logger.Info("list remote binlog list", "pod", pod.PodName, "host", pod.Host, "logDir", pod.LogDir)
+            remoteResp, err := hpfsClient.ListRemoteBinlogList(context.Background(), &hpfs.ListRemoteBinlogListRequest{
+                Namespace:  taskConfig.Namespace,
+                PxcName:    taskConfig.PxcName,
+                PxcUid:     taskConfig.PxcUid,
+                XStoreName: xStore.XStoreName,
+                XStoreUid:  xStore.XStoreUid,
+                SinkName:   taskConfig.SinkName,
+                SinkType:   taskConfig.SinkType,
+                PodName:    pod.PodName,
+            })
+            if err != nil {
+                pCtx.Logger.Error(err, "failed to list remote binlog list", "pod", pod.PodName, "host", pod.Host, "logDir", pod.LogDir)
+                return err
+            }
+            pCtx.Logger.Info("finish list remote binlog list", "pod", pod.PodName, "host", pod.Host, "logDir", pod.LogDir, "resp", MustMarshalJSON(remoteResp))
list", "pod", pod.PodName, "host", pod.Host, "logDir", pod.LogDir, "resp", MustMarshalJSON(remoteResp)) + versionFilesMap := map[string]map[string][]string{} + for _, file := range remoteResp.GetFiles() { + elements := strings.Split(file, "/") + if len(elements) < 4 { + err := errors.New(fmt.Sprintf("invaid filepath = %s", file)) + pCtx.Logger.Error(err, "failed to get version") + return err + } + version := elements[len(elements)-4] + _, ok := versionFilesMap[version] + if !ok { + versionFilesMap[version] = map[string][]string{} + } + filename := filepath.Base(file) + index := 0 + if strings.HasSuffix(filename, ".txt") { + filename = filename[:len(filename)-4] + index = 1 + } + if versionFilesMap[version][filename] == nil { + versionFilesMap[version][filename] = make([]string, 2) + } + versionFilesMap[version][filename][index] = file + } + + for version, versionVal := range versionFilesMap { + for filename, vals := range versionVal { + dataFilepath := vals[0] + metaFilepath := vals[1] + if dataFilepath == "" || metaFilepath == "" { + err := fmt.Errorf("invalid filepath, dataFilepath = %s, metaFilepath = %s", dataFilepath, metaFilepath) + return err + } + binlogSources = append(binlogSources, BinlogSource{ + Filename: filename, + RSource: &RemoteSource{ + FsIp: filestreamIp, + FsPort: filestreamPort, + Sink: &config.Sink{ + Name: taskConfig.SinkName, + Type: taskConfig.SinkType, + }, + DataFilepath: dataFilepath, + MetaFilepath: metaFilepath, + }, + BinlogChecksum: taskConfig.BinlogChecksum, + Version: version, + Timestamp: taskConfig.Timestamp, + StartIndex: xStore.BackupSetStartIndex, + }) + } + } + groupedBinlogSources := groupBinlogSources(binlogSources) + for k, bs := range groupedBinlogSources { + version, err := strconv.ParseInt(k, 10, 64) + if err != nil { + return fmt.Errorf("failed to parse version = %s", k) + } + restoreBinlogs = append(restoreBinlogs, RestoreBinlog{ + GlobalConsistent: xStore.GlobalConsistent, + PxcName: taskConfig.PxcName, + PxcUid: taskConfig.PxcUid, + XStoreName: xStore.XStoreName, + XStoreUid: xStore.XStoreUid, + PodName: pod.PodName, + StartIndex: xStore.BackupSetStartIndex, + Timestamp: taskConfig.Timestamp, + Version: uint64(version), + HeartbeatSname: xStore.HeartbeatSname, + Sources: bs, + spillFilepath: filepath.Join(taskConfig.SpillDirectory, uuid.New().String()), + }) + } + } + } + pCtx.RestoreBinlogs = restoreBinlogs + return nil +} + +func groupBinlogSources(sources []BinlogSource) map[string][]BinlogSource { + result := map[string][]BinlogSource{} + for _, source := range sources { + _, ok := result[source.Version] + if !ok { + result[source.Version] = []BinlogSource{} + } + result[source.Version] = append(result[source.Version], source) + } + + for k := range result { + sort.Slice(result[k], func(i, j int) bool { + return result[k][i].GetBinlogFileNum() < result[k][j].GetBinlogFileNum() + }) + tmpSources := make([]BinlogSource, 0) + for _, source := range result[k] { + if len(tmpSources) > 0 && tmpSources[len(tmpSources)-1].Filename == source.Filename { + if tmpSources[len(tmpSources)-1].RSource == nil { + tmpSources[len(tmpSources)-1].RSource = source.RSource + } else if tmpSources[len(tmpSources)-1].LSource == nil { + tmpSources[len(tmpSources)-1].LSource = source.LSource + } + continue + } + tmpSources = append(tmpSources, source) + } + result[k] = tmpSources + } + return result +} + +func NewHpfsClient(endpoint string) (hpfs.HpfsServiceClient, error) { + hpfsConn, err := grpc.Dial(endpoint, grpc.WithInsecure()) + if err != nil { + return 
+
+// PrepareBinlogMeta picks, for each xstore, the first version whose binlogs
+// cover the restore window, and spills the chosen sources to disk.
+func PrepareBinlogMeta(pCtx *Context) error {
+    pCtx.Logger.Info("PrepareBinlogMeta...")
+    flags := map[string]bool{}
+    newRestoreBinlogs := make([]RestoreBinlog, 0, len(pCtx.RestoreBinlogs)/2)
+    for _, restoreBinlog := range pCtx.RestoreBinlogs {
+        _, ok := flags[restoreBinlog.XStoreName]
+        if !ok {
+            err := restoreBinlog.SearchByTimestamp()
+            if err != nil {
+                pCtx.Logger.Error(err, "failed to SearchByTimestamp", "pxcName", restoreBinlog.PxcName, "xStoreName", restoreBinlog.XStoreName, "podName", restoreBinlog.PodName, "version", restoreBinlog.Version)
+                return err
+            }
+            if restoreBinlog.CheckValid() {
+                err := restoreBinlog.SpillResultSources()
+                if err != nil {
+                    pCtx.Logger.Error(err, fmt.Sprintf("failed to spill result sources of podName = %s, version = %d", restoreBinlog.PodName, restoreBinlog.Version))
+                    return err
+                }
+                newRestoreBinlogs = append(newRestoreBinlogs, restoreBinlog)
+                flags[restoreBinlog.XStoreName] = true
+            } else {
+                pCtx.Logger.Info("invalid searchByTimestamp result", "pxcName", restoreBinlog.PxcName, "xStoreName", restoreBinlog.XStoreName, "podName", restoreBinlog.PodName, "version", restoreBinlog.Version)
+            }
+        }
+    }
+    pCtx.RestoreBinlogs = newRestoreBinlogs
+    if len(pCtx.RestoreBinlogs) != len(pCtx.TaskConfig.XStores) {
+        // Failed: some xstore has no binlog covering the span between the
+        // backup set start index and the restore timestamp.
+        restoreBinlogXstores := make([]string, 0, len(pCtx.RestoreBinlogs))
+        for _, restoreBinlog := range pCtx.RestoreBinlogs {
+            restoreBinlogXstores = append(restoreBinlogXstores, restoreBinlog.XStoreName)
+        }
+        slices.Sort(restoreBinlogXstores)
+        configXStores := make([]string, 0, len(pCtx.TaskConfig.XStores))
+        for xStoreName := range pCtx.TaskConfig.XStores {
+            configXStores = append(configXStores, xStoreName)
+        }
+        slices.Sort(configXStores)
+        err := errors.New("failed to get proper binlogs")
+        pCtx.Logger.Error(err, "", "expect", configXStores, "actual", restoreBinlogXstores)
+        return err
+    }
+    return nil
+}
+
+// CollectInterestedTxEvents keeps only the heartbeat transactions seen by
+// every consistency-relevant xstore and trims each source's RangeTxEvents
+// down to the window between the last two shared heartbeats.
+func CollectInterestedTxEvents(pCtx *Context) error {
+    pCtx.Logger.Info("CollectInterestedTxEvents...")
+    if !pCtx.NeedConsistentPoint() {
+        for i := range pCtx.RestoreBinlogs {
+            if err := pCtx.RestoreBinlogs[i].LoadResultSources(); err != nil {
+                return err
+            }
+        }
+        pCtx.Logger.Info("Skip CollectInterestedTxEvents...")
+        return nil
+    }
+    xidCntMap := map[uint64]int{}
+    xidTsMap := map[uint64]tx.Event{}
+    for k, rb := range pCtx.RestoreBinlogs {
+        if !rb.GlobalConsistent {
+            continue
+        }
+        if err := rb.LoadResultSources(); err != nil {
+            return err
+        }
+        // xidCntMapInner de-duplicates XIDs within one xstore so that
+        // xidCntMap counts how many xstores saw each heartbeat commit.
+        xidCntMapInner := map[uint64]bool{}
+        for _, rs := range rb.ResultSources {
+            for _, ht := range rs.HeartbeatTxEvents {
+                if ht.Type == tx.Commit && ht.Ts > 0 {
+                    xid := ht.XID
+                    xidTsMap[xid] = ht
+                    if _, innerOk := xidCntMapInner[xid]; !innerOk {
+                        xidCntMap[xid]++
+                        xidCntMapInner[xid] = true
+                    }
+                }
+            }
+        }
+        if err := rb.SpillResultSources(); err != nil {
+            return err
+        }
+        pCtx.RestoreBinlogs[k] = rb
+    }
+    candidateHeartbeatTxEvents := make([]tx.Event, 0)
+    for xid, cnt := range xidCntMap {
+        // The GMS (meta db) does not participate, hence the "-1".
+        if cnt == len(pCtx.RestoreBinlogs)-1 {
+            candidateHeartbeatTxEvents = append(candidateHeartbeatTxEvents, xidTsMap[xid])
+        }
+    }
+    sort.Slice(candidateHeartbeatTxEvents, func(i, j int) bool {
+        return candidateHeartbeatTxEvents[i].Ts < candidateHeartbeatTxEvents[j].Ts
+    })
+    if len(candidateHeartbeatTxEvents) < 2 {
+        return fmt.Errorf("heartbeat count expect >= 2, actual = %d", len(candidateHeartbeatTxEvents))
+    }
+    beginXid := candidateHeartbeatTxEvents[len(candidateHeartbeatTxEvents)-2].XID
+    endXid := candidateHeartbeatTxEvents[len(candidateHeartbeatTxEvents)-1].XID
+    pCtx.CpHeartbeatXid = endXid
+    for j, rb := range pCtx.RestoreBinlogs {
+        if err := rb.LoadResultSources(); err != nil {
+            pCtx.RestoreBinlogs[j] = rb
+            return err
+        }
+        pCtx.RestoreBinlogs[j] = rb
+        if !rb.GlobalConsistent {
+            continue
+        }
+        for k, rs := range rb.ResultSources {
+            if len(rs.RangeTxEvents) > 0 {
+                // Keep only the events between the prepare of the
+                // second-to-last heartbeat and the commit of the last one.
+                newRangeTxEvents := make([]tx.Event, 0)
+                startIndex := math.MaxInt
+                endIndex := math.MaxInt
+                for i, txEvent := range rs.RangeTxEvents {
+                    switch txEvent.XID {
+                    case beginXid:
+                        if txEvent.Type == tx.Prepare {
+                            startIndex = i
+                        }
+                    case endXid:
+                        if txEvent.Type == tx.Commit {
+                            endIndex = i
+                        }
+                    }
+                    if i >= startIndex && i <= endIndex {
+                        newRangeTxEvents = append(newRangeTxEvents, txEvent)
+                    }
+                }
+                rs.RangeTxEvents = newRangeTxEvents
+                rb.ResultSources[k] = rs
+            }
+        }
+        pCtx.RestoreBinlogs[j] = rb
+    }
+    return nil
+}
+
+type myTransactionParser struct {
+    sources []BinlogSource
+}
+
+func (tp *myTransactionParser) Parse(h tx.EventHandler) error {
+    for _, source := range tp.sources {
+        for _, txEvent := range source.RangeTxEvents {
+            txEvent := txEvent // the handler may retain the pointer
+            if err := h(&txEvent); err != nil {
+                if err == tx.StopParse {
+                    return nil
+                }
+                return err
+            }
+        }
+    }
+    return nil
+}
+
+// Checkpoint seeks a globally consistent point across all xstores and
+// truncates each xstore's last binlog at that point.
+func Checkpoint(pCtx *Context) error {
+    pCtx.Logger.Info("Checkpoint...")
+    if !pCtx.NeedConsistentPoint() {
+        pCtx.Logger.Info("Skip Checkpoint...")
+        return nil
+    }
+    txParsers := map[string]tx.TransactionEventParser{}
+    for _, restoreBinlog := range pCtx.RestoreBinlogs {
+        if !restoreBinlog.GlobalConsistent {
+            continue
+        }
+        txParsers[restoreBinlog.XStoreName] = &myTransactionParser{
+            sources: restoreBinlog.ResultSources,
+        }
+    }
+    recoverableTxs, borders, err := algo.NewSeekConsistentPoint(txParsers, pCtx.CpHeartbeatXid).Perform()
+    if err != nil {
+        return err
+    }
+    pCtx.RecoverTxsBytes, err = algo.SerializeCpResult(recoverableTxs, borders)
+    if err != nil {
+        return err
+    }
+    pCtx.Borders = borders
+    for i := 0; i < len(pCtx.RestoreBinlogs); i++ {
+        restoreBinlog := pCtx.RestoreBinlogs[i]
+        if !restoreBinlog.GlobalConsistent {
+            continue
+        }
+        eOffset, ok := pCtx.Borders[restoreBinlog.XStoreName]
+        if !ok {
+            err := fmt.Errorf("failed to get event offset of xstore name = %s", restoreBinlog.XStoreName)
+            pCtx.Logger.Error(err, "")
+            return err
+        }
+        var found bool
+        for j := 0; j < len(restoreBinlog.ResultSources); j++ {
+            if restoreBinlog.ResultSources[j].getBinlogFilename() == eOffset.File {
+                offset := eOffset.Offset
+                restoreBinlog.ResultSources[j].TruncateLength = &offset
+                pCtx.RestoreBinlogs[i] = restoreBinlog
+                found = true
+                break
+            }
+        }
+        if !found {
+            err := fmt.Errorf("unexpected: no result source matches event offset = %s", eOffset.String())
+            pCtx.Logger.Error(err, "failed to find binlog source")
+            return err
+        }
+    }
+    return nil
+}
+
+// httpLogger wraps the mux to log every request and its latency.
+type httpLogger struct {
+    mux    *http.ServeMux
+    logger logr.Logger
+}
+
+func (h *httpLogger) ServeHTTP(w http.ResponseWriter, r *http.Request) {
+    requestInfo := map[string]string{}
+    requestInfo["host"] = r.Host
+    requestInfo["path"] = r.URL.Path
+    vals := r.URL.Query()
+    for k, val := range vals {
+        requestInfo[k] = fmt.Sprintf("%+v", val)
+    }
+    requestId := vals.Get("requestid")
+    h.logger.Info(MustMarshalJSON(requestInfo), "requestid", requestId)
+    begin := time.Now().UnixMilli()
+    h.mux.ServeHTTP(w, r)
+    h.logger.Info(fmt.Sprintf("time cost %d ms", time.Now().UnixMilli()-begin), "requestid", requestId)
+}
+
+// FinishAndStartHttpServer exposes the prepared binlogs and the
+// consistent-point data over HTTP for the restoring pods to download.
+func FinishAndStartHttpServer(pCtx *Context) error {
+    pCtx.Logger.Info("FinishAndStartHttpServer...")
+    mux := &http.ServeMux{}
+    mux.HandleFunc("/status", func(writer http.ResponseWriter, request *http.Request) {
+        if pCtx.LastErr == nil {
+            writer.WriteHeader(http.StatusOK)
+            writer.Write([]byte("success"))
+        } else {
+            writer.WriteHeader(http.StatusInternalServerError)
+            writer.Write([]byte(fmt.Sprintf("%+v", pCtx.LastErr)))
+        }
+    })
+
+    mux.HandleFunc("/lastErr", func(writer http.ResponseWriter, request *http.Request) {
+        if pCtx.LastErr != nil {
+            writer.Write([]byte(fmt.Sprintf("%+v", pCtx.LastErr)))
+            return
+        }
+        writer.Write([]byte(""))
+    })
+
+    mux.HandleFunc("/context", func(writer http.ResponseWriter, request *http.Request) {
+        writer.Write([]byte(MustMarshalJSON(pCtx)))
+    })
+
+    mux.HandleFunc("/binlogs", func(writer http.ResponseWriter, request *http.Request) {
+        query := request.URL.Query()
+        xStore := query.Get("xstore")
+        if xStore == "" {
+            writer.Write([]byte("xstore param is required"))
+            return
+        }
+        for _, restoreBinlog := range pCtx.RestoreBinlogs {
+            if restoreBinlog.XStoreName == xStore {
+                files := make([]HttpBinlogFileInfo, 0, len(restoreBinlog.ResultSources))
+                for _, source := range restoreBinlog.ResultSources {
+                    trueLength := *source.GetTrueLength()
+                    files = append(files, HttpBinlogFileInfo{
+                        Filename: source.getBinlogFilename(),
+                        Length:   &trueLength,
+                    })
+                }
+                writer.Write([]byte(MustMarshalJSON(files)))
+                return
+            }
+        }
+        writer.Write([]byte("[]"))
+    })
+
+    mux.HandleFunc("/download/binlog", func(writer http.ResponseWriter, request *http.Request) {
+        query := request.URL.Query()
+        xStore := query.Get("xstore")
+        if xStore == "" {
+            writer.WriteHeader(http.StatusNotFound)
+            writer.Write([]byte("xstore param is required"))
+            return
+        }
+        filename := query.Get("filename")
+        if filename == "" {
+            writer.WriteHeader(http.StatusNotFound)
+            writer.Write([]byte("filename param is required"))
+            return
+        }
+        for _, restoreBinlog := range pCtx.RestoreBinlogs {
+            if restoreBinlog.XStoreName == xStore {
+                for _, source := range restoreBinlog.ResultSources {
+                    if filename == source.getBinlogFilename() {
+                        reader, err := source.OpenStream()
+                        if err != nil {
+                            pCtx.Logger.Error(err, "failed to open stream", "xstoreName", xStore, "filename", filename)
+                            writer.WriteHeader(http.StatusInternalServerError)
+                            return
+                        }
+                        // Serve only the effective (possibly truncated) length.
+                        length := int64(*source.GetTrueLength())
+                        writer.Header().Add("Content-Length", strconv.FormatInt(length, 10))
+                        writer.WriteHeader(http.StatusOK)
+                        io.CopyBuffer(writer, io.LimitReader(reader, length), make([]byte, CopyBufferSize))
+                        reader.Close()
+                        return
+                    }
+                }
+            }
+        }
+        writer.WriteHeader(http.StatusNotFound)
+    })
+
+    mux.HandleFunc("/download/recovertxs", func(writer http.ResponseWriter, request *http.Request) {
+        if pCtx.RecoverTxsBytes != nil {
+            writer.WriteHeader(http.StatusOK)
+            gw, _ := gzip.NewWriterLevel(writer, gzip.BestSpeed)
+            defer gw.Close()
+            gw.Write(pCtx.RecoverTxsBytes)
+        } else {
+            writer.WriteHeader(http.StatusNotFound)
+        }
+    })
+
+    mux.HandleFunc("/exit", func(writer http.ResponseWriter, request *http.Request) {
+        if !pCtx.Closed.CAS(false, true) {
+            return
+        }
+        exitCode := 0
+        if pCtx.LastErr != nil {
+            exitCode = 1
+        }
+        // Give the response a moment to flush before exiting.
+        go func() {
+            time.Sleep(2 * time.Second)
+            os.Exit(exitCode)
+        }()
+    })
+    listenAddr := fmt.Sprintf(":%d", pCtx.TaskConfig.HttpServerPort)
+    pCtx.Logger.Info("start http server, listen " + listenAddr)
+    err := http.ListenAndServe(listenAddr, &httpLogger{mux: mux, logger: pCtx.Logger})
+    return err
+}
diff --git a/pkg/pitr/workflow_test.go b/pkg/pitr/workflow_test.go
new file mode 100644
index 0000000..4f8c939
--- /dev/null
+++ b/pkg/pitr/workflow_test.go
@@ -0,0 +1,158 @@
+package pitr
+
+import (
+    "os"
+    "testing"
+    "time"
+
+    "github.com/alibaba/polardbx-operator/pkg/hpfs/config"
+    "github.com/alibaba/polardbx-operator/pkg/hpfs/filestream"
+    "sigs.k8s.io/controller-runtime/pkg/log/zap"
+)
+
+func startFileServer1() *filestream.FileServer {
+    config.ConfigFilepath = "/Users/busu/tmp/filestream/config.yaml"
+    config.InitConfig()
+    flowControl := filestream.NewFlowControl(filestream.FlowControlConfig{
+        MaxFlow:    1 << 40, // effectively unthrottled
+        TotalFlow:  (1 << 40) * 10,
+        MinFlow:    1 << 40,
+        BufferSize: 1 << 10,
+    })
+    flowControl.Start()
+    fileServer := filestream.NewFileServer("0.0.0.0", 9999, ".", flowControl)
+    go func() {
+        fileServer.Start()
+    }()
+    time.Sleep(1 * time.Second)
+    return fileServer
+}
+
+func TestFinishAndStartHttpServer(t *testing.T) {
+    startFileServer1()
+    pCtx := &Context{
+        TaskConfig: &TaskConfig{
+            HttpServerPort: 10000,
+        },
+        Logger: zap.New(zap.UseDevMode(true)).WithName("pitr"),
+        RestoreBinlogs: []RestoreBinlog{
+            {
+                XStoreName: "busu",
+                ResultSources: []BinlogSource{
+                    {
+                        Filename: "mysql_bin.000010",
+                        RSource: &RemoteSource{
+                            FsIp:   "127.0.0.1",
+                            FsPort: 9999,
+                            Sink: &config.Sink{
+                                Name: "default",
+                                Type: "oss",
+                            },
+                            MetaFilepath: "binlogbackup/default/polardb-x-2/16bd261e-ac47-42d7-bb2f-f2ff940d6780/polardb-x-2-wt9x-dn-0/326e21cb-7796-4fa1-b573-2fce08a872f9/polardb-x-2-wt9x-dn-0-cand-1/1678182799/0_1000/binlog-meta/mysql_bin.000010.txt",
+                            DataFilepath: "binlogbackup/default/polardb-x-2/16bd261e-ac47-42d7-bb2f-f2ff940d6780/polardb-x-2-wt9x-dn-0/326e21cb-7796-4fa1-b573-2fce08a872f9/polardb-x-2-wt9x-dn-0-cand-1/1678182799/0_1000/binlog-file/mysql_bin.000010",
+                        },
+                    },
+                },
+            },
+        },
+    }
+    FinishAndStartHttpServer(pCtx)
+}
+
+func PrepareConfig() {
+    config := TaskConfig{
+        Namespace:    "default",
+        PxcName:      "polardb-x-2",
+        PxcUid:       "16bd261e-ac47-42d7-bb2f-f2ff940d6780",
+        SinkName:     "default",
+        SinkType:     "oss",
+        HpfsEndpoint: "127.0.0.1:6543",
+        FsEndpoint:   "127.0.0.1:6643",
+        XStores: map[string]*XStoreConfig{
+            "polardb-x-2-wt9x-dn-0": {
+                GlobalConsistent:    true,
+                XStoreName:          "polardb-x-2-wt9x-dn-0",
+                XStoreUid:           "326e21cb-7796-4fa1-b573-2fce08a872f9",
+                BackupSetStartIndex: 2643291,
+                HeartbeatSname:      "pitr_sname",
+                Pods: map[string]*PodConfig{
+                    "polardb-x-2-wt9x-dn-0-cand-0": {
+                        PodName: "polardb-x-2-wt9x-dn-0-cand-0",
+                        Host:    "cn-beijing.172.16.2.204",
+                        LogDir:  "/data/xstore/default/polardb-x-2-wt9x-dn-0-cand-0/log",
+                    },
+                    "polardb-x-2-wt9x-dn-0-cand-1": {
+                        PodName: "polardb-x-2-wt9x-dn-0-cand-1",
+                        Host:    "cn-beijing.172.16.2.53",
+                        LogDir:  "/data/xstore/default/polardb-x-2-wt9x-dn-0-cand-1/log",
+                    },
+
"polardb-x-2-wt9x-dn-0-log-0": { + PodName: "polardb-x-2-wt9x-dn-0-log-0", + Host: "cn-beijing.172.16.2.118", + LogDir: "/data/xstore/default/polardb-x-2-wt9x-dn-0-log-0/log", + }, + }, + }, + "polardb-x-2-wt9x-dn-1": { + GlobalConsistent: true, + XStoreName: "polardb-x-2-wt9x-dn-1", + XStoreUid: "f38bea9c-2cac-4a27-ae21-997a7e30d737", + BackupSetStartIndex: 2643291, + HeartbeatSname: "pitr_sname", + Pods: map[string]*PodConfig{ + "polardb-x-2-wt9x-dn-1-cand-0": { + PodName: "polardb-x-2-wt9x-dn-1-cand-0", + Host: "cn-beijing.172.16.2.205", + LogDir: "/data/xstore/default/polardb-x-2-wt9x-dn-1-cand-0/log", + }, + "polardb-x-2-wt9x-dn-1-cand-1": { + PodName: "polardb-x-2-wt9x-dn-1-cand-1", + Host: "cn-beijing.172.16.2.119", + LogDir: "/data/xstore/default/polardb-x-2-wt9x-dn-1-cand-1/log", + }, + "polardb-x-2-wt9x-dn-1-log-0": { + PodName: "polardb-x-2-wt9x-dn-1-log-0", + Host: "cn-beijing.172.16.2.54", + LogDir: "/data/xstore/default/polardb-x-2-wt9x-dn-1-log-0/log", + }, + }, + }, + "polardb-x-2-wt9x-gms": { + GlobalConsistent: false, + XStoreName: "polardb-x-2-wt9x-gms-cand-0", + XStoreUid: "a949057a-6b8d-42f0-b3a8-4c6f41350496", + BackupSetStartIndex: 100, + HeartbeatSname: "pitr_sname", + Pods: map[string]*PodConfig{ + "polardb-x-2-wt9x-gms-cand-0": { + PodName: "polardb-x-2-wt9x-gms-cand-0", + Host: "cn-beijing.172.16.2.54", + LogDir: "/data/xstore/default/polardb-x-2-wt9x-gms-cand-0/log", + }, + "polardb-x-2-wt9x-gms-cand-1": { + PodName: "polardb-x-2-wt9x-gms-cand-1", + Host: "cn-beijing.172.16.2.118", + LogDir: "/data/xstore/default/polardb-x-2-wt9x-gms-cand-1/log", + }, + "polardb-x-2-wt9x-gms-log-0": { + PodName: "polardb-x-2-wt9x-gms-log-0", + Host: "cn-beijing.172.16.2.205", + LogDir: "/data/xstore/default/polardb-x-2-wt9x-gms-log-0/log", + }, + }, + }, + }, + Timestamp: 1678193950, + BinlogChecksum: "crc32", + } + configContent := MustMarshalJSON(config) + spillDir := "/Users/busu/tmp/pitr/spill" + os.Setenv(EnvSpillOutDirectory, spillDir) + configFilepath := "/Users/busu/tmp/pitr/conf/config.json" + os.Setenv(EnvConfigFilepath, configFilepath) + os.WriteFile(configFilepath, []byte(configContent), 0644) +} + +func TestDo(t *testing.T) { + PrepareConfig() + Run() +} diff --git a/pkg/probe/prober.go b/pkg/probe/prober.go index b5355e0..d0fd665 100644 --- a/pkg/probe/prober.go +++ b/pkg/probe/prober.go @@ -21,8 +21,10 @@ import ( "database/sql" "errors" "fmt" + "io" "net/http" "strconv" + "strings" "time" _ "github.com/go-sql-driver/mysql" @@ -36,6 +38,7 @@ const ( TypePolarDBX = "polardbx" TypeXStore = "xstore" TypeSelf = "server" + TypeCdc = "cdc" ) type Prober struct { @@ -54,7 +57,7 @@ type Prober struct { func (p *Prober) valid() bool { for _, t := range []string{ - TypePolarDBX, TypeXStore, TypeSelf, + TypePolarDBX, TypeXStore, TypeSelf, TypeCdc, } { if p.target == t { return true @@ -134,6 +137,25 @@ func (p *Prober) ping() error { return p.db.PingContext(p.ctx) } +func (p *Prober) cdcConnect() error { + httpClient := http.Client{Timeout: p.timeout} + url := fmt.Sprintf("http://%s:%d/status", p.host, p.port) + resp, err := httpClient.Get(url) + if err != nil { + return err + } + defer resp.Body.Close() + body, err := io.ReadAll(resp.Body) + if err != nil { + return err + } + bodyStr := strings.ToUpper(strings.TrimSpace(string(body))) + if bodyStr != "OK" { + return fmt.Errorf("unhealthy status body=%s , url=%s", bodyStr, url) + } + return nil +} + func (p *Prober) Liveness() error { switch p.target { case TypeXStore, TypePolarDBX: @@ -149,6 +171,8 @@ func (p *Prober) 
Liveness() error { } return p.ping() + case TypeCdc: + return p.cdcConnect() case TypeSelf: return nil default: diff --git a/pkg/probe/xstore_ext/plugin/xstore_galaxy.go b/pkg/probe/xstore_ext/plugin/xstore_galaxy.go index 0c575ec..acf0c73 100644 --- a/pkg/probe/xstore_ext/plugin/xstore_galaxy.go +++ b/pkg/probe/xstore_ext/plugin/xstore_galaxy.go @@ -31,15 +31,25 @@ import ( func init() { xstore_ext.RegisterXStoreExt("galaxy", newXStoreExt(func(ctx context.Context, host string, db *sql.DB) error { - // Check for private protocol port. - row := db.QueryRowContext(ctx, "select @@galaxyx_port") - var polarxPort uint16 - if err := row.Scan(&polarxPort); err != nil { + + //check if galaxy engine or xdb 8.0 + row := db.QueryRowContext(ctx, "select @@version") + var version string + if err := row.Scan(&version); err != nil { return err } + galaxyEngine := !strings.Contains(version, "X-Cluster") - if err := network.TestTcpConnectivity(ctx, host, polarxPort); err != nil { - return err + if galaxyEngine { + // Check for private protocol port. + row := db.QueryRowContext(ctx, "select @@galaxyx_port") + var polarxPort uint16 + if err := row.Scan(&polarxPort); err != nil { + return err + } + if err := network.TestTcpConnectivity(ctx, host, polarxPort); err != nil { + return err + } } if featuregate.EnableGalaxyClusterMode.Enabled() { diff --git a/pkg/util/name.go b/pkg/util/name/name.go similarity index 55% rename from pkg/util/name.go rename to pkg/util/name/name.go index d826037..9b8d394 100644 --- a/pkg/util/name.go +++ b/pkg/util/name/name.go @@ -14,15 +14,17 @@ See the License for the specific language governing permissions and limitations under the License. */ -package util +package name import ( "fmt" polardbxv1 "github.com/alibaba/polardbx-operator/api/v1" - polardbxmeta "github.com/alibaba/polardbx-operator/pkg/operator/v1/polardbx/meta" + "github.com/alibaba/polardbx-operator/pkg/meta/core/gms/security" "strings" ) +// Naming functions related to restore phase + func StableNamePrefix(xstore *polardbxv1.XStore) string { if len(xstore.Status.Rand) > 0 { return fmt.Sprintf("%s-%s-", xstore.Name, xstore.Status.Rand) @@ -60,11 +62,65 @@ func XStoreBackupStableName(xstoreBackup *polardbxv1.XStoreBackup, suffix string return XStoreBackupStableNamePrefix(xstoreBackup) + suffix } -// BackupRootPath is used to identify backup set by backup time -func BackupRootPath(polardbxBackup *polardbxv1.PolarDBXBackup) string { - startTime := polardbxBackup.Status.StartTime - timestamp := startTime.Format("20060102150405") // golang standard format - rootPath := fmt.Sprintf("%s/%s/%s-%s", - polardbxmeta.BackupPath, polardbxBackup.Labels[polardbxmeta.LabelName], polardbxBackup.Name, timestamp) - return rootPath +// Splicer is a helper to splice object name, which also provides alternative name to ensure length of name is under limit +type Splicer struct { + Tokens *[]string + Delimiter string + Limit int + Prefix string +} + +func NewSplicer(options ...func(*Splicer)) *Splicer { + splicer := &Splicer{ + Delimiter: "-", + Limit: 63, + } + for _, option := range options { + option(splicer) + } + return splicer +} + +func (s *Splicer) getAbbreviateName(sourceName string) string { + hashVal := security.MustSha1Hash(sourceName) + if s.Prefix == "" { + return hashVal + } + return fmt.Sprintf("%s%s%s", s.Prefix, s.Delimiter, hashVal) +} + +func (s *Splicer) GetName() string { + name := strings.Join(*s.Tokens, s.Delimiter) + if s.Limit == 0 || len(name) < s.Limit { + return name + } + return s.getAbbreviateName(name) +} + 
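+// Example (editor's sketch with hypothetical tokens): a spliced name that
+// fits the limit is returned verbatim, while an over-long one collapses to
+// "<prefix><delimiter><sha1 of the full name>":
+//
+//	short := NewSplicedName(WithTokens("pxc", "backup"), WithPrefix("pxb"))                  // "pxc-backup"
+//	long := NewSplicedName(WithTokens("pxc", strings.Repeat("x", 80)), WithPrefix("pxb"))    // "pxb-<sha1>"
+//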
+func WithTokens(tokens ...string) func(*Splicer) { + return func(splicer *Splicer) { + splicer.Tokens = &tokens + } +} + +func WithDelimiter(delimiter string) func(*Splicer) { + return func(splicer *Splicer) { + splicer.Delimiter = delimiter + } +} + +func WithLimit(limit int) func(*Splicer) { + return func(splicer *Splicer) { + splicer.Limit = limit + } +} + +func WithPrefix(prefix string) func(*Splicer) { + return func(splicer *Splicer) { + splicer.Prefix = prefix + } +} + +func NewSplicedName(options ...func(*Splicer)) string { + return NewSplicer(options...).GetName() } diff --git a/pkg/util/path/path.go b/pkg/util/path/path.go new file mode 100644 index 0000000..48557bc --- /dev/null +++ b/pkg/util/path/path.go @@ -0,0 +1,34 @@ +/* +Copyright 2021 Alibaba Group Holding Limited. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package path + +import "strings" + +func NewPathFromStringSequence(sequence ...string) string { + return strings.Join(sequence, "/") +} + +// GetBaseNameFromPath gets last non-empty token in the path +func GetBaseNameFromPath(path string) string { + sequence := strings.Split(path, "/") + for i := len(sequence) - 1; i >= 0; i-- { + if len(sequence[i]) != 0 { + return sequence[i] + } + } + return "" +} diff --git a/pkg/webhook/polardbxbackup/validator.go b/pkg/webhook/polardbxbackup/validator.go new file mode 100644 index 0000000..f97b566 --- /dev/null +++ b/pkg/webhook/polardbxbackup/validator.go @@ -0,0 +1,124 @@ +/* +Copyright 2021 Alibaba Group Holding Limited. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package polardbxbackup + +import ( + "context" + "errors" + v1 "github.com/alibaba/polardbx-operator/api/v1" + "github.com/alibaba/polardbx-operator/api/v1/polardbx" + "github.com/alibaba/polardbx-operator/pkg/hpfs/filestream" + "github.com/alibaba/polardbx-operator/pkg/operator/v1/config" + "github.com/alibaba/polardbx-operator/pkg/webhook/extension" + "github.com/go-logr/logr" + "github.com/google/uuid" + apierrors "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/util/validation/field" + "sigs.k8s.io/controller-runtime/pkg/client" + "strconv" + "strings" +) + +type Validator struct { + client.Reader + logr.Logger + configLoader func() config.Config + client *filestream.FileClient +} + +const magicString = "polardbx-filestream-validation" + +func (v *Validator) getFilestreamClient() (*filestream.FileClient, error) { + if v.client == nil { + hostPort := strings.SplitN(v.configLoader().Store().FilestreamServiceEndpoint(), ":", 2) + if len(hostPort) < 2 { + return nil, errors.New("invalid filestream endpoint, please check config of controller: " + + v.configLoader().Store().FilestreamServiceEndpoint()) + } + port, err := strconv.Atoi(hostPort[1]) + if err != nil { + return nil, errors.New("invalid filestream port, please check config of controller: " + hostPort[1]) + } + v.client = filestream.NewFileClient(hostPort[0], port, nil) + } + return v.client, nil +} + +func (v *Validator) ValidateCreate(ctx context.Context, obj runtime.Object) error { + pxcBackup := obj.(*v1.PolarDBXBackup) + + // validate storage configure + if pxcBackup.Spec.StorageProvider.StorageName == "" { + return field.Required(field.NewPath("spec", "storageProvider", "storageName"), + "storage name must be provided") + } + if pxcBackup.Spec.StorageProvider.Sink == "" { + return field.Required(field.NewPath("spec", "storageProvider", "sink"), + "sink must be provided") + } + filestreamAction, err := polardbx.NewBackupStorageFilestreamAction(pxcBackup.Spec.StorageProvider.StorageName) + if err != nil { + return field.Invalid(field.NewPath("spec", "storageProvider", "storageName"), + pxcBackup.Spec.StorageProvider.StorageName, "unsupported storage") + } + + // validate whether storage is available + fsClient, err := v.getFilestreamClient() + if err != nil { + return apierrors.NewInternalError(err) + } + actionMetadata := filestream.ActionMetadata{ + Action: filestreamAction.Upload, + Sink: pxcBackup.Spec.StorageProvider.Sink, + RequestId: uuid.New().String(), + Filename: magicString, + } + sentBytes, err := fsClient.Upload(strings.NewReader(magicString), actionMetadata) + if err != nil || sentBytes == 0 { + return field.Invalid(field.NewPath("spec", "storageProvider"), pxcBackup.Spec.StorageProvider, + "invalid storage, please check configuration of both backup and hpfs") + } + + return nil +} + +func (v *Validator) ValidateUpdate(ctx context.Context, oldObj, newObj runtime.Object) error { + oldBackup, newBackup := oldObj.(*v1.PolarDBXBackup), newObj.(*v1.PolarDBXBackup) + if oldBackup.Name != newBackup.Name { + return field.Forbidden(field.NewPath("metadata", "name"), "immutable field") + } + if oldBackup.Spec.Cluster != newBackup.Spec.Cluster { + return field.Forbidden(field.NewPath("spec", "cluster"), "immutable field") + } + if oldBackup.Spec.StorageProvider != newBackup.Spec.StorageProvider { + return field.Forbidden(field.NewPath("spec", "storageProvider"), "immutable field") + } + return nil +} + +func (v *Validator) ValidateDelete(ctx context.Context, obj 
runtime.Object) error { + return nil +} + +func NewPolarDBXBackupValidator(r client.Reader, logger logr.Logger, configLoader func() config.Config) extension.CustomValidator { + return &Validator{ + Reader: r, + Logger: logger, + configLoader: configLoader, + } +} diff --git a/pkg/webhook/polardbxbackup/webhook.go b/pkg/webhook/polardbxbackup/webhook.go new file mode 100644 index 0000000..a94bc0f --- /dev/null +++ b/pkg/webhook/polardbxbackup/webhook.go @@ -0,0 +1,41 @@ +/* +Copyright 2021 Alibaba Group Holding Limited. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package polardbxbackup + +import ( + "context" + polardbxv1 "github.com/alibaba/polardbx-operator/api/v1" + "github.com/alibaba/polardbx-operator/pkg/operator/v1/config" + "github.com/alibaba/polardbx-operator/pkg/webhook/extension" + "k8s.io/apimachinery/pkg/runtime/schema" + ctrl "sigs.k8s.io/controller-runtime" +) + +func SetupWebhooks(ctx context.Context, mgr ctrl.Manager, apiPath string, configLoader func() config.Config) error { + gvk := schema.GroupVersionKind{ + Group: polardbxv1.GroupVersion.Group, + Version: polardbxv1.GroupVersion.Version, + Kind: "PolarDBXBackup", + } + + mgr.GetWebhookServer().Register(extension.GenerateValidatePath(apiPath, gvk), + extension.WithCustomValidator(&polardbxv1.PolarDBXBackup{}, + NewPolarDBXBackupValidator(mgr.GetAPIReader(), + mgr.GetLogger().WithName("webhook.validate.polardbxbackup"), + configLoader))) + return nil +} diff --git a/pkg/webhook/webhooks.go b/pkg/webhook/webhooks.go index da2a2f5..d97df2f 100644 --- a/pkg/webhook/webhooks.go +++ b/pkg/webhook/webhooks.go @@ -18,6 +18,8 @@ package webhook import ( "context" + "github.com/alibaba/polardbx-operator/pkg/operator/v1/config" + "github.com/alibaba/polardbx-operator/pkg/webhook/polardbxbackup" "net/http" ctrl "sigs.k8s.io/controller-runtime" @@ -29,7 +31,7 @@ import ( const ApiPath = "/apis/admission.polardbx.aliyun.com/v1" -func SetupWebhooks(ctx context.Context, mgr ctrl.Manager, configPath string) error { +func SetupWebhooks(ctx context.Context, mgr ctrl.Manager, configPath string, configLoader func() config.Config) error { // Hack: for discovery. Awful hacking. 
mgr.GetWebhookServer().Register(ApiPath, http.HandlerFunc( func(w http.ResponseWriter, request *http.Request) { @@ -51,5 +53,9 @@ func SetupWebhooks(ctx context.Context, mgr ctrl.Manager, configPath string) err return err } + if err := polardbxbackup.SetupWebhooks(ctx, mgr, ApiPath, configLoader); err != nil { + return err + } + return nil } diff --git a/test/framework/polardbxcluster/expect.go b/test/framework/polardbxcluster/expect.go index e9d93cd..ee62211 100644 --- a/test/framework/polardbxcluster/expect.go +++ b/test/framework/polardbxcluster/expect.go @@ -20,20 +20,16 @@ import ( "bytes" "context" "database/sql" + "errors" "fmt" - "path/filepath" "strings" "time" "github.com/alibaba/polardbx-operator/test/framework/polardbxparameter" - "k8s.io/client-go/rest" - "k8s.io/client-go/tools/clientcmd" - - "k8s.io/client-go/util/homedir" - polardbxmeta "github.com/alibaba/polardbx-operator/pkg/operator/v1/polardbx/meta" v1 "github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1" + "k8s.io/client-go/rest" "github.com/onsi/gomega" appsv1 "k8s.io/api/apps/v1" @@ -936,19 +932,13 @@ func (e *Expectation) ExpectDNParameterPersistenceOk(expectedParams map[string]s localPort := local.AcquireLocalPort() defer local.ReleaseLocalPort(localPort) - home := homedir.HomeDir() - kubeconfig := filepath.Join(home, ".kube", "config") - - cfg, err := clientcmd.BuildConfigFromFlags("", kubeconfig) - if err != nil { - common.ExpectNoError(err, "could not get config") + cfg := framework.TestContext.KubeConfig + if cfg == nil { + common.ExpectNoError(errors.New("could not get config")) } setKubeConfig(cfg) - //cfg, err := rest.InClusterConfig() - //common.ExpectNoError(err, "not connectable") - clientset, err := kubernetes.NewForConfig(cfg) common.ExpectNoError(err, "not connectable") diff --git a/test/framework/polardbxparameter/wait.go b/test/framework/polardbxparameter/wait.go index 212e6f7..73265b1 100644 --- a/test/framework/polardbxparameter/wait.go +++ b/test/framework/polardbxparameter/wait.go @@ -59,6 +59,20 @@ func WaitForMyConfOverrideUpdates(clientset kubernetes.Interface, config *restcl s := strings.ReplaceAll(stdout.String(), " ", "") configs := strings.Split(s, "\n") + stdout, stderr = new(bytes.Buffer), new(bytes.Buffer) + err = ExecCmd(clientset, config, &pod, ns, "cat /data/config/my.cnf.override.version", nil, stdout, stderr) + if err != nil { + return false, nil + } + newVersion := stdout.String() + + stdout, stderr = new(bytes.Buffer), new(bytes.Buffer) + err = ExecCmd(clientset, config, &pod, ns, "cat /data/mysql/conf/my.cnf.override.version", nil, stdout, stderr) + if err != nil { + return false, nil + } + oldVersion := stdout.String() + nowConfigs := make(map[string]string) for _, config := range configs { if config == "" || config[0] == '[' { @@ -71,7 +85,7 @@ func WaitForMyConfOverrideUpdates(clientset kubernetes.Interface, config *restcl } for k, v := range expectedParams { - if nowConfigs[k] == v { + if nowConfigs[k] == v && newVersion == oldVersion { return true, nil } } diff --git a/tools/xstore/cli/binlogbackup.py b/tools/xstore/cli/binlogbackup.py index 01ebb7c..8ff3a26 100644 --- a/tools/xstore/cli/binlogbackup.py +++ b/tools/xstore/cli/binlogbackup.py @@ -123,11 +123,12 @@ def get_max_log_from_offset_gms(filestream_client, binlog_end_offset_path, binlo def get_max_log_from_cp(filestream_client, indexes_path, binlog_backup_dir, xstore_name, logger): indexes_local_path = os.path.join(binlog_backup_dir, "indexes") 
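+    # Editor's note: each line of the downloaded "indexes" file appears to be
+    # "<xstore_name>:<binlog_file>:<offset>" (inferred from the parsing below);
+    # the trailing colon in xstore_pattern avoids matching a name that is a
+    # prefix of another, e.g. "pxc-dn-1" inside "pxc-dn-10".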
     filestream_client.download_to_file(remote=indexes_path, local=indexes_local_path, logger=logger)
+    xstore_pattern = xstore_name + ':'  # such as "pxc-dn-1:"
     with open(indexes_local_path, 'r') as f:
         for text_line in f.readlines():
-            m = re.search(xstore_name, text_line)
+            m = re.search(xstore_pattern, text_line)
             if m:
-                max_log_info = text_line.split(xstore_name+":")[-1].strip()
+                max_log_info = text_line.split(xstore_pattern)[-1].strip()
                 break
     logger.info("max_log_info:" + max_log_info)
     return max_log_info.split(':')[0], max_log_info.split(':')[1]
diff --git a/tools/xstore/cli/consensus.py b/tools/xstore/cli/consensus.py
index 28f57fe..b79dd19 100644
--- a/tools/xstore/cli/consensus.py
+++ b/tools/xstore/cli/consensus.py
@@ -11,14 +11,12 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
+import click
 import re
 import sys
-
-import click
-import pymysql
-
 from core import channel, convention
-from core.consensus import ConsensusRole, ConsensusExtRole, ConsensusNode, SlaveStatus
+from core.consensus import ConsensusRole, ConsensusExtRole, ConsensusNode
+
 from .common import global_mgr, print_rows
@@ -254,6 +252,55 @@ def drop_learner(node):
 consensus_group.add_command(drop_learner)
 
 
+@click.command(name='learner-to-follower')
+@click.argument('node')
+def change_to_follower_from_learner(node):
+    shared_channel = global_mgr.shared_channel()
+    addr = _get_addr_from_argument(node, shared_channel)
+    with global_mgr.consensus_manager() as mgr:
+        mgr.upgrade_learner_to_follower(addr)
+
+
+consensus_group.add_command(change_to_follower_from_learner)
+
+
+@click.command(name='enable-election')
+def enable_election():
+    with global_mgr.consensus_manager() as mgr:
+        mgr.enable_follower_election()
+
+
+consensus_group.add_command(enable_election)
+
+
+@click.command(name='disable-election')
+def disable_election():
+    with global_mgr.consensus_manager() as mgr:
+        mgr.disable_follower_election()
+
+
+consensus_group.add_command(disable_election)
+
+
+@click.command(name='update-cluster-info')
+@click.argument('cluster-info')
+def update_cluster_info(cluster_info):
+    with global_mgr.consensus_manager() as mgr:
+        mgr.update_cluster_info(cluster_info)
+
+
+consensus_group.add_command(update_cluster_info)
+
+
+@click.command(name='prepare-handle-indicate')
+@click.argument('action')
+def prepare_handle_indicate(action):
+    global_mgr.engine().prepare_handle_indicate(action)
+
+
+consensus_group.add_command(prepare_handle_indicate)
+
+
 @click.command(name='slave-status')
 def show_status():
     with global_mgr.consensus_manager() as mgr:
@@ -265,14 +312,31 @@ def show_status():
                'slave_sql_running',
                'slave_sql_running_state',
                'seconds_behind_master',
+               'last_errno',
+               'last_error',
+               'last_io_errno',
+               'last_io_error',
+               'last_sql_errno',
+               'last_sql_error'
            ), rows=[(slave_status.relay_log_file, slave_status.relay_log_pos,
                      slave_status.slave_io_running, slave_status.slave_sql_running,
                      slave_status.slave_sql_running_state,
-                     slave_status.seconds_behind_master)])
+                     slave_status.seconds_behind_master, slave_status.last_errno, slave_status.last_error,
+                     slave_status.last_io_errno, slave_status.last_io_error, slave_status.last_sql_errno,
+                     slave_status.last_sql_error)])
 
 
 consensus_group.add_command(show_status)
 
 
+@click.command(name='set-readonly')
+def set_readonly():
+    with global_mgr.consensus_manager() as mgr:
+        mgr.set_readonly()
+
+
+consensus_group.add_command(set_readonly)
+
+
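+# Usage sketch (editor's note; the invocation prefix depends on how the xstore
+# CLI entrypoint is installed, so treat it as an assumption): the commands
+# above hang off the `consensus` group, e.g.
+#   ... consensus learner-to-follower <node>
+#   ... consensus disable-election
+#   ... consensus set-readonly
+
+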
 @click.group(name='log')
 def consensus_log_group():
     pass
diff --git a/tools/xstore/cli/engine.py b/tools/xstore/cli/engine.py
index aa6ecbf..db9c2bd 100644
--- a/tools/xstore/cli/engine.py
+++ b/tools/xstore/cli/engine.py
@@ -35,12 +35,17 @@ def version():
 
 
 @click.command(name='parameter')
-@click.option('-k', '--key', required=True, type=str)
-@click.option('-v', '--value', required=True, type=str)
+@click.option('-k', '--key', required=True, multiple=True)
+@click.option('-v', '--value', required=True, multiple=True)
 def set_global(key, value):
     with global_mgr.new_connection() as conn:
         with conn.cursor() as cur:
-            cmd = "SET GLOBAL " + key + " = " + value
+            # Keys and values are paired positionally, e.g.
+            # "SET GLOBAL a = 1, b = 2".
+            cmd = "SET GLOBAL " + ", ".join(
+                "%s = %s" % (k, v) for k, v in zip(key, value))
             cur.execute(cmd)
             conn.commit()
diff --git a/tools/xstore/cli/myconfig.py b/tools/xstore/cli/myconfig.py
index df85e02..f80564f 100644
--- a/tools/xstore/cli/myconfig.py
+++ b/tools/xstore/cli/myconfig.py
@@ -11,10 +11,13 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
+import time
 
 import click
 
 from .common import global_mgr
+from .utils import timer
+from core.config.mysql import MySQLConfigManager
 
 
 @click.group(name='myconfig')
@@ -23,8 +26,17 @@ def my_config_group():
 
 
 @click.command(name='update')
+@timer.timeout(300)
 def update():
-    global_mgr.engine().update_config()
+    # Wait (bounded by the timeout above) until a newer config version is
+    # observed on disk, then apply it.
+    while True:
+        bigger_version = MySQLConfigManager.check_config_version(
+            global_mgr.engine().file_config_version,
+            global_mgr.engine().file_config_override_version,
+        )
+        if bigger_version:
+            global_mgr.engine().update_config()
+            break
+        time.sleep(1)
 
 
 my_config_group.add_command(update)
diff --git a/tools/xstore/cli/process.py b/tools/xstore/cli/process.py
index 1155e78..b22392f 100644
--- a/tools/xstore/cli/process.py
+++ b/tools/xstore/cli/process.py
@@ -69,6 +69,18 @@ def kill_engine_all_process():
 process_group.add_command(kill_engine_all_process)
 
 
+@click.command(name='kill_mysqld')
+def kill_process_mysqld():
+    subprocess.Popen(
+        ["/usr/bin/sh", "-c",
+         "pid=`ps -ef |grep mysqld | grep loose-pod-name | grep defaults-file | grep -v mysqld_safe | awk '{print "
+         "$2}'`&&kill $pid"],
+        cwd=None, stdout=None, stderr=None)
+
+
+process_group.add_command(kill_process_mysqld)
+
+
 @click.command(name='check_std_err_complete')
 @click.option('--filepath', type=str)
 def check_std_err_complete(filepath: str, keyword="completed OK!"):
diff --git a/tools/xstore/cli/recover.py b/tools/xstore/cli/recover.py
index 92dbebe..01d23d7 100644
--- a/tools/xstore/cli/recover.py
+++ b/tools/xstore/cli/recover.py
@@ -16,6 +16,7 @@ import os.path
 
 import click
+import wget
 
 from core.context import Context
 from core.log import LogFactory
@@ -45,10 +46,15 @@ def start(restore_context, target_pod, password):
     remote_cp_path = params["cpfilePath"]
     storage_name = params["storageName"]
     sink = params["sink"]
+    pitr_endpoint = params["pitrEndpoint"] if "pitrEndpoint" in params else ""
 
-    filestream_client = FileStreamClient(context, BackupStorage[str.upper(storage_name)], sink)
     local_cp_path = os.path.join(RESTORE_TEMP_DIR, "set.cp")
-    filestream_client.download_to_file(remote=remote_cp_path, local=local_cp_path, logger=logger)
+    if len(pitr_endpoint) == 0:
+        filestream_client = FileStreamClient(context, BackupStorage[str.upper(storage_name)], sink)
+        filestream_client.download_to_file(remote=remote_cp_path, local=local_cp_path, logger=logger)
+    else:
+
download_url = "/".join([pitr_endpoint, "download", "recovertxs"]) + wget.download(download_url, local_cp_path) recover_cmd = [context.bb_home, 'recover', '-f', local_cp_path, diff --git a/tools/xstore/cli/restore.py b/tools/xstore/cli/restore.py index 07cd01b..bd337c0 100644 --- a/tools/xstore/cli/restore.py +++ b/tools/xstore/cli/restore.py @@ -16,6 +16,7 @@ import configparser import fcntl import json +import os import shutil import subprocess import sys @@ -29,7 +30,8 @@ from core.context.mycnf_renderer import MycnfRenderer from core.backup_restore.storage.filestream_client import FileStreamClient, BackupStorage from core.backup_restore.utils import check_run_process - +import wget +import requests RESTORE_TEMP_DIR = "/data/mysql/restore" CONN_TIMEOUT = 30 @@ -53,6 +55,9 @@ def start(restore_context): binlog_dir_path = params["binlogDirPath"] storage_name = params["storageName"] sink = params["sink"] + pitr_endpoint = params["pitrEndpoint"] if "pitrEndpoint" in params else "" + pitr_xstore = params["pitrXStore"] if "pitrXStore" in params else "" + logger.info('start restore: backup_file_path=%s' % backup_file_path) context = Context() @@ -75,7 +80,8 @@ def start(restore_context): apply_backup_file(context, logger) - mysql_bin_list = download_binlogbackup_file(binlog_dir_path, filestream_client, logger) + mysql_bin_list = download_binlogbackup_file(binlog_dir_path, filestream_client, logger) if len( + pitr_endpoint) == 0 else download_pitr_binloglist(pitr_endpoint, pitr_xstore, logger) copy_binlog_to_new_path(mysql_bin_list, context, logger) @@ -144,6 +150,24 @@ def download_binlogbackup_file(binlog_dir_path, filestream_client, logger): return mysql_binlog_list +def download_pitr_binloglist(pitrEndpoint, xstore, logger): + binlogListUrl = "/".join([pitrEndpoint, "binlogs"]) + ("?xstore=%s" % xstore) + response = requests.get(binlogListUrl) + mysql_binlog_list = [] + if response.status_code == 200: + logger.info("binlogs http response %s" % response.content) + binlogs = json.loads(response.content) + for binlog in binlogs: + mysql_binlog_list.append(binlog['filename']) + else: + raise Exception("failed to get binlogs url = %s" % binlogListUrl) + for binlog in mysql_binlog_list: + downloadUrl = "/".join([pitrEndpoint, "download", "binlog"]) + ("?xstore=%s" % xstore) + "&" + ( + "filename=%s" % binlog) + wget.download(downloadUrl, os.path.join(RESTORE_TEMP_DIR, binlog)) + return mysql_binlog_list + + def copy_binlog_to_new_path(mysql_bin_list, context, logger): # copy backup binlog to new binlog path log_dir = context.volume_path(VOLUME_DATA, "log") @@ -199,8 +223,15 @@ def initialize_local_mycnf(context: Context, logger): override_config = configparser.ConfigParser(allow_no_value=True) override_config.read(context.mycnf_override_path) + overrides = [context.mycnf_system_config(), override_config] + if os.path.exists(context.file_config_override): + # override file has the highest priority + override_file_config = configparser.ConfigParser(allow_no_value=True) + override_file_config.read(context.file_config_override) + overrides += [override_file_config] + r = MycnfRenderer(context.mycnf_template_path) - r.render(extras=[context.mycnf_system_config(), override_config], fp=mycnf_file) + r.render(extras=overrides, fp=mycnf_file) # Release the lock fcntl.flock(mycnf_file.fileno(), fcntl.LOCK_UN) @@ -212,8 +243,8 @@ def apply_backup_file(context, logger): apply_backup_cmd = "" if context.is_galaxy80(): apply_backup_cmd = "%s --defaults-file=%s --prepare --target-dir=%s 2> %s/applybackup.log" \ 
- % (context.xtrabackup, context.mycnf_path, context.volume_path(VOLUME_DATA, 'data'), - context.volume_path(VOLUME_DATA, "log")) + % (context.xtrabackup, context.mycnf_path, context.volume_path(VOLUME_DATA, 'data'), + context.volume_path(VOLUME_DATA, "log")) elif context.is_xcluster57(): apply_backup_cmd = "%s --defaults-file=%s --apply-log %s 2> %s/applybackup.log" \ % (context.xtrabackup, context.mycnf_path, context.volume_path(VOLUME_DATA, 'data'), diff --git a/tools/xstore/cli/utils/__init__.py b/tools/xstore/cli/utils/__init__.py new file mode 100644 index 0000000..d8c567f --- /dev/null +++ b/tools/xstore/cli/utils/__init__.py @@ -0,0 +1,15 @@ +# Copyright 2021 Alibaba Group Holding Limited. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from .timer import * diff --git a/tools/xstore/cli/utils/timer.py b/tools/xstore/cli/utils/timer.py new file mode 100644 index 0000000..6f360e5 --- /dev/null +++ b/tools/xstore/cli/utils/timer.py @@ -0,0 +1,41 @@ +# Copyright 2021 Alibaba Group Holding Limited. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import functools +import signal + + +def timeout(sec): + """ + timeout decorator + :param sec: function raise TimeoutError after ? seconds + """ + def decorator(func): + @functools.wraps(func) + def wrapped_func(*args, **kwargs): + + def _handle_timeout(signum, frame): + err_msg = f'Function {func.__name__} timed out after {sec} seconds' + raise TimeoutError(err_msg) + + signal.signal(signal.SIGALRM, _handle_timeout) + signal.alarm(sec) + try: + result = func(*args, **kwargs) + finally: + signal.alarm(0) + return result + + return wrapped_func + return decorator \ No newline at end of file diff --git a/tools/xstore/core/config/mysql.py b/tools/xstore/core/config/mysql.py index a870536..e68cf2f 100644 --- a/tools/xstore/core/config/mysql.py +++ b/tools/xstore/core/config/mysql.py @@ -14,6 +14,7 @@ import collections import configparser +import fcntl import os from typing import AnyStr, Any @@ -25,6 +26,8 @@ class MySQLConfigManager(ConfigManager): Config manager for MySQL (my.cnf). 
""" + opt_canonical_white_list = {"loose_performance_schema_instrument"} + def __init__(self, config_path): super().__init__(config_path) @@ -37,7 +40,7 @@ def canonical_options(cls, config: configparser.ConfigParser): for section, proxy in config.items(): for opt, value in proxy.items(): c_opt = cls._canonical_option_key(opt) - if c_opt != opt: + if c_opt != opt and c_opt not in MySQLConfigManager.opt_canonical_white_list: proxy.pop(opt) proxy[c_opt] = value return config @@ -77,6 +80,48 @@ def write_config(cls, path, config: configparser.ConfigParser, sort: bool = True with open(path, 'w') as f: config.write(fp=f) + @classmethod + def check_config_version(cls, config_version_path, override_version_path) -> bool: + if not os.path.exists(override_version_path): + return False + with open(override_version_path, 'r') as o: + override_version = o.read() + + if not os.path.exists(config_version_path): + return True + else: + with open(config_version_path, 'r+') as c: + config_version = c.read() + if int(override_version) > int(config_version): + return True + return False + + @classmethod + def write_config_version(cls, config_version_path, override_version_path): + if not os.path.exists(override_version_path): + override_version = "0" + else: + with open(override_version_path, 'r') as o: + override_version = o.read() + + if not os.path.exists(config_version_path): + with open(config_version_path, 'w+') as c: + fcntl.flock(c.fileno(), fcntl.LOCK_EX) + try: + c.write(override_version) + finally: + fcntl.flock(c.fileno(), fcntl.LOCK_UN) + else: + with open(config_version_path, 'r+') as c: + fcntl.flock(c.fileno(), fcntl.LOCK_EX) + try: + config_version = c.read() + if int(override_version) > int(config_version): + c.seek(0) + c.write(override_version) + finally: + fcntl.flock(c.fileno(), fcntl.LOCK_UN) + def _update_config(self, config: configparser.ConfigParser): # Write to resource first. config_tmp = os.path.join(os.path.dirname(self._config_path), 'my.cnf.tmp') diff --git a/tools/xstore/core/consensus/manager.py b/tools/xstore/core/consensus/manager.py index 58d8232..3cb9bde 100644 --- a/tools/xstore/core/consensus/manager.py +++ b/tools/xstore/core/consensus/manager.py @@ -12,13 +12,12 @@ # See the License for the specific language governing permissions and # limitations under the License. 
diff --git a/tools/xstore/core/consensus/manager.py b/tools/xstore/core/consensus/manager.py
index 58d8232..3cb9bde 100644
--- a/tools/xstore/core/consensus/manager.py
+++ b/tools/xstore/core/consensus/manager.py
@@ -12,13 +12,12 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
+import pymysql
 from abc import abstractmethod
 from contextlib import AbstractContextManager
 from enum import Enum
 from typing import NamedTuple, Union, Collection
 
-import pymysql
-
 CONSENSUS_ROLE_LEADER = 'leader'
 CONSENSUS_ROLE_FOLLOWER = 'follower'
 # special type of follower
@@ -133,6 +132,13 @@ class RoleMismatchError(Exception):
     pass
 
 
+# last_errno=r['last_errno'],
+# last_error=r['last_error'],
+# last_io_errno=r['last_io_errno'],
+# last_io_error=r['last_io_error'],
+# last_sql_errno=r['last_sql_errno'],
+# last_sql_error=r['last_sql_error']
+
 class SlaveStatus(NamedTuple):
     """
     the result of show slave status
@@ -143,7 +149,12 @@ class SlaveStatus(NamedTuple):
     slave_sql_running: str
     slave_sql_running_state: str
     seconds_behind_master: float
-
+    last_errno: str
+    last_error: str
+    last_io_errno: str
+    last_io_error: str
+    last_sql_errno: str
+    last_sql_error: str
 
 
 class AbstractConsensusManager(AbstractContextManager):
     def __init__(self, conn: pymysql.Connection, current_addr: str):
@@ -303,7 +314,7 @@ def drop_follower(self, target: Union[str, ConsensusNode]):
 
     # FIXME: ConsensusNode -> ip:port
     @abstractmethod
-    def upgrade_learner_to_follower(self, target: Union[str, ConsensusNode]) -> ConsensusNode:
+    def upgrade_learner_to_follower(self, addr) -> ConsensusNode:
         """
         Upgrade the learner to follower.
 
@@ -313,7 +324,7 @@ def upgrade_learner_to_follower(self, target: Union[str, ConsensusNode]) -> Cons
 
     # FIXME: ConsensusNode -> ip:port
     @abstractmethod
-    def downgrade_follower_to_learner(self, target: Union[str, ConsensusNode]) -> ConsensusNode:
+    def downgrade_follower_to_learner(self, addr) -> ConsensusNode:
         """
         Downgrade the follower to learner.
 
@@ -389,3 +400,15 @@ def show_slave_status(self) -> SlaveStatus:
         """
         return query result of "show slave status"
         """
+
+    @abstractmethod
+    def update_cluster_info(self, cluster_info):
+        """
+        update cluster info by SQL
+        """
+
+    @abstractmethod
+    def set_readonly(self):
+        """
+        set readonly
+        """
diff --git a/tools/xstore/core/consensus/manager_impl.py b/tools/xstore/core/consensus/manager_impl.py
index fa227ce..9af8fd7 100644
--- a/tools/xstore/core/consensus/manager_impl.py
+++ b/tools/xstore/core/consensus/manager_impl.py
@@ -49,7 +49,13 @@ def _slave_status_from_row(r):
         slave_io_running=r['slave_io_running'],
         slave_sql_running=r['slave_sql_running'],
         slave_sql_running_state=r['slave_sql_running_state'],
-        seconds_behind_master=r['seconds_behind_master']
+        seconds_behind_master=r['seconds_behind_master'],
+        last_errno=r['last_errno'],
+        last_error=r['last_error'],
+        last_io_errno=r['last_io_errno'],
+        last_io_error=r['last_io_error'],
+        last_sql_errno=r['last_sql_errno'],
+        last_sql_error=r['last_sql_error']
     )
 
 
@@ -211,23 +217,21 @@ def drop_follower(self, target: Union[str, ConsensusNode]):
         with self._conn.cursor() as cur:
             cur.execute('drop consensus_follower %d' % target_server_id)
 
-    def upgrade_learner_to_follower(self, target: Union[str, ConsensusNode]) -> ConsensusNode:
+    def upgrade_learner_to_follower(self, addr) -> ConsensusNode:
         self.check_current_role(ConsensusRole.LEADER)
 
-        target_server_id = self._get_server_id(target)
         with self._conn.cursor() as cur:
-            cur.execute('change consensus_learner %d to consensus_follower' % target_server_id)
+            cur.execute("change consensus_learner '%s' to consensus_follower" % addr)
 
-        return self.get_consensus_node(self._get_address(target))
+        return self.get_consensus_node(addr)
 
-    def downgrade_follower_to_learner(self, target: Union[str, ConsensusNode]) -> ConsensusNode:
+    def downgrade_follower_to_learner(self, addr) -> ConsensusNode:
         self.check_current_role(ConsensusRole.LEADER)
 
-        target_server_id = self._get_server_id(target)
         with self._conn.cursor() as cur:
-            cur.execute('change consensus_follower %d to consensus_learner' % target_server_id)
+            cur.execute('change consensus_follower "%s" to consensus_learner' % addr)
 
-        return self.get_consensus_node(self._get_address(target))
+        return self.get_consensus_node(addr)
 
     def configure_learner_source(self, learner: Union[str, ConsensusNode], source: Union[str, ConsensusNode], *,
                                  applied_index: bool):
@@ -254,16 +258,24 @@ def refresh_learner_meta(self):
             cur.execute('change consensus_learner for consensus_meta')
 
     def enable_follower_election(self):
-        self.check_current_role(ConsensusRole.FOLLOWER)
+        current_node = self._get_current_node()
+        if current_node.role != ConsensusRole.FOLLOWER:
+            return
 
         with self._conn.cursor() as cur:
-            cur.execute('set global consensus_disable_election = OFF')
+            cur.execute("set force_revise=ON")
+            cur.execute("set sql_log_bin=OFF")
+            cur.execute("set global consensus_disable_election = OFF")
 
     def disable_follower_election(self):
-        self.check_current_role(ConsensusRole.FOLLOWER)
+        current_node = self._get_current_node()
+        if current_node.role != ConsensusRole.FOLLOWER:
+            return
 
         with self._conn.cursor() as cur:
-            cur.execute('set global consensus_disable_election = ON')
+            cur.execute("set force_revise=ON")
+            cur.execute("set sql_log_bin=OFF")
+            cur.execute("set global consensus_disable_election = ON")
 
     def enable_weak_consensus_mode(self):
         self.check_current_role(ConsensusRole.LEADER)
@@ -313,6 +325,17 @@ def show_slave_status(self):
             row = fetchone_with_lowercase_fieldnames(cur)
             return _slave_status_from_row(row)
 
+    def update_cluster_info(self, cluster_info):
+        with self._conn.cursor() as cur:
+            cur.execute("set force_revise=ON")
+            cur.execute("set sql_log_bin=OFF")
+            cur.execute('update mysql.consensus_info set cluster_info="%s"' % cluster_info)
+
+    def set_readonly(self):
+        with self._conn.cursor() as cur:
+            cur.execute("FLUSH TABLES WITH READ LOCK")
+            cur.execute("SET GLOBAL read_only = 1")
+
 
 class ConsensusManager(AbstractConsensusManager):
     """
@@ -475,23 +498,21 @@ def drop_follower(self, target: Union[str, ConsensusNode]):
         node = self.downgrade_follower_to_learner(target)
         return self.drop_learner(node)
 
-    def upgrade_learner_to_follower(self, target: Union[str, ConsensusNode]) -> ConsensusNode:
+    def upgrade_learner_to_follower(self, addr) -> ConsensusNode:
         self.check_current_role(ConsensusRole.LEADER)
 
-        target_server_id = self._get_server_id(target)
         with self._conn.cursor() as cur:
-            cur.execute('call dbms_consensus.upgrade_learner(%d)' % target_server_id)
+            cur.execute("call dbms_consensus.upgrade_learner('%s')" % addr)
 
-        return self.get_consensus_node(self._get_address(target))
+        return self.get_consensus_node(addr)
 
-    def downgrade_follower_to_learner(self, target: Union[str, ConsensusNode]) -> ConsensusNode:
+    def downgrade_follower_to_learner(self, addr) -> ConsensusNode:
         self.check_current_role(ConsensusRole.LEADER)
 
-        target_server_id = self._get_server_id(target)
         with self._conn.cursor() as cur:
-            cur.execute('call dbms_consensus.downgrade_follower(%d)' % target_server_id)
+            cur.execute('call dbms_consensus.downgrade_follower("%s")' % addr)
 
-        return self.get_consensus_node(self._get_address(target))
+        return self.get_consensus_node(addr)
 
     def configure_learner_source(self, learner: Union[str, ConsensusNode], source: Union[str, ConsensusNode], *,
                                  applied_index: bool):
@@ -516,16 +537,24 @@ def refresh_learner_meta(self):
             cur.execute('call dbms_consensus.refresh_learner_meta()')
 
     def enable_follower_election(self):
-        self.check_current_role(ConsensusRole.FOLLOWER)
+        current_node = self._get_current_node()
+        if current_node.role != ConsensusRole.FOLLOWER:
+            return
 
         with self._conn.cursor() as cur:
-            cur.execute('set global consensus_disable_election = OFF')
+            cur.execute("set force_revise=ON")
+            cur.execute("set sql_log_bin=OFF")
+            cur.execute("set global consensus_disable_election = OFF")
 
     def disable_follower_election(self):
-        self.check_current_role(ConsensusRole.FOLLOWER)
+        current_node = self._get_current_node()
+        if current_node.role != ConsensusRole.FOLLOWER:
+            return
 
         with self._conn.cursor() as cur:
-            cur.execute('set global consensus_disable_election = ON')
+            cur.execute("set force_revise=ON")
+            cur.execute("set sql_log_bin=OFF")
+            cur.execute("set global consensus_disable_election = ON")
 
     def enable_weak_consensus_mode(self):
         self.check_current_role(ConsensusRole.LEADER)
@@ -574,18 +603,20 @@ def purge_consensus_log_to(self, target_log_index: int, *, local: bool = False,
         else:
             cur.execute('call dbms_consensus.local_purge_log(%d)' % to_purge_logs[-1].start_log_index)
 
-    def _slave_status_from_row(r) -> SlaveStatus:
-        return SlaveStatus(
-            relay_log_file=r['relay_log_file'],
-            relay_log_pos=int(r['relay_log_pos']),
-            slave_io_running=r['slave_io_running'],
-            slave_sql_running=r['slave_sql_running'],
-            slave_sql_running_state=r['slave_sql_running_state'],
-            seconds_behind_master=float(r['seconds_behind_master'])
-        )
 
     def show_slave_status(self) -> SlaveStatus:
         with self._conn.cursor() as cur:
             cur.execute('show slave status')
             row = fetchone_with_lowercase_fieldnames(cur)
-            return self._slave_status_from_row(row)
+            return _slave_status_from_row(row)
+
+    def update_cluster_info(self, cluster_info):
+        with self._conn.cursor() as cur:
+            cur.execute("set force_revise=ON")
+            cur.execute("set sql_log_bin=OFF")
+            cur.execute('update mysql.consensus_info set cluster_info="%s"' % cluster_info)
+
+    def set_readonly(self):
+        with self._conn.cursor() as cur:
+            cur.execute("FLUSH TABLES WITH READ LOCK")
+            cur.execute("SET GLOBAL read_only = 1")
diff --git a/tools/xstore/core/context/context.py b/tools/xstore/core/context/context.py
index 58b9b67..33fd380 100644
--- a/tools/xstore/core/context/context.py
+++ b/tools/xstore/core/context/context.py
@@ -77,6 +77,7 @@ def __init__(self):
         self.mysql_conf = os.path.join(self._volumes['data'], 'conf')
         self.mycnf_path = os.path.join(self.mysql_conf, 'my.cnf')
         self.mycnf_override_path = os.path.join(self.mysql_conf, 'dynamic.cnf')
+        self.file_config_override = os.path.join(self._volumes[convention.VOLUME_CONFIG], 'my.cnf.override')
         if self.is_galaxy80():
             self.mycnf_template_path = os.path.join(self._tools_home, 'core/engine/galaxy/templates', 'my.cnf')
         elif self.is_xcluster57():
diff --git a/tools/xstore/core/engine/engine.py b/tools/xstore/core/engine/engine.py
index 8a84f7f..b6cb095 100644
--- a/tools/xstore/core/engine/engine.py
+++ b/tools/xstore/core/engine/engine.py
@@ -21,7 +21,7 @@
 from abc import ABC, abstractmethod
 from core import consensus, convention
 from core.consensus import AbstractConsensusManager
-from core.context import Context
+from core.context import Context, PodInfo
 from typing import ClassVar, Sequence, AnyStr
 
 
@@ -179,6 +179,19 @@ def shutdown(self):
         shutdown mysql
         """
 
+    @abstractmethod
+    def prepare_handle_indicate(self, action):
+        """
+        prepare indicate
+        """
+
+    @abstractmethod
+    def try_handle_indicate(self):
+        """
+        check if it is necessary to handle indicate;
+        try to handle indicate
+        """
+
 
 class Mock(Engine):
     """
@@ -255,6 +268,12 @@ def is_restore_prepare(self) -> bool:
     def shutdown(self):
         return
 
+    def prepare_handle_indicate(self, action):
+        return
+
+    def try_handle_indicate(self):
+        return
+
 
 class EngineCommon(Engine, ABC):
     def __init__(self, context: Context):
@@ -341,7 +360,7 @@ def simple_daemon(self, cmd: Sequence[AnyStr], pid_file: str, *, interval=1, max
                 f.write('')
 
         retry_count = 0
-        while limit is None or retry_count < limit:
+        while limit is None or retry_count < limit or 'debug' == PodInfo().annotation(convention.ANNOTATION_RUNMODE):
             self.logger.info('starting process...')
 
             p = self.start_process(cmd)
@@ -525,3 +544,16 @@ def set_restore_prepare(self, restore_prepare: bool):
 
     def is_restore_prepare(self) -> bool:
         return self.restore_prepare
+
+    def prepare_handle_indicate(self, action):
+        indicate_file = self.context.volume_path(convention.VOLUME_DATA, 'handle_indicate')
+        with open(indicate_file, "w") as f:
+            f.write(action)
+
+    def try_handle_indicate(self):
+        indicate_file = self.context.volume_path(convention.VOLUME_DATA, 'handle_indicate')
+        if os.path.exists(indicate_file):
+            with open(indicate_file, "r") as f:
+                action = f.readline()
+                self.handle_indicate(action)
+            os.remove(indicate_file)
diff --git a/tools/xstore/core/engine/galaxy/engine.py b/tools/xstore/core/engine/galaxy/engine.py
index 4c7a4f4..df56762 100644
--- a/tools/xstore/core/engine/galaxy/engine.py
+++ b/tools/xstore/core/engine/galaxy/engine.py
@@ -23,6 +23,7 @@
 from core.consensus import AbstractConsensusManager, ConsensusManager
 from ..engine import EngineCommon
 from ...config.mysql import MySQLConfigManager
+from ..util import config_util
 
 ENGINE_NAME = 'galaxy'
 
@@ -46,10 +47,13 @@ def __init__(self, context: Context):
 
         self.file_config_template = self.context.volume_path(convention.VOLUME_CONFIG, 'my.cnf.template')
         self.file_config_override = self.context.volume_path(convention.VOLUME_CONFIG, 'my.cnf.override')
+        self.file_config_override_version = self.context.volume_path(convention.VOLUME_CONFIG,
+                                                                     'my.cnf.override.version')
 
         self.path_conf = os.path.join(self.vol_data_path, 'conf')
         self.file_config = os.path.join(self.path_conf, 'my.cnf')
         self.file_config_dynamic = os.path.join(self.path_conf, 'dynamic.cnf')
+        self.file_config_version = os.path.join(self.path_conf, 'my.cnf.override.version')
 
         self.path_data = os.path.join(self.vol_data_path, 'data')
         self.path_log = os.path.join(self.vol_data_path, 'log')
@@ -111,17 +115,17 @@ def _command_mysqld(self, binary: str = 'mysqld', *, extra_args: None or Dict[st
 
     def _get_cluster_info(self, learner: bool = False, local: bool = False):
         shared_channel = self.context.shared_channel()
+        pod_info = self.context.pod_info()
+        node_info = shared_channel.get_node_by_pod_name(pod_info.name())
 
         if local:
-            return '%s:%d@1' % (self.context.pod_info().ip(), self.context.port_paxos())
+            return '%s@1' % node_info.addr()
 
-        pod_info = self.context.pod_info()
         if learner:
-            node_info = shared_channel.get_node_by_pod_name(pod_info.name())
             return node_info.addr()
         else:
-            idx = shared_channel.get_sort_node_index(pod_info.name())
-            return ';'.join([n.addr() for n in shared_channel.list_sort_nodes()]) + '@' + str(idx + 1)
+            idx = shared_channel.get_node_index(self.context.pod_info().name())
+            return ';'.join([n.addr() for n in shared_channel.list_nodes()]) + '@' + str(idx + 1)
 
     def _new_initialize_command(self):
         return self._command_mysqld(extra_args={
@@ -165,7 +169,7 @@ def _system_config(self) -> configparser.ConfigParser:
         system_config['mysqld'] = {
             'user': 'mysql',
             'port': self.context.port_access(),
-            'galaxyx_port': int(self.context.port('polarx')),
+            'loose_galaxyx_port': int(self.context.port('polarx')),
             'loose_rpc_port': int(self.context.port('polarx')),
             'basedir': self.path_home,
             'datadir': self.path_data,
@@ -208,18 +212,15 @@ def _system_config(self) -> configparser.ConfigParser:
 
     def _default_dynamic_config(self) -> configparser.ConfigParser:
         config = configparser.ConfigParser(allow_no_value=True)
-        buffer_pool_size = int(self.context.pod_info().memory_limit() * 5 / 8)
+        dynamic_config = config_util.get_dynamic_mysql_cnf_by_spec(self.context.pod_info().cpu_limit(),
+                                                                   self.context.pod_info().memory_limit())
         config['mysqld'] = {
-            # Default using 5/8 of the memory limit.
-            'innodb_buffer_pool_size': str(buffer_pool_size),
-            'loose_rds_audit_log_buffer_size': str(int(buffer_pool_size / 100)),
-            'loose_innodb_replica_log_parse_buf_size': str(int(buffer_pool_size / 10)),
-            'loose_innodb_primary_flush_max_lsn_lag': str(int(buffer_pool_size / 11)),
-            'loose_extra_max_connections': str(65535),
-            'max_connections': str(65535),
-            'max_user_connections': str(65535),
-            'mysqlx_max_connections': str(4096),
-            'loose_galaxy_max_connections': str(4096),
+            'innodb_buffer_pool_size': dynamic_config["innodb_buffer_pool_size"],
+            'loose_rds_audit_log_buffer_size': dynamic_config["loose_rds_audit_log_buffer_size"],
+            'max_connections': dynamic_config["max_connections"],
+            'max_user_connections': dynamic_config["max_user_connections"],
+            'mysqlx_max_connections': dynamic_config["mysqlx_max_connections"],
+            'loose_galaxy_max_connections': dynamic_config["loose_galaxy_max_connections"],
             'default_time_zone': '+08:00',
             'loose_new_rpc': self.new_rpc_enabled,
         }
@@ -242,16 +243,18 @@ def update_config(self, **override):
 
         mgr.update(template_config_file, overrides=override_configs)
 
+        MySQLConfigManager.write_config_version(self.file_config_version, self.file_config_override_version)
+
     def check_health(self, check_leader_readiness) -> bool:
         with self.context.new_connection() as conn:
             conn.ping()
             return True
 
-    def _reset_cluster_info(self, learner):
+    def _reset_cluster_info(self, learner, local=False):
         args = {
             'cluster-force-change-meta': 'ON',
-            'loose-cluster-info': self._get_cluster_info(learner=learner),
+            'loose-cluster-info': self._get_cluster_info(learner=learner, local=local),
             'user': 'mysql',
         }
         if learner:
@@ -269,3 +272,5 @@ def handle_indicate(self, indicate: str):
             self._reset_cluster_info(learner=False)
         elif 'reset-cluster-info-to-learner' == indicate:
             self._reset_cluster_info(learner=True)
+        elif 'reset-cluster-info-to-local' == indicate:
+            self._reset_cluster_info(learner=False, local=True)
diff --git a/tools/xstore/core/engine/util/__init__.py b/tools/xstore/core/engine/util/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/tools/xstore/core/engine/util/config_util.py b/tools/xstore/core/engine/util/config_util.py
new file mode 100644
index 0000000..b129c32
--- /dev/null
+++ b/tools/xstore/core/engine/util/config_util.py
@@ -0,0 +1,48 @@
+# Copyright 2021 Alibaba Group Holding Limited.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+mem_1G = 1 << 30
+mem_10G = 10 * mem_1G
+mem_32G = 32 * mem_1G
+mem_128G = 128 * mem_1G
+
+
+def get_dynamic_mysql_cnf_by_spec(cpu, mem):
+    result = {}
+    buffer_pool_size = 0.7 * mem
+    if mem <= mem_10G:
+        buffer_pool_size = 0.3 * mem
+    elif mem <= mem_32G:
+        buffer_pool_size = 0.5 * mem
+    elif mem <= mem_128G:
+        buffer_pool_size = 0.625 * mem
+    buffer_pool_size = int(buffer_pool_size)
+    result["innodb_buffer_pool_size"] = str(buffer_pool_size)
+    result["loose_rds_audit_log_buffer_size"] = str(16777216)
+    loose_rds_kill_connections = 20
+    result["loose_rds_kill_connections"] = str(loose_rds_kill_connections)
+    maintain_max_connections = 512
+    result["loose_rds_reserved_connections"] = str(maintain_max_connections)
+    result["loose_maintain_max_connections"] = str(maintain_max_connections)
+    max_user_connections = int(cpu)
+    result["max_user_connections"] = str(max_user_connections)
+    max_connections = loose_rds_kill_connections + maintain_max_connections + max_user_connections
+    result["max_connections"] = str(max_connections)
+    result["default_time_zone"] = "+08:00"
+    result["polarx_max_allowed_packet"] = str(1073741824)
+    result["polarx_max_connections"] = str(max_user_connections)
+    result["loose_polarx_max_connections"] = str(max_user_connections)
+    result["loose_galaxy_max_connections"] = str(max_user_connections)
+    result["mysqlx_max_connections"] = str(max_user_connections)
+    return result
diff --git a/tools/xstore/entrypoint.py b/tools/xstore/entrypoint.py
index 3e334bb..296f259 100755
--- a/tools/xstore/entrypoint.py
+++ b/tools/xstore/entrypoint.py
@@ -110,6 +110,8 @@ def _start(initialize, restore_prepare, debug, ignore_indicates, cluster_start_i
         engine.update_config()
         # mv log file if log_data_separation config changes
         engine.try_move_log_file()
+        # try to flush metadata
+        engine.try_handle_indicate()
 
     engine.bootstrap()
 
diff --git a/tools/xstore/requirements.txt b/tools/xstore/requirements.txt
index 7233d4c..ed6b91f 100644
--- a/tools/xstore/requirements.txt
+++ b/tools/xstore/requirements.txt
@@ -2,4 +2,5 @@ click==7.1.2
 jsons==1.4.0
 PyMySQL==1.0.2
 typish==1.9.1
-oss2==2.15.0
\ No newline at end of file
+oss2==2.15.0
+wget==3.2
\ No newline at end of file
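The new config_util replaces the flat 5/8-of-memory buffer pool heuristic with memory tiers (30% up to 10 GiB, 50% up to 32 GiB, 62.5% up to 128 GiB, 70% above), and derives connection limits from the pod's CPU limit instead of the former fixed 65535/4096 values. A minimal sketch of the expected values, assuming tools/xstore is on PYTHONPATH:

    from core.engine.util import config_util

    cnf = config_util.get_dynamic_mysql_cnf_by_spec(cpu=16, mem=64 * (1 << 30))
    print(cnf['innodb_buffer_pool_size'])  # 62.5% of 64 GiB -> '42949672960'
    print(cnf['max_user_connections'])     # equals the cpu limit -> '16'
    print(cnf['max_connections'])          # 20 kill + 512 maintain + 16 user -> '548'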