diff --git a/.github/labeler.yml b/.github/labeler.yml
index c3b6d2e73d4..e161cd2ca75 100644
--- a/.github/labeler.yml
+++ b/.github/labeler.yml
@@ -41,3 +41,10 @@ build:
workflow:
- '.github/**/*.yml'
+ - '.github/**/*.yaml'
+
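+# label PRs that touch the Go SDK (go/) or the extensions directory (extensions/)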
+go-sdk:
+ - 'go/**/*'
+
+extensions:
+ - 'extensions/**/*'
diff --git a/.github/workflows/cicd.yaml b/.github/workflows/cicd.yaml
index 10da9a7a41a..a25218cb59e 100644
--- a/.github/workflows/cicd.yaml
+++ b/.github/workflows/cicd.yaml
@@ -213,11 +213,17 @@ jobs:
uses: actions/download-artifact@v2
with:
name: release-artifacts
+
+ - name: generate signature
+ run: |
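+ # SHA256SUM is uploaded as a release asset alongside the tarballs by the Release step below, so downloads can be verified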
+ sha256sum openmldb-*.tar.gz > SHA256SUM
+
- name: Release
if: ${{ startsWith(github.ref, 'refs/tags/v') }}
uses: softprops/action-gh-release@v1
with:
files: |
openmldb-*.tar.gz
+ SHA256SUM
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
diff --git a/.github/workflows/coverage.yml b/.github/workflows/coverage.yml
index 5240e13a07d..19916d9a6c4 100644
--- a/.github/workflows/coverage.yml
+++ b/.github/workflows/coverage.yml
@@ -37,7 +37,7 @@ jobs:
TESTING_ENABLE: ON
SQL_PYSDK_ENABLE: OFF
SQL_JAVASDK_ENABLE: OFF
- NPROC: 8
+ NPROC: 2
BUILD_SHARED_LIBS: ON
steps:
- uses: actions/checkout@v3
@@ -72,7 +72,7 @@ jobs:
uses: codecov/codecov-action@v3
with:
files: build/coverage.info
- name: coverage
+ name: coverage-cpp
fail_ci_if_error: true
verbose: true
diff --git a/.github/workflows/devops-test.yml b/.github/workflows/devops-test.yml
new file mode 100644
index 00000000000..d139c0f8bdc
--- /dev/null
+++ b/.github/workflows/devops-test.yml
@@ -0,0 +1,196 @@
+name: DEVOPS-TEST
+
+on:
+ workflow_dispatch:
+ inputs:
+ PRE_UPGRADE_VERSION:
+ description: 'version before upgrade'
+ required: false
+ default: ''
+ EXEC_TEST_TYPE:
+ description: 'Which tests need to be executed? The options are all, upgrade, node_failure, node_expansion'
+ required: true
+ default: 'all'
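+ # these inputs are read below via github.event.inputs.* in each job's if: condition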
+
+env:
+ GIT_SUBMODULE_STRATEGY: recursive
+ HYBRIDSE_SOURCE: local
+
+jobs:
+ node-failure-test-cluster:
+ if: ${{ github.event.inputs.EXEC_TEST_TYPE == 'all' || github.event.inputs.EXEC_TEST_TYPE == 'node_failure' }}
+ runs-on: ubuntu-latest
+ container:
+ image: ghcr.io/4paradigm/hybridsql:latest
+ env:
+ OS: linux
+ steps:
+ - uses: actions/checkout@v2
+ - name: build jsdk and package
+ run: |
+ make configure CMAKE_INSTALL_PREFIX=openmldb-linux
+ make SQL_JAVASDK_ENABLE=ON && make SQL_JAVASDK_ENABLE=ON install
+ tar -zcvf openmldb-linux.tar.gz openmldb-linux
+ echo "openmldb-pkg:"
+ ls -al
+ - name: test
+ run: source /root/.bashrc && bash test/steps/openmldb-devops-test.sh -c test_cluster.xml -t node_failure
+ - name: TEST Results
+ if: always()
+ uses: EnricoMi/publish-unit-test-result-action@v1
+ with:
+ files: test/integration-test/openmldb-test-java/openmldb-devops-test/target/surefire-reports/TEST-*.xml
+ check_name: "node-failure-test-cluster Report"
+ comment_title: "node-failure-test-cluster Report"
+
+ node-failure-test-single:
+ if: ${{ github.event.inputs.EXEC_TEST_TYPE == 'all' || github.event.inputs.EXEC_TEST_TYPE == 'node_failure' }}
+ runs-on: ubuntu-latest
+ container:
+ image: ghcr.io/4paradigm/hybridsql:latest
+ env:
+ OS: linux
+ steps:
+ - uses: actions/checkout@v2
+ - name: build jsdk and package
+ run: |
+ make configure CMAKE_INSTALL_PREFIX=openmldb-linux
+ make SQL_JAVASDK_ENABLE=ON && make SQL_JAVASDK_ENABLE=ON install
+ tar -zcvf openmldb-linux.tar.gz openmldb-linux
+ echo "openmldb-pkg:"
+ ls -al
+ - name: test
+ run: source /root/.bashrc && bash test/steps/openmldb-devops-test.sh -c test_single.xml -t node_failure
+ - name: TEST Results
+ if: always()
+ uses: EnricoMi/publish-unit-test-result-action@v1
+ with:
+ files: test/integration-test/openmldb-test-java/openmldb-devops-test/target/surefire-reports/TEST-*.xml
+ check_name: "node-failure-test-single Report"
+ comment_title: "node-failure-test-single Report"
+
+ node-expansion-test-cluster:
+ if: ${{ github.event.inputs.EXEC_TEST_TYPE == 'all' || github.event.inputs.EXEC_TEST_TYPE == 'node_expansion' }}
+ runs-on: ubuntu-latest
+ container:
+ image: ghcr.io/4paradigm/hybridsql:latest
+ env:
+ OS: linux
+ steps:
+ - uses: actions/checkout@v2
+ - name: build jsdk and package
+ run: |
+ make configure CMAKE_INSTALL_PREFIX=openmldb-linux
+ make SQL_JAVASDK_ENABLE=ON && make SQL_JAVASDK_ENABLE=ON install
+ tar -zcvf openmldb-linux.tar.gz openmldb-linux
+ echo "openmldb-pkg:"
+ ls -al
+ - name: test
+ run: source /root/.bashrc && bash test/steps/openmldb-devops-test.sh -c test_node_expansion.xml -t node_expansion
+ - name: TEST Results
+ if: always()
+ uses: EnricoMi/publish-unit-test-result-action@v1
+ with:
+ files: test/integration-test/openmldb-test-java/openmldb-devops-test/target/surefire-reports/TEST-*.xml
+ check_name: "node-expansion-test-cluster Report"
+ comment_title: "node-expansion-test-cluster Report"
+
+ upgrade-test-cluster:
+ if: ${{ github.event.inputs.EXEC_TEST_TYPE == 'all' || github.event.inputs.EXEC_TEST_TYPE == 'upgrade' }}
+ runs-on: ubuntu-latest
+ container:
+ image: ghcr.io/4paradigm/hybridsql:latest
+ env:
+ OS: linux
+ steps:
+ - uses: actions/checkout@v2
+ - name: build jsdk and package
+ run: |
+ make configure CMAKE_INSTALL_PREFIX=openmldb-linux
+ make SQL_JAVASDK_ENABLE=ON && make SQL_JAVASDK_ENABLE=ON install
+ tar -zcvf openmldb-linux.tar.gz openmldb-linux
+ echo "openmldb-pkg:"
+ ls -al
+ - name: test-memory
+ run: source /root/.bashrc && bash test/steps/openmldb-devops-test.sh -v ${{ github.event.inputs.PRE_UPGRADE_VERSION }} -c test_upgrade.xml -t upgrade -s "memory"
+ - name: upgrade results
+ if: always()
+ uses: EnricoMi/publish-unit-test-result-action@v1
+ with:
+ files: test/integration-test/openmldb-test-java/openmldb-devops-test/target/surefire-reports/TEST-*.xml
+ check_name: "upgrade-test-cluster Report"
+ comment_title: "upgrade-test-cluster Report"
+ - name: sdk results
+ if: always()
+ uses: EnricoMi/publish-unit-test-result-action@v1
+ with:
+ files: test/integration-test/openmldb-test-java/openmldb-sdk-test/target/surefire-reports/TEST-*.xml
+ check_name: "java-sdk-cluster-memory-0 Report"
+ comment_title: "java-sdk-cluster-memory-0 Report"
+ upgrade-test-single:
+ if: ${{ github.event.inputs.EXEC_TEST_TYPE == 'all' || github.event.inputs.EXEC_TEST_TYPE == 'upgrade' }}
+ runs-on: ubuntu-latest
+ container:
+ image: ghcr.io/4paradigm/hybridsql:latest
+ env:
+ OS: linux
+ steps:
+ - uses: actions/checkout@v2
+ - name: build jsdk and package
+ run: |
+ make configure CMAKE_INSTALL_PREFIX=openmldb-linux
+ make SQL_JAVASDK_ENABLE=ON && make SQL_JAVASDK_ENABLE=ON install
+ tar -zcvf openmldb-linux.tar.gz openmldb-linux
+ echo "openmldb-pkg:"
+ ls -al
+ - name: test-memory
+ run: source /root/.bashrc && bash test/steps/openmldb-devops-test.sh -v ${{ github.event.inputs.PRE_UPGRADE_VERSION }} -c test_upgrade_single.xml -t upgrade -s "memory"
+ - name: upgrade results
+ if: always()
+ uses: EnricoMi/publish-unit-test-result-action@v1
+ with:
+ files: test/integration-test/openmldb-test-java/openmldb-devops-test/target/surefire-reports/TEST-*.xml
+ check_name: "upgrade-test-single Report"
+ comment_title: "upgrade-test-single Report"
+ - name: sdk results
+ if: always()
+ uses: EnricoMi/publish-unit-test-result-action@v1
+ with:
+ files: test/integration-test/openmldb-test-java/openmldb-sdk-test/target/surefire-reports/TEST-*.xml
+ check_name: "single-java-sdk-cluster-memory-0 Report"
+ comment_title: "single-java-sdk-cluster-memory-0 Report"
+ upgrade-test-cluster-SSD:
+ if: ${{ github.event.inputs.EXEC_TEST_TYPE == 'all' || github.event.inputs.EXEC_TEST_TYPE == 'upgrade' }}
+ runs-on: ubuntu-latest
+ container:
+ image: ghcr.io/4paradigm/hybridsql:latest
+ env:
+ OS: linux
+ steps:
+ - uses: actions/checkout@v2
+ - name: build jsdk and package
+ run: |
+ make configure CMAKE_INSTALL_PREFIX=openmldb-linux
+ make SQL_JAVASDK_ENABLE=ON && make SQL_JAVASDK_ENABLE=ON install
+ tar -zcvf openmldb-linux.tar.gz openmldb-linux
+ echo "openmldb-pkg:"
+ ls -al
+ - name: test-memory
+ run: source /root/.bashrc && bash test/steps/openmldb-devops-test.sh -v ${{ github.event.inputs.PRE_UPGRADE_VERSION }} -c test_upgrade.xml -t upgrade -s "ssd"
+ - name: upgrade results
+ if: always()
+ uses: EnricoMi/publish-unit-test-result-action@v1
+ with:
+ files: test/integration-test/openmldb-test-java/openmldb-devops-test/target/surefire-reports/TEST-*.xml
+ check_name: "upgrade-test-cluster ssd Report"
+ comment_title: "upgrade-test-cluster ssd Report"
+ - name: sdk results
+ if: always()
+ uses: EnricoMi/publish-unit-test-result-action@v1
+ with:
+ files: test/integration-test/openmldb-test-java/openmldb-sdk-test/target/surefire-reports/TEST-*.xml
+ check_name: "java-sdk-cluster-ssd-0 Report"
+ comment_title: "java-sdk-cluster-ssd-0 Report"
+
+
+
diff --git a/.github/workflows/doc.yml b/.github/workflows/doc.yml
new file mode 100644
index 00000000000..81eb952982e
--- /dev/null
+++ b/.github/workflows/doc.yml
@@ -0,0 +1,77 @@
+name: documents
+
+# The doc workflow deploys to https://4paradigm.github.io/OpenMLDB/
+#
+# Deployment requires the main (default) branch and all vX.Y branches to exist.
+# The workflow triggers only on the main branch, based on the model that all
+# patches to vX.Y branches first go through main.
+on:
+ push:
+ branches:
+ - main
+ paths:
+ - .github/workflows/doc.yml
+ - 'docs/**'
+ pull_request:
+ paths:
+ - .github/workflows/doc.yml
+ - 'docs/**'
+
+# Sets permissions of the GITHUB_TOKEN to allow deployment to GitHub Pages
+permissions:
+ contents: read
+ pages: write
+ id-token: write
+
+# Allow one concurrent deployment
+concurrency:
+ group: pages-${{ github.ref }}
+ cancel-in-progress: true
+
+jobs:
+ build:
+ runs-on: ubuntu-latest
+ steps:
+ - uses: actions/checkout@v3
+ with:
+ fetch-depth: 0
+
+ - uses: actions/setup-python@v4
+ with:
+ python-version: '3.10'
+
+ - name: setup poetry
+ run: |
+ pipx install poetry
+
+ - name: doc build (pr)
+ working-directory: docs
+ if: github.event_name == 'pull_request'
+ run: |
+ make all-local
+
+ - name: doc build (branch)
+ working-directory: docs
+ if: github.event_name == 'push'
+ run: |
+ make
+
+ - name: upload artifact
+ if: github.event_name == 'push'
+ uses: actions/upload-pages-artifact@v1
+ with:
+ path: docs/build/
+
+ # Deployment job
+ deploy:
+ environment:
+ name: github-pages
+ url: ${{ steps.deployment.outputs.page_url }}
+ runs-on: ubuntu-latest
+ needs: build
+ if: github.event_name == 'push'
+ steps:
+ # This action expects an artifact named github-pages to have been created prior to execution.
+ - name: Deploy to GitHub Pages
+ id: deployment
+ uses: actions/deploy-pages@v1
diff --git a/.github/workflows/integration-test-src.yml b/.github/workflows/integration-test-src.yml
index e0d5ed692c0..2668b16179b 100644
--- a/.github/workflows/integration-test-src.yml
+++ b/.github/workflows/integration-test-src.yml
@@ -1,6 +1,9 @@
name: INTEGRATION-TEST-SRC
on:
+# pull_request:
+# schedule:
+# - cron: '0 1 * * *'
workflow_dispatch:
inputs:
EXEC_TEST_TYPE:
@@ -60,7 +63,7 @@ jobs:
# check_name: Java SDK Test Standalone1 SRC Report
# comment_title: Java SDK Test Standalone1 SRC Report
- java-sdk-test-cluster-0:
+ java-sdk-cluster-memory-0:
if: ${{ github.event.inputs.EXEC_TEST_TYPE == 'all' || github.event.inputs.EXEC_TEST_TYPE == 'java' }}
runs-on: ubuntu-latest
container:
@@ -74,18 +77,122 @@ jobs:
make configure CMAKE_INSTALL_PREFIX=openmldb-linux
make SQL_JAVASDK_ENABLE=ON && make SQL_JAVASDK_ENABLE=ON install
tar -zcvf openmldb-linux.tar.gz openmldb-linux
+ echo "openmldb-pkg:"
+ ls -al
- name: test
- run: source /root/.bashrc && bash test/steps/openmldb-sdk-test-java.sh -b SRC -c test_all.xml -d cluster -l "0"
+ run: source /root/.bashrc && bash test/steps/openmldb-sdk-test-java-src.sh -c test_cluster.xml -d cluster -l "0" -s "memory"
- name: TEST Results
if: always()
uses: EnricoMi/publish-unit-test-result-action@v1
with:
files: test/integration-test/openmldb-test-java/openmldb-sdk-test/target/surefire-reports/TEST-*.xml
- comment_mode: "create new"
- check_name: "Java SDK Test Cluster0 SRC Report"
- comment_title: "Java SDK Test Cluster0 SRC Report"
+ check_name: "SRC java-sdk-cluster-memory-0 Report"
+ comment_title: "SRC java-sdk-cluster-memory-0 Report"
+ - name: tar test report
+ if: ${{ failure() }}
+ run: tar -zcvf allure-results.tar.gz test/integration-test/openmldb-test-java/openmldb-sdk-test/target/allure-results
+ - name: Send Email
+ if: ${{ failure() }}
+ uses: dawidd6/action-send-mail@master
+ with:
+ server_address: smtp.partner.outlook.cn
+ server_port: 587
+ username: ${{ secrets.MAIL_USERNAME }}
+ password: ${{ secrets.MAIL_PASSWORD }}
+ subject: OpenMLDB Memory Test
+ body: OpenMLDB Memory Test Failed
+ to: ${{ secrets.MAIL_TO }}
+ from: GitHub Actions
+ content_type: text/plain
+ attachments: allure-results.tar.gz
+
+ java-sdk-cluster-memory-1:
+ if: ${{ github.event.inputs.EXEC_TEST_TYPE == 'all' || github.event.inputs.EXEC_TEST_TYPE == 'java' }}
+ runs-on: ubuntu-latest
+ container:
+ image: ghcr.io/4paradigm/hybridsql:latest
+ env:
+ OS: linux
+ steps:
+ - uses: actions/checkout@v2
+ - name: build jsdk and package
+ run: |
+ make configure CMAKE_INSTALL_PREFIX=openmldb-linux
+ make SQL_JAVASDK_ENABLE=ON && make SQL_JAVASDK_ENABLE=ON install
+ tar -zcvf openmldb-linux.tar.gz openmldb-linux
+ echo "openmldb-pkg:"
+ ls -al
+ - name: test
+ run: source /root/.bashrc && bash test/steps/openmldb-sdk-test-java-src.sh -c test_cluster.xml -d cluster -l "1,2,3,4,5" -s "memory"
+ - name: TEST Results
+ if: always()
+ uses: EnricoMi/publish-unit-test-result-action@v1
+ with:
+ files: test/integration-test/openmldb-test-java/openmldb-sdk-test/target/surefire-reports/TEST-*.xml
+ check_name: SRC java-sdk-cluster-memory-1 Report
+ comment_title: SRC java-sdk-cluster-memory-1 Report
+ - name: tar test report
+ if: ${{ failure() }}
+ run: tar -zcvf allure-results.tar.gz test/integration-test/openmldb-test-java/openmldb-sdk-test/target/allure-results
+ - name: Send Email
+ if: ${{ failure() }}
+ uses: dawidd6/action-send-mail@master
+ with:
+ server_address: smtp.partner.outlook.cn
+ server_port: 587
+ username: ${{ secrets.MAIL_USERNAME }}
+ password: ${{ secrets.MAIL_PASSWORD }}
+ subject: OpenMLDB Memory 1 Test
+ body: OpenMLDB Memory 1 Test Failed
+ to: ${{ secrets.MAIL_TO }}
+ from: GitHub Actions
+ content_type: text/plain
+ attachments: allure-results.tar.gz
+
+ java-sdk-cluster-ssd-0:
+ if: ${{ github.event.inputs.EXEC_TEST_TYPE == 'all' || github.event.inputs.EXEC_TEST_TYPE == 'java' }}
+ runs-on: ubuntu-latest
+ container:
+ image: ghcr.io/4paradigm/hybridsql:latest
+ env:
+ OS: linux
+ steps:
+ - uses: actions/checkout@v2
+ - name: build jsdk and package
+ run: |
+ make configure CMAKE_INSTALL_PREFIX=openmldb-linux
+ make SQL_JAVASDK_ENABLE=ON && make SQL_JAVASDK_ENABLE=ON install
+ tar -zcvf openmldb-linux.tar.gz openmldb-linux
+ echo "openmldb-pkg:"
+ ls -al
+ - name: test
+ run: source /root/.bashrc && bash test/steps/openmldb-sdk-test-java-src.sh -c test_cluster_disk.xml -d cluster -l "0" -s "ssd"
+ - name: TEST Results
+ if: always()
+ uses: EnricoMi/publish-unit-test-result-action@v1
+ with:
+ files: test/integration-test/openmldb-test-java/openmldb-sdk-test/target/surefire-reports/TEST-*.xml
+ check_name: "SRC java-sdk-cluster-ssd-0 Report"
+ comment_title: "SRC java-sdk-cluster-ssd-0 Report"
+ - name: tar test report
+ if: ${{ failure() }}
+ run: tar -zcvf allure-results.tar.gz test/integration-test/openmldb-test-java/openmldb-sdk-test/target/allure-results
+ - name: Send Email
+ if: ${{ failure() }}
+ uses: dawidd6/action-send-mail@master
+ with:
+ server_address: smtp.partner.outlook.cn
+ server_port: 587
+ username: ${{ secrets.MAIL_USERNAME }}
+ password: ${{ secrets.MAIL_PASSWORD }}
+ subject: OpenMLDB SSD Test
+ body: OpenMLDB SSD Test Failed
+ to: ${{ secrets.MAIL_TO }}
+ from: GitHub Actions
+ content_type: text/plain
+ attachments: allure-results.tar.gz
- java-sdk-test-cluster-1:
+ java-sdk-cluster-hdd-0:
if: ${{ github.event.inputs.EXEC_TEST_TYPE == 'all' || github.event.inputs.EXEC_TEST_TYPE == 'java' }}
runs-on: ubuntu-latest
container:
@@ -99,15 +206,34 @@ jobs:
make configure CMAKE_INSTALL_PREFIX=openmldb-linux
make SQL_JAVASDK_ENABLE=ON && make SQL_JAVASDK_ENABLE=ON install
tar -zcvf openmldb-linux.tar.gz openmldb-linux
+ echo "openmldb-pkg:"
+ ls -al
- name: test
- run: source /root/.bashrc && bash test/steps/openmldb-sdk-test-java.sh -b SRC -c test_all.xml -d cluster -l "1,2,3,4,5"
+ run: source /root/.bashrc && bash test/steps/openmldb-sdk-test-java-src.sh -c test_cluster_disk.xml -d cluster -l "0" -s "hdd"
- name: TEST Results
if: always()
uses: EnricoMi/publish-unit-test-result-action@v1
with:
files: test/integration-test/openmldb-test-java/openmldb-sdk-test/target/surefire-reports/TEST-*.xml
- check_name: Java SDK Test Cluster1 SRC Report
- comment_title: Java SDK Test Cluster1 SRC Report
+ check_name: "SRC java-sdk-cluster-hdd-0 Report"
+ comment_title: "SRC java-sdk-cluster-hdd-0 Report"
+ - name: tar test report
+ if: ${{ failure() }}
+ run: tar -zcvf allure-results.tar.gz test/integration-test/openmldb-test-java/openmldb-sdk-test/target/allure-results
+ - name: Send Email
+ if: ${{ failure() }}
+ uses: dawidd6/action-send-mail@master
+ with:
+ server_address: smtp.partner.outlook.cn
+ server_port: 587
+ username: ${{ secrets.MAIL_USERNAME }}
+ password: ${{ secrets.MAIL_PASSWORD }}
+ subject: OpenMLDB HDD Test
+ body: OpenMLDB HDD Test Failed
+ to: ${{ secrets.MAIL_TO }}
+ from: GitHub Actions
+ content_type: text/plain
+ attachments: allure-results.tar.gz
# standalone-cli-test-0:
# if: ${{ github.event.inputs.EXEC_TEST_TYPE == 'all' || github.event.inputs.EXEC_TEST_TYPE == 'standalone-cli' || github.event.inputs.EXEC_TEST_TYPE == 'cli' }}
@@ -157,29 +283,30 @@ jobs:
# check_name: Standalone CLI1 Test SRC Report
# comment_title: Standalone CLI1 Test SRC Report
- python-sdk-test-standalone-0:
- if: ${{ github.event.inputs.EXEC_TEST_TYPE == 'all' || github.event.inputs.EXEC_TEST_TYPE == 'python' }}
- runs-on: ubuntu-latest
- container:
- image: ghcr.io/4paradigm/hybridsql:latest
- env:
- OS: linux
- steps:
- - uses: actions/checkout@v2
- - name: build pysdk
- run: |
- make thirdparty
- mkdir -p build
- source /root/.bashrc && cd build && cmake -DSQL_PYSDK_ENABLE=ON -DSQL_JAVASDK_ENABLE=OFF -DTESTING_ENABLE=OFF .. && make -j$(nproc) cp_python_sdk_so openmldb && cd ../
- - name: test
- run: source /root/.bashrc && bash test/steps/openmldb-sdk-test-python.sh -b SRC -d standalone -l "0"
- - name: upload test results
- if: always()
- uses: actions/upload-artifact@v2
- with:
- name: python-sdk-standalone-0-src-${{ github.sha }}
- path: |
- python/report/allure-results
+# python-sdk-test-standalone-0:
+# if: ${{ github.event.inputs.EXEC_TEST_TYPE == 'all' || github.event.inputs.EXEC_TEST_TYPE == 'python' }}
+# runs-on: ubuntu-latest
+# container:
+# image: ghcr.io/4paradigm/hybridsql:latest
+# env:
+# OS: linux
+# steps:
+# - uses: actions/checkout@v2
+# - name: build pysdk
+# run: |
+# make thirdparty
+# mkdir -p build
+# source /root/.bashrc && cd build && cmake -DSQL_PYSDK_ENABLE=ON -DSQL_JAVASDK_ENABLE=OFF -DTESTING_ENABLE=OFF .. && make -j$(nproc) cp_python_sdk_so openmldb && cd ../
+# - name: test
+# run: source /root/.bashrc && bash test/steps/openmldb-sdk-test-python.sh -b SRC -d standalone -l "0"
+# - name: upload test results
+# if: always()
+# uses: actions/upload-artifact@v2
+# with:
+# name: python-sdk-standalone-0-src-${{ github.sha }}
+# path: |
+# python/report/allure-results
+
# - name: allure-report
# uses: simple-elf/allure-report-action@master
# if: always()
@@ -197,74 +324,74 @@ jobs:
# PUBLISH_BRANCH: gh-pages
# PUBLISH_DIR: allure-history
- python-sdk-test-standalone-1:
- if: ${{ github.event.inputs.EXEC_TEST_TYPE == 'all' || github.event.inputs.EXEC_TEST_TYPE == 'python' }}
- runs-on: ubuntu-latest
- container:
- image: ghcr.io/4paradigm/hybridsql:latest
- env:
- OS: linux
- steps:
- - uses: actions/checkout@v2
- - name: build pysdk
- run: |
- make thirdparty
- mkdir -p build
- source /root/.bashrc && cd build && cmake -DSQL_PYSDK_ENABLE=ON -DSQL_JAVASDK_ENABLE=OFF -DTESTING_ENABLE=OFF .. && make -j$(nproc) cp_python_sdk_so openmldb && cd ../
- - name: test
- run: source /root/.bashrc && bash test/steps/openmldb-sdk-test-python.sh -b SRC -d standalone -l "1,2,3,4,5"
- - name: upload test results
- if: always()
- uses: actions/upload-artifact@v2
- with:
- name: python-sdk-standalone-1-src-${{ github.sha }}
- path: |
- python/report/allure-results
-
- apiserver-test:
- if: ${{ github.event.inputs.EXEC_TEST_TYPE == 'all' || github.event.inputs.EXEC_TEST_TYPE == 'apiserver' }}
- runs-on: ubuntu-latest
- container:
- image: ghcr.io/4paradigm/hybridsql:latest
- env:
- OS: linux
- steps:
- - uses: actions/checkout@v2
- - name: build jsdk and package
- run: |
- make thirdparty
- mkdir -p build
- source /root/.bashrc && cd build && cmake -DSQL_PYSDK_ENABLE=OFF -DSQL_JAVASDK_ENABLE=ON -DTESTING_ENABLE=OFF .. && make -j$(nproc) sql_javasdk_package openmldb && cd ../
- - name: test
- run: source /root/.bashrc && bash test/steps/openmldb-apiserver-test.sh -b SRC -c test_all.xml -d standalone -l "0"
- - name: TEST Results
- if: always()
- uses: EnricoMi/publish-unit-test-result-action@v1
- with:
- files: test/integration-test/openmldb-test-java/openmldb-http-test/target/surefire-reports/TEST-*.xml
- check_name: APIServer SRC Report
- comment_title: APIServer SRC Report
+# python-sdk-test-standalone-1:
+# if: ${{ github.event.inputs.EXEC_TEST_TYPE == 'all' || github.event.inputs.EXEC_TEST_TYPE == 'python' }}
+# runs-on: ubuntu-latest
+# container:
+# image: ghcr.io/4paradigm/hybridsql:latest
+# env:
+# OS: linux
+# steps:
+# - uses: actions/checkout@v2
+# - name: build pysdk
+# run: |
+# make thirdparty
+# mkdir -p build
+# source /root/.bashrc && cd build && cmake -DSQL_PYSDK_ENABLE=ON -DSQL_JAVASDK_ENABLE=OFF -DTESTING_ENABLE=OFF .. && make -j$(nproc) cp_python_sdk_so openmldb && cd ../
+# - name: test
+# run: source /root/.bashrc && bash test/steps/openmldb-sdk-test-python.sh -b SRC -d standalone -l "1,2,3,4,5"
+# - name: upload test results
+# if: always()
+# uses: actions/upload-artifact@v2
+# with:
+# name: python-sdk-standalone-1-src-${{ github.sha }}
+# path: |
+# python/report/allure-results
- batch-test:
- if: ${{ github.event.inputs.EXEC_TEST_TYPE == 'all' || github.event.inputs.EXEC_TEST_TYPE == 'batch' }}
- runs-on: ubuntu-latest
- container:
- image: ghcr.io/4paradigm/hybridsql:latest
- env:
- OS: linux
- steps:
- - uses: actions/checkout@v2
- - name: build
- run: |
- make thirdparty
- mkdir -p build
- source /root/.bashrc && cd build && cmake -DSQL_PYSDK_ENABLE=OFF -DSQL_JAVASDK_ENABLE=ON -DTESTING_ENABLE=OFF .. && make -j$(nproc) sql_javasdk_package openmldb && cd ../
- - name: test
- run: source /root/.bashrc && bash test/steps/openmldb-batch-test.sh -b SRC
- - name: TEST Results
- if: always()
- uses: EnricoMi/publish-unit-test-result-action@v1
- with:
- files: test/batch-test/openmldb-batch-test/target/surefire-reports/TEST-*.xml
- check_name: Batch Test SRC Report
- comment_title: Batch Test SRC Report
+# apiserver-test:
+# if: ${{ github.event.inputs.EXEC_TEST_TYPE == 'all' || github.event.inputs.EXEC_TEST_TYPE == 'apiserver' }}
+# runs-on: ubuntu-latest
+# container:
+# image: ghcr.io/4paradigm/hybridsql:latest
+# env:
+# OS: linux
+# steps:
+# - uses: actions/checkout@v2
+# - name: build jsdk and package
+# run: |
+# make thirdparty
+# mkdir -p build
+# source /root/.bashrc && cd build && cmake -DSQL_PYSDK_ENABLE=OFF -DSQL_JAVASDK_ENABLE=ON -DTESTING_ENABLE=OFF .. && make -j$(nproc) sql_javasdk_package openmldb && cd ../
+# - name: test
+# run: source /root/.bashrc && bash test/steps/openmldb-apiserver-test.sh -b SRC -c test_all.xml -d standalone -l "0"
+# - name: TEST Results
+# if: always()
+# uses: EnricoMi/publish-unit-test-result-action@v1
+# with:
+# files: test/integration-test/openmldb-test-java/openmldb-http-test/target/surefire-reports/TEST-*.xml
+# check_name: APIServer SRC Report
+# comment_title: APIServer SRC Report
+#
+# batch-test:
+# if: ${{ github.event.inputs.EXEC_TEST_TYPE == 'all' || github.event.inputs.EXEC_TEST_TYPE == 'batch' }}
+# runs-on: ubuntu-latest
+# container:
+# image: ghcr.io/4paradigm/hybridsql:latest
+# env:
+# OS: linux
+# steps:
+# - uses: actions/checkout@v2
+# - name: build
+# run: |
+# make thirdparty
+# mkdir -p build
+# source /root/.bashrc && cd build && cmake -DSQL_PYSDK_ENABLE=OFF -DSQL_JAVASDK_ENABLE=ON -DTESTING_ENABLE=OFF .. && make -j$(nproc) sql_javasdk_package openmldb && cd ../
+# - name: test
+# run: source /root/.bashrc && bash test/steps/openmldb-batch-test.sh -b SRC
+# - name: TEST Results
+# if: always()
+# uses: EnricoMi/publish-unit-test-result-action@v1
+# with:
+# files: test/batch-test/openmldb-batch-test/target/surefire-reports/TEST-*.xml
+# check_name: Batch Test SRC Report
+# comment_title: Batch Test SRC Report
diff --git a/.github/workflows/sdk.yml b/.github/workflows/sdk.yml
index d48dad0e178..7858616dbc4 100644
--- a/.github/workflows/sdk.yml
+++ b/.github/workflows/sdk.yml
@@ -6,24 +6,24 @@ on:
branches:
- main
paths-ignore:
- - 'docs/**'
- - 'demo/**'
- - 'docker/**'
- - 'image/**'
- - 'release/**'
- - 'tools/**'
- - '*.md'
+ - "docs/**"
+ - "demo/**"
+ - "docker/**"
+ - "image/**"
+ - "release/**"
+ - "tools/**"
+ - "*.md"
tags:
- v*
pull_request:
paths-ignore:
- - 'docs/**'
- - 'demo/**'
- - 'docker/**'
- - 'image/**'
- - 'release/**'
- - 'tools/**'
- - '*.md'
+ - "docs/**"
+ - "demo/**"
+ - "docker/**"
+ - "image/**"
+ - "release/**"
+ - "tools/**"
+ - "*.md"
workflow_dispatch:
env:
@@ -39,7 +39,7 @@ jobs:
image: ghcr.io/4paradigm/hybridsql:latest
env:
SQL_JAVASDK_ENABLE: ON
- OPENMLDB_BUILD_TARGET: 'cp_native_so openmldb'
+ OPENMLDB_BUILD_TARGET: "cp_native_so openmldb"
MAVEN_OPTS: -Duser.home=/github/home
SPARK_HOME: /tmp/spark/
steps:
@@ -47,8 +47,8 @@ jobs:
- uses: actions/setup-java@v2
with:
- distribution: 'adopt'
- java-version: '8'
+ distribution: "adopt"
+ java-version: "8"
server-id: ossrh
server-username: MAVEN_USERNAME
server-password: MAVEN_TOKEN
@@ -122,23 +122,22 @@ jobs:
- name: maven coverage
working-directory: java
run: |
- ./mvnw --batch-mode prepare-package
- ./mvnw --batch-mode scoverage:report
+ ./mvnw --batch-mode prepare-package
+ ./mvnw --batch-mode scoverage:report
- name: upload maven coverage
- uses: codecov/codecov-action@v2
+ uses: codecov/codecov-action@v3
with:
files: java/**/target/site/jacoco/jacoco.xml,java/**/target/scoverage.xml
- name: coverage
+ name: coverage-java
fail_ci_if_error: true
verbose: true
- name: stop services
run: |
- cd onebox && sh stop_all.sh && cd - || exit
+ cd onebox && ./stop_all.sh && cd - || exit
sh steps/ut_zookeeper.sh stop
-
java-sdk-mac:
# mac job for java sdk. steps are almost same with job 'java-sdk'
# except mvn deploy won't target all modules, just hybridse-native & openmldb-native
@@ -149,7 +148,7 @@ jobs:
if: github.event_name == 'push'
env:
SQL_JAVASDK_ENABLE: ON
- OPENMLDB_BUILD_TARGET: 'cp_native_so openmldb'
+ OPENMLDB_BUILD_TARGET: "cp_native_so openmldb"
NPROC: 3
steps:
- uses: actions/checkout@v3
@@ -179,8 +178,8 @@ jobs:
- uses: actions/setup-java@v2
with:
- distribution: 'adopt'
- java-version: '8'
+ distribution: "adopt"
+ java-version: "8"
server-id: ossrh
server-username: MAVEN_USERNAME
server-password: MAVEN_TOKEN
@@ -242,16 +241,16 @@ jobs:
image: ghcr.io/4paradigm/hybridsql:latest
env:
SQL_PYSDK_ENABLE: ON
- OPENMLDB_BUILD_TARGET: 'cp_python_sdk_so openmldb'
+ OPENMLDB_BUILD_TARGET: "cp_python_sdk_so openmldb"
steps:
- uses: actions/checkout@v2
- name: prepare release
if: ${{ startsWith(github.ref, 'refs/tags/v') }}
run: |
- VERSION=$(echo "${{ github.ref }}" | sed -e 's,.*/\(.*\),\1,')
- VERSION=${VERSION#v}
- bash steps/prepare_release.sh "$VERSION"
+ VERSION=$(echo "${{ github.ref }}" | sed -e 's,.*/\(.*\),\1,')
+ VERSION=${VERSION#v}
+ bash steps/prepare_release.sh "$VERSION"
- name: build pysdk and sqlalchemy
run: |
@@ -273,13 +272,13 @@ jobs:
with:
name: linux-ut-result-python-${{ github.sha }}
path: |
- python/openmldb/test/pytest.xml
+ python/openmldb_sdk/openmldb/tests/pytest.xml
- name: upload python coverage to codecov
- uses: codecov/codecov-action@v2
+ uses: codecov/codecov-action@v3
with:
- name: coverage
- files: python/test/coverage.xml
+ name: coverage-python
+ files: python/openmldb_sdk/tests/coverage.xml
fail_ci_if_error: true
verbose: true
@@ -287,8 +286,9 @@ jobs:
if: >
github.repository == '4paradigm/OpenMLDB' && startsWith(github.ref, 'refs/tags/v')
run: |
- cp python/dist/openmldb*.whl .
- twine upload openmldb-*.whl
+ cp python/openmldb_sdk/dist/openmldb*.whl .
+ cp python/openmldb_tool/dist/openmldb*.whl .
+ twine upload openmldb*.whl
env:
TWINE_USERNAME: ${{ secrets.PYPI_USERNAME }}
TWINE_PASSWORD: ${{ secrets.PYPI_PASSWORD }}
@@ -298,24 +298,16 @@ jobs:
if: github.event_name == 'push'
env:
SQL_PYSDK_ENABLE: ON
- OPENMLDB_BUILD_TARGET: 'cp_python_sdk_so openmldb'
+ OPENMLDB_BUILD_TARGET: "cp_python_sdk_so openmldb"
steps:
- uses: actions/checkout@v3
- - name: Cache thirdparty
- uses: actions/cache@v3
- with:
- path: |
- .deps/
- thirdsrc
- key: ${{ runner.os }}-thirdparty-${{ hashFiles('third-party/**/CMakeLists.txt', 'third-party/**/*.cmake', 'third-party/**/*.sh') }}
-
- name: prepare release
if: ${{ startsWith(github.ref, 'refs/tags/v') }}
run: |
- VERSION=$(echo "${{ github.ref }}" | sed -e 's,.*/\(.*\),\1,')
- VERSION=${VERSION#v}
- bash steps/prepare_release.sh "$VERSION"
+ VERSION=$(echo "${{ github.ref }}" | sed -e 's,.*/\(.*\),\1,')
+ VERSION=${VERSION#v}
+ bash steps/prepare_release.sh "$VERSION"
- name: build pysdk and sqlalchemy
run: |
@@ -337,21 +329,66 @@ jobs:
with:
name: mac-ut-result-python-${{ github.sha }}
path: |
- python/openmldb/test/pytest.xml
+ python/openmldb_sdk/openmldb/tests/pytest.xml
- name: upload to pypi
if: >
github.repository == '4paradigm/OpenMLDB' && startsWith(github.ref, 'refs/tags/v')
run: |
- cp python/dist/openmldb*.whl .
+ cp python/openmldb_sdk/dist/openmldb*.whl .
+ cp python/openmldb_tool/dist/openmldb*.whl .
twine upload openmldb-*.whl
env:
TWINE_USERNAME: ${{ secrets.PYPI_USERNAME }}
TWINE_PASSWORD: ${{ secrets.PYPI_PASSWORD }}
+ go-sdk:
+ runs-on: ubuntu-latest
+ container:
+ image: ghcr.io/4paradigm/hybridsql:latest
+ env:
+ OPENMLDB_BUILD_TARGET: "openmldb"
+ steps:
+ - uses: actions/checkout@v2
+
+ - uses: actions/setup-go@v3
+ with:
+ go-version: 1.18
+
+ - name: build openmldb
+ run: make build install
+
+ - name: start server
+ run: ./openmldb/bin/start-standalone.sh
+
+ - name: init test database
+ env:
+ OPENMLDB_NS_HOST: 127.0.0.1
+ OPENMLDB_NS_PORT: 6527
+ run: |
+ echo "CREATE DATABASE test_db;" | ./openmldb/bin/openmldb --host=$OPENMLDB_NS_HOST --port=$OPENMLDB_NS_PORT
+
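+ # the Go tests connect to the APIServer started by start-standalone.sh (host/port set below) and use the test_db database created above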
+ - name: go test
+ env:
+ OPENMLDB_APISERVER_HOST: 127.0.0.1
+ OPENMLDB_APISERVER_PORT: 8080
+ working-directory: go
+ run: go test ./... -race -covermode=atomic -coverprofile=coverage.out
+
+ - name: Upload coverage to Codecov
+ uses: codecov/codecov-action@v3
+ with:
+ name: coverage-go
+ files: go/coverage.out
+ fail_ci_if_error: true
+ verbose: true
+
+ - name: stop server
+ run: ./openmldb/bin/stop-standalone.sh
+
publish-test-results:
runs-on: ubuntu-latest
- needs: [ "java-sdk", "python-sdk" ]
+ needs: ["java-sdk", "python-sdk", "go-sdk"]
# the action will only run on 4paradigm/OpenMLDB's context, not for fork repo or dependabot
if: >
diff --git a/.gitignore b/.gitignore
index a050fe6da13..dbc3394fc30 100644
--- a/.gitignore
+++ b/.gitignore
@@ -96,12 +96,18 @@ java/hybridse-proto/src
**/scalastyle-output.xml
# test
-logs
+logs/
+out/
+allure-results/
# python builds
-/python/dist/
-/python/*.egg-info/
-/python/openmldb/native/**
-!/python/openmldb/native/__init__.pyt
-/python/test/*.xml
-
+/python/openmldb_sdk/dist/
+/python/openmldb_sdk/*.egg-info/
+/python/openmldb_sdk/openmldb/native/**
+!/python/openmldb_sdk/openmldb/native/__init__.pyt
+/python/openmldb_sdk/test/*.xml
+/python/openmldb_tool/dist/
+/python/openmldb_tool/*.egg-info/
+
+# go sdk
+!go.mod
diff --git a/.gitpod.yml b/.gitpod.yml
new file mode 100644
index 00000000000..f53233eeecb
--- /dev/null
+++ b/.gitpod.yml
@@ -0,0 +1,11 @@
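+# Gitpod tasks: 'before' runs on every workspace start, while 'init' runs once when the workspace is created (prebuild)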
+tasks:
+ - before: |
+ sudo apt update -y
+ DEBIAN_FRONTEND=noninteractive sudo apt-get install -y python3-dev build-essential autoconf git curl
+ init: |
+ make NPROC=16 # gitpod.io offers 16 CPU & 60 GB RAM
+ make install
+
+vscode:
+ extensions:
+ - ms-vscode.cpptools-extension-pack
diff --git a/CHANGELOG.md b/CHANGELOG.md
index c3e2f368739..f01c467e74b 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -1,5 +1,114 @@
# Changelog
+## [0.6.3] - 2022-10-14
+
+### Features
+- Support setting the configuration of `glog` for clients (#2482 @vagetablechicken)
+- Add the checksum of SHA256 for release packages (#2560 @team-317)
+- Support the new built-in function `unhex` (#2431 @aucker)
+- Support the readable date and time format in CLI (#2568 @dl239)
+- Support `LAST JOIN` with a subquery as the producer of a window node in the request mode (#2569 @aceforeverd)
+- Upgrade the Spark version to 3.2.1 (#2566 @tobegit3hub, #2635 @dl239)
+- Support setting the SQL cache size in SDKs (#2605 @vagetablechicken)
+- Add a new interface of `ValidateSQL` to validate the syntax of SQL (#2626 @vagetablechicken)
+- Improve the documents (#2405 #2492 #2562 #2496 #2495 #2436 #2487 #2623 @michelle-qinqin, #2543 @linjing-lab, #2584 @JourneyGo, #2567 #2583 @vagetablechicken, #2643 @dl239)
+- Other minor features (#2504 #2572 #2498 #2598 @aceforeverd, #2555 #2641 @tobegit3hub, #2550 @zhanghaohit, #2595 @Elliezza, #2592 @vagetablechicken)
+
+### Bug Fixes
+- After a nameserver restarts, deployments may not recover. (#2533 @dl239)
+- If the type of the first column is `bool`, the function `count_where` fails to resolve. (#2570 @aceforeverd)
+- Other minor bug fixes (#2540 #2577 #2625 #2655 @dl239, #2585 @snehalsenapati23, #2539 @vagetablechicken)
+
+### Code Refactoring
+#2516 #2520 #2522 #2521 #2542 #2531 #2581 @haseeb-xd, #2525 #2526 #2527 #2528 @kstrifonoff, #2523 @ighmaZ, #2546 #2549 @NevilleMthw, #2559 @marandabui, #2554 @gokullan, #2580 @team-317, #2599 @lbartyczak, #2594 @shivamgupta-sg, #2571 @Jake-00
+
+## [0.6.2] - 2022-09-20
+
+### Features
+- Support independently executing the OpenMLDB offline engine without the OpenMLDB deployment (#2423 @tobegit3hub)
+- Support the log setting of ZooKeeper and disable ZooKeeper logs in the diagnostic tool (#2451 @vagetablechicken)
+- Support query parameters of the SQL query APIs (#2277 @qsliu2017)
+- Improve the documents (#2406 @aceforeverd, #2408 #2414 @vagetablechicken, #2410 #2402 #2356 #2374 #2396 #2376 #2419 @michelle-qinqin, #2424 #2418 @dl239, #2455 @lumianph, #2458 @tobegit3hub)
+- Other minor features (#2420 @aceforeverd, #2411 @wuyou10206, #2446 #2452 @vagetablechicken, #2475 @tobegit3hub)
+
+### Bug Fixes
+- Table creation succeeds even if `partitionnum` is set to 0, which should report an error. (#2220 @dl239)
+- There are thread races in aggregators if there are concurrent `puts`. (#2472 @zhanghaohit)
+- The `limit` clause does not work if it is used with the `where` and `group by` clauses. (#2447 @aceforeverd)
+- The `TaskManager` process will terminate if ZooKeeper disconnects. (#2494 @tobegit3hub)
+- The replica cluster does not create the database if a database is created in the leader cluster. (#2488 @dl239)
+- When there is data in base tables, deployments with long windows can still be executed (which should report an error). (#2501 @zhanghaohit)
+- Other minor bug fixes (#2415 @aceforeverd, #2417 #2434 #2435 #2473 @dl239, #2466 @vagetablechicken)
+
+### Code Refactoring
+#2413 @dl239, #2470 #2467 #2468 @vagetablechicken
+
+## [0.6.1] - 2022-08-30
+
+### Features
+- Support new built-in functions `last_day` and `regexp_like` (#2262 @HeZean, #2187 @jiang1997)
+- Support Jupyter Notebook for the TalkingData use case (#2354 @vagetablechicken)
+- Add a new API to disable Spark logs of the batch engine (#2359 @tobegit3hub)
+- Add the use case of precision marketing based on OneFlow (#2267 @Elliezza @vagetablechicken @siqi)
+- Support the RPC request timeout in CLI and Python SDK (#2371 @vagetablechicken)
+- Improve the documents (#2021 @liuceyim, #2348 #2316 #2324 #2361 #2315 #2323 #2355 #2328 #2360 #2378 #2319 #2350 #2395 #2398 @michelle-qinqin, #2373 @njzyfr, #2370 @tobegit3hub, #2367 #2382 #2375 #2401 @vagetablechicken, #2387 #2394 @dl239, #2379 @aceforeverd, #2403 @lumianph, #2400 gitpod-for-oss @aceforeverd)
+- Other minor features (#2363 @aceforeverd, #2185 @qsliu2017)
+
+### Bug Fixes
+- `APIServer` will core dump if there is no `rs` in `QueryResp`. (#2346 @vagetablechicken)
+- Data is not deleted from `pre-aggr` tables if there are delete operations in a main table. (#2300 @zhanghaohit)
+- Task jobs will core dump when enabling `UnsafeRowOpt` with multiple threads in the Yarn cluster. (#2352 #2364 @tobegit3hub)
+- Other minor bug fixes (#2336 @dl239, #2337 @dl239, #2385 #2372 @aceforeverd, #2383 #2384 @vagetablechicken)
+
+### Code Refactoring
+#2310 @hv789, #2306 #2305 @yeya24, #2311 @Mattt47, #2368 @TBCCC, #2391 @PrajwalBorkar, #2392 @zahyaah, #2405 @wang-jiahua
+
+## [0.6.0] - 2022-08-10
+
+### Highlights
+
+- Add a new toolkit for managing OpenMLDB, currently including a diagnostic tool and a log collector (#2299 #2326 @dl239 @vagetablechicken)
+- Support aggregate functions with suffix `_where` using pre-aggregation (#1821 #1841 #2321 #2255 #2321 @aceforeverd @nautaa @zhanghaohit)
+- Support a new SQL syntax of `EXCLUDE CURRENT_ROW` (#2053 #2165 #2278 @aceforeverd)
+- Add new OpenMLDB ecosystem plugins for DolphinScheduler (#1921 #1955 @vagetablechicken) and Airflow (#2215 @vagetablechicken)
+
+### Other Features
+
+- Support the `DELETE` statement in SQL and the Kafka Connector (#2183 #2257 @dl239)
+- Support customized order in the `insert` statement (#2075 @vagetablechicken)
+- Add a new use case of TalkingData AdTracking Fraud Detection (#2008 @vagetablechicken)
+- Improve the startup script to remove `mon` (#2050 @dl239)
+- Improve the performance of offline batch SQL engine (#1882 #1943 #1973 #2142 #2273 #1773 @tobegit3hub)
+- Support returning version numbers from TaskManager (#2102 @tobegit3hub)
+- Improve the CICD workflow and release procedure (#1873 #2025 #2028 @mangoGoForward)
+- Support GitHub Codespaces (#1922 @nautaa)
+- Support new built-in functions `char(int)`, `char_length`, `character_length`, `radians`, `hex`, `median` (#1896 #1895 #1897 #2159 #2030 @wuxiaobai24 @HGZ-20 @Ivyee17)
+- Support returning result set for a new query API (#2189 @qsliu2017)
+- Improve the documents (#1796 #1817 #1818 #2254 #1948 #2227 #2254 #1824 #1829 #1832 #1840 #1842 #1844 #1845 #1848 #1849 #1851 #1858 #1875 #1923 #1925 #1939 #1942 #1945 #1957 #2031 #2054 #2140 #2195 #2304 #2264 #2260 #2257 #2254 #2247 #2240 #2227 #2115 #2126 #2116 #2154 #2152 #2178 #2147 #2146 #2184 #2138 #2145 #2160 #2197 #2198 #2133 #2224 #2223 #2222 #2209 #2248 #2244 #2242 #2241 #2226 #2225 #2221 #2219 #2201 #2291 #2231 #2196 #2297 #2206 #2238 #2270 #2296 #2317 #2065 #2048 #2088 #2331 #1831 #1945 #2118 @ZtXavier @pearfl @PrajwalBorkar @tobegit3hub @ZtXavier @zhouxh19 @dl239 @vagetablechicken @tobegit3hub @aceforeverd @jmoldyvan @lumianph @bxiiiiii @michelle-qinqin @yclchuxue @redundan3y)
+
+### Bug Fixes
+
+- The SQL engine may produce incorrect results under certain circumstances. (#1950 #1997 #2024 @aceforeverd)
+- The `genDDL` function generates incorrect DDL if the SQL is partitioned by multiple columns. (#1956 @dl239)
+- The snapshot recovery may fail for disk tables. (#2174 @zhanghaohit)
+- `enable_trace` does not work for some SQL queries. (#2292 @aceforeverd)
+- Tablets cannot save `ttl` when updating the `ttl` of an index. (#1935 @dl239)
+- MakeResultSet uses a wrong schema in projection. (#2049 @dl239)
+- A table does not exist when deploying SQL by the APIServer. (#2205 @vagetablechicken)
+- The cleanup for ZooKeeper does not work properly. (#2191 @mangoGoForward)
+
+Other minor bug fixes (#2052 #1959 #2253 #2273 #2288 #1964 #2175 #1938 #1963 #1956 #2171 #2036 #2170 #2236 #1867 #1869 #1900 #2162 #2161 #2173 #2190 #2084 #2085 #2034 #1972 #1408 #1863 #1862 #1919 #2093 #2167 #2073 #1803 #1998 #2000 #2012 #2055 #2174 #2036 @Xeonacid @CuriousCorrelation @Shigm1026 @jiang1997 @Harshvardhantomar @nautaa @Ivyee17 @frazie @PrajwalBorkar @dl239 @aceforeverd @tobegit3hub @dl239 @vagetablechicken @zhanghaohit @mangoGoForward @SaumyaBhushan @BrokenArrow1404 @harshlancer)
+
+### Code Refactoring
+
+#1884 #1917 #1953 #1965 #2017 #2033 #2044 @mangoGoForward; #2131 #2130 #2112 #2113 #2104 #2107 #2094 #2068 #2071 #2070 #1982 #1878 @PrajwalBorkar; #2158 #2051 #2037 #2015 #1886 #1857 @frazie; #2100 #2096 @KikiDotPy; #2089 @ayushclashroyale; #1994 @fpetrakov; #2079 kayverly; #2062 @WUBBBB; #1843 @1korenn; #2092 @HeZean; #1984 @0sirusD3m0n; #1976 @Jaguar16; #2086 @marc-marcos; #1999 @Albert-Debbarma;
+
+## [0.5.3] - 2022-07-22
+
+### Bug Fixes
+- The SQL file cannot be successfully loaded in the Yarn-Client mode. (#2151 @tobegit3hub)
+- The SQL file cannot be successfully loaded in the Yarn-Cluster mode. (#1993 @tobegit3hub)
+
## [0.5.2] - 2022-06-10
### Features
@@ -259,6 +368,11 @@ Removed
- openmldb-0.2.0-linux.tar.gz targets on x86_64
- aarch64 artifacts consider experimental
+[0.6.3]: https://github.com/4paradigm/OpenMLDB/compare/v0.6.2...v0.6.3
+[0.6.2]: https://github.com/4paradigm/OpenMLDB/compare/v0.6.1...v0.6.2
+[0.6.1]: https://github.com/4paradigm/OpenMLDB/compare/v0.6.0...v0.6.1
+[0.6.0]: https://github.com/4paradigm/OpenMLDB/compare/v0.5.3...v0.6.0
+[0.5.3]: https://github.com/4paradigm/OpenMLDB/compare/v0.5.2...v0.5.3
[0.5.2]: https://github.com/4paradigm/OpenMLDB/compare/v0.5.1...v0.5.2
[0.5.1]: https://github.com/4paradigm/OpenMLDB/compare/v0.5.0...v0.5.1
[0.5.0]: https://github.com/4paradigm/OpenMLDB/compare/v0.4.4...v0.5.0
diff --git a/CMakeLists.txt b/CMakeLists.txt
index a0b577aa35c..bfe693bcd2c 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -33,7 +33,7 @@ endif()
message (STATUS "CMAKE_BUILD_TYPE: ${CMAKE_BUILD_TYPE}")
set(OPENMLDB_VERSION_MAJOR 0)
-set(OPENMLDB_VERSION_MINOR 5)
+set(OPENMLDB_VERSION_MINOR 6)
set(OPENMLDB_VERSION_BUG 0)
function(get_commitid CODE_DIR COMMIT_ID)
@@ -196,6 +196,7 @@ set_target_properties(absl::time_zone PROPERTIES INTERFACE_LINK_LIBRARIES "\$<\$
find_package(GTest REQUIRED)
+# TODO(hw): dup with hybridse root cmake, need cleanup
list(
APPEND
ABSL_LIBS
@@ -214,7 +215,8 @@ list(
absl::strings_internal
absl::synchronization
absl::time
- absl::status)
+ absl::status
+ absl::statusor)
find_package(ICU COMPONENTS i18n io uc data)
if (NOT ICU_FOUND)
diff --git a/Makefile b/Makefile
index 88654fc2519..cb311bbe6b1 100644
--- a/Makefile
+++ b/Makefile
@@ -84,7 +84,7 @@ endif
TEST_TARGET ?=
TEST_LEVEL ?=
-.PHONY: all coverage coverage-cpp coverage-java build test configure clean thirdparty-fast thirdparty openmldb-clean thirdparty-configure thirdparty-clean thirdpartybuild-clean thirdpartysrc-clean
+.PHONY: all coverage coverage-cpp coverage-java build test configure clean thirdparty-fast udf_doc_gen thirdparty openmldb-clean thirdparty-configure thirdparty-clean thirdpartybuild-clean thirdpartysrc-clean
all: build
@@ -125,6 +125,10 @@ openmldb-clean:
rm -rf "$(OPENMLDB_BUILD_DIR)"
@cd java && ./mvnw clean
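+# generate UDF documentation: first build the export_udf_info target, then run the generator under hybridse/tools/documentation/udf_doxygen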
+udf_doc_gen:
+ $(MAKE) build OPENMLDB_BUILD_TARGET=export_udf_info
+ $(MAKE) -C ./hybridse/tools/documentation/udf_doxygen
+
THIRD_PARTY_BUILD_DIR ?= $(MAKEFILE_DIR)/.deps
THIRD_PARTY_SRC_DIR ?= $(MAKEFILE_DIR)/thirdsrc
THIRD_PARTY_DIR ?= $(THIRD_PARTY_BUILD_DIR)/usr
diff --git a/README.md b/README.md
index 2b8ca08f59d..07d538ac981 100644
--- a/README.md
+++ b/README.md
@@ -30,7 +30,7 @@
12. [Publications](#12-publications)
13. [The User List](#13-the-user-list)
-### OpenMLDB is an open-source machine learning database that provides a feature platform enabling consistent features for training and inference.
+### OpenMLDB is an open-source machine learning database that provides a feature platform computing consistent features for training and inference.
## 1. Our Philosophy
@@ -86,6 +86,10 @@ In order to achieve the goal of Development as Deployment, OpenMLDB is designed
:point_right: [Read more](https://openmldb.ai/docs/en/main/deploy/index.html)
+Alternatively, you can start working directly on this repository by clicking the following button:
+
+[![Open in Gitpod](https://gitpod.io/button/open-in-gitpod.svg)](https://gitpod.io/#https://github.com/4paradigm/OpenMLDB)
+
## 6. QuickStart
**Cluster and Standalone Versions**
@@ -105,7 +109,11 @@ We are building a list of real-world use cases based on OpenMLDB to demonstrate
| [New York City Taxi Trip Duration](https://openmldb.ai/docs/en/main/use_case/lightgbm_demo.html) | OpenMLDB, LightGBM | This is a challenge from Kaggle to predict the total ride duration of taxi trips in New York City. You can read [more detail here](https://www.kaggle.com/c/nyc-taxi-trip-duration/). It demonstrates using the open-source tools OpenMLDB + LightGBM to build an end-to-end machine learning applications easily. |
| [Importing real-time data streams from Pulsar](https://openmldb.ai/docs/en/main/use_case/pulsar_openmldb_connector_demo.html) | OpenMLDB, Pulsar, [OpenMLDB-Pulsar connector](https://pulsar.apache.org/docs/next/io-connectors/#jdbc-openmldb) | Apache Pulsar is a cloud-native streaming platform. Based on the OpenMLDB-Kafka connector , we are able to seamlessly import real-time data streams from Pulsar to OpenMLDB as the online data sources. |
| [Importing real-time data streams from Kafka](https://openmldb.ai/docs/en/main/use_case/kafka_connector_demo.html) | OpenMLDB, Kafka, [OpenMLDB-Kafka connector](https://github.com/4paradigm/OpenMLDB/tree/main/extensions/kafka-connect-jdbc) | Apache Kafka is a distributed event streaming platform. With the OpenMLDB-Kafka connector, the real-time data streams can be imported from Kafka as the online data sources for OpenMLDB. |
-| [Building an end-to-end ML pipeline in DolphinScheduler](https://openmldb.ai/docs/en/main/use_case/dolphinscheduler_task_demo.html) | OpenMLDB, DolphinScheduler, [OpenMLDB task plugin](https://dolphinscheduler.apache.org/zh-cn/docs/dev/user_doc/guide/task/openmldb.html) | We demonstrate to build an end-to-end machine learning pipeline based on OpenMLDB and DolphinScheduler (an open-source workflow scheduler platform). It consists of feature engineering, model training, and deployment. |
+| [Building end-to-end ML pipelines in DolphinScheduler](https://openmldb.ai/docs/en/main/use_case/dolphinscheduler_task_demo.html) | OpenMLDB, DolphinScheduler, [OpenMLDB task plugin](https://dolphinscheduler.apache.org/zh-cn/docs/dev/user_doc/guide/task/openmldb.html) | We demonstrate how to build an end-to-end machine learning pipeline based on OpenMLDB and DolphinScheduler (an open-source workflow scheduler platform). It consists of feature engineering, model training, and deployment. |
+| [Ad Tracking Fraud Detection](https://openmldb.ai/docs/zh/main/use_case/talkingdata_demo.html) | OpenMLDB, XGBoost | This demo uses OpenMLDB and XGBoost to [detect click fraud](https://www.kaggle.com/c/talkingdata-adtracking-fraud-detection/) for online advertisements. |
+| [SQL-based ML pipelines](https://openmldb.ai/docs/zh/main/use_case/OpenMLDB_Byzer_taxi.html) | OpenMLDB, Byzer, [OpenMLDB Plugin for Byzer](https://github.com/byzer-org/byzer-extension/tree/master/byzer-openmldb) | Byzer is a low-code, open-source programming language for data pipelines, analytics, and AI. Byzer has integrated OpenMLDB to deliver the capability of building ML pipelines with SQL. |
+| [Building end-to-end ML pipelines in Airflow](https://openmldb.ai/docs/zh/main/use_case/airflow_provider_demo.html) | OpenMLDB, Airflow, [Airflow OpenMLDB Provider](https://github.com/4paradigm/OpenMLDB/tree/main/extensions/airflow-provider-openmldb), XGBoost | Airflow is a popular workflow management and scheduling tool. This demo shows how to effectively schedule OpenMLDB tasks in Airflow through the provider package. |
+| [Precision marketing](https://openmldb.ai/docs/zh/main/use_case/JD_recommendation.html) | OpenMLDB, OneFlow | OneFlow is a deep learning framework designed to be user-friendly, scalable, and efficient. This use case demonstrates using OpenMLDB for feature engineering and OneFlow for model training/inference to build an application for [precision marketing](https://jdata.jd.com/html/detail.html?id=1). |
## 8. Documentation
@@ -123,7 +131,7 @@ Furthermore, there are a few important features on the development roadmap but h
- Optimization based on heterogeneous storage and computing resources
- A lightweight OpenMLDB for edge computing
-## 10. Contributors
+## 10. Contribution
We really appreciate the contribution from our community.
@@ -131,12 +139,6 @@ We really appreciate the contribution from our community.
- If you are a new contributor, you may get start with [the list of issues labeled with `good first issue`](https://github.com/4paradigm/OpenMLDB/issues?q=is%3Aopen+is%3Aissue+label%3A%22good+first+issue%22).
- If you have experience of OpenMLDB development, or want to tackle a challenge that may take 1-2 weeks, you may find [the list of issues labeled with `call-for-contributions`](https://github.com/4paradigm/OpenMLDB/issues?q=is%3Aopen+is%3Aissue+label%3Acall-for-contributions).
-Let's clap hands for our community contributors :clap:
-
-
-
-
-
## 11. Community
- Website: [https://openmldb.ai/en](https://openmldb.ai/en)
diff --git a/README_cn.md b/README_cn.md
index 5952dd7bec3..6a1f0dde760 100644
--- a/README_cn.md
+++ b/README_cn.md
@@ -25,7 +25,7 @@
7. [使用案例](#7-使用案例)
8. [OpenMLDB 文档](#8-openmldb-文档)
9. [Roadmap](#9-roadmap)
-10. [社区开发者](#10-社区开发者)
+10. [社区贡献](#10-社区贡献)
11. [加入社区](#11-加入社区)
12. [学术论文](#12-学术论文)
13. [用户列表](#13-用户列表)
@@ -102,7 +102,11 @@ OpenMLDB 有两种部署模式:集群版(cluster version)和单机版(st
| [出租车行程时间预测](https://openmldb.ai/docs/zh/main/use_case/taxi_tour_duration_prediction.html) | OpenMLDB, LightGBM | 这是个来自 Kaggle 的挑战,用于预测纽约市的出租车行程时间。你可以从这里阅读更多关于[该应用场景的描述](https://www.kaggle.com/c/nyc-taxi-trip-duration/)。本案例展示使用 OpenMLDB + LightGBM 的开源方案,快速搭建完整的机器学习应用。 |
| [使用 Pulsar connector 接入实时数据流](https://openmldb.ai/docs/zh/main/use_case/pulsar_openmldb_connector_demo.html) | OpenMLDB, Pulsar, [OpenMLDB-Pulsar connector](https://github.com/apache/pulsar/tree/master/pulsar-io/jdbc/openmldb) | Apache Pulsar 是一个高性能的云原生的消息队列平台,基于 OpenMLDB-Pulsar connector,我们可以高效的将 Pulsar 的数据流作为 OpenMLDB 的在线数据源,实现两者的无缝整合。 |
| [使用 Kafka connector 接入实时数据流](https://openmldb.ai/docs/zh/main/use_case/kafka_connector_demo.html) | OpenMLDB, Kafka, [OpenMLDB-Kafka connector](https://github.com/4paradigm/OpenMLDB/tree/main/extensions/kafka-connect-jdbc) | Apache Kafka 是一个分布式消息流平台。基于 OpenMLDB-Kafka connector,实时数据流可以被简单的引入到 OpenMLDB 作为在线数据源。 |
-| [构建端到端的机器学习工作流](https://openmldb.ai/docs/zh/main/use_case/dolphinscheduler_task_demo.html) | OpenMLDB, DolphinScheduler, [OpenMLDB task plugin](https://dolphinscheduler.apache.org/zh-cn/docs/dev/user_doc/guide/task/openmldb.html) | 这个案例新演示了基于 OpenMLDB 和 DolphinScheduler(一个开源的工作流任务调度平台)来构建一个完整的机器学习工作流,包括了特征工程、模型训练,以及部署上线。 |
+| [在 DolphinScheduler 中构建端到端的机器学习工作流](https://openmldb.ai/docs/zh/main/use_case/dolphinscheduler_task_demo.html) | OpenMLDB, DolphinScheduler, [OpenMLDB task plugin](https://dolphinscheduler.apache.org/zh-cn/docs/dev/user_doc/guide/task/openmldb.html) | 这个案例演示了基于 OpenMLDB 和 DolphinScheduler(一个开源的工作流任务调度平台)来构建一个完整的机器学习工作流,包括了特征工程、模型训练,以及部署上线。 |
+| [在线广告点击欺诈检测](https://openmldb.ai/docs/zh/main/use_case/talkingdata_demo.html) | OpenMLDB, XGBoost | 该案例演示了基于 OpenMLDB 以及 XGBoost 去构建一个[在线广告反欺诈的应用](https://www.kaggle.com/c/talkingdata-adtracking-fraud-detection/)。 |
+| [基于 SQL 构建机器学习全流程](https://openmldb.ai/docs/zh/main/use_case/OpenMLDB_Byzer_taxi.html) | OpenMLDB, Byzer, [OpenMLDB Plugin for Byzer](https://github.com/byzer-org/byzer-extension/tree/master/byzer-openmldb) | Byzer 是一门面向 Data 和 AI 的低代码、云原生的开源编程语言。Byzer 已经把 OpenMLDB 整合在内,用来一起构建完整的机器学习应用全流程。 |
+| [在 Airflow 中构建机器学习应用](https://openmldb.ai/docs/zh/main/use_case/airflow_provider_demo.html) | OpenMLDB, Airflow, [Airflow OpenMLDB Provider](https://github.com/4paradigm/OpenMLDB/tree/main/extensions/airflow-provider-openmldb), XGBoost | Airflow 是一个流行的工作流编排和管理软件。该案例展示了如何在 Airflow 内,通过提供的 provider package,来方便的编排基于 OpenMLDB 的机器学习任务。 |
+| [精准营销](https://openmldb.ai/docs/zh/main/use_case/JD_recommendation.html) | OpenMLDB, OneFlow | OneFlow 是一个用户友好、可扩展、高效的深度学习框架。该案例展示了如何使用 OpenMLDB 做特征工程,串联 OneFlow 进行模型训练和预测,来构造一个用于[精准营销的机器学习应用](https://jdata.jd.com/html/detail.html?id=1)。 |
## 8. OpenMLDB 文档
@@ -121,7 +125,7 @@ OpenMLDB 有两种部署模式:集群版(cluster version)和单机版(st
- 基于异构存储和异构计算资源进行优化
- 轻量级 edge 版本
-## 10. 社区开发者
+## 10. 社区贡献
我们非常感谢来自社区的贡献。
@@ -130,12 +134,6 @@ OpenMLDB 有两种部署模式:集群版(cluster version)和单机版(st
- 如果你是有一定的开发经验,可以查找 [call-for-contributions](https://github.com/4paradigm/OpenMLDB/issues?q=is%3Aopen+is%3Aissue+label%3Acall-for-contributions) 标签的 issues。
- 也可以阅读我们[这个文档](https://go005qabor.feishu.cn/docs/doccn7oEU0AlCOGtYz09chIebzd)来了解不同层级的开发任务,参与和开发者讨论
-为我们已有的社区贡献者鼓掌表示感谢 :clap:
-
-
-
-
-
## 11. 加入社区
- 网站:[https://openmldb.ai/](https://openmldb.ai)
diff --git a/benchmark/README.md b/benchmark/README.md
index 7c42a2cfe9a..b30d8199df2 100644
--- a/benchmark/README.md
+++ b/benchmark/README.md
@@ -9,17 +9,16 @@ OpenMLDB Benchmak tool is used for tesing the performance of OpenMLDB's online S
## Run
- 1. Compile
+1. Compile
```bash
cd benchmark
mvn clean package
```
-2. Uncompress the package to `lib` dir and copy the configuration to `conf` dir
+2. Copy the package to the `lib` directory and the configuration to the `conf` directory
```bash
mkdir -p /work/benchmark/conf /work/benchmark/lib
cp target/openmldb-benchmark-0.5.0.jar /work/benchmark/lib
cp src/main/resources/conf.properties /work/benchmark/conf
- cd /work/benchmark/lib && jar -xvf openmldb-benchmark-0.5.0.jar
```
3. Modify the configuration
```
@@ -29,7 +28,7 @@ OpenMLDB Benchmak tool is used for tesing the performance of OpenMLDB's online S
4. Run benchmark
```
cd /work/benchmark
- java -cp conf/:lib/ com._4paradigm.openmldb.benchmark.OpenMLDBPerfBenchmark
+ java -cp conf/:lib/* com._4paradigm.openmldb.benchmark.OpenMLDBPerfBenchmark
```
The above testing run with the default confguration. You can modify `WINDOW_NUM`, `WINDOW_SIZE` and `JOIN_NUM` in the confguration file if you want to evaluate the performance impact of those parameters.
diff --git a/benchmark/pom.xml b/benchmark/pom.xml
index f261f2f4a8e..b59cb3c2317 100644
--- a/benchmark/pom.xml
+++ b/benchmark/pom.xml
@@ -27,12 +27,12 @@ xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xs
com.4paradigm.openmldb
openmldb-jdbc
- 0.5.0
+ 0.6.2
com.4paradigm.openmldb
openmldb-native
- 0.5.0-allinone
+ 0.6.2-allinone
org.slf4j
diff --git a/cases/debug/diff-debug-bank.yaml b/cases/debug/diff-debug-bank.yaml
new file mode 100644
index 00000000000..438b54882f9
--- /dev/null
+++ b/cases/debug/diff-debug-bank.yaml
@@ -0,0 +1,320 @@
+db: test_zw3
+debugs: []
+cases:
+ -
+ id: 0
+ desc: diff-miaoche
+ inputs:
+ -
+ name: flattenRequest
+ columns: ["reqId string","eventTime timestamp","main_id string","new_user_id string","loan_ts bigint","split_id int","time1 string"]
+ create: |
+ CREATE TABLE IF NOT EXISTS flattenRequest(
+ reqId string,
+ eventTime timestamp,
+ main_id string,
+ new_user_id string,
+ loan_ts bigInt,
+ split_id int,
+ time1 string
+ );
+ rows:
+ - ['000014b8ec0ce8ad7c20f56915fc3a9f_2000-09-11',968601600000,'13624','000014b8ec0ce8ad7c20f56915fc3a9f',5923063887,1,'2000-09-11']
+ -
+ name: action
+ create: |
+ CREATE TABLE IF NOT EXISTS action(
+ reqId string,
+ eventTime timestamp,
+ ingestionTime timestamp,
+ actionValue int,
+ index(key=(reqId), ttl=0m, ttl_type=absolute)
+ );
+ inserts:
+ - insert into action values ('000014b8ec0ce8ad7c20f56915fc3a9f_2000-09-11',968601600000,968601600000,0);
+ -
+ name: bo_bill_detail
+ create: |
+ CREATE TABLE IF NOT EXISTS bo_bill_detail(
+ ingestionTime timestamp,
+ new_user_id string,
+ bill_ts bigInt,
+ bank_id string,
+ lst_bill_amt double,
+ lst_repay_amt double,
+ card_limit double,
+ cur_blc double,
+ cur_bill_min_repay double,
+ buy_cnt double,
+ cur_bill_amt double,
+ adj_amt double,
+ rev_credit double,
+ avl_amt double,
+ advc_limit double,
+ repay_status string,
+ index(key=(new_user_id), ttl=0m, ttl_type=absolute, ts=`ingestionTime`)
+ );
+ inserts:
+ - insert into bo_bill_detail values (966441600000,'000014b8ec0ce8ad7c20f56915fc3a9f',5920909587,'16',51.71693919790691,48.78645816207608,51.58933610737785,51.799664091574954,48.822455898899634,4.0,49.79404783706583,26.457513110645905,26.457513110645905,26.457513110645905,51.58933610737785,'0');
+ - insert into bo_bill_detail values (964454400000,'000014b8ec0ce8ad7c20f56915fc3a9f',5918950767,'2',51.94205040234742,52.598874512673746,51.93387237632103,51.93387237632103,26.457513110645905,3.0,52.59481818582511,26.457513110645905,26.457513110645905,26.457513110645905,26.457513110645905,'0');
+ - insert into bo_bill_detail values (950630400000,'000014b8ec0ce8ad7c20f56915fc3a9f',5905234527,'16',51.60229258472921,49.15064597744367,51.58933610737785,51.60169377065059,48.61238422459857,6.0,49.14319179703328,26.457513110645905,26.457513110645905,26.457513110645905,50.36053315841683,'0');
+ - insert into bo_bill_detail values (961257600000,'000014b8ec0ce8ad7c20f56915fc3a9f',5884239387,'16',51.58922465011468,51.8106523796024,51.58933610737785,50.91037909896174,48.42646177452984,2.0,51.31735378992179,26.457513110645905,26.457513110645905,26.457513110645905,50.36053315841683,'0');
+ - insert into bo_bill_detail values (966528000000,'000014b8ec0ce8ad7c20f56915fc3a9f',5920972167,'6',51.94317086971107,51.998970182110334,51.93387237632103,51.93358932328864,49.66727796044394,0.0,26.457513110645905,26.457513110645905,26.457513110645905,26.457513110645905,51.26219172060438,'0');
+ - insert into bo_bill_detail values (974822400000,'000014b8ec0ce8ad7c20f56915fc3a9f',5897781927,'2',51.65759673078105,49.667580975924324,51.93387237632103,51.84769425924358,26.457513110645905,1.0,50.63292505870069,26.457513110645905,26.457513110645905,26.457513110645905,26.457513110645905,'0');
+ - insert into bo_bill_detail values (971712000000,'000014b8ec0ce8ad7c20f56915fc3a9f',5894611827,'16',51.60597058480733,48.68377758555718,51.58933610737785,51.580363511708605,48.589732454501124,3.0,47.9899301937396,26.457513110645905,26.457513110645905,26.457513110645905,50.36053315841683,'0');
+ - insert into bo_bill_detail values (955987200000,'000014b8ec0ce8ad7c20f56915fc3a9f',5878966887,'16',51.027420079796315,51.027918828813704,51.0440789906136,51.043883081129316,48.019525195486885,0.0,51.04437285343018,26.457513110645905,26.457513110645905,26.457513110645905,49.850784346888666,'0');
+ - insert into bo_bill_detail values (958579200000,'000014b8ec0ce8ad7c20f56915fc3a9f',5881563927,'16',51.043883081129316,51.043883081129316,51.58933610737785,51.58922465011468,48.599124477710504,0.0,51.58922465011468,26.457513110645905,26.457513110645905,26.457513110645905,50.36053315841683,'0');
+ - insert into bo_bill_detail values (958665600000,'000014b8ec0ce8ad7c20f56915fc3a9f',5913108567,'6',51.932222174676866,52.03442226065357,51.93387237632103,51.93395709937767,49.66854638501111,0.0,26.457513110645905,26.457513110645905,26.457513110645905,26.457513110645905,51.26219172060438,'0');
+ - insert into bo_bill_detail values (956505600000,'000014b8ec0ce8ad7c20f56915fc3a9f',5910970407,'2',51.93158576434962,51.93387237632103,51.93387237632103,51.947997073996994,26.457513110645905,1.0,51.95024446525733,26.457513110645905,26.457513110645905,26.457513110645905,26.457513110645905,'0');
+ - insert into bo_bill_detail values (969033600000,'000014b8ec0ce8ad7c20f56915fc3a9f',5892018927,'16',51.58823024683053,48.62821608901564,51.58933610737785,51.60597058480733,48.61690035368359,3.0,48.94247746079064,26.457513110645905,26.457513110645905,26.457513110645905,50.36053315841683,'0');
+ - insert into bo_bill_detail values (974476800000,'000014b8ec0ce8ad7c20f56915fc3a9f',5865760587,'16',51.04098353284349,51.0440789906136,51.0440789906136,51.040294865919414,48.01597650782497,0.0,51.04339330412898,26.457513110645905,26.457513110645905,26.457513110645905,49.850784346888666,'0');
+ - insert into bo_bill_detail values (972144000000,'000014b8ec0ce8ad7c20f56915fc3a9f',5895101307,'2',26.457513110645905,51.18079913405026,51.93387237632103,51.65759673078105,26.457513110645905,1.0,52.117953720383156,26.457513110645905,26.457513110645905,26.457513110645905,26.457513110645905,'0');
+ - insert into bo_bill_detail values (951148800000,'000014b8ec0ce8ad7c20f56915fc3a9f',5905732467,'2',47.087959182788964,26.457513110645905,51.93387237632103,51.93352578055914,26.457513110645905,0.0,51.92555343951569,26.457513110645905,26.457513110645905,26.457513110645905,26.457513110645905,'0');
+ - insert into bo_bill_detail values (963936000000,'000014b8ec0ce8ad7c20f56915fc3a9f',5886917547,'16',50.91037909896174,50.795689777775436,51.58933610737785,51.59442024095241,48.68997432737052,1.0,51.53996895614121,26.457513110645905,26.457513110645905,26.457513110645905,50.36053315841683,'0');
+ - insert into bo_bill_detail values (977068800000,'000014b8ec0ce8ad7c20f56915fc3a9f',5868357567,'16',51.040294865919414,48.67944330002142,51.0440789906136,50.98937242210381,47.96187861208107,2.0,47.89066401711298,26.457513110645905,26.457513110645905,26.457513110645905,49.850784346888666,'0');
+ - insert into bo_bill_detail values (956073600000,'000014b8ec0ce8ad7c20f56915fc3a9f',5910515667,'6',51.96310518050283,52.10910860876436,51.93387237632103,51.932222174676866,49.66584742053638,0.0,26.457513110645905,26.457513110645905,26.457513110645905,26.457513110645905,51.26219172060438,'0');
+ - insert into bo_bill_detail values (948038400000,'000014b8ec0ce8ad7c20f56915fc3a9f',5871020727,'16',50.98937242210381,51.04015282108783,51.0440789906136,51.03942495757569,48.01482583536048,0.0,51.08769225557169,26.457513110645905,26.457513110645905,26.457513110645905,49.850784346888666,'0');
+ - insert into bo_bill_detail values (959097600000,'000014b8ec0ce8ad7c20f56915fc3a9f',5913594627,'2',51.947997073996994,52.61299744359753,51.93387237632103,51.94205040234742,26.457513110645905,3.0,52.610072229564565,26.457513110645905,26.457513110645905,26.457513110645905,26.457513110645905,'0');
+ - insert into bo_bill_detail values (961776000000,'000014b8ec0ce8ad7c20f56915fc3a9f',5916270987,'2',51.94205040234742,26.457513110645905,51.93387237632103,51.94205040234742,26.457513110645905,0.0,26.457513110645905,26.457513110645905,26.457513110645905,26.457513110645905,26.457513110645905,'0');
+ - insert into bo_bill_detail values (961257600000,'000014b8ec0ce8ad7c20f56915fc3a9f',5915720727,'16',51.56826349606898,48.71044754464898,51.58933610737785,51.603461124230805,48.614181058617035,3.0,49.22148006714142,26.457513110645905,26.457513110645905,26.457513110645905,50.36053315841683,'0');
+ - insert into bo_bill_detail values (947952000000,'000014b8ec0ce8ad7c20f56915fc3a9f',5902552767,'16',51.599192823144044,49.07310770676746,51.58933610737785,51.60229258472921,48.613021918000534,3.0,49.11379134214747,26.457513110645905,26.457513110645905,26.457513110645905,50.36053315841683,'0');
+ - insert into bo_bill_detail values (961257600000,'000014b8ec0ce8ad7c20f56915fc3a9f',5915775267,'6',51.93395709937767,49.850784346888666,51.93387237632103,51.951256962656835,49.978867534188886,1.0,26.457513110645905,26.457513110645905,26.457513110645905,26.457513110645905,51.26219172060438,'0');
+ - insert into bo_bill_detail values (963936000000,'000014b8ec0ce8ad7c20f56915fc3a9f',5918402067,'16',51.603461124230805,48.73642888025343,51.58933610737785,51.71693919790691,48.734682721856316,4.0,49.918256179478064,26.457513110645905,26.457513110645905,26.457513110645905,50.36053315841683,'0');
+ - insert into bo_bill_detail values (966355200000,'000014b8ec0ce8ad7c20f56915fc3a9f',5889332307,'16',51.59442024095241,51.65616032962574,51.58933610737785,51.58823024683053,48.59806580513261,0.0,51.65036011491111,26.457513110645905,26.457513110645905,26.457513110645905,50.36053315841683,'0');
+ - insert into bo_bill_detail values (966960000000,'000014b8ec0ce8ad7c20f56915fc3a9f',5921422407,'2',51.93387237632103,51.93387430184657,51.93387237632103,51.97110447161961,26.457513110645905,2.0,51.97110543369267,26.457513110645905,26.457513110645905,26.457513110645905,26.457513110645905,'0');
+ - insert into bo_bill_detail values (977414400000,'000014b8ec0ce8ad7c20f56915fc3a9f',5900373327,'2',51.84769425924358,51.8945555911215,51.93387237632103,51.93264098811074,26.457513110645905,1.0,51.97556541298997,26.457513110645905,26.457513110645905,26.457513110645905,26.457513110645905,'0');
+ - insert into bo_bill_detail values (953395200000,'000014b8ec0ce8ad7c20f56915fc3a9f',5876312847,'16',51.050959834267566,51.40695478240274,51.0440789906136,51.027420079796315,48.002294736814406,1.0,51.390784193277305,26.457513110645905,26.457513110645905,26.457513110645905,49.850784346888666,'0');
+ - insert into bo_bill_detail values (976982400000,'000014b8ec0ce8ad7c20f56915fc3a9f',5899884927,'16',51.599326545992824,48.73642888025343,51.58933610737785,51.599192823144044,48.60971096396274,3.0,48.73391221726407,26.457513110645905,26.457513110645905,26.457513110645905,50.36053315841683,'0');
+ - insert into bo_bill_detail values (948470400000,'000014b8ec0ce8ad7c20f56915fc3a9f',5903049387,'2',51.93264098811074,52.606433066688716,51.93387237632103,47.087959182788964,26.457513110645905,3.0,51.96187352280516,26.457513110645905,26.457513110645905,26.457513110645905,26.457513110645905,'0');
+ - insert into bo_bill_detail values (953481600000,'000014b8ec0ce8ad7c20f56915fc3a9f',5907920007,'16',51.60169377065059,26.457513110645905,51.58933610737785,51.63982862093948,49.34031921258718,6.0,48.386326580967065,26.457513110645905,26.457513110645905,26.457513110645905,50.36053315841683,'0');
+ - insert into bo_bill_detail values (953308800000,'000014b8ec0ce8ad7c20f56915fc3a9f',5907824367,'6',51.93357391899772,26.457513110645905,51.93387237632103,51.96310518050283,50.63784750559605,2.0,26.457513110645905,26.457513110645905,26.457513110645905,26.457513110645905,51.26219172060438,'0');
+ - insert into bo_bill_detail values (958579200000,'000014b8ec0ce8ad7c20f56915fc3a9f',5913046587,'16',51.60320145107278,48.6224875957617,51.58933610737785,51.56826349606898,48.57689162554558,3.0,47.36033572516141,26.457513110645905,26.457513110645905,26.457513110645905,50.36053315841683,'0');
+ - insert into bo_bill_detail values (963936000000,'000014b8ec0ce8ad7c20f56915fc3a9f',5918408367,'6',51.951256962656835,51.93387237632103,51.93387237632103,51.94317086971107,49.847183471084904,1.0,26.457513110645905,26.457513110645905,26.457513110645905,26.457513110645905,51.26219172060438,'0');
+ - insert into bo_bill_detail values (955987200000,'000014b8ec0ce8ad7c20f56915fc3a9f',5910503487,'16',51.63982862093948,49.36352904726323,51.58933610737785,51.60320145107278,48.61394964410935,3.0,48.89360387617178,26.457513110645905,26.457513110645905,26.457513110645905,50.36053315841683,'0');
+ - insert into bo_bill_detail values (974304000000,'000014b8ec0ce8ad7c20f56915fc3a9f',5897288487,'16',51.580363511708605,48.68377758555718,51.58933610737785,51.599326545992824,48.60988582582765,3.0,48.9990306026558,26.457513110645905,26.457513110645905,26.457513110645905,50.36053315841683,'0');
+ - insert into bo_bill_detail values (953827200000,'000014b8ec0ce8ad7c20f56915fc3a9f',5908292547,'2',51.93352578055914,51.93387237632103,51.93387237632103,51.93158576434962,26.457513110645905,1.0,51.931933335858005,26.457513110645905,26.457513110645905,26.457513110645905,26.457513110645905,'0');
+ - insert into bo_bill_detail values (950716800000,'000014b8ec0ce8ad7c20f56915fc3a9f',5905270587,'6',26.457513110645905,26.457513110645905,51.93387237632103,51.93357391899772,49.681316407679866,3.0,26.457513110645905,26.457513110645905,26.457513110645905,26.457513110645905,51.26219172060438,'0');
+ - insert into bo_bill_detail values (950803200000,'000014b8ec0ce8ad7c20f56915fc3a9f',5873703267,'16',51.03942495757569,49.708207571788385,51.0440789906136,51.050959834267566,48.1579214667743,1.0,49.752745652878296,26.457513110645905,26.457513110645905,26.457513110645905,49.850784346888666,'0');
+ -
+ name: bo_browse_history
+ create: |
+ CREATE TABLE IF NOT EXISTS bo_browse_history(
+ ingestionTime timestamp,
+ new_user_id string,
+ bws_ts bigInt,
+ action string,
+ subaction string,
+ index(key=(new_user_id), ttl=(0m, 9), ttl_type=absandlat, ts=`ingestionTime`)
+ );
+ -
+ name: bo_detail
+ create: |
+ CREATE TABLE IF NOT EXISTS bo_detail(
+ ingestionTime timestamp,
+ new_user_id string,
+ trx_ts bigInt,
+ trx_typ string,
+ trx_amt double,
+ is_slry string,
+ index(key=(new_user_id), ttl=(0m, 9), ttl_type=absandlat, ts=`ingestionTime`)
+ );
+ inserts:
+ - insert into bo_detail values (946742400000,'000014b8ec0ce8ad7c20f56915fc3a9f',5901291087,'1',42.84831151865846,'0');
+ - insert into bo_detail values (947001600000,'000014b8ec0ce8ad7c20f56915fc3a9f',5901550287,'1',40.41522237969254,'0');
+ - insert into bo_detail values (947088000000,'000014b8ec0ce8ad7c20f56915fc3a9f',5901636687,'1',39.878775056413154,'0');
+ - insert into bo_detail values (947174400000,'000014b8ec0ce8ad7c20f56915fc3a9f',5901723087,'1',44.23001243499712,'0');
+ - insert into bo_detail values (947260800000,'000014b8ec0ce8ad7c20f56915fc3a9f',5901809487,'0',39.878775056413154,'0');
+ - insert into bo_detail values (947865600000,'000014b8ec0ce8ad7c20f56915fc3a9f',5902414287,'1',43.912592726916046,'0');
+ - insert into bo_detail values (947952000000,'000014b8ec0ce8ad7c20f56915fc3a9f',5902500687,'1',44.424108319695065,'0');
+ - insert into bo_detail values (948038400000,'000014b8ec0ce8ad7c20f56915fc3a9f',5902587087,'1',42.9582215646784,'0');
+ - insert into bo_detail values (948124800000,'000014b8ec0ce8ad7c20f56915fc3a9f',5902673487,'1',42.9582215646784,'0');
+ - insert into bo_detail values (948297600000,'000014b8ec0ce8ad7c20f56915fc3a9f',5902846287,'1',42.143743307874296,'0');
+ - insert into bo_detail values (948384000000,'000014b8ec0ce8ad7c20f56915fc3a9f',5902932687,'1',39.483615589254235,'0');
+ - insert into bo_detail values (950025600000,'000014b8ec0ce8ad7c20f56915fc3a9f',5904574287,'1',37.21554379557015,'0');
+ - insert into bo_detail values (950112000000,'000014b8ec0ce8ad7c20f56915fc3a9f',5904660687,'1',37.21554379557015,'0');
+ - insert into bo_detail values (950198400000,'000014b8ec0ce8ad7c20f56915fc3a9f',5904747087,'1',40.41522237969254,'0');
+ - insert into bo_detail values (950284800000,'000014b8ec0ce8ad7c20f56915fc3a9f',5904833487,'1',42.131818142586724,'0');
+ - insert into bo_detail values (950371200000,'000014b8ec0ce8ad7c20f56915fc3a9f',5904919887,'1',37.21554379557015,'0');
+ - insert into bo_detail values (950457600000,'000014b8ec0ce8ad7c20f56915fc3a9f',5905006287,'0',45.99504212412464,'0');
+ - insert into bo_detail values (950544000000,'000014b8ec0ce8ad7c20f56915fc3a9f',5905092687,'1',40.189050747685,'0');
+ - insert into bo_detail values (950630400000,'000014b8ec0ce8ad7c20f56915fc3a9f',5905179087,'1',40.1251928344276,'0');
+ - insert into bo_detail values (950716800000,'000014b8ec0ce8ad7c20f56915fc3a9f',5905265487,'1',42.359495983781486,'0');
+ - insert into bo_detail values (950803200000,'000014b8ec0ce8ad7c20f56915fc3a9f',5905351887,'1',41.907642501099964,'0');
+ - insert into bo_detail values (951235200000,'000014b8ec0ce8ad7c20f56915fc3a9f',5905783887,'1',44.18007695783247,'0');
+ - insert into bo_detail values (951494400000,'000014b8ec0ce8ad7c20f56915fc3a9f',5906043087,'1',38.44157384915451,'0');
+ - insert into bo_detail values (952704000000,'000014b8ec0ce8ad7c20f56915fc3a9f',5907166287,'0',36.097553933750135,'0');
+ - insert into bo_detail values (952963200000,'000014b8ec0ce8ad7c20f56915fc3a9f',5907425487,'1',37.21554379557015,'0');
+ - insert into bo_detail values (953049600000,'000014b8ec0ce8ad7c20f56915fc3a9f',5907511887,'1',44.92644766727055,'0');
+ - insert into bo_detail values (953136000000,'000014b8ec0ce8ad7c20f56915fc3a9f',5907598287,'1',37.21554379557015,'0');
+ - insert into bo_detail values (953222400000,'000014b8ec0ce8ad7c20f56915fc3a9f',5907684687,'1',44.6745531594889,'0');
+ - insert into bo_detail values (953308800000,'000014b8ec0ce8ad7c20f56915fc3a9f',5907771087,'1',45.31116529068746,'0');
+ - insert into bo_detail values (953395200000,'000014b8ec0ce8ad7c20f56915fc3a9f',5907857487,'1',44.93560503654089,'0');
+ - insert into bo_detail values (953481600000,'000014b8ec0ce8ad7c20f56915fc3a9f',5907943887,'1',40.189050747685,'0');
+ - insert into bo_detail values (953654400000,'000014b8ec0ce8ad7c20f56915fc3a9f',5908116687,'1',43.2625126408534,'0');
+ - insert into bo_detail values (953740800000,'000014b8ec0ce8ad7c20f56915fc3a9f',5908203087,'1',40.6903280891172,'0');
+ - insert into bo_detail values (953827200000,'000014b8ec0ce8ad7c20f56915fc3a9f',5908289487,'1',40.189050747685,'0');
+ - insert into bo_detail values (954172800000,'000014b8ec0ce8ad7c20f56915fc3a9f',5908635087,'1',40.189050747685,'0');
+ - insert into bo_detail values (954259200000,'000014b8ec0ce8ad7c20f56915fc3a9f',5908721487,'1',46.3139395862628,'0');
+ - insert into bo_detail values (955468800000,'000014b8ec0ce8ad7c20f56915fc3a9f',5909931087,'0',44.79231742162934,'0');
+ - insert into bo_detail values (955555200000,'000014b8ec0ce8ad7c20f56915fc3a9f',5910017487,'0',30.56884361568164,'0');
+ - insert into bo_detail values (955641600000,'000014b8ec0ce8ad7c20f56915fc3a9f',5910103887,'1',39.367207165355275,'0');
+ - insert into bo_detail values (955814400000,'000014b8ec0ce8ad7c20f56915fc3a9f',5910276687,'1',41.53328785444273,'0');
+ - insert into bo_detail values (955900800000,'000014b8ec0ce8ad7c20f56915fc3a9f',5910363087,'1',39.367207165355275,'0');
+ - insert into bo_detail values (956073600000,'000014b8ec0ce8ad7c20f56915fc3a9f',5910535887,'0',46.57905537900055,'1');
+ - insert into bo_detail values (956160000000,'000014b8ec0ce8ad7c20f56915fc3a9f',5910622287,'1',39.367207165355275,'0');
+ - insert into bo_detail values (956246400000,'000014b8ec0ce8ad7c20f56915fc3a9f',5910708687,'1',44.893101920005485,'0');
+ - insert into bo_detail values (956332800000,'000014b8ec0ce8ad7c20f56915fc3a9f',5910795087,'0',42.9582215646784,'0');
+ - insert into bo_detail values (956419200000,'000014b8ec0ce8ad7c20f56915fc3a9f',5910881487,'1',40.189050747685,'0');
+ - insert into bo_detail values (956505600000,'000014b8ec0ce8ad7c20f56915fc3a9f',5910967887,'1',42.9582215646784,'0');
+ - insert into bo_detail values (956592000000,'000014b8ec0ce8ad7c20f56915fc3a9f',5911054287,'1',40.8438281751356,'0');
+ - insert into bo_detail values (956764800000,'000014b8ec0ce8ad7c20f56915fc3a9f',5911227087,'0',38.05357144868271,'0');
+ - insert into bo_detail values (956851200000,'000014b8ec0ce8ad7c20f56915fc3a9f',5911313487,'0',43.757547920330275,'0');
+ - insert into bo_detail values (956937600000,'000014b8ec0ce8ad7c20f56915fc3a9f',5911399887,'0',44.54253472805516,'0');
+ - insert into bo_detail values (957024000000,'000014b8ec0ce8ad7c20f56915fc3a9f',5911486287,'1',44.969283961388584,'0');
+ - insert into bo_detail values (957110400000,'000014b8ec0ce8ad7c20f56915fc3a9f',5911572687,'1',42.9582215646784,'0');
+ - insert into bo_detail values (957196800000,'000014b8ec0ce8ad7c20f56915fc3a9f',5911659087,'1',42.143743307874296,'0');
+ - insert into bo_detail values (957283200000,'000014b8ec0ce8ad7c20f56915fc3a9f',5911745487,'1',42.63760546747436,'0');
+ - insert into bo_detail values (957456000000,'000014b8ec0ce8ad7c20f56915fc3a9f',5911918287,'0',45.09442315852372,'0');
+ - insert into bo_detail values (957542400000,'000014b8ec0ce8ad7c20f56915fc3a9f',5912004687,'1',40.1251928344276,'0');
+ - insert into bo_detail values (957628800000,'000014b8ec0ce8ad7c20f56915fc3a9f',5912091087,'1',42.54106016544486,'0');
+ - insert into bo_detail values (957715200000,'000014b8ec0ce8ad7c20f56915fc3a9f',5912177487,'1',43.78016902662665,'0');
+ - insert into bo_detail values (957801600000,'000014b8ec0ce8ad7c20f56915fc3a9f',5912263887,'0',42.9582215646784,'0');
+ - insert into bo_detail values (958406400000,'000014b8ec0ce8ad7c20f56915fc3a9f',5912868687,'1',44.94670288241397,'0');
+ - insert into bo_detail values (958492800000,'000014b8ec0ce8ad7c20f56915fc3a9f',5912955087,'1',39.367207165355275,'0');
+ - insert into bo_detail values (958579200000,'000014b8ec0ce8ad7c20f56915fc3a9f',5913041487,'1',40.189050747685,'0');
+ - insert into bo_detail values (958665600000,'000014b8ec0ce8ad7c20f56915fc3a9f',5913127887,'1',43.78016902662665,'0');
+ - insert into bo_detail values (958752000000,'000014b8ec0ce8ad7c20f56915fc3a9f',5913214287,'1',40.986157419304384,'0');
+ - insert into bo_detail values (958838400000,'000014b8ec0ce8ad7c20f56915fc3a9f',5913300687,'1',42.143743307874296,'0');
+ - insert into bo_detail values (958924800000,'000014b8ec0ce8ad7c20f56915fc3a9f',5913387087,'1',39.367207165355275,'0');
+ - insert into bo_detail values (959011200000,'000014b8ec0ce8ad7c20f56915fc3a9f',5913473487,'1',39.31737147877513,'0');
+ - insert into bo_detail values (959097600000,'000014b8ec0ce8ad7c20f56915fc3a9f',5913559887,'1',39.367207165355275,'0');
+ - insert into bo_detail values (959270400000,'000014b8ec0ce8ad7c20f56915fc3a9f',5913732687,'0',42.9582215646784,'0');
+ - insert into bo_detail values (959356800000,'000014b8ec0ce8ad7c20f56915fc3a9f',5913819087,'1',40.93392969163845,'0');
+ - insert into bo_detail values (959443200000,'000014b8ec0ce8ad7c20f56915fc3a9f',5913905487,'1',42.14136803664542,'0');
+ - insert into bo_detail values (959788800000,'000014b8ec0ce8ad7c20f56915fc3a9f',5914251087,'1',40.298387064496765,'0');
+ - insert into bo_detail values (960220800000,'000014b8ec0ce8ad7c20f56915fc3a9f',5914683087,'1',44.79231742162934,'0');
+ - insert into bo_detail values (960307200000,'000014b8ec0ce8ad7c20f56915fc3a9f',5914769487,'0',46.496681602024026,'1');
+ - insert into bo_detail values (960393600000,'000014b8ec0ce8ad7c20f56915fc3a9f',5914855887,'1',38.21717807478726,'0');
+ - insert into bo_detail values (960566400000,'000014b8ec0ce8ad7c20f56915fc3a9f',5915028687,'1',44.54253472805516,'0');
+ - insert into bo_detail values (960652800000,'000014b8ec0ce8ad7c20f56915fc3a9f',5915115087,'1',43.972516416507254,'0');
+ - insert into bo_detail values (960912000000,'000014b8ec0ce8ad7c20f56915fc3a9f',5915374287,'1',38.21717807478726,'0');
+ - insert into bo_detail values (960998400000,'000014b8ec0ce8ad7c20f56915fc3a9f',5915460687,'1',38.21717807478726,'0');
+ - insert into bo_detail values (961084800000,'000014b8ec0ce8ad7c20f56915fc3a9f',5915547087,'0',43.757547920330275,'0');
+ - insert into bo_detail values (961171200000,'000014b8ec0ce8ad7c20f56915fc3a9f',5915633487,'1',38.21717807478726,'0');
+ - insert into bo_detail values (961257600000,'000014b8ec0ce8ad7c20f56915fc3a9f',5915719887,'1',44.85326855425366,'0');
+ - insert into bo_detail values (961689600000,'000014b8ec0ce8ad7c20f56915fc3a9f',5916151887,'1',44.79231742162934,'0');
+ - insert into bo_detail values (962121600000,'000014b8ec0ce8ad7c20f56915fc3a9f',5916583887,'1',39.87071230866085,'0');
+ - insert into bo_detail values (962208000000,'000014b8ec0ce8ad7c20f56915fc3a9f',5916670287,'0',43.757547920330275,'0');
+ - insert into bo_detail values (962726400000,'000014b8ec0ce8ad7c20f56915fc3a9f',5917188687,'0',43.757547920330275,'0');
+ - insert into bo_detail values (963072000000,'000014b8ec0ce8ad7c20f56915fc3a9f',5917534287,'1',45.22220472290134,'0');
+ - insert into bo_detail values (963158400000,'000014b8ec0ce8ad7c20f56915fc3a9f',5917620687,'1',43.427583400414996,'0');
+ - insert into bo_detail values (963244800000,'000014b8ec0ce8ad7c20f56915fc3a9f',5917707087,'1',44.79231742162934,'0');
+ - insert into bo_detail values (963331200000,'000014b8ec0ce8ad7c20f56915fc3a9f',5917793487,'1',39.367207165355275,'0');
+ - insert into bo_detail values (963417600000,'000014b8ec0ce8ad7c20f56915fc3a9f',5917879887,'1',38.96689235748727,'0');
+ - insert into bo_detail values (963504000000,'000014b8ec0ce8ad7c20f56915fc3a9f',5917966287,'1',40.187806608472684,'0');
+ - insert into bo_detail values (963590400000,'000014b8ec0ce8ad7c20f56915fc3a9f',5918052687,'1',38.50666435826402,'0');
+ - insert into bo_detail values (963676800000,'000014b8ec0ce8ad7c20f56915fc3a9f',5918139087,'1',40.947203811737864,'0');
+ - insert into bo_detail values (963849600000,'000014b8ec0ce8ad7c20f56915fc3a9f',5918311887,'1',44.85326855425366,'0');
+ - insert into bo_detail values (963936000000,'000014b8ec0ce8ad7c20f56915fc3a9f',5918398287,'1',42.50697707435804,'0');
+ - insert into bo_detail values (964022400000,'000014b8ec0ce8ad7c20f56915fc3a9f',5918484687,'1',40.8438281751356,'0');
+ - insert into bo_detail values (964108800000,'000014b8ec0ce8ad7c20f56915fc3a9f',5918571087,'1',41.03004143307682,'0');
+ - insert into bo_detail values (964195200000,'000014b8ec0ce8ad7c20f56915fc3a9f',5918657487,'1',42.19024887340676,'0');
+ - insert into bo_detail values (964281600000,'000014b8ec0ce8ad7c20f56915fc3a9f',5918743887,'1',40.189050747685,'0');
+ - insert into bo_detail values (964368000000,'000014b8ec0ce8ad7c20f56915fc3a9f',5918830287,'1',44.26315171787928,'0');
+ - insert into bo_detail values (964540800000,'000014b8ec0ce8ad7c20f56915fc3a9f',5919003087,'1',40.76954991166814,'0');
+ - insert into bo_detail values (964627200000,'000014b8ec0ce8ad7c20f56915fc3a9f',5919089487,'1',41.3606636793947,'0');
+ - insert into bo_detail values (964713600000,'000014b8ec0ce8ad7c20f56915fc3a9f',5919175887,'1',43.02598749593088,'0');
+ - insert into bo_detail values (964800000000,'000014b8ec0ce8ad7c20f56915fc3a9f',5919262287,'1',40.6903280891172,'0');
+ - insert into bo_detail values (964886400000,'000014b8ec0ce8ad7c20f56915fc3a9f',5919348687,'1',41.263791633828326,'0');
+ - insert into bo_detail values (964972800000,'000014b8ec0ce8ad7c20f56915fc3a9f',5919435087,'1',41.53328785444273,'0');
+ - insert into bo_detail values (965059200000,'000014b8ec0ce8ad7c20f56915fc3a9f',5919521487,'1',38.05357144868271,'0');
+ - insert into bo_detail values (965318400000,'000014b8ec0ce8ad7c20f56915fc3a9f',5919780687,'1',43.150435687255815,'0');
+ - insert into bo_detail values (965404800000,'000014b8ec0ce8ad7c20f56915fc3a9f',5919867087,'1',38.12817199919241,'0');
+ - insert into bo_detail values (965491200000,'000014b8ec0ce8ad7c20f56915fc3a9f',5919953487,'1',44.35972948519862,'0');
+ - insert into bo_detail values (965577600000,'000014b8ec0ce8ad7c20f56915fc3a9f',5920039887,'1',40.189050747685,'0');
+ - insert into bo_detail values (965750400000,'000014b8ec0ce8ad7c20f56915fc3a9f',5920212687,'1',37.21554379557015,'0');
+ - insert into bo_detail values (966268800000,'000014b8ec0ce8ad7c20f56915fc3a9f',5920731087,'1',41.907642501099964,'0');
+ - insert into bo_detail values (966355200000,'000014b8ec0ce8ad7c20f56915fc3a9f',5920817487,'0',30.56884361568164,'0');
+ - insert into bo_detail values (966441600000,'000014b8ec0ce8ad7c20f56915fc3a9f',5920903887,'1',41.53328785444273,'0');
+ - insert into bo_detail values (966528000000,'000014b8ec0ce8ad7c20f56915fc3a9f',5920990287,'1',40.05638276230144,'0');
+ - insert into bo_detail values (966614400000,'000014b8ec0ce8ad7c20f56915fc3a9f',5921076687,'1',38.13474137843339,'0');
+ - insert into bo_detail values (966700800000,'000014b8ec0ce8ad7c20f56915fc3a9f',5921163087,'1',39.548505660770545,'0');
+ - insert into bo_detail values (966787200000,'000014b8ec0ce8ad7c20f56915fc3a9f',5921249487,'1',43.757547920330275,'0');
+ - insert into bo_detail values (966873600000,'000014b8ec0ce8ad7c20f56915fc3a9f',5921335887,'1',41.75219275678824,'0');
+ - insert into bo_detail values (966960000000,'000014b8ec0ce8ad7c20f56915fc3a9f',5921422287,'1',42.5241449061589,'0');
+ - insert into bo_detail values (967046400000,'000014b8ec0ce8ad7c20f56915fc3a9f',5921508687,'1',40.189050747685,'0');
+ - insert into bo_detail values (967132800000,'000014b8ec0ce8ad7c20f56915fc3a9f',5921595087,'1',41.81688773689405,'0');
+ - insert into bo_detail values (967219200000,'000014b8ec0ce8ad7c20f56915fc3a9f',5921681487,'1',30.56884361568164,'0');
+ - insert into bo_detail values (967305600000,'000014b8ec0ce8ad7c20f56915fc3a9f',5921767887,'0',36.632983771459294,'0');
+ - insert into bo_detail values (967392000000,'000014b8ec0ce8ad7c20f56915fc3a9f',5921854287,'0',45.55948199881118,'0');
+ - insert into bo_detail values (967478400000,'000014b8ec0ce8ad7c20f56915fc3a9f',5921940687,'1',43.66837413964482,'0');
+ - insert into bo_detail values (967564800000,'000014b8ec0ce8ad7c20f56915fc3a9f',5922027087,'1',42.9582215646784,'0');
+ - insert into bo_detail values (967651200000,'000014b8ec0ce8ad7c20f56915fc3a9f',5922113487,'0',42.9582215646784,'0');
+ - insert into bo_detail values (967737600000,'000014b8ec0ce8ad7c20f56915fc3a9f',5922199887,'1',42.143743307874296,'0');
+ - insert into bo_detail values (967910400000,'000014b8ec0ce8ad7c20f56915fc3a9f',5922372687,'1',42.45387025937682,'0');
+ - insert into bo_detail values (968083200000,'000014b8ec0ce8ad7c20f56915fc3a9f',5922545487,'1',42.981263359747814,'0');
+ - insert into bo_detail values (968515200000,'000014b8ec0ce8ad7c20f56915fc3a9f',5922977487,'0',37.34324704682227,'0');
+ - insert into bo_detail values (968601600000,'000014b8ec0ce8ad7c20f56915fc3a9f',5923063887,'0',45.55948199881118,'0');
+ - insert into bo_detail values (971884800000,'000014b8ec0ce8ad7c20f56915fc3a9f',5894811087,'1',37.21554379557015,'0');
+ - insert into bo_detail values (972230400000,'000014b8ec0ce8ad7c20f56915fc3a9f',5895156687,'1',38.13474137843339,'0');
+ - insert into bo_detail values (972316800000,'000014b8ec0ce8ad7c20f56915fc3a9f',5895243087,'1',37.21554379557015,'0');
+ - insert into bo_detail values (972921600000,'000014b8ec0ce8ad7c20f56915fc3a9f',5895847887,'1',42.359495983781486,'0');
+ - insert into bo_detail values (973267200000,'000014b8ec0ce8ad7c20f56915fc3a9f',5896193487,'1',37.21554379557015,'0');
+ - insert into bo_detail values (973353600000,'000014b8ec0ce8ad7c20f56915fc3a9f',5896279887,'1',41.04228672966457,'0');
+ - insert into bo_detail values (973440000000,'000014b8ec0ce8ad7c20f56915fc3a9f',5896366287,'1',39.56939473886352,'0');
+ - insert into bo_detail values (973872000000,'000014b8ec0ce8ad7c20f56915fc3a9f',5896798287,'0',43.757547920330275,'0');
+ - insert into bo_detail values (974131200000,'000014b8ec0ce8ad7c20f56915fc3a9f',5897057487,'1',42.9582215646784,'0');
+ - insert into bo_detail values (974217600000,'000014b8ec0ce8ad7c20f56915fc3a9f',5897143887,'1',43.757547920330275,'0');
+ - insert into bo_detail values (974304000000,'000014b8ec0ce8ad7c20f56915fc3a9f',5897230287,'1',43.757547920330275,'0');
+ - insert into bo_detail values (974563200000,'000014b8ec0ce8ad7c20f56915fc3a9f',5897489487,'1',44.79231742162934,'0');
+ - insert into bo_detail values (974736000000,'000014b8ec0ce8ad7c20f56915fc3a9f',5897662287,'1',36.523756652348894,'0');
+ - insert into bo_detail values (974822400000,'000014b8ec0ce8ad7c20f56915fc3a9f',5897748687,'1',40.41522237969254,'0');
+ - insert into bo_detail values (974908800000,'000014b8ec0ce8ad7c20f56915fc3a9f',5897835087,'0',42.19024887340676,'0');
+ - insert into bo_detail values (974995200000,'000014b8ec0ce8ad7c20f56915fc3a9f',5897921487,'1',40.6903280891172,'0');
+ - insert into bo_detail values (975081600000,'000014b8ec0ce8ad7c20f56915fc3a9f',5898007887,'1',38.66247922728184,'0');
+ - insert into bo_detail values (975168000000,'000014b8ec0ce8ad7c20f56915fc3a9f',5898094287,'0',44.79231742162934,'0');
+ - insert into bo_detail values (975772800000,'000014b8ec0ce8ad7c20f56915fc3a9f',5898699087,'1',42.9582215646784,'0');
+ - insert into bo_detail values (976377600000,'000014b8ec0ce8ad7c20f56915fc3a9f',5899303887,'1',37.21554379557015,'0');
+ - insert into bo_detail values (976464000000,'000014b8ec0ce8ad7c20f56915fc3a9f',5899390287,'1',37.21554379557015,'0');
+ - insert into bo_detail values (976550400000,'000014b8ec0ce8ad7c20f56915fc3a9f',5899476687,'1',39.31737147877513,'0');
+ - insert into bo_detail values (976809600000,'000014b8ec0ce8ad7c20f56915fc3a9f',5899735887,'1',44.894525278701856,'0');
+ - insert into bo_detail values (976896000000,'000014b8ec0ce8ad7c20f56915fc3a9f',5899822287,'1',42.14255687544362,'0');
+ - insert into bo_detail values (977068800000,'000014b8ec0ce8ad7c20f56915fc3a9f',5899995087,'1',42.957057627356185,'0');
+ - insert into bo_detail values (977155200000,'000014b8ec0ce8ad7c20f56915fc3a9f',5900081487,'1',43.74606267997155,'0');
+ - insert into bo_detail values (977328000000,'000014b8ec0ce8ad7c20f56915fc3a9f',5900254287,'1',43.78016902662665,'0');
+ - insert into bo_detail values (977500800000,'000014b8ec0ce8ad7c20f56915fc3a9f',5900427087,'1',42.94652256003971,'0');
+ - insert into bo_detail values (977587200000,'000014b8ec0ce8ad7c20f56915fc3a9f',5900513487,'1',42.9582215646784,'0');
+ - insert into bo_detail values (977673600000,'000014b8ec0ce8ad7c20f56915fc3a9f',5900599887,'1',42.93470158275238,'0');
+ - insert into bo_detail values (977846400000,'000014b8ec0ce8ad7c20f56915fc3a9f',5900772687,'1',43.23059333388798,'0');
+ - insert into bo_detail values (977932800000,'000014b8ec0ce8ad7c20f56915fc3a9f',5900859087,'1',42.83541642146134,'0');
+ -
+ name: bo_user
+ create: |
+ CREATE TABLE IF NOT EXISTS bo_user(
+ ingestionTime timestamp,
+ new_user_id string,
+ sex string,
+ prof string,
+ edu string,
+ marriage string,
+ hukou_typ string,
+ index(key=(new_user_id), ttl=0m, ttl_type=absolute)
+ );
+ inserts:
+ - insert into bo_user values (1603439606052,'000014b8ec0ce8ad7c20f56915fc3a9f','1','2','3','1','2');
+ sql: |
+ select
+ reqId as reqId_42,
+ ingestionTime,
+ max(`trx_amt`) over bo_detail_new_user_id_ingestionTime_0s_5529601s_100 as bo_detail_trx_amt_multi_max_41,
+ avg(`trx_amt`) over bo_detail_new_user_id_ingestionTime_0s_5529601s_100 as bo_detail_trx_amt_multi_avg_42,
+ count(`trx_amt`) over bo_detail_new_user_id_ingestionTime_0s_5529601s_100 as bo_detail_trx_amt_multi_count_43,
+ sum(`trx_amt`) over bo_detail_new_user_id_ingestionTime_0s_5529601s_100 as bo_detail_trx_amt_multi_sum_44,
+ from
+ (select `eventTime` as `ingestionTime`, `new_user_id` as `new_user_id`, bigint(0) as `trx_ts`, '' as `trx_typ`, double(0) as `trx_amt`, '' as `is_slry`, reqId from `flattenRequest`)
+ window
+ bo_detail_new_user_id_ingestionTime_0s_5529601s_100 as (
+ UNION (select `ingestionTime`, `new_user_id`, `trx_ts`, `trx_typ`, `trx_amt`, `is_slry`, '' as reqId from `bo_detail`)
+ partition by `new_user_id` order by `ingestionTime` rows_range between 5529600999 preceding and 0s preceding MAXSIZE 100 INSTANCE_NOT_IN_WINDOW);
+ expect:
+ success: true
\ No newline at end of file
diff --git a/cases/debug/diff-debug-myhug.yaml b/cases/debug/diff-debug-myhug.yaml
new file mode 100644
index 00000000000..00b9ba5599d
--- /dev/null
+++ b/cases/debug/diff-debug-myhug.yaml
@@ -0,0 +1,130 @@
+db: test_zw1
+debugs: []
+cases:
+ -
+ id: 0
+ desc: diff-myhug
+ inputs:
+ -
+ name: flattenRequest
+ columns: ["reqId string","eventTime timestamp","index1 string","uUserId string","zUserId string","fRequestId string","fDisplayRank double","fSessionId string","nRoomUserNum double","nRoomInLm double","nRoomInGame double","nRequestTime timestamp","zSex string","zPhoneType string","zLongitude double","zLatitude double","zPosition string","zHome string","zChannel string","zAge double","zHasCreatedGroup string","zRegTime timestamp","zFaceScore double","zFansNum double","zFollowNum double","zGainNum double","zSGiftNum double","zSWihsperNum double","zSChatMsgNum double","zLiveAvgLength double","zLiveFrequency double","zLiveDawn double","zLiveMorning double","zLiveAfternoon double","zLiveEvening double","zMaxRGiftNumOneUser double","zRGiftUserNum double","zLiveMsgNum double","zLiveDisharmony double","zLiveShareNum double","zSmallGiftNum double","zBigGiftNum double","uSex string","uPhoneType string","uLongitude double","uLatitude double","uPosition string","uHome string","uChannel string","uAge double","uHasJoinedGroup string","uRegTime timestamp","uFirstChargeNum double","uLatestChargeTime timestamp","uRemainDiamondNum double","uFansNum double","uFollowNum double","uGainNum double","uSGiftNum double","uSWihsperNum double","uSChatMsgNum double","uLiveSMsgNum double","uHasBeenBanned double","uSMsgFiltered double","uWatchDawn double","uWatchMorning double","uWatchAfternoon double","uWatchEvening double","uWatchAvgLength double","uEnterRoomFrequency double","uTopThreeNum double","uWatchSameCity double","uPlayGame string","uLive double","uLmNum double","uSBigGiftNum double","uSSmallGiftNum double","uRGiftUserNum double","uWatchTopList int","split_id int"]
+ create: |
+ CREATE TABLE IF NOT EXISTS flattenRequest(
+ reqId string,
+ eventTime timestamp,
+ index1 string,
+ uUserId string,
+ zUserId string,
+ fRequestId string,
+ fDisplayRank double,
+ fSessionId string,
+ nRoomUserNum double,
+ nRoomInLm double,
+ nRoomInGame double,
+ nRequestTime timestamp,
+ zSex string,
+ zPhoneType string,
+ zLongitude double,
+ zLatitude double,
+ zPosition string,
+ zHome string,
+ zChannel string,
+ zAge double,
+ zHasCreatedGroup string,
+ zRegTime timestamp,
+ zFaceScore double,
+ zFansNum double,
+ zFollowNum double,
+ zGainNum double,
+ zSGiftNum double,
+ zSWihsperNum double,
+ zSChatMsgNum double,
+ zLiveAvgLength double,
+ zLiveFrequency double,
+ zLiveDawn double,
+ zLiveMorning double,
+ zLiveAfternoon double,
+ zLiveEvening double,
+ zMaxRGiftNumOneUser double,
+ zRGiftUserNum double,
+ zLiveMsgNum double,
+ zLiveDisharmony double,
+ zLiveShareNum double,
+ zSmallGiftNum double,
+ zBigGiftNum double,
+ uSex string,
+ uPhoneType string,
+ uLongitude double,
+ uLatitude double,
+ uPosition string,
+ uHome string,
+ uChannel string,
+ uAge double,
+ uHasJoinedGroup string,
+ uRegTime timestamp,
+ uFirstChargeNum double,
+ uLatestChargeTime timestamp,
+ uRemainDiamondNum double,
+ uFansNum double,
+ uFollowNum double,
+ uGainNum double,
+ uSGiftNum double,
+ uSWihsperNum double,
+ uSChatMsgNum double,
+ uLiveSMsgNum double,
+ uHasBeenBanned double,
+ uSMsgFiltered double,
+ uWatchDawn double,
+ uWatchMorning double,
+ uWatchAfternoon double,
+ uWatchEvening double,
+ uWatchAvgLength double,
+ uEnterRoomFrequency double,
+ uTopThreeNum double,
+ uWatchSameCity double,
+ uPlayGame string,
+ uLive double,
+ uLmNum double,
+ uSBigGiftNum double,
+ uSSmallGiftNum double,
+ uRGiftUserNum double,
+ uWatchTopList int,
+ split_id int,
+ index(key=(uHasJoinedGroup), ttl=0m, ttl_type=absolute, ts=`eventTime`),
+ index(key=(uPlayGame), ttl=0m, ttl_type=absolute, ts=`eventTime`),
+ index(key=(uSex), ttl=(0m, 0), ttl_type=absandlat, ts=`eventTime`),
+ index(key=(uUserId), ttl=(0m, 0), ttl_type=absandlat, ts=`eventTime`),
+ index(key=(zChannel), ttl=0m, ttl_type=absolute, ts=`eventTime`),
+ index(key=(zUserId), ttl=0m, ttl_type=absolute, ts=`eventTime`)
+ );
+ rows:
+ - ['1000013',1494076990000,'1000013','42856512','33788164','31318526',116.0,'239113725',6.0,0.0,0.0,1494076990000,'2','iPhone8,2',120.6397,31.257472999999997,'中国江苏省苏州市','','app_store',25.0,'0',1458401107000,1.0,60.0,10.0,0.0,0.0,1.0,5.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,'1','',0.0,0.0,'','','',0.0,'0',null,0.0,null,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,'0',0.0,0.0,0.0,0.0,0.0,null,1]
+ -
+ name: bo_hislabel
+ columns: ["ingestionTime timestamp","zUserId string","uUserId string","nRequestTime timestamp","fWatchedTimeLen double"]
+ create: |
+ CREATE TABLE IF NOT EXISTS bo_hislabel(
+ ingestionTime timestamp,
+ zUserId string,
+ uUserId string,
+ nRequestTime timestamp,
+ fWatchedTimeLen double,
+ index(key=(zUserId), ttl=0m, ttl_type=absolute, ts=`ingestionTime`)
+ );
+ rows:
+ - [1494076376000,'33788164','42856512',1494076376000,2.0]
+ - [1494076990000,'33788164','42856512',1494076990000,1.0]
+ sql: |
+ select
+ reqId as reqId_75,
+ max(`fWatchedTimeLen`) over bo_hislabel_zUserId_uUserId_ingestionTime_1s_172801s_100 as bo_hislabel_fWatchedTimeLen_multi_max_74,
+ avg(`fWatchedTimeLen`) over bo_hislabel_zUserId_uUserId_ingestionTime_1s_172801s_100 as bo_hislabel_fWatchedTimeLen_multi_avg_75
+ from
+ (
+ select `eventTime` as `ingestionTime`, `zUserId` as `zUserId`, `uUserId` as `uUserId`, timestamp('2019-07-18 09:20:20') as `nRequestTime`, double(0) as `fWatchedTimeLen`, reqId from `flattenRequest`
+ )
+ window bo_hislabel_zUserId_uUserId_ingestionTime_1s_172801s_100 as (
+ UNION (select `ingestionTime`, `zUserId`, `uUserId`, `nRequestTime`, `fWatchedTimeLen`, '' as reqId from `bo_hislabel`)
+ partition by `zUserId`,`uUserId` order by `ingestionTime` rows_range between 172800999 preceding and 1s preceding MAXSIZE 100 INSTANCE_NOT_IN_WINDOW);
+ expect:
+ success: true
\ No newline at end of file
diff --git a/cases/debug/diff-debug-ttgwm.yaml b/cases/debug/diff-debug-ttgwm.yaml
new file mode 100644
index 00000000000..71f3c95244a
--- /dev/null
+++ b/cases/debug/diff-debug-ttgwm.yaml
@@ -0,0 +1,167 @@
+db: test_zw
+debugs: []
+cases:
+ -
+ id: 0
+ desc: diff-ttgwm
+ inputs:
+ -
+ name: flattenRequest
+ create: |
+ CREATE TABLE IF NOT EXISTS flattenRequest(
+ reqId string,
+ eventTime timestamp,
+ f_index string,
+ f_action_create_order string,
+ f_action_create_order_actionTime timestamp,
+ f_action_create_order_itemType string,
+ f_action_show string,
+ f_action_show_actionTime timestamp,
+ f_action_show_itemType string,
+ f_action_collect string,
+ f_action_collect_actionTime timestamp,
+ f_action_collect_itemType string,
+ f_requestCount double,
+ f_requestId string,
+ f_userId string,
+ f_userName double,
+ f_userNickName string,
+ f_userAge double,
+ f_userGender string,
+ f_userFromGroup double,
+ f_userScore double,
+ f_userConsultCount double,
+ f_userHeadType double,
+ f_userAddress string,
+ f_userZipcode string,
+ f_userCommunicatingBuyers double,
+ f_userMessages double,
+ f_userLastMessageTime double,
+ f_userChannelTop double,
+ f_userBarANDTop double,
+ f_userLastLoginTime double,
+ f_userLastOrderTime timestamp,
+ f_userPhoneType double,
+ f_userMCC double,
+ f_userMNC double,
+ f_userAPPVersion double,
+ f_userDeviceID double,
+ f_userDeviceOS double,
+ f_userNetworkType double,
+ f_userRegisterMethod double,
+ f_userRegisterTime timestamp,
+ f_userPhoneNumber double,
+ f_userCategoryAddToCartCount double,
+ f_userHomeAddToCartCount double,
+ f_userLastBuyItTime double,
+ f_userOrderCount double,
+ f_userOrderDeliveryFreeCount double,
+ f_userOrderMoneyCount double,
+ f_userOrderMoneyAverage double,
+ f_userOrderMoneyHighest double,
+ f_userOrderScoreAverage double,
+ f_userOrderToPayCount double,
+ f_userOrderToDeliverCount double,
+ f_userOrderInDeliveryCount double,
+ f_userOrderToScoreCount double,
+ f_userFavoriteItems string,
+ f_userClickedItems string,
+ f_userSharedItemID double,
+ f_userPublishedItemID double,
+ f_userSearchedqueryCount3Period string,
+ f_userSearchedqueryCount7Period string,
+ f_userSearchedqueryCount30Period string,
+ f_userClickedqueryCount3Period string,
+ f_userClickedqueryCount7Period string,
+ f_userClickedqueryCount30Period string,
+ f_syncTime double,
+ f_itemId string,
+ f_itemtipoff string,
+ f_itemName double,
+ f_itemTitle string,
+ f_temDescription string,
+ f_itemCategoryLevel1 string,
+ f_itemCategoryLevel2 double,
+ f_itemCategoryLevel3 double,
+ f_itemHome double,
+ f_itemPurchasingPlace double,
+ f_itemDeadline double,
+ f_itemExpires double,
+ f_itemWeight double,
+ f_itemSpec double,
+ f_itemModelNumber double,
+ f_itemAgeRange double,
+ f_itemFunction double,
+ f_itemTargetPopulation double,
+ f_itemPackage double,
+ f_itemStorage double,
+ f_itemDiscount double,
+ f_itemPrice double,
+ f_itemSold double,
+ f_itemComments double,
+ f_itemFavorites double,
+ f_itemDeliveryFree double,
+ f_itemDutyFree double,
+ f_itemChannel string,
+ f_itemBrAND double,
+ f_itemPublishtime timestamp,
+ f_itemPublisherId double,
+ f_itemPublisherRegtime double,
+ f_itermPublisherOrders double,
+ f_itermSizeCount double,
+ f_itemColorCount double,
+ f_itemDetailsPhotos double,
+ f_itemDescribePhotos double,
+ f_itemExpired string,
+ f_itemHistoryPrice double,
+ f_itemCartRatio double,
+ f_itemshownUserID double,
+ f_itemClickedUserID double,
+ f_itemPurchasedUserID double,
+ f_itemTargetPopulationFemale double,
+ f_itemTargetPopulationMale double,
+ f_userOrderDids string,
+ index(key=(f_itemTitle), ttl=7201m, ttl_type=absolute, ts=`eventTime`),
+ index(key=(f_requestId), ttl=7201m, ttl_type=absolute, ts=`eventTime`),
+ index(key=(f_temDescription), ttl=7201m, ttl_type=absolute, ts=`eventTime`)
+ );
+ inserts:
+ - insert into flattenRequest values ('train_195042',1511002870000,'train_195042','0',null,'','1',1511002870000,'disclosure','0',null,'',null,'025606ecb2f078e7931ec90b9a27a826','136646',null,'艺垣',null,'女',null,null,2.0,null,'北京市,市辖区,大兴区,兴华中里社区*号楼*单元*、北京市,市辖区,大兴区,兴华中立、北京市,市辖区,东城区,*、北京市,市辖区,东城区,*、北京市,市辖区,东城区,兴华中里、北京市,市辖区,东城区,斤斤计较、北京市,市辖区,宣武区,不好好、北京市,市辖区,东城区,www、北京市,县,延庆县,*、北京市,市辖区,东城区,星湖、北京市,市辖区,东城区,xihuhu、北京市,市辖区,东城区,ededee、北京市,市辖区,大兴区,兴化中里、北京市,市辖区,大兴区,兴华中里、北京市,县,延庆县,兴华中立、山西省,太原市,市辖区,还定居、北京市,大同市,市辖区,阶段将诶','102600、102600、111111、111111、102600、100000、100000、111111、21212121、111111、111111、3rrrr、102600、102600、123456、345212、234789',null,null,null,null,null,null,1508605149000,null,null,null,null,null,null,null,null,1507546756000,null,null,null,null,43.0,null,5074399.0,118009.27906976744,879900.0,null,null,null,null,null,'31100,31166','30958,30973,31009,31043,31024,31005,31038,31077,31076,31075,31080,31073,31064,30830,31122,30912,31129,31119,31100,31163,31166,30927,31157,31162,30914,31138,31203,31209,30907,31198,31048,31252,31276,31302,31301,31303,31309,31400,31536,31405,31451,31439,31547,31546,31550,31737,31741,31745,31749,32001,32304,32303,32825,32872,32735,32873,32856,32888',null,null,'零食,零食','零食,零食','电脑背包,零食,零食','32856,32872,32888,32873,32735,32825','32856,32872,32888,32873,32001,32735,32304,32303,32825','31547,31048,31203,31073,31064,30912,31400,31100,31077,31076,30927,31166,31166,31119,31009,31745,32856,31122,31198,31209,31138,30907,31198,30973,31303,31076,31076,31080,31162,32872,32888,32873,31749,30914,32001,31309,31302,31252,32735,31100,30914,31075,30958,31024,31024,30958,32304,32303,31405,31303,31301,31737,31741,31166,31309,30830,30912,31129,31536,31276,31276,31301,32825,31439,31451,31546,31550,31043,30912,31157,31745,31276,31276,31439,31163,31024,31043,31005,31038,31276',null,'38282','1',null,'【限地区】东海岸 急冻波士顿龙虾(1只装)450g *2件+苹果2颗','波士顿龙虾应该是龙虾中最便宜的品种之一了(请不要用小龙虾比较),红棕及纯黑色较为常见。波龙产自北大西洋深海,虽然叫波士顿龙虾,但主要产地是加拿大和美国缅因州,波士顿并不产,因是美国最大集散地而得名~学名是美洲鳌龙虾,世界上更普及的叫法是缅因龙虾或加拿大龙虾。 波士顿龙虾属于海螯虾科螯龙虾属,生活于寒冷海域,肉较嫩滑细致,产品具有高蛋白,低脂肪,维生素A、C、D及钙、钠、钾、镁、磷、铁、硫、铜等微量元素丰富,味道鲜美。味道鲜美,营养丰富。此款龙虾中粮我买网自营,品质有保障,产品规格1只,450g。','食品',null,null,null,null,null,null,null,null,null,null,null,null,null,null,null,13600.0,null,null,null,null,null,'中粮我买网',null,1510972014000,null,null,null,null,null,null,null,'1',null,null,null,null,null,1.0,1.0,'');
+ - insert into flattenRequest values ('train_192870',1511002870000,'train_192870','0',null,'','1',1511002870000,'disclosure','0',null,'',null,'025606ecb2f078e7931ec90b9a27a826','136646',null,'艺垣',null,'女',null,null,2.0,null,'北京市,市辖区,大兴区,兴华中里社区*号楼*单元*、北京市,市辖区,大兴区,兴华中立、北京市,市辖区,东城区,*、北京市,市辖区,东城区,*、北京市,市辖区,东城区,兴华中里、北京市,市辖区,东城区,斤斤计较、北京市,市辖区,宣武区,不好好、北京市,市辖区,东城区,www、北京市,县,延庆县,*、北京市,市辖区,东城区,星湖、北京市,市辖区,东城区,xihuhu、北京市,市辖区,东城区,ededee、北京市,市辖区,大兴区,兴化中里、北京市,市辖区,大兴区,兴华中里、北京市,县,延庆县,兴华中立、山西省,太原市,市辖区,还定居、北京市,大同市,市辖区,阶段将诶','102600、102600、111111、111111、102600、100000、100000、111111、21212121、111111、111111、3rrrr、102600、102600、123456、345212、234789',null,null,null,null,null,null,1508605149000,null,null,null,null,null,null,null,null,1507546756000,null,null,null,null,43.0,null,5074399.0,118009.27906976744,879900.0,null,null,null,null,null,'31100,31166','30958,30973,31009,31043,31024,31005,31038,31077,31076,31075,31080,31073,31064,30830,31122,30912,31129,31119,31100,31163,31166,30927,31157,31162,30914,31138,31203,31209,30907,31198,31048,31252,31276,31302,31301,31303,31309,31400,31536,31405,31451,31439,31547,31546,31550,31737,31741,31745,31749,32001,32304,32303,32825,32872,32735,32873,32856,32888',null,null,'零食,零食','零食,零食','电脑背包,零食,零食','32856,32872,32888,32873,32735,32825','32856,32872,32888,32873,32001,32735,32304,32303,32825','31547,31048,31203,31073,31064,30912,31400,31100,31077,31076,30927,31166,31166,31119,31009,31745,32856,31122,31198,31209,31138,30907,31198,30973,31303,31076,31076,31080,31162,32872,32888,32873,31749,30914,32001,31309,31302,31252,32735,31100,30914,31075,30958,31024,31024,30958,32304,32303,31405,31303,31301,31737,31741,31166,31309,30830,30912,31129,31536,31276,31276,31301,32825,31439,31451,31546,31550,31043,30912,31157,31745,31276,31276,31439,31163,31024,31043,31005,31038,31276',null,'38271','1',null,'【新低价,40码起】saucony 圣康尼 RIDE 10 男款缓震跑鞋 2色','saucony(圣康尼)是来自美国的专业跑鞋品牌, 其名称来源于美国宾夕法尼亚州附近一条美丽的河流——Saucony。现其产品线分为专业运动系列和复古休闲系列两大类,为专业跑步运动员及跑步爱好者提供专业、舒适、安全的跑步产品。 这款saucony 圣康尼 RIDE 10 男款缓震跑鞋是索康尼旗下次顶级避震跑鞋,其获得了17年夏季《跑者世界》的最佳升级奖。从外观来看相比9代有了许多变化,鞋面采用工程网眼面料,增加了鞋面的透气性,外层为saucony经典Flex Film支撑材料覆盖,增加鞋面的延展和贴合性。后跟位置特别加强了稳定设计,外层增加了编织技术,增强整个鞋跟的稳定性。 中底采用全掌PWRFoam中底材料,比之前的EVA材质回弹效果更好,上层使用EVERUN鞋垫,辅助增加中底的缓震和回弹性能。大底依旧采用XT-900耐磨碳素橡胶,在前掌区域增加了IBR+发泡橡胶,材质较轻,并且能提高缓震保护。','服饰',null,null,null,null,null,null,null,null,null,null,null,null,null,null,null,53400.0,null,null,null,null,null,'亚马逊中国',null,1510926904000,null,null,null,null,null,null,null,'1',null,null,null,null,null,0.0,1.0,'');
+ - insert into flattenRequest values ('train_197066',1511003784000,'train_197066','0',null,'','1',1511003784000,'disclosure','0',null,'',null,'fe5eb556e3768e49b7919ebc4f9375d0','136646',null,'艺垣',null,'女',null,null,2.0,null,'北京市,市辖区,大兴区,兴华中里社区*号楼*单元*、北京市,市辖区,大兴区,兴华中立、北京市,市辖区,东城区,*、北京市,市辖区,东城区,*、北京市,市辖区,东城区,兴华中里、北京市,市辖区,东城区,斤斤计较、北京市,市辖区,宣武区,不好好、北京市,市辖区,东城区,www、北京市,县,延庆县,*、北京市,市辖区,东城区,星湖、北京市,市辖区,东城区,xihuhu、北京市,市辖区,东城区,ededee、北京市,市辖区,大兴区,兴化中里、北京市,市辖区,大兴区,兴华中里、北京市,县,延庆县,兴华中立、山西省,太原市,市辖区,还定居、北京市,大同市,市辖区,阶段将诶','102600、102600、111111、111111、102600、100000、100000、111111、21212121、111111、111111、3rrrr、102600、102600、123456、345212、234789',null,null,null,null,null,null,1508605149000,null,null,null,null,null,null,null,null,1507546756000,null,null,null,null,43.0,null,5074399.0,118009.27906976744,879900.0,null,null,null,null,null,'31100,31166','30958,30973,31009,31043,31024,31005,31038,31077,31076,31075,31080,31073,31064,30830,31122,30912,31129,31119,31100,31163,31166,30927,31157,31162,30914,31138,31203,31209,30907,31198,31048,31252,31276,31302,31301,31303,31309,31400,31536,31405,31451,31439,31547,31546,31550,31737,31741,31745,31749,32001,32304,32303,32825,32872,32735,32873,32856,32888',null,null,'零食,零食','零食,零食','电脑背包,零食,零食','32856,32872,32888,32873,32735,32825','32856,32872,32888,32873,32001,32735,32304,32303,32825','31547,31048,31203,31073,31064,30912,31400,31100,31077,31076,30927,31166,31166,31119,31009,31745,32856,31122,31198,31209,31138,30907,31198,30973,31303,31076,31076,31080,31162,32872,32888,32873,31749,30914,32001,31309,31302,31252,32735,31100,30914,31075,30958,31024,31024,30958,32304,32303,31405,31303,31301,31737,31741,31166,31309,30830,30912,31129,31536,31276,31276,31301,32825,31439,31451,31546,31550,31043,30912,31157,31745,31276,31276,31439,31163,31024,31043,31005,31038,31276',null,'38293','1',null,'【限地区,新低价,PLUS会员】Panasonic 松下 ES-RF41-N405 电动剃须刀','这款Panasonic松下的ES-RF41-N405电动剃须刀,采用立体浮动4刀头,往复式设计,可较好贴合面部轮廓,剃须更舒适高效。刀片为Nano抛光刀片,提升剃须。5级电量显示,干湿两用,带弹出式修剪器。支持1小时快充,可以全身水洗。配有充电底座和便携收纳小包。','日百',null,null,null,null,null,null,null,null,null,null,null,null,null,null,null,30900.0,null,null,null,null,null,'京东',null,1510974778000,null,null,null,null,null,null,null,'1',null,null,null,null,null,1.0,1.0,'');
+ - insert into flattenRequest values ('train_195043',1511003784000,'train_195043','0',null,'','1',1511003784000,'disclosure','0',null,'',null,'fe5eb556e3768e49b7919ebc4f9375d0','136646',null,'艺垣',null,'女',null,null,2.0,null,'北京市,市辖区,大兴区,兴华中里社区*号楼*单元*、北京市,市辖区,大兴区,兴华中立、北京市,市辖区,东城区,*、北京市,市辖区,东城区,*、北京市,市辖区,东城区,兴华中里、北京市,市辖区,东城区,斤斤计较、北京市,市辖区,宣武区,不好好、北京市,市辖区,东城区,www、北京市,县,延庆县,*、北京市,市辖区,东城区,星湖、北京市,市辖区,东城区,xihuhu、北京市,市辖区,东城区,ededee、北京市,市辖区,大兴区,兴化中里、北京市,市辖区,大兴区,兴华中里、北京市,县,延庆县,兴华中立、山西省,太原市,市辖区,还定居、北京市,大同市,市辖区,阶段将诶','102600、102600、111111、111111、102600、100000、100000、111111、21212121、111111、111111、3rrrr、102600、102600、123456、345212、234789',null,null,null,null,null,null,1508605149000,null,null,null,null,null,null,null,null,1507546756000,null,null,null,null,43.0,null,5074399.0,118009.27906976744,879900.0,null,null,null,null,null,'31100,31166','30958,30973,31009,31043,31024,31005,31038,31077,31076,31075,31080,31073,31064,30830,31122,30912,31129,31119,31100,31163,31166,30927,31157,31162,30914,31138,31203,31209,30907,31198,31048,31252,31276,31302,31301,31303,31309,31400,31536,31405,31451,31439,31547,31546,31550,31737,31741,31745,31749,32001,32304,32303,32825,32872,32735,32873,32856,32888',null,null,'零食,零食','零食,零食','电脑背包,零食,零食','32856,32872,32888,32873,32735,32825','32856,32872,32888,32873,32001,32735,32304,32303,32825','31547,31048,31203,31073,31064,30912,31400,31100,31077,31076,30927,31166,31166,31119,31009,31745,32856,31122,31198,31209,31138,30907,31198,30973,31303,31076,31076,31080,31162,32872,32888,32873,31749,30914,32001,31309,31302,31252,32735,31100,30914,31075,30958,31024,31024,30958,32304,32303,31405,31303,31301,31737,31741,31166,31309,30830,30912,31129,31536,31276,31276,31301,32825,31439,31451,31546,31550,31043,30912,31157,31745,31276,31276,31439,31163,31024,31043,31005,31038,31276',null,'38282','1',null,'【限地区】东海岸 急冻波士顿龙虾(1只装)450g *2件+苹果2颗','波士顿龙虾应该是龙虾中最便宜的品种之一了(请不要用小龙虾比较),红棕及纯黑色较为常见。波龙产自北大西洋深海,虽然叫波士顿龙虾,但主要产地是加拿大和美国缅因州,波士顿并不产,因是美国最大集散地而得名~学名是美洲鳌龙虾,世界上更普及的叫法是缅因龙虾或加拿大龙虾。 波士顿龙虾属于海螯虾科螯龙虾属,生活于寒冷海域,肉较嫩滑细致,产品具有高蛋白,低脂肪,维生素A、C、D及钙、钠、钾、镁、磷、铁、硫、铜等微量元素丰富,味道鲜美。味道鲜美,营养丰富。此款龙虾中粮我买网自营,品质有保障,产品规格1只,450g。','食品',null,null,null,null,null,null,null,null,null,null,null,null,null,null,null,13600.0,null,null,null,null,null,'中粮我买网',null,1510972014000,null,null,null,null,null,null,null,'1',null,null,null,null,null,1.0,1.0,'');
+ - insert into flattenRequest values ('train_68005',1510928344000,'train_68005','0',null,'','1',1510928344000,'disclosure','0',null,'',null,'caae1f9bd2d0b61af2478e32ce881960','136646',null,'艺垣',null,'女',null,null,2.0,null,'北京市,市辖区,大兴区,兴华中里社区*号楼*单元*、北京市,市辖区,大兴区,兴华中立、北京市,市辖区,东城区,*、北京市,市辖区,东城区,*、北京市,市辖区,东城区,兴华中里、北京市,市辖区,东城区,斤斤计较、北京市,市辖区,宣武区,不好好、北京市,市辖区,东城区,www、北京市,县,延庆县,*、北京市,市辖区,东城区,星湖、北京市,市辖区,东城区,xihuhu、北京市,市辖区,东城区,ededee、北京市,市辖区,大兴区,兴化中里、北京市,市辖区,大兴区,兴华中里、北京市,县,延庆县,兴华中立、山西省,太原市,市辖区,还定居、北京市,大同市,市辖区,阶段将诶','102600、102600、111111、111111、102600、100000、100000、111111、21212121、111111、111111、3rrrr、102600、102600、123456、345212、234789',null,null,null,null,null,null,1508605149000,null,null,null,null,null,null,null,null,1507546756000,null,null,null,null,43.0,null,5074399.0,118009.27906976744,879900.0,null,null,null,null,null,'31100,31166','30958,30973,31009,31043,31024,31005,31038,31077,31076,31075,31080,31073,31064,30830,31122,30912,31129,31119,31100,31163,31166,30927,31157,31162,30914,31138,31203,31209,30907,31198,31048,31252,31276,31302,31301,31303,31309,31400,31536,31405,31451,31439,31547,31546,31550,31737,31741,31745,31749,32001,32304,32303,32825,32872,32735,32873,32856,32888',null,null,'零食,零食','零食,零食','电脑背包,零食,零食','32856,32872,32888,32873,32735,32825','32856,32872,32888,32873,32001,32735,32304,32303,32825','31547,31048,31203,31073,31064,30912,31400,31100,31077,31076,30927,31166,31166,31119,31009,31745,32856,31122,31198,31209,31138,30907,31198,30973,31303,31076,31076,31080,31162,32872,32888,32873,31749,30914,32001,31309,31302,31252,32735,31100,30914,31075,30958,31024,31024,30958,32304,32303,31405,31303,31301,31737,31741,31166,31309,30830,30912,31129,31536,31276,31276,31301,32825,31439,31451,31546,31550,31043,30912,31157,31745,31276,31276,31439,31163,31024,31043,31005,31038,31276',null,'38151','1',null,'【历史低价】BRAUN 博朗 Satin Hair 7 HD785 负离子吹风机','BRAUN博朗HD785电吹风机,采用负离子技术,中和正离子并使毛糙秀发恢复妥帖。另外还有INOTEC炫彩护色离子科技,这种先进离子性能在造型过程中可以处理卷结与静电,使秀发更加闪亮顺滑。 最值得一提的是内置新型智能温度传感器,每分钟监控头发温度600次,及时智能调整秀发受热温度,从而避免过热,保护秀发。这款电吹风机拥有双头可以更换,2000W大功率,有速干效果,还拥有4档温度和2档风量。','日百',null,null,null,null,null,null,null,null,null,null,null,null,null,null,null,35900.0,null,null,null,null,null,'丰趣海淘',null,1510890999000,null,null,null,null,null,null,null,'1',null,null,null,null,null,1.0,0.0,'');
+ -
+ name: action
+ create: |
+ CREATE TABLE IF NOT EXISTS action(
+ reqId string,
+ eventTime timestamp,
+ ingestionTime timestamp,
+ actionValue int,
+ index(key=(reqId), ttl=0m, ttl_type=absolute)
+ );
+ inserts:
+ - insert into action values ('train_0',1511188285000,1511188285000,0);
+ sql: |
+ select
+ reqId as reqId_1,
+ `reqId` as flattenRequest_reqId_original_0,
+ `eventTime` as flattenRequest_eventTime_original_1,
+ distinct_count(`f_userAddress`) over flattenRequest_f_temDescription_eventTime_0s_172801s_100 as flattenRequest_f_userAddress_window_unique_count_111,
+ distinct_count(`f_userAddress`) over flattenRequest_f_temDescription_eventTime_0s_432001s_100 as flattenRequest_f_userAddress_window_unique_count_112,
+ distinct_count(`f_userAddress`) over flattenRequest_f_itemTitle_eventTime_0s_172801s_100 as flattenRequest_f_userAddress_window_unique_count_113,
+ distinct_count(`f_userAddress`) over flattenRequest_f_itemTitle_eventTime_0s_432001s_100 as flattenRequest_f_userAddress_window_unique_count_114,
+ fz_top1_ratio(`f_itemTitle`) over flattenRequest_f_requestId_eventTime_0s_432001s_100 as flattenRequest_f_itemTitle_window_top1_ratio_126,
+ case when !isnull(at(`f_userGender`, 0)) over flattenRequest_f_requestId_eventTime_0s_432001s_100 then count(`f_userGender`) over flattenRequest_f_requestId_eventTime_0s_432001s_100 else null end as flattenRequest_f_userGender_window_count_127,
+ case when !isnull(at(`f_itemExpired`, 0)) over flattenRequest_f_requestId_eventTime_0s_432001s_100 then count(`f_itemExpired`) over flattenRequest_f_requestId_eventTime_0s_432001s_100 else null end as flattenRequest_f_itemExpired_window_count_128
+ from
+ `flattenRequest`
+ window flattenRequest_f_temDescription_eventTime_0s_172801s_100 as (partition by `f_temDescription` order by `eventTime` rows_range between 172800999 preceding and 0s preceding MAXSIZE 100),
+ flattenRequest_f_temDescription_eventTime_0s_432001s_100 as (partition by `f_temDescription` order by `eventTime` rows_range between 432000999 preceding and 0s preceding MAXSIZE 100),
+ flattenRequest_f_itemTitle_eventTime_0s_172801s_100 as (partition by `f_itemTitle` order by `eventTime` rows_range between 172800999 preceding and 0s preceding MAXSIZE 100),
+ flattenRequest_f_itemTitle_eventTime_0s_432001s_100 as (partition by `f_itemTitle` order by `eventTime` rows_range between 432000999 preceding and 0s preceding MAXSIZE 100),
+ flattenRequest_f_requestId_eventTime_0s_432001s_100 as (partition by `f_requestId` order by `eventTime` rows_range between 432000999 preceding and 0s preceding MAXSIZE 100)
+ ;
+ expect:
+ success: true
\ No newline at end of file
diff --git a/cases/debug/diff-debug-ttgwm2.yaml b/cases/debug/diff-debug-ttgwm2.yaml
new file mode 100644
index 00000000000..dfdd8baf8f0
--- /dev/null
+++ b/cases/debug/diff-debug-ttgwm2.yaml
@@ -0,0 +1,72 @@
+db: test1
+debugs: []
+cases:
+ -
+ id: 0
+ desc: diff-ttgwm
+ inputs:
+ -
+ name: flattenRequest
+ columns: ["reqId string","eventTime timestamp","f_index string","f_requestId string","f_userGender string","f_userAddress string","f_itemTitle string","f_temDescription string","f_itemExpired string"]
+ indexs: ["index1:f_itemTitle:eventTime:0m:absolute","index2:f_requestId:eventTime:0m:absolute","index3:f_temDescription:eventTime:0m:absolute"]
+# create: |
+# CREATE TABLE IF NOT EXISTS flattenRequest(
+# reqId string,
+# eventTime timestamp,
+# f_index string,
+# f_requestId string,
+# f_userGender string,
+# f_userAddress string,
+# f_itemTitle string,
+# f_temDescription string,
+# f_itemExpired string,
+# index(key=(f_itemTitle), ttl=7201m, ttl_type=absolute, ts=`eventTime`),
+# index(key=(f_requestId), ttl=7201m, ttl_type=absolute, ts=`eventTime`),
+# index(key=(f_temDescription), ttl=7201m, ttl_type=absolute, ts=`eventTime`)
+# );
+# inserts:
+# - insert into flattenRequest values ('train_195042',1511002870000,'train_195042','025606ecb2f078e7931ec90b9a27a826','女','北京市,市辖区,大兴区,兴华中里社区*号楼*单元*、北京市,市辖区,大兴区,兴华中立、北京市,市辖区,东城区,*、北京市,市辖区,东城区,*、北京市,市辖区,东城区,兴华中里、北京市,市辖区,东城区,斤斤计较、北京市,市辖区,宣武区,不好好、北京市,市辖区,东城区,www、北京市,县,延庆县,*、北京市,市辖区,东城区,星湖、北京市,市辖区,东城区,xihuhu、北京市,市辖区,东城区,ededee、北京市,市辖区,大兴区,兴化中里、北京市,市辖区,大兴区,兴华中里、北京市,县,延庆县,兴华中立、山西省,太原市,市辖区,还定居、北京市,大同市,市辖区,阶段将诶','【限地区】东海岸 急冻波士顿龙虾(1只装)450g *2件+苹果2颗','波士顿龙虾应该是龙虾中最便宜的品种之一了(请不要用小龙虾比较),红棕及纯黑色较为常见。波龙产自北大西洋深海,虽然叫波士顿龙虾,但主要产地是加拿大和美国缅因州,波士顿并不产,因是美国最大集散地而得名~学名是美洲鳌龙虾,世界上更普及的叫法是缅因龙虾或加拿大龙虾。 波士顿龙虾属于海螯虾科螯龙虾属,生活于寒冷海域,肉较嫩滑细致,产品具有高蛋白,低脂肪,维生素A、C、D及钙、钠、钾、镁、磷、铁、硫、铜等微量元素丰富,味道鲜美。味道鲜美,营养丰富。此款龙虾中粮我买网自营,品质有保障,产品规格1只,450g。','1');
+# - insert into flattenRequest values ('train_192870',1511002870000,'train_192870','025606ecb2f078e7931ec90b9a27a826','女','北京市,市辖区,大兴区,兴华中里社区*号楼*单元*、北京市,市辖区,大兴区,兴华中立、北京市,市辖区,东城区,*、北京市,市辖区,东城区,*、北京市,市辖区,东城区,兴华中里、北京市,市辖区,东城区,斤斤计较、北京市,市辖区,宣武区,不好好、北京市,市辖区,东城区,www、北京市,县,延庆县,*、北京市,市辖区,东城区,星湖、北京市,市辖区,东城区,xihuhu、北京市,市辖区,东城区,ededee、北京市,市辖区,大兴区,兴化中里、北京市,市辖区,大兴区,兴华中里、北京市,县,延庆县,兴华中立、山西省,太原市,市辖区,还定居、北京市,大同市,市辖区,阶段将诶','【新低价,40码起】saucony 圣康尼 RIDE 10 男款缓震跑鞋 2色','saucony(圣康尼)是来自美国的专业跑鞋品牌, 其名称来源于美国宾夕法尼亚州附近一条美丽的河流——Saucony。现其产品线分为专业运动系列和复古休闲系列两大类,为专业跑步运动员及跑步爱好者提供专业、舒适、安全的跑步产品。 这款saucony 圣康尼 RIDE 10 男款缓震跑鞋是索康尼旗下次顶级避震跑鞋,其获得了17年夏季《跑者世界》的最佳升级奖。从外观来看相比9代有了许多变化,鞋面采用工程网眼面料,增加了鞋面的透气性,外层为saucony经典Flex Film支撑材料覆盖,增加鞋面的延展和贴合性。后跟位置特别加强了稳定设计,外层增加了编织技术,增强整个鞋跟的稳定性。 中底采用全掌PWRFoam中底材料,比之前的EVA材质回弹效果更好,上层使用EVERUN鞋垫,辅助增加中底的缓震和回弹性能。大底依旧采用XT-900耐磨碳素橡胶,在前掌区域增加了IBR+发泡橡胶,材质较轻,并且能提高缓震保护。','1');
+# - insert into flattenRequest values ('train_197066',1511003784000,'train_197066','fe5eb556e3768e49b7919ebc4f9375d0','女','北京市,市辖区,大兴区,兴华中里社区*号楼*单元*、北京市,市辖区,大兴区,兴华中立、北京市,市辖区,东城区,*、北京市,市辖区,东城区,*、北京市,市辖区,东城区,兴华中里、北京市,市辖区,东城区,斤斤计较、北京市,市辖区,宣武区,不好好、北京市,市辖区,东城区,www、北京市,县,延庆县,*、北京市,市辖区,东城区,星湖、北京市,市辖区,东城区,xihuhu、北京市,市辖区,东城区,ededee、北京市,市辖区,大兴区,兴化中里、北京市,市辖区,大兴区,兴华中里、北京市,县,延庆县,兴华中立、山西省,太原市,市辖区,还定居、北京市,大同市,市辖区,阶段将诶','【限地区,新低价,PLUS会员】Panasonic 松下 ES-RF41-N405 电动剃须刀','这款Panasonic松下的ES-RF41-N405电动剃须刀,采用立体浮动4刀头,往复式设计,可较好贴合面部轮廓,剃须更舒适高效。刀片为Nano抛光刀片,提升剃须。5级电量显示,干湿两用,带弹出式修剪器。支持1小时快充,可以全身水洗。配有充电底座和便携收纳小包。','1');
+# - insert into flattenRequest values ('train_195043',1511003784000,'train_195043','fe5eb556e3768e49b7919ebc4f9375d0','女','北京市,市辖区,大兴区,兴华中里社区*号楼*单元*、北京市,市辖区,大兴区,兴华中立、北京市,市辖区,东城区,*、北京市,市辖区,东城区,*、北京市,市辖区,东城区,兴华中里、北京市,市辖区,东城区,斤斤计较、北京市,市辖区,宣武区,不好好、北京市,市辖区,东城区,www、北京市,县,延庆县,*、北京市,市辖区,东城区,星湖、北京市,市辖区,东城区,xihuhu、北京市,市辖区,东城区,ededee、北京市,市辖区,大兴区,兴化中里、北京市,市辖区,大兴区,兴华中里、北京市,县,延庆县,兴华中立、山西省,太原市,市辖区,还定居、北京市,大同市,市辖区,阶段将诶','【限地区】东海岸 急冻波士顿龙虾(1只装)450g *2件+苹果2颗','波士顿龙虾应该是龙虾中最便宜的品种之一了(请不要用小龙虾比较),红棕及纯黑色较为常见。波龙产自北大西洋深海,虽然叫波士顿龙虾,但主要产地是加拿大和美国缅因州,波士顿并不产,因是美国最大集散地而得名~学名是美洲鳌龙虾,世界上更普及的叫法是缅因龙虾或加拿大龙虾。 波士顿龙虾属于海螯虾科螯龙虾属,生活于寒冷海域,肉较嫩滑细致,产品具有高蛋白,低脂肪,维生素A、C、D及钙、钠、钾、镁、磷、铁、硫、铜等微量元素丰富,味道鲜美。味道鲜美,营养丰富。此款龙虾中粮我买网自营,品质有保障,产品规格1只,450g。','1');
+# - insert into flattenRequest values ('train_68005',1510928344000,'train_68005','caae1f9bd2d0b61af2478e32ce881960','女','北京市,市辖区,大兴区,兴华中里社区*号楼*单元*、北京市,市辖区,大兴区,兴华中立、北京市,市辖区,东城区,*、北京市,市辖区,东城区,*、北京市,市辖区,东城区,兴华中里、北京市,市辖区,东城区,斤斤计较、北京市,市辖区,宣武区,不好好、北京市,市辖区,东城区,www、北京市,县,延庆县,*、北京市,市辖区,东城区,星湖、北京市,市辖区,东城区,xihuhu、北京市,市辖区,东城区,ededee、北京市,市辖区,大兴区,兴化中里、北京市,市辖区,大兴区,兴华中里、北京市,县,延庆县,兴华中立、山西省,太原市,市辖区,还定居、北京市,大同市,市辖区,阶段将诶','【历史低价】BRAUN 博朗 Satin Hair 7 HD785 负离子吹风机','BRAUN博朗HD785电吹风机,采用负离子技术,中和正离子并使毛糙秀发恢复妥帖。另外还有INOTEC炫彩护色离子科技,这种先进离子性能在造型过程中可以处理卷结与静电,使秀发更加闪亮顺滑。 最值得一提的是内置新型智能温度传感器,每分钟监控头发温度600次,及时智能调整秀发受热温度,从而避免过热,保护秀发。这款电吹风机拥有双头可以更换,2000W大功率,有速干效果,还拥有4档温度和2档风量。','1');
+ rows:
+ - ["train_195042",1511002870000,"train_195042",'025606ecb2f078e7931ec90b9a27a826','女','北京市,市辖区,大兴区,兴华中里社区*号楼*单元*、北京市,市辖区,大兴区,兴华中立、北京市,市辖区,东城区,*、北京市,市辖区,东城区,*、北京市,市辖区,东城区,兴华中里、北京市,市辖区,东城区,斤斤计较、北京市,市辖区,宣武区,不好好、北京市,市辖区,东城区,www、北京市,县,延庆县,*、北京市,市辖区,东城区,星湖、北京市,市辖区,东城区,xihuhu、北京市,市辖区,东城区,ededee、北京市,市辖区,大兴区,兴化中里、北京市,市辖区,大兴区,兴华中里、北京市,县,延庆县,兴华中立、山西省,太原市,市辖区,还定居、北京市,大同市,市辖区,阶段将诶','【限地区】东海岸 急冻波士顿龙虾(1只装)450g *2件+苹果2颗','波士顿龙虾应该是龙虾中最便宜的品种之一了(请不要用小龙虾比较),红棕及纯黑色较为常见。波龙产自北大西洋深海,虽然叫波士顿龙虾,但主要产地是加拿大和美国缅因州,波士顿并不产,因是美国最大集散地而得名~学名是美洲鳌龙虾,世界上更普及的叫法是缅因龙虾或加拿大龙虾。 波士顿龙虾属于海螯虾科螯龙虾属,生活于寒冷海域,肉较嫩滑细致,产品具有高蛋白,低脂肪,维生素A、C、D及钙、钠、钾、镁、磷、铁、硫、铜等微量元素丰富,味道鲜美。味道鲜美,营养丰富。此款龙虾中粮我买网自营,品质有保障,产品规格1只,450g。',"1"]
+ - ['train_192870',1511002870000,'train_192870','025606ecb2f078e7931ec90b9a27a826','女','北京市,市辖区,大兴区,兴华中里社区*号楼*单元*、北京市,市辖区,大兴区,兴华中立、北京市,市辖区,东城区,*、北京市,市辖区,东城区,*、北京市,市辖区,东城区,兴华中里、北京市,市辖区,东城区,斤斤计较、北京市,市辖区,宣武区,不好好、北京市,市辖区,东城区,www、北京市,县,延庆县,*、北京市,市辖区,东城区,星湖、北京市,市辖区,东城区,xihuhu、北京市,市辖区,东城区,ededee、北京市,市辖区,大兴区,兴化中里、北京市,市辖区,大兴区,兴华中里、北京市,县,延庆县,兴华中立、山西省,太原市,市辖区,还定居、北京市,大同市,市辖区,阶段将诶','【新低价,40码起】saucony 圣康尼 RIDE 10 男款缓震跑鞋 2色','saucony(圣康尼)是来自美国的专业跑鞋品牌, 其名称来源于美国宾夕法尼亚州附近一条美丽的河流——Saucony。现其产品线分为专业运动系列和复古休闲系列两大类,为专业跑步运动员及跑步爱好者提供专业、舒适、安全的跑步产品。 这款saucony 圣康尼 RIDE 10 男款缓震跑鞋是索康尼旗下次顶级避震跑鞋,其获得了17年夏季《跑者世界》的最佳升级奖。从外观来看相比9代有了许多变化,鞋面采用工程网眼面料,增加了鞋面的透气性,外层为saucony经典Flex Film支撑材料覆盖,增加鞋面的延展和贴合性。后跟位置特别加强了稳定设计,外层增加了编织技术,增强整个鞋跟的稳定性。 中底采用全掌PWRFoam中底材料,比之前的EVA材质回弹效果更好,上层使用EVERUN鞋垫,辅助增加中底的缓震和回弹性能。大底依旧采用XT-900耐磨碳素橡胶,在前掌区域增加了IBR+发泡橡胶,材质较轻,并且能提高缓震保护。','1']
+ - ['train_197066',1511003784000,'train_197066','fe5eb556e3768e49b7919ebc4f9375d0','女','北京市,市辖区,大兴区,兴华中里社区*号楼*单元*、北京市,市辖区,大兴区,兴华中立、北京市,市辖区,东城区,*、北京市,市辖区,东城区,*、北京市,市辖区,东城区,兴华中里、北京市,市辖区,东城区,斤斤计较、北京市,市辖区,宣武区,不好好、北京市,市辖区,东城区,www、北京市,县,延庆县,*、北京市,市辖区,东城区,星湖、北京市,市辖区,东城区,xihuhu、北京市,市辖区,东城区,ededee、北京市,市辖区,大兴区,兴化中里、北京市,市辖区,大兴区,兴华中里、北京市,县,延庆县,兴华中立、山西省,太原市,市辖区,还定居、北京市,大同市,市辖区,阶段将诶','【限地区,新低价,PLUS会员】Panasonic 松下 ES-RF41-N405 电动剃须刀','这款Panasonic松下的ES-RF41-N405电动剃须刀,采用立体浮动4刀头,往复式设计,可较好贴合面部轮廓,剃须更舒适高效。刀片为Nano抛光刀片,提升剃须。5级电量显示,干湿两用,带弹出式修剪器。支持1小时快充,可以全身水洗。配有充电底座和便携收纳小包。','1']
+ - ['train_195043',1511003784000,'train_195043','fe5eb556e3768e49b7919ebc4f9375d0','女','北京市,市辖区,大兴区,兴华中里社区*号楼*单元*、北京市,市辖区,大兴区,兴华中立、北京市,市辖区,东城区,*、北京市,市辖区,东城区,*、北京市,市辖区,东城区,兴华中里、北京市,市辖区,东城区,斤斤计较、北京市,市辖区,宣武区,不好好、北京市,市辖区,东城区,www、北京市,县,延庆县,*、北京市,市辖区,东城区,星湖、北京市,市辖区,东城区,xihuhu、北京市,市辖区,东城区,ededee、北京市,市辖区,大兴区,兴化中里、北京市,市辖区,大兴区,兴华中里、北京市,县,延庆县,兴华中立、山西省,太原市,市辖区,还定居、北京市,大同市,市辖区,阶段将诶','【限地区】东海岸 急冻波士顿龙虾(1只装)450g *2件+苹果2颗','波士顿龙虾应该是龙虾中最便宜的品种之一了(请不要用小龙虾比较),红棕及纯黑色较为常见。波龙产自北大西洋深海,虽然叫波士顿龙虾,但主要产地是加拿大和美国缅因州,波士顿并不产,因是美国最大集散地而得名~学名是美洲鳌龙虾,世界上更普及的叫法是缅因龙虾或加拿大龙虾。 波士顿龙虾属于海螯虾科螯龙虾属,生活于寒冷海域,肉较嫩滑细致,产品具有高蛋白,低脂肪,维生素A、C、D及钙、钠、钾、镁、磷、铁、硫、铜等微量元素丰富,味道鲜美。味道鲜美,营养丰富。此款龙虾中粮我买网自营,品质有保障,产品规格1只,450g。','1']
+ - ['train_68005',1510928344000,'train_68005','caae1f9bd2d0b61af2478e32ce881960','女','北京市,市辖区,大兴区,兴华中里社区*号楼*单元*、北京市,市辖区,大兴区,兴华中立、北京市,市辖区,东城区,*、北京市,市辖区,东城区,*、北京市,市辖区,东城区,兴华中里、北京市,市辖区,东城区,斤斤计较、北京市,市辖区,宣武区,不好好、北京市,市辖区,东城区,www、北京市,县,延庆县,*、北京市,市辖区,东城区,星湖、北京市,市辖区,东城区,xihuhu、北京市,市辖区,东城区,ededee、北京市,市辖区,大兴区,兴化中里、北京市,市辖区,大兴区,兴华中里、北京市,县,延庆县,兴华中立、山西省,太原市,市辖区,还定居、北京市,大同市,市辖区,阶段将诶','【历史低价】BRAUN 博朗 Satin Hair 7 HD785 负离子吹风机','BRAUN博朗HD785电吹风机,采用负离子技术,中和正离子并使毛糙秀发恢复妥帖。另外还有INOTEC炫彩护色离子科技,这种先进离子性能在造型过程中可以处理卷结与静电,使秀发更加闪亮顺滑。 最值得一提的是内置新型智能温度传感器,每分钟监控头发温度600次,及时智能调整秀发受热温度,从而避免过热,保护秀发。这款电吹风机拥有双头可以更换,2000W大功率,有速干效果,还拥有4档温度和2档风量。','1']
+ -
+ name: action
+ create: |
+ CREATE TABLE IF NOT EXISTS action(
+ reqId string,
+ eventTime timestamp,
+ ingestionTime timestamp,
+ actionValue int,
+ index(key=(reqId), ttl=0m, ttl_type=absolute)
+ );
+ inserts:
+ - insert into action values ('train_0',1511188285000,1511188285000,0);
+ sql: |
+ select
+ reqId as reqId_1,
+ `reqId` as flattenRequest_reqId_original_0,
+ `eventTime` as flattenRequest_eventTime_original_1,
+ distinct_count(`f_userAddress`) over flattenRequest_f_temDescription_eventTime_0s_172801s_100 as flattenRequest_f_userAddress_window_unique_count_111,
+ distinct_count(`f_userAddress`) over flattenRequest_f_temDescription_eventTime_0s_432001s_100 as flattenRequest_f_userAddress_window_unique_count_112,
+ distinct_count(`f_userAddress`) over flattenRequest_f_itemTitle_eventTime_0s_172801s_100 as flattenRequest_f_userAddress_window_unique_count_113,
+ distinct_count(`f_userAddress`) over flattenRequest_f_itemTitle_eventTime_0s_432001s_100 as flattenRequest_f_userAddress_window_unique_count_114,
+ fz_top1_ratio(`f_itemTitle`) over flattenRequest_f_requestId_eventTime_0s_432001s_100 as flattenRequest_f_itemTitle_window_top1_ratio_126,
+ case when !isnull(at(`f_userGender`, 0)) over flattenRequest_f_requestId_eventTime_0s_432001s_100 then count(`f_userGender`) over flattenRequest_f_requestId_eventTime_0s_432001s_100 else null end as flattenRequest_f_userGender_window_count_127,
+ case when !isnull(at(`f_itemExpired`, 0)) over flattenRequest_f_requestId_eventTime_0s_432001s_100 then count(`f_itemExpired`) over flattenRequest_f_requestId_eventTime_0s_432001s_100 else null end as flattenRequest_f_itemExpired_window_count_128
+ from
+ `flattenRequest`
+ window flattenRequest_f_temDescription_eventTime_0s_172801s_100 as (partition by `f_temDescription` order by `eventTime` rows_range between 172800999 preceding and 0s preceding MAXSIZE 100),
+ flattenRequest_f_temDescription_eventTime_0s_432001s_100 as (partition by `f_temDescription` order by `eventTime` rows_range between 432000999 preceding and 0s preceding MAXSIZE 100),
+ flattenRequest_f_itemTitle_eventTime_0s_172801s_100 as (partition by `f_itemTitle` order by `eventTime` rows_range between 172800999 preceding and 0s preceding MAXSIZE 100),
+ flattenRequest_f_itemTitle_eventTime_0s_432001s_100 as (partition by `f_itemTitle` order by `eventTime` rows_range between 432000999 preceding and 0s preceding MAXSIZE 100),
+ flattenRequest_f_requestId_eventTime_0s_432001s_100 as (partition by `f_requestId` order by `eventTime` rows_range between 432000999 preceding and 0s preceding MAXSIZE 100)
+ ;
+ expect:
+ success: true
\ No newline at end of file
diff --git a/cases/debug/diff-debug-ttgwm3.yaml b/cases/debug/diff-debug-ttgwm3.yaml
new file mode 100644
index 00000000000..77dd9463527
--- /dev/null
+++ b/cases/debug/diff-debug-ttgwm3.yaml
@@ -0,0 +1,51 @@
+db: test3
+debugs: []
+cases:
+ -
+ id: 0
+ desc: diff-ttgwm
+ inputs:
+ -
+ name: flattenRequest
+ columns: ["reqId string","eventTime timestamp","f_index string","f_requestId string","f_userGender string","f_userAddress string","f_itemTitle string","f_temDescription string","f_itemExpired string"]
+ indexs: ["index1:f_itemTitle:eventTime:0m:absolute","index2:f_requestId:eventTime:0m:absolute","index3:f_temDescription:eventTime:0m:absolute"]
+ rows:
+ - ['train_178837',1511188561000,'train_178837','2cf15328efc127cc26ae35cac0e896db','女','北京市,市辖区,大兴区,兴华中里社区*号楼*单元*、北京市,市辖区,大兴区,兴华中立、北京市,市辖区,东城区,*、北京市,市辖区,东城区,*、北京市,市辖区,东城区,兴华中里、北京市,市辖区,东城区,斤斤计较、北京市,市辖区,宣武区,不好好、北京市,市辖区,东城区,www、北京市,县,延庆县,*、北京市,市辖区,东城区,星湖、北京市,市辖区,东城区,xihuhu、北京市,市辖区,东城区,ededee、北京市,市辖区,大兴区,兴化中里、北京市,市辖区,大兴区,兴华中里、北京市,县,延庆县,兴华中立、山西省,太原市,市辖区,还定居、北京市,大同市,市辖区,阶段将诶','CITIZEN 西铁城 BM8475-26E 皮带简约光动能男士手表','此款CITIZEN 西铁城 BM8475-26E 皮带简约光动能男士手表外观设计简约,表盘直径约42mm,表壳厚度11mm。亮骚活力的橙色刻度显示,简约三针计时,采用日本石英表芯,光动能驱动,搭载矿物玻璃表镜,防刮擦又比较耐摔。3点钟方向自带日期显示,皮革表带质感上乘,工艺精湛,还自带100米生活防水。','0']
+ - ['train_1', 1511190175000,'train_1','14d51082b22b7e78177177fa82ef942d','女','北京市,市辖区,大兴区,兴华中里社区*号楼*单元*、北京市,市辖区,大兴区,兴华中立、北京市,市辖区,东城区,*、北京市,市辖区,东城区,*、北京市,市辖区,东城区,兴华中里、北京市,市辖区,东城区,斤斤计较、北京市,市辖区,宣武区,不好好、北京市,市辖区,东城区,www、北京市,县,延庆县,*、北京市,市辖区,东城区,星湖、北京市,市辖区,东城区,xihuhu、北京市,市辖区,东城区,ededee、北京市,市辖区,大兴区,兴化中里、北京市,市辖区,大兴区,兴华中里、北京市,县,延庆县,兴华中立、山西省,太原市,市辖区,还定居、北京市,大同市,市辖区,阶段将诶','CITIZEN 西铁城 BM8475-26E 皮带简约光动能男士手表','此款CITIZEN 西铁城 BM8475-26E 皮带简约光动能男士手表外观设计简约,表盘直径约42mm,表壳厚度11mm。亮骚活力的橙色刻度显示,简约三针计时,采用日本石英表芯,光动能驱动,搭载矿物玻璃表镜,防刮擦又比较耐摔。3点钟方向自带日期显示,皮革表带质感上乘,工艺精湛,还自带100米生活防水。','0']
+ - ['train_147', 1511191732000,'train_147','c4c081b82bb4b4d6907924317c13e8a3','女','安徽省,合肥市,瑶海区,当涂北路与新海大道交口新海尚宸家园*号楼*','CITIZEN 西铁城 BM8475-26E 皮带简约光动能男士手表','此款CITIZEN 西铁城 BM8475-26E 皮带简约光动能男士手表外观设计简约,表盘直径约42mm,表壳厚度11mm。亮骚活力的橙色刻度显示,简约三针计时,采用日本石英表芯,光动能驱动,搭载矿物玻璃表镜,防刮擦又比较耐摔。3点钟方向自带日期显示,皮革表带质感上乘,工艺精湛,还自带100米生活防水。','0']
+ - ['train_104', 1511192140000,'train_104','a9a98fd04053253626ab05ede3b37e43','女','*川省,成都市,蒲江县,海川阳光尚城*栋*','CITIZEN 西铁城 BM8475-26E 皮带简约光动能男士手表','此款CITIZEN 西铁城 BM8475-26E 皮带简约光动能男士手表外观设计简约,表盘直径约42mm,表壳厚度11mm。亮骚活力的橙色刻度显示,简约三针计时,采用日本石英表芯,光动能驱动,搭载矿物玻璃表镜,防刮擦又比较耐摔。3点钟方向自带日期显示,皮革表带质感上乘,工艺精湛,还自带100米生活防水。','0']
+ - ['train_92', 1511192324000,'train_92','04c2c1e536c275ebf26fcc90aa86105f','女','河北省,衡水市,桃城区,河北省衡水市桃城区胜利西路利康胡同*号楼*单元*、河北省,保定市,北市区,河北省保定市莲池区*东路*号河大新区坤舆生活区','CITIZEN 西铁城 BM8475-26E 皮带简约光动能男士手表','此款CITIZEN 西铁城 BM8475-26E 皮带简约光动能男士手表外观设计简约,表盘直径约42mm,表壳厚度11mm。亮骚活力的橙色刻度显示,简约三针计时,采用日本石英表芯,光动能驱动,搭载矿物玻璃表镜,防刮擦又比较耐摔。3点钟方向自带日期显示,皮革表带质感上乘,工艺精湛,还自带100米生活防水。','0']
+ -
+ name: action
+ create: |
+ CREATE TABLE IF NOT EXISTS action(
+ reqId string,
+ eventTime timestamp,
+ ingestionTime timestamp,
+ actionValue int,
+ index(key=(reqId), ttl=0m, ttl_type=absolute)
+ );
+ inserts:
+ - insert into action values ('train_0',1511188285000,1511188285000,0);
+ sql: |
+ select
+ reqId as reqId_1,
+ `reqId` as flattenRequest_reqId_original_0,
+ `eventTime` as flattenRequest_eventTime_original_1,
+ distinct_count(`f_userAddress`) over flattenRequest_f_temDescription_eventTime_0s_172801s_100 as flattenRequest_f_userAddress_window_unique_count_111,
+ distinct_count(`f_userAddress`) over flattenRequest_f_temDescription_eventTime_0s_432001s_100 as flattenRequest_f_userAddress_window_unique_count_112,
+ distinct_count(`f_userAddress`) over flattenRequest_f_itemTitle_eventTime_0s_172801s_100 as flattenRequest_f_userAddress_window_unique_count_113,
+ distinct_count(`f_userAddress`) over flattenRequest_f_itemTitle_eventTime_0s_432001s_100 as flattenRequest_f_userAddress_window_unique_count_114,
+ fz_top1_ratio(`f_itemTitle`) over flattenRequest_f_requestId_eventTime_0s_432001s_100 as flattenRequest_f_itemTitle_window_top1_ratio_126,
+ case when !isnull(at(`f_userGender`, 0)) over flattenRequest_f_requestId_eventTime_0s_432001s_100 then count(`f_userGender`) over flattenRequest_f_requestId_eventTime_0s_432001s_100 else null end as flattenRequest_f_userGender_window_count_127,
+ case when !isnull(at(`f_itemExpired`, 0)) over flattenRequest_f_requestId_eventTime_0s_432001s_100 then count(`f_itemExpired`) over flattenRequest_f_requestId_eventTime_0s_432001s_100 else null end as flattenRequest_f_itemExpired_window_count_128
+ from
+ `flattenRequest`
+ window flattenRequest_f_temDescription_eventTime_0s_172801s_100 as (partition by `f_temDescription` order by `eventTime` rows_range between 172800999 preceding and 0s preceding MAXSIZE 100),
+ flattenRequest_f_temDescription_eventTime_0s_432001s_100 as (partition by `f_temDescription` order by `eventTime` rows_range between 432000999 preceding and 0s preceding MAXSIZE 100),
+ flattenRequest_f_itemTitle_eventTime_0s_172801s_100 as (partition by `f_itemTitle` order by `eventTime` rows_range between 172800999 preceding and 0s preceding MAXSIZE 100),
+ flattenRequest_f_itemTitle_eventTime_0s_432001s_100 as (partition by `f_itemTitle` order by `eventTime` rows_range between 432000999 preceding and 0s preceding MAXSIZE 100),
+ flattenRequest_f_requestId_eventTime_0s_432001s_100 as (partition by `f_requestId` order by `eventTime` rows_range between 432000999 preceding and 0s preceding MAXSIZE 100)
+ ;
+ expect:
+ success: true
\ No newline at end of file
diff --git a/cases/debug/diff-debug.yaml b/cases/debug/diff-debug.yaml
new file mode 100644
index 00000000000..88a8caa1078
--- /dev/null
+++ b/cases/debug/diff-debug.yaml
@@ -0,0 +1,191 @@
+db: test_zw
+debugs: []
+cases:
+ -
+ id: 0
+ desc: diff-miaoche
+ inputs:
+ -
+ name: behaviourTable
+ create: |
+ CREATE TABLE IF NOT EXISTS behaviourTable(
+ itemId string,
+ reqId string,
+ tags string,
+ instanceKey string,
+ eventTime timestamp,
+ rank string,
+ mcuid string,
+ ip string,
+ browser string,
+ browser_version string,
+ platform string,
+ query string,
+ sort_rule string,
+ _i_rank string
+ );
+ inserts:
+ - insert into behaviourTable values ('15966','1cbnZ2e7db70386+6a7f','pc_000,h','1cbnZ2e7db70386+6a7f^15966',1589517641000,'d','72619bce98fd345e15d37a41cda90351','115.213.231','Google Chrome','46.0.2486.0','windows','','def','9');
+ - insert into behaviourTable values ('15966','1cbnZ2e7db70386+6a7f','pc_000,h','1cbnZ2e7db70386+6a7f^15966',1589517641000,'d','72619bce98fd345e15d37a41cda90351','115.213.231','Google Chrome','46.0.2486.0','windows','','def','9');
+ - insert into behaviourTable values ('15966','1cbnZ2e7db70386+6a7f','pc_000,h','1cbnZ2e7db70386+6a7f^15966',1589517641000,'d','72619bce98fd345e15d37a41cda90351','115.213.231','Google Chrome','46.0.2486.0','windows','','def','9');
+ - insert into behaviourTable values ('15966','1cbnZ2e7db70386+6a7f','pc_000,h','1cbnZ2e7db70386+6a7f^15966',1589517641000,'d','72619bce98fd345e15d37a41cda90351','115.213.231','Google Chrome','46.0.2486.0','windows','','def','9');
+ - insert into behaviourTable values ('15966','1cbnZ2e7db70386+6a7f','pc_000,h','1cbnZ2e7db70386+6a7f^15966',1589517641000,'d','72619bce98fd345e15d37a41cda90351','115.213.231','Google Chrome','46.0.2486.0','windows','','def','9');
+ - insert into behaviourTable values ('15966','1cbnZ2e7db70386+6a7f','pc_000,h','1cbnZ2e7db70386+6a7f^15966',1589517641000,'d','72619bce98fd345e15d37a41cda90351','115.213.231','Google Chrome','46.0.2486.0','windows','','def','9');
+ - insert into behaviourTable values ('15966','1cbnZ2e7db70386+6a7f','pc_000,h','1cbnZ2e7db70386+6a7f^15966',1589517641000,'d','72619bce98fd345e15d37a41cda90351','115.213.231','Google Chrome','46.0.2486.0','windows','','def','9');
+ -
+ name: feedbackTable
+ create: |
+ CREATE TABLE IF NOT EXISTS feedbackTable(
+ itemId string,
+ reqId string,
+ instanceKey string,
+ eventTime timestamp,
+ ingestionTime timestamp,
+ actionValue double,
+ rank string,
+ index(key=(instanceKey), ttl=0m, ttl_type=absolute)
+ );
+ -
+ name: adinfo
+ create: |
+ CREATE TABLE IF NOT EXISTS adinfo(
+ id string,
+ ingestionTime timestamp,
+ item_ts timestamp,
+ I_brand_id string,
+ I_series_id string,
+ I_deal_record int,
+ I_weight int,
+ I_discount double,
+ I_msrp double,
+ I_min_price double,
+ I_price_difference double,
+ index(key=(id), ttl=0m, ttl_type=absolute)
+ );
+ inserts:
+ - insert into adinfo values ('15966',1606829773651,1461455554999,'57','142',0,183,9.5,59400.0,56400.0,3000.0);
+ - insert into adinfo values ('15966',1606829770353,1461168198999,'57','142',0,163,9.2,59400.0,54900.0,4500.0);
+ - insert into adinfo values ('15966',1606829763134,1460476061999,'57','142',0,121,9.2,59400.0,54900.0,4500.0);
+ - insert into adinfo values ('15966',1606829766231,1460736086999,'57','142',0,127,9.3,59400.0,55400.0,4000.0);
+ - insert into adinfo values ('15966',1606829768458,1460949164999,'57','142',0,148,9.4,59400.0,55900.0,3500.0);
+ - insert into adinfo values ('15966',1606829766806,1460772891999,'57','142',0,130,9.3,59400.0,55400.0,4000.0);
+ - insert into adinfo values ('15966',1606829747775,1458921819999,'57','142',0,0,9.2,59400.0,54900.0,4500.0);
+ - insert into adinfo values ('15966',1606829747037,1458894698999,'57','142',0,0,8.9,59400.0,52900.0,6500.0);
+ - insert into adinfo values ('15966',1606829770755,1461215180999,'57','142',0,171,9.5,59400.0,56400.0,3000.0);
+ - insert into adinfo values ('15966',1606829763146,1460471547999,'57','142',0,121,10.0,59400.0,59400.0,0.0);
+ - insert into adinfo values ('15966',1606829775064,1461600012999,'57','142',0,192,9.4,59400.0,55900.0,3500.0);
+ - insert into adinfo values ('15966',1606829756644,1459958431999,'57','142',0,87,9.3,59400.0,55400.0,4000.0);
+ - insert into adinfo values ('15966',1606829758171,1460045111999,'57','142',0,90,9.2,59400.0,54900.0,4500.0);
+ - insert into adinfo values ('15966',1606829753139,1459612901999,'57','142',0,87,9.3,59400.0,55400.0,4000.0);
+ - insert into adinfo values ('15966',1606829762301,1460390409999,'57','142',0,114,9.2,59400.0,54900.0,4500.0);
+ - insert into adinfo values ('15966',1606829772288,1461401287999,'57','142',0,183,9.4,59400.0,55900.0,3500.0);
+ - insert into adinfo values ('15966',1606829767305,1460822654999,'57','142',0,130,9.3,59400.0,55400.0,4000.0);
+ - insert into adinfo values ('15966',1606829763039,1460427581999,'57','142',0,121,9.2,59400.0,54900.0,4500.0);
+ - insert into adinfo values ('15966',1606829772193,1461377130999,'57','142',0,183,9.4,59400.0,55900.0,3500.0);
+ - insert into adinfo values ('15966',1606829771541,1461290734999,'57','142',0,174,9.4,59400.0,55900.0,3500.0);
+ - insert into adinfo values ('15966',1606829767875,1460908981999,'57','142',0,133,9.3,59400.0,55400.0,4000.0);
+ - insert into adinfo values ('15966',1606829749745,1459304681999,'57','142',0,78,9.4,59400.0,55900.0,3500.0);
+ - insert into adinfo values ('15966',1606829773598,1461434542999,'57','142',0,183,9.4,59400.0,55900.0,3500.0);
+ - insert into adinfo values ('15966',1606829764377,1460560778999,'57','142',0,121,9.3,59400.0,55400.0,4000.0);
+ - insert into adinfo values ('15966',1606829761046,1460259644999,'57','142',0,102,9.2,59400.0,54900.0,4500.0);
+ - insert into adinfo values ('15966',1606829769091,1461119108999,'57','142',0,163,9.2,59400.0,54900.0,4500.0);
+ - insert into adinfo values ('15966',1606829770895,1461254443999,'57','142',0,163,9.2,59400.0,54900.0,4500.0);
+ - insert into adinfo values ('15966',1606829753948,1459699532999,'57','142',0,87,9.3,59400.0,55400.0,4000.0);
+ - insert into adinfo values ('15966',1606829763513,1460477116999,'57','142',0,121,9.3,59400.0,55400.0,4000.0);
+ - insert into adinfo values ('15966',1606829760094,1460171824999,'57','142',0,99,9.2,59400.0,54900.0,4500.0);
+ - insert into adinfo values ('15966',1606829764378,1460563205999,'57','142',0,121,9.3,59400.0,55400.0,4000.0);
+ - insert into adinfo values ('15966',1606829762134,1460346316999,'57','142',0,114,9.2,59400.0,54900.0,4500.0);
+ - insert into adinfo values ('15966',1606829764377,1460555818999,'57','142',0,121,10.0,59400.0,59400.0,0.0);
+ - insert into adinfo values ('15966',1606829748975,1459221747999,'57','142',0,75,9.2,59400.0,54900.0,4500.0);
+ - insert into adinfo values ('15966',1606829774044,1461480568999,'57','142',0,192,9.4,59400.0,55900.0,3500.0);
+ - insert into adinfo values ('15966',1606829748548,1459131536999,'57','142',0,69,9.2,59400.0,54900.0,4500.0);
+ - insert into adinfo values ('15966',1606829768701,1460995386999,'57','142',0,148,9.3,59400.0,55400.0,4000.0);
+ - insert into adinfo values ('15966',1606829772289,1461400681999,'57','142',0,183,9.5,59400.0,56400.0,3000.0);
+ - insert into adinfo values ('15966',1606829757385,1460044803999,'57','142',0,87,9.3,59400.0,55400.0,4000.0);
+ - insert into adinfo values ('15966',1606829774138,1461513605999,'57','142',0,183,9.8,59400.0,58400.0,1000.0);
+ - insert into adinfo values ('15966',1606829765662,1460672955999,'57','142',0,127,10.0,59400.0,59400.0,0.0);
+ - insert into adinfo values ('15966',1606829754685,1459785626999,'57','142',0,87,9.3,59400.0,55400.0,4000.0);
+ - insert into adinfo values ('15966',1606829760278,1460217604999,'57','142',0,99,9.2,59400.0,54900.0,4500.0);
+ - insert into adinfo values ('15966',1606829761325,1460304048999,'57','142',0,102,9.2,59400.0,54900.0,4500.0);
+ - insert into adinfo values ('15966',1606829774768,1461573207999,'57','142',0,183,9.5,59400.0,56400.0,3000.0);
+ - insert into adinfo values ('15966',1606829773702,1461471084999,'57','142',0,183,9.5,59400.0,56400.0,3000.0);
+ - insert into adinfo values ('15966',1606829768628,1460987463999,'57','142',0,148,9.3,59400.0,55400.0,4000.0);
+ - insert into adinfo values ('15966',1606829771673,1461340817999,'57','142',0,171,9.5,59400.0,56400.0,3000.0);
+ - insert into adinfo values ('15966',1606829770007,1461198993999,'57','142',0,160,9.5,59400.0,56400.0,3000.0);
+ - insert into adinfo values ('15966',1606829759457,1460131235999,'57','142',0,93,9.2,59400.0,54900.0,4500.0);
+ - insert into adinfo values ('15966',1606829772025,1461386182999,'57','142',0,183,9.5,59400.0,56400.0,3000.0);
+ - insert into adinfo values ('15966',1606829773662,1461459086999,'57','142',0,183,9.4,59400.0,55900.0,3500.0);
+ - insert into adinfo values ('15966',1606829770716,1461205528999,'57','142',0,163,9.2,59400.0,54900.0,4500.0);
+ - insert into adinfo values ('15966',1606829771475,1461265956999,'57','142',0,171,9.4,59400.0,55900.0,3500.0);
+ - insert into adinfo values ('15966',1606829774558,1461515993999,'57','142',0,192,9.4,59400.0,55900.0,3500.0);
+ - insert into adinfo values ('15966',1606829747037,1458894690999,'57','142',null,0,8.9,59400.0,52900.0,6500.0);
+ - insert into adinfo values ('15966',1606829750078,1459440043999,'57','142',0,81,9.8,59400.0,58400.0,1000.0);
+ - insert into adinfo values ('15966',1606829748683,1459180812999,'57','142',0,69,9.2,59400.0,54900.0,4500.0);
+ - insert into adinfo values ('15966',1606829772033,1461427208999,'57','142',0,183,9.5,59400.0,56400.0,3000.0);
+ - insert into adinfo values ('15966',1606829747371,1458901214999,'57','142',0,0,9.5,66400.0,62900.0,3500.0);
+ - insert into adinfo values ('15966',1606829748068,1459094421999,'57','142',0,0,9.2,59400.0,54900.0,4500.0);
+ - insert into adinfo values ('15966',1606829765721,1460675891999,'57','142',0,127,9.3,59400.0,55400.0,4000.0);
+ - insert into adinfo values ('15966',1606829749046,1459267706999,'57','142',0,75,9.4,59400.0,55900.0,3500.0);
+ - insert into adinfo values ('15966',1606829770792,1461220692999,'57','142',0,163,9.2,59400.0,54900.0,4500.0);
+ - insert into adinfo values ('15966',1606829772034,1461342695999,'57','142',0,174,9.4,59400.0,55900.0,3500.0);
+ - insert into adinfo values ('15966',1606829765263,1460649666999,'57','142',0,127,9.3,59400.0,55400.0,4000.0);
+ - insert into adinfo values ('15966',1606829752101,1459526430999,'57','142',0,87,9.3,59400.0,55400.0,4000.0);
+ - insert into adinfo values ('15966',1606829772025,1461386244999,'57','142',0,183,9.4,59400.0,55900.0,3500.0);
+ - insert into adinfo values ('15966',1606829751449,1459445334999,'57','142',0,81,9.3,59400.0,55400.0,4000.0);
+ - insert into adinfo values ('15966',1606829751069,1459480434999,'57','142',0,87,9.3,59400.0,55400.0,4000.0);
+ - insert into adinfo values ('15966',1606829771056,1461295703999,'57','142',0,171,9.5,59400.0,56400.0,3000.0);
+ - insert into adinfo values ('15966',1606829749437,1459267358999,'57','142',0,75,9.2,59400.0,54900.0,4500.0);
+ - insert into adinfo values ('15966',1606829748154,1459008046999,'57','142',0,0,9.2,59400.0,54900.0,4500.0);
+ - insert into adinfo values ('15966',1606829771559,1461298469999,'57','142',0,174,9.4,59400.0,55900.0,3500.0);
+ - insert into adinfo values ('15966',1606829755725,1459872065999,'57','142',0,87,9.3,59400.0,55400.0,4000.0);
+ - insert into adinfo values ('15966',1606829775678,1461641476999,'57','142',0,202,9.4,59400.0,55900.0,3500.0);
+ - insert into adinfo values ('15966',1606829767681,1460864505999,'57','142',0,133,9.3,59400.0,55400.0,4000.0);
+ - insert into adinfo values ('15966',1606829763017,1460476828999,'57','142',0,121,9.2,59400.0,54900.0,4500.0);
+ - insert into adinfo values ('15966',1606829750105,1459353816999,'57','142',0,78,9.4,59400.0,55900.0,3500.0);
+ - insert into adinfo values ('15966',1606829759182,1460094013999,'57','142',0,93,9.2,59400.0,54900.0,4500.0);
+ - insert into adinfo values ('15966',1606829769452,1461081633999,'57','142',0,148,9.3,59400.0,55400.0,4000.0);
+ - insert into adinfo values ('15966',1606829765072,1460601578999,'57','142',0,127,9.3,59400.0,55400.0,4000.0);
+ sql: |
+ select * from
+ (
+ select
+ `instanceKey` as instanceKey_1,
+ `eventTime` as behaviourTable_eventTime_original_0,
+ `instanceKey` as behaviourTable_instanceKey_original_1,
+ `_i_rank` as behaviourTable__i_rank_original_14,
+ `browser` as behaviourTable_browser_original_15,
+ `browser_version` as behaviourTable_browser_version_original_16,
+ `ip` as behaviourTable_ip_original_17,
+ `itemId` as behaviourTable_itemId_original_18,
+ `mcuid` as behaviourTable_mcuid_original_19,
+ `platform` as behaviourTable_platform_original_20,
+ `query` as behaviourTable_query_original_21,
+ `rank` as behaviourTable_rank_original_22
+ from
+ `behaviourTable`
+ )
+ as out0
+ last join
+ (
+ select
+ `behaviourTable`.`instanceKey` as instanceKey_3,
+ `feedbackTable_instanceKey`.`actionValue` as feedbackTable_actionValue_multi_direct_2,
+ `adinfo_id`.`I_brand_id` as adinfo_I_brand_id_multi_direct_3,
+ `adinfo_id`.`I_deal_record` as adinfo_I_deal_record_multi_direct_4,
+ `adinfo_id`.`I_discount` as adinfo_I_discount_multi_direct_5,
+ `adinfo_id`.`I_min_price` as adinfo_I_min_price_multi_direct_6,
+ `adinfo_id`.`I_msrp` as adinfo_I_msrp_multi_direct_7,
+ `adinfo_id`.`I_price_difference` as adinfo_I_price_difference_multi_direct_8,
+ `adinfo_id`.`I_series_id` as adinfo_I_series_id_multi_direct_9,
+ `adinfo_id`.`I_weight` as adinfo_I_weight_multi_direct_10,
+ `adinfo_id`.`ingestionTime` as adinfo_ingestionTime_multi_direct_11,
+ `adinfo_id`.`item_ts` as adinfo_item_ts_multi_direct_12,
+ `feedbackTable_instanceKey`.`rank` as feedbackTable_rank_multi_direct_13
+ from
+ `behaviourTable` last join `feedbackTable` as `feedbackTable_instanceKey` on `behaviourTable`.`instanceKey` = `feedbackTable_instanceKey`.`instanceKey`
+ last join `adinfo` as `adinfo_id` on `behaviourTable`.`itemId` = `adinfo_id`.`id`
+ )
+ as out1
+ on out0.instanceKey_1 = out1.instanceKey_3;
+ expect:
+ success: true
\ No newline at end of file
diff --git a/cases/function/cluster/test_cluster_batch.yaml b/cases/function/cluster/test_cluster_batch.yaml
index 8513817e196..329fc9d170d 100644
--- a/cases/function/cluster/test_cluster_batch.yaml
+++ b/cases/function/cluster/test_cluster_batch.yaml
@@ -13,6 +13,7 @@
# limitations under the License.
db: test_zw
debugs: []
+version: 0.5.0
cases:
-
id: 0
diff --git a/cases/function/cluster/test_window_row.yaml b/cases/function/cluster/test_window_row.yaml
index 5be16f45d6a..35f200af520 100644
--- a/cases/function/cluster/test_window_row.yaml
+++ b/cases/function/cluster/test_window_row.yaml
@@ -13,6 +13,7 @@
# limitations under the License.
db: test_zw
debugs: []
+version: 0.5.0
cases:
-
id: 0
diff --git a/cases/function/cluster/test_window_row_range.yaml b/cases/function/cluster/test_window_row_range.yaml
index eb8bf4921c3..476336fe4c0 100644
--- a/cases/function/cluster/test_window_row_range.yaml
+++ b/cases/function/cluster/test_window_row_range.yaml
@@ -13,6 +13,7 @@
# limitations under the License.
db: test_zw
debugs: []
+version: 0.5.0
cases:
-
id: 0
diff --git a/cases/function/cluster/window_and_lastjoin.yaml b/cases/function/cluster/window_and_lastjoin.yaml
index 47fadbbcfb0..c20e6e070ee 100644
--- a/cases/function/cluster/window_and_lastjoin.yaml
+++ b/cases/function/cluster/window_and_lastjoin.yaml
@@ -12,7 +12,8 @@
# See the License for the specific language governing permissions and
# limitations under the License.
db: test_zw
-debugs:
+debugs: []
+version: 0.5.0
cases:
-
id: 0
diff --git a/cases/function/data_expiration/test_data_expiration.yaml b/cases/function/data_expiration/test_data_expiration.yaml
new file mode 100644
index 00000000000..d686692bd92
--- /dev/null
+++ b/cases/function/data_expiration/test_data_expiration.yaml
@@ -0,0 +1,70 @@
+# Copyright 2021 4Paradigm
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+db: test_zw
+debugs: []
+version: 0.5.0
+cases:
+- id: 0
+ desc: ttl_type=latest,ttl=4,insert 10
+ inputs:
+ -
+ columns : ["c1 string","c2 int","c3 bigint","c4 timestamp"]
+ indexs: ["index1:c1:c4:4:latest"]
+ rows:
+ - ["bb", 2, 3, 1590738989000]
+ - ["bb", 4, 5, 1590738990000]
+ - ["bb", 6, 7, 1590738991000]
+ - ["bb", 8, 9, 1590738992000]
+ - ["bb", 10, 11, 1590738993000]
+ - ["bb", 12, 13, 1590738994000]
+ - ["bb", 14, 15, 1590738995000]
+ - ["bb", 16, 17, 1590738996000]
+ - ["bb", 18, 19, 1590738997000]
+ - ["bb", 20, 21, 1590738998000]
+ sql: select c1,c2,c3 from {0};
+ expect:
+ columns: ["c1 string","c2 int","c3 bigint"]
+ rows:
+ - ["bb", 20, 21]
+ - ["bb", 18, 19]
+ - ["bb", 16, 17]
+ - ["bb", 14, 15]
+
+- id: 16
+ desc: 创建磁盘表,ttl_type=absolute,ttl=10m, insert 10
+ mode: request-unsupport
+ inputs:
+ -
+ columns : ["c1 string","c2 int","c3 bigint","c4 timestamp"]
+ indexs: ["index1:c1:c4:10m:absolute"]
+ storage: hdd
+ rows:
+ - ["bb", 2, 3, "{currentTime}-100"]
+ - ["bb", 4, 5, "{currentTime}-200"]
+ - ["bb", 6, 7, "{currentTime}-599000"]
+ - ["bb", 8, 9, "{currentTime}-600000"]
+ - ["bb", 10, 11, "{currentTime}-600005"]
+ - ["bb", 12, 13, "{currentTime}-600006"]
+ - ["bb", 14, 15, "{currentTime}-600007"]
+ - ["bb", 16, 17, "{currentTime}-600008"]
+ - ["bb", 18, 19, "{currentTime}-600009"]
+ - ["bb", 20, 21, "{currentTime}-600010"]
+ sql: select c1,c2,c3 from {0};
+ expect:
+ columns: ["c1 string","c2 int","c3 bigint"]
+ rows:
+ - ["bb", 2, 3]
+ - ["bb", 4, 5]
+ - ["bb", 6, 7]
\ No newline at end of file
diff --git a/cases/function/ddl/test_create.yaml b/cases/function/ddl/test_create.yaml
index ee98e8a6c2d..7319230b3ac 100644
--- a/cases/function/ddl/test_create.yaml
+++ b/cases/function/ddl/test_create.yaml
@@ -14,6 +14,7 @@
db: test_zw
debugs: []
+version: 0.5.0
cases:
-
id: 0
diff --git a/cases/function/ddl/test_create_index.yaml b/cases/function/ddl/test_create_index.yaml
index 561a238ee4d..5549a5db039 100644
--- a/cases/function/ddl/test_create_index.yaml
+++ b/cases/function/ddl/test_create_index.yaml
@@ -14,6 +14,7 @@
db: test_zw
debugs: []
+version: 0.5.0
cases:
-
id: 0
diff --git a/cases/function/ddl/test_create_no_index.yaml b/cases/function/ddl/test_create_no_index.yaml
index 6d8a8b40a9d..f29afdf4717 100644
--- a/cases/function/ddl/test_create_no_index.yaml
+++ b/cases/function/ddl/test_create_no_index.yaml
@@ -14,28 +14,15 @@
db: test_zw
debugs: []
+version: 0.5.0
cases:
-
id: 0
desc: 创建表不指定索引
inputs:
- -
- create: |
- create table {0} (
- id int not null,
- c1 int not null,
- c2 smallint not null,
- c3 float not null,
- c4 double not null,
- c5 bigint not null,
- c6 string not null,
- c7 timestamp not null,
- c8 date not null,
- c9 bool not null
- );
- insert: |
- insert into {0} values
- (1, 1, 2, 3.3f, 4.4, 5L, "aa", 12345678L, "2020-05-21", true);
+ - columns: [ "id int not null","c1 int not null","c2 smallint not null","c3 float not null","c4 double not null","c5 bigint not null","c6 string not null","c7 timestamp not null","c8 date not null","c9 bool not null" ]
+ rows:
+ - [ 1, 1, 2, 3.3, 4.4, 5, "aa", 12345678, "2020-05-21", true ]
sql: desc {0};
expect:
idxs:
@@ -279,126 +266,14 @@ cases:
ts: "c7"
ttl: 100min
ttlType: kAbsoluteTime
- -
- id: 12
- desc: 不指定索引,进行lastjoin
- inputs:
- - columns: [ "c1 string","c2 int","c3 bigint","c4 timestamp" ]
- rows:
- - [ "aa",2,3,1590738989000 ]
- - [ "bb",21,31,1590738990000 ]
- - [ "dd",41,51,1590738990000 ]
- - columns: [ "c1 string","c2 int","c3 bigint","c4 timestamp" ]
- rows:
- - [ "aa",2,13,1590738989000 ]
- - [ "bb",21,131,1590738990000 ]
- - [ "cc",41,121,1590738991000 ]
- sql: select {0}.c1,{0}.c2,{1}.c3,{1}.c4 from {0} last join {1} on {0}.c1={1}.c1;
- expect:
- columns: [ "c1 string","c2 int","c3 bigint","c4 timestamp" ]
- order: c1
- rows:
- - [ "aa",2,13,1590738989000 ]
- - [ "bb",21,131,1590738990000 ]
- - [ "dd", 41, NULL, NULL ]
- -
- id: 13
- desc: 不指定索引,进行lastjoin,匹配多行
- inputs:
- - columns: [ "c1 string","c2 int","c3 bigint","c4 timestamp" ]
- rows:
- - [ "aa",2,3,1590738989000 ]
- - [ "bb",21,31,1590738990000 ]
- - [ "dd",41,51,1590738990000 ]
- - columns: [ "c1 string","c2 int","c3 bigint","c4 timestamp" ]
- rows:
- - [ "aa",2,13,1590738989000 ]
- - [ "aa",21,131,1590738990000 ]
- - [ "cc",41,121,1590738991000 ]
- sql: select {0}.c1,{0}.c2,{1}.c3,{1}.c4 from {0} last join {1} on {0}.c1={1}.c1;
- expect:
- columns: [ "c1 string","c2 int","c3 bigint","c4 timestamp" ]
- order: c1
- rows:
- - [ "aa",2,131,1590738990000 ]
- - [ "bb",21,NULL,NULL ]
- - [ "dd", 41, NULL, NULL ]
- -
- id: 14
- desc: 不指定索引,插入数据,可查询
- inputs:
- -
- create: |
- create table {0} (
- id int not null,
- c1 int not null,
- c2 smallint not null,
- c3 float not null,
- c4 double not null,
- c5 bigint not null,
- c6 string not null,
- c7 timestamp not null,
- c8 date not null,
- c9 bool not null
- );
- insert: |
- insert into {0} values
- (1, 1, 2, 3.3f, 4.4, 5L, "aa", 12345678L, "2020-05-21", true);
- sql: select * from {0};
- expect:
- columns : ["id int","c1 int","c2 smallint","c3 float","c4 double","c5 bigint","c6 string","c7 timestamp","c8 date","c9 bool"]
- order: id
- rows:
- - [1,1,2,3.3,4.4,5,"aa",12345678,"2020-05-21",true]
- -
- id: 15
- desc: 不指定索引,进行子查询操作
- inputs:
- -
- create: |
- create table {0} (
- id int not null,
- c1 int not null,
- c2 smallint not null,
- c3 float not null,
- c4 double not null,
- c5 bigint not null,
- c6 string not null,
- c7 timestamp not null,
- c8 date not null,
- c9 bool not null
- );
- insert: |
- insert into {0} values
- (1, 1, 2, 3.3f, 4.4, 5L, "aa", 12345678L, "2020-05-21", true);
- sql: select c1,c2 from (select id as c1,c1 as c2,c7 as c3 from {0});
- expect:
- columns : ["c1 int","c2 int"]
- order: id
- rows:
- - [1,1]
-
id: 16
desc: 创建表指定索引,没有默认索引
inputs:
- -
- create: |
- create table {0} (
- id int not null,
- c1 int not null,
- c2 smallint not null,
- c3 float not null,
- c4 double not null,
- c5 bigint not null,
- c6 string not null,
- c7 timestamp not null,
- c8 date not null,
- c9 bool not null,
- index(key=(c1), ts=c5)
- );
- insert: |
- insert into {0} values
- (1, 1, 2, 3.3f, 4.4, 5L, "aa", 12345678L, "2020-05-21", true);
+ - columns: [ "id int not null","c1 int not null","c2 smallint not null","c3 float not null","c4 double not null","c5 bigint not null","c6 string not null","c7 timestamp not null","c8 date not null","c9 bool not null" ]
+ rows:
+ - [ 1, 1, 2, 3.3, 4.4, 5, "aa", 12345678, "2020-05-21", true ]
+ indexs: ["index1:c1:c5"]
sql: desc {0};
expect:
idxs:
diff --git a/cases/function/ddl/test_options.yaml b/cases/function/ddl/test_options.yaml
index 7355a83961a..1c8ed43ad7d 100644
--- a/cases/function/ddl/test_options.yaml
+++ b/cases/function/ddl/test_options.yaml
@@ -14,6 +14,7 @@
db: test_zw
debugs: []
+version: 0.5.0
cases:
-
id: 0
@@ -55,7 +56,7 @@ cases:
name: t3
success: true
options:
- partitionNum: 1
+ partitionNum: 8
replicaNum: 1
-
id: 3
@@ -66,14 +67,14 @@ cases:
create table t3 (c1 string NOT NULL,c2 int,c3 timestamp,c4 timestamp,index(key=(c1),ts=c4,ttl=0m))
options (
partitionnum = 1,
- distribution = [ ('{tb_endpoint_0}',[])]
+ distribution = [ ('{tb_endpoint_0}',['{tb_endpoint_1}','{tb_endpoint_2}'])]
);
expect:
name: t3
success: true
options:
partitionNum: 1
- replicaNum: 1
+ replicaNum: 3
-
id: 4
desc: 创建表时没有distribution
@@ -109,7 +110,8 @@ cases:
success: false
-
id: 6
- desc: partitionnum=0
+ desc: partitionnum=0,指定distribution
+ tags: ["TODO","bug修复后验证"]
mode: standalone-unsupport
inputs:
- name: t3
@@ -121,7 +123,11 @@ cases:
distribution = [ ('{tb_endpoint_0}', [ '{tb_endpoint_1}','{tb_endpoint_2}' ])]
);
expect:
- success: false
+ name: t3
+ success: true
+ options:
+ partitionNum: 1
+ replicaNum: 3
-
id: 7
desc: partitionnum=10
@@ -288,7 +294,7 @@ cases:
success: true
options:
partitionNum: 1
- replicaNum: 1
+ replicaNum: 3
-
id: 18
desc: 只有replicanum
@@ -303,11 +309,11 @@ cases:
name: t3
success: true
options:
- partitionNum: 1
+ partitionNum: 8
replicaNum: 1
-
id: 19
- desc: 只有distribution
+ desc: 没有replicaNum,distribution的个数和tablet数量不一致
inputs:
- name: t3
sql: |
@@ -316,11 +322,7 @@ cases:
distribution = [ ('{tb_endpoint_0}', [])]
);
expect:
- name: t3
- success: true
- options:
- partitionNum: 1
- replicaNum: 1
+ success: false
-
id: 20
desc: distribution指定的tablet不存在
@@ -379,8 +381,39 @@ cases:
options:
partitionNum: 1
replicaNum: 3
-
-
+ -
+ id: 23
+ tags: ["TODO","bug修复后验证"]
+ desc: partitionnum=0,没有指定distribution
+ mode: standalone-unsupport
+ inputs:
+ - name: t3
+ sql: |
+ create table t3 (c1 string NOT NULL,c2 int,c3 timestamp,c4 timestamp,index(key=(c1),ts=c4,ttl=0m))
+ options (
+ partitionnum = 0,
+ replicanum = 3
+ );
+ expect:
+ success: false
+ -
+ id: 24
+ desc: 没有partitionnum和replicanum,指定distribution
+ tags: ["TODO","bug修复后验证"]
+ mode: standalone-unsupport
+ inputs:
+ - name: t3
+ sql: |
+ create table t3 (c1 string NOT NULL,c2 int,c3 timestamp,c4 timestamp,index(key=(c1),ts=c4,ttl=0m))
+ options (
+ distribution = [ ('{tb_endpoint_0}', [ '{tb_endpoint_1}','{tb_endpoint_2}' ])]
+ );
+ expect:
+ name: t3
+ success: true
+ options:
+ partitionNum: 1
+ replicaNum: 3
diff --git a/cases/function/ddl/test_ttl.yaml b/cases/function/ddl/test_ttl.yaml
index 7fb6582f47e..ba2456856c1 100644
--- a/cases/function/ddl/test_ttl.yaml
+++ b/cases/function/ddl/test_ttl.yaml
@@ -1,5 +1,6 @@
db: test_zw
debugs: []
+version: 0.5.0
cases:
-
id: 0
@@ -194,15 +195,15 @@ cases:
indexs: ["index1:c1:c4:(10m,2):absandlat"]
rows:
- [1,"aa", 1, 1590738990000,1590738990000]
- - [2,"aa", 2, 1590738990000,1590738990000]
- - [3,"aa", 3, 1590738990000,1590738990000]
+ - [2,"aa", 2, 1590738990000,1590738991000]
+ - [3,"aa", 3, 1590738990000,1590738992000]
sql: select * from {0};
expect:
columns: ["id int","c1 string","c2 int","c3 timestamp","c4 timestamp"]
order: id
rows:
- - [2,"aa", 2, 1590738990000,1590738990000]
- - [3,"aa", 3, 1590738990000,1590738990000]
+ - [2,"aa", 2, 1590738990000,1590738991000]
+ - [3,"aa", 3, 1590738990000,1590738992000]
-
id: 23
desc: 指定ttl_type=absorlat,部分数据过期
diff --git a/cases/function/disk_table/disk_table.yaml b/cases/function/disk_table/disk_table.yaml
new file mode 100644
index 00000000000..33c0b45e0be
--- /dev/null
+++ b/cases/function/disk_table/disk_table.yaml
@@ -0,0 +1,486 @@
+# Copyright 2021 4Paradigm
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+db: test_zw
+debugs: []
+version: 0.5.0
+cases:
+ -
+ id: 0
+ desc: 创建SSD表,插入多条数据,查询
+ inputs:
+ -
+ columns : ["c1 string","c2 int","c3 bigint","c4 timestamp"]
+ indexs: ["index1:c1:c4"]
+ storage: SSD
+ rows:
+ - ["aa", 2, 3, 1590738989000]
+ - ["bb", 21, 31, 1590738990000]
+ - ["cc", 41, 51, 1590738991000]
+ sql: select * from {0};
+ expect:
+ columns: ["c1 string","c2 int","c3 bigint","c4 timestamp"]
+ rows:
+ - ["aa", 2, 3, 1590738989000]
+ - ["bb", 21, 31, 1590738990000]
+ - ["cc", 41, 51, 1590738991000]
+ -
+ id: 1
+ desc: 创建HDD表,插入多条数据,查询
+ inputs:
+ -
+ columns : ["c1 string","c2 int","c3 bigint","c4 timestamp"]
+ indexs: ["index1:c1:c4"]
+ storage: HDD
+ rows:
+ - ["aa", 2, 3, 1590738989000]
+ - ["bb", 21, 31, 1590738990000]
+ - ["cc", 41, 51, 1590738991000]
+ sql: select * from {0};
+ expect:
+ columns: ["c1 string","c2 int","c3 bigint","c4 timestamp"]
+ rows:
+ - ["aa", 2, 3, 1590738989000]
+ - ["bb", 21, 31, 1590738990000]
+ - ["cc", 41, 51, 1590738991000]
+
+ -
+ id: 2
+ desc: ssd和内存表,join
+ inputs:
+ -
+ columns : ["c1 string","c2 int","c3 bigint","c4 timestamp"]
+ indexs: ["index1:c1:c4"]
+ storage: SSD
+ rows:
+ - ["aa", 2, 3, 1590738989000]
+ - ["bb", 21, 31, 1590738990000]
+ - ["cc", 41, 51, 1590738991000]
+ -
+ columns : ["c1 string","c2 int","c3 bigint","c4 timestamp"]
+ indexs: ["index1:c1:c4"]
+ storage: memory
+ rows:
+ - ["aa", 2, 3, 1590738989000]
+ - ["bb", 21, 31, 1590738990000]
+ - ["cc", 41, 51, 1590738991000]
+ sql: select {0}.c1,{0}.c2,{1}.c3,{1}.c4 from {0} last join {1} ORDER BY {1}.c4 on {0}.c1={1}.c1;
+ expect:
+ columns: ["c1 string","c2 int","c3 bigint","c4 timestamp"]
+ rows:
+ - ["aa", 2, 3, 1590738989000]
+ - ["bb", 21, 31, 1590738990000]
+ - ["cc", 41, 51, 1590738991000]
+ -
+ id: 3
+ desc: hdd和内存表,join
+ inputs:
+ -
+ columns : ["c1 string","c2 int","c3 bigint","c4 timestamp"]
+ indexs: ["index1:c1:c4"]
+ storage: HDD
+ rows:
+ - ["aa", 2, 3, 1590738989000]
+ - ["bb", 21, 31, 1590738990000]
+ - ["cc", 41, 51, 1590738991000]
+ -
+ columns : ["c1 string","c2 int","c3 bigint","c4 timestamp"]
+ indexs: ["index1:c1:c4"]
+ storage: memory
+ rows:
+ - ["aa", 2, 3, 1590738989000]
+ - ["bb", 21, 31, 1590738990000]
+ - ["cc", 41, 51, 1590738991000]
+ sql: select {0}.c1,{0}.c2,{1}.c3,{1}.c4 from {0} last join {1} ORDER BY {1}.c4 on {0}.c1={1}.c1;
+ expect:
+ columns: ["c1 string","c2 int","c3 bigint","c4 timestamp"]
+ rows:
+ - ["aa", 2, 3, 1590738989000]
+ - ["bb", 21, 31, 1590738990000]
+ - ["cc", 41, 51, 1590738991000]
+ -
+ id: 4
+ desc: 内存表和ssd,join
+ inputs:
+ -
+ columns : ["c1 string","c2 int","c3 bigint","c4 timestamp"]
+ indexs: ["index1:c1:c4"]
+ storage: memory
+ rows:
+ - ["aa", 2, 3, 1590738989000]
+ - ["bb", 21, 31, 1590738990000]
+ - ["cc", 41, 51, 1590738991000]
+ -
+ columns : ["c1 string","c2 int","c3 bigint","c4 timestamp"]
+ indexs: ["index1:c1:c4"]
+ storage: SSD
+ rows:
+ - ["aa", 2, 3, 1590738989000]
+ - ["bb", 21, 31, 1590738990000]
+ - ["cc", 41, 51, 1590738991000]
+ sql: select {0}.c1,{0}.c2,{1}.c3,{1}.c4 from {0} last join {1} ORDER BY {1}.c4 on {0}.c1={1}.c1;
+ expect:
+ columns: ["c1 string","c2 int","c3 bigint","c4 timestamp"]
+ rows:
+ - ["aa", 2, 3, 1590738989000]
+ - ["bb", 21, 31, 1590738990000]
+ - ["cc", 41, 51, 1590738991000]
+ -
+ id: 5
+ desc: 内存表和hdd,join
+ inputs:
+ -
+ columns : ["c1 string","c2 int","c3 bigint","c4 timestamp"]
+ indexs: ["index1:c1:c4"]
+ storage: memory
+ rows:
+ - ["aa", 2, 3, 1590738989000]
+ - ["bb", 21, 31, 1590738990000]
+ - ["cc", 41, 51, 1590738991000]
+ -
+ columns : ["c1 string","c2 int","c3 bigint","c4 timestamp"]
+ indexs: ["index1:c1:c4"]
+ storage: HDD
+ rows:
+ - ["aa", 2, 3, 1590738989000]
+ - ["bb", 21, 31, 1590738990000]
+ - ["cc", 41, 51, 1590738991000]
+ sql: select {0}.c1,{0}.c2,{1}.c3,{1}.c4 from {0} last join {1} ORDER BY {1}.c4 on {0}.c1={1}.c1;
+ expect:
+ columns: ["c1 string","c2 int","c3 bigint","c4 timestamp"]
+ rows:
+ - ["aa", 2, 3, 1590738989000]
+ - ["bb", 21, 31, 1590738990000]
+ - ["cc", 41, 51, 1590738991000]
+ -
+ id: 6
+ desc: hdd和ssd,join
+ inputs:
+ -
+ columns : ["c1 string","c2 int","c3 bigint","c4 timestamp"]
+ indexs: ["index1:c1:c4"]
+ storage: SSD
+ rows:
+ - ["aa", 2, 3, 1590738989000]
+ - ["bb", 21, 31, 1590738990000]
+ - ["cc", 41, 51, 1590738991000]
+ -
+ columns : ["c1 string","c2 int","c3 bigint","c4 timestamp"]
+ indexs: ["index1:c1:c4"]
+ storage: HDD
+ rows:
+ - ["aa", 2, 3, 1590738989000]
+ - ["bb", 21, 31, 1590738990000]
+ - ["cc", 41, 51, 1590738991000]
+ sql: select {0}.c1,{0}.c2,{1}.c3,{1}.c4 from {0} last join {1} ORDER BY {1}.c4 on {0}.c1={1}.c1;
+ expect:
+ columns: ["c1 string","c2 int","c3 bigint","c4 timestamp"]
+ rows:
+ - ["aa", 2, 3, 1590738989000]
+ - ["bb", 21, 31, 1590738990000]
+ - ["cc", 41, 51, 1590738991000]
+ -
+ id: 7
+ desc: 内存表、ssd和hdd,三表join
+ inputs:
+ -
+ columns : ["c1 string","c2 int","c3 bigint","c4 timestamp"]
+ indexs: ["index1:c1:c4"]
+ storage: memory
+ rows:
+ - ["aa", 2, 3, 1590738989000]
+ - ["bb", 21, 31, 1590738990000]
+ - ["cc", 41, 51, 1590738991000]
+ -
+ columns : ["c1 string","c2 int","c3 bigint","c4 timestamp"]
+ indexs: ["index1:c1:c4"]
+ storage: SSD
+ rows:
+ - ["aa", 2, 3, 1590738989000]
+ - ["bb", 21, 31, 1590738990000]
+ - ["cc", 41, 51, 1590738991000]
+ -
+ columns : ["c1 string","c2 int","c3 bigint","c4 timestamp"]
+ indexs: ["index1:c1:c4"]
+ storage: HDD
+ rows:
+ - ["aa", 2, 3, 1590738989000]
+ - ["bb", 21, 31, 1590738990000]
+ - ["cc", 41, 51, 1590738991000]
+ sql: select {1}.c1,{1}.c2,{2}.c3,{0}.c4 from {0} last join {1} ORDER BY {1}.c4 on {0}.c1={1}.c1 last join {2} on {0}.c1 = {2}.c1;
+ expect:
+ columns: ["c1 string","c2 int","c3 bigint","c4 timestamp"]
+ rows:
+ - ["aa", 2, 3, 1590738989000]
+ - ["bb", 21, 31, 1590738990000]
+ - ["cc", 41, 51, 1590738991000]
+
+ - id: 8
+ desc: ssd union 内存表
+ inputs:
+ - columns: ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"]
+ indexs: ["index1:c3:c7"]
+ storage: SSD
+ rows:
+ - [1,"aa",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [4,"dd",20,33,1.4,2.4,1590738993000,"2020-05-04"]
+ - [5,"ee",21,34,1.5,2.5,1590738994000,"2020-05-05"]
+ - columns: ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"]
+ indexs: ["index1:c3:c7"]
+ storage: memory
+ rows:
+ - [2,"bb",20,31,1.2,2.2,1590738991000,"2020-05-02"]
+ - [3,"cc",20,32,1.3,2.3,1590738992000,"2020-05-03"]
+ sql: |
+ SELECT id, c1, c3, sum(c4) OVER w1 as w1_c4_sum FROM {0} WINDOW w1 AS (UNION {1} PARTITION BY {0}.c3 ORDER BY {0}.c7 ROWS BETWEEN 2 PRECEDING AND CURRENT ROW);
+ expect:
+ order: id
+ columns: ["id int","c1 string","c3 int","w1_c4_sum bigint"]
+ rows:
+ - [1,"aa",20,30]
+ - [4,"dd",20,96]
+ - [5,"ee",21,34]
+ - id: 9
+ desc: hdd union 内存表
+ inputs:
+ - columns: ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"]
+ indexs: ["index1:c3:c7"]
+ storage: HDD
+ rows:
+ - [1,"aa",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [4,"dd",20,33,1.4,2.4,1590738993000,"2020-05-04"]
+ - [5,"ee",21,34,1.5,2.5,1590738994000,"2020-05-05"]
+ - columns: ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"]
+ indexs: ["index1:c3:c7"]
+ storage: memory
+ rows:
+ - [2,"bb",20,31,1.2,2.2,1590738991000,"2020-05-02"]
+ - [3,"cc",20,32,1.3,2.3,1590738992000,"2020-05-03"]
+ sql: |
+ SELECT id, c1, c3, sum(c4) OVER w1 as w1_c4_sum FROM {0} WINDOW w1 AS (UNION {1} PARTITION BY {0}.c3 ORDER BY {0}.c7 ROWS BETWEEN 2 PRECEDING AND CURRENT ROW);
+ expect:
+ order: id
+ columns: ["id int","c1 string","c3 int","w1_c4_sum bigint"]
+ rows:
+ - [1,"aa",20,30]
+ - [4,"dd",20,96]
+ - [5,"ee",21,34]
+ - id: 10
+ desc: 内存表 union ssd
+ inputs:
+ - columns: ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"]
+ indexs: ["index1:c3:c7"]
+ storage: memory
+ rows:
+ - [1,"aa",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [4,"dd",20,33,1.4,2.4,1590738993000,"2020-05-04"]
+ - [5,"ee",21,34,1.5,2.5,1590738994000,"2020-05-05"]
+ - columns: ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"]
+ indexs: ["index1:c3:c7"]
+ storage: SSD
+ rows:
+ - [2,"bb",20,31,1.2,2.2,1590738991000,"2020-05-02"]
+ - [3,"cc",20,32,1.3,2.3,1590738992000,"2020-05-03"]
+ sql: |
+ SELECT id, c1, c3, sum(c4) OVER w1 as w1_c4_sum FROM {0} WINDOW w1 AS (UNION {1} PARTITION BY {0}.c3 ORDER BY {0}.c7 ROWS BETWEEN 2 PRECEDING AND CURRENT ROW);
+ expect:
+ order: id
+ columns: ["id int","c1 string","c3 int","w1_c4_sum bigint"]
+ rows:
+ - [1,"aa",20,30]
+ - [4,"dd",20,96]
+ - [5,"ee",21,34]
+ - id: 11
+ desc: 内存表 union hdd
+ inputs:
+ - columns: ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"]
+ indexs: ["index1:c3:c7"]
+ storage: memory
+ rows:
+ - [1,"aa",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [4,"dd",20,33,1.4,2.4,1590738993000,"2020-05-04"]
+ - [5,"ee",21,34,1.5,2.5,1590738994000,"2020-05-05"]
+ - columns: ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"]
+ indexs: ["index1:c3:c7"]
+ storage: HDD
+ rows:
+ - [2,"bb",20,31,1.2,2.2,1590738991000,"2020-05-02"]
+ - [3,"cc",20,32,1.3,2.3,1590738992000,"2020-05-03"]
+ sql: |
+ SELECT id, c1, c3, sum(c4) OVER w1 as w1_c4_sum FROM {0} WINDOW w1 AS (UNION {1} PARTITION BY {0}.c3 ORDER BY {0}.c7 ROWS BETWEEN 2 PRECEDING AND CURRENT ROW);
+ expect:
+ order: id
+ columns: ["id int","c1 string","c3 int","w1_c4_sum bigint"]
+ rows:
+ - [1,"aa",20,30]
+ - [4,"dd",20,96]
+ - [5,"ee",21,34]
+ - id: 12
+ desc: SSD 插入索引和ts 一样的数据
+ mode: request-unsupport
+ inputs:
+ -
+ columns : ["c1 string","c2 int","c3 bigint","c4 timestamp"]
+ indexs: ["index1:c1:c4"]
+ storage: SSD
+ rows:
+ - ["aa", 2, 3, 1590738989000]
+ - ["aa", 2, 3, 1590738989000]
+ - ["aa", 2, 3, 1590738989000]
+ sql: select * from {0};
+ expect:
+ columns: ["c1 string","c2 int","c3 bigint","c4 timestamp"]
+ rows:
+ - ["aa", 2, 3, 1590738989000]
+ - id: 13
+ desc: HDD 插入索引和ts 一样的数据
+ mode: request-unsupport
+ inputs:
+ -
+ columns : ["c1 string","c2 int","c3 bigint","c4 timestamp"]
+ indexs: ["index1:c1:c4"]
+ storage: HDD
+ rows:
+ - ["aa", 2, 3, 1590738989000]
+ - ["aa", 2, 3, 1590738989000]
+ - ["aa", 2, 3, 1590738989000]
+ sql: select * from {0};
+ expect:
+ columns: ["c1 string","c2 int","c3 bigint","c4 timestamp"]
+ rows:
+ - ["aa", 2, 3, 1590738989000]
+ - id: 14
+ desc: storage_mode=其他字符
+ mode: request-unsupport
+ sql: |
+ create table auto_MDYewbTv(
+ c1 string,
+ c2 int,
+ c3 bigint,
+ c4 timestamp,
+ index(key=(c1),ts=c4))options(partitionnum=1,replicanum=1,storage_mode="hdp");
+ expect:
+ success: false
+
+ - id: 15
+ desc: 创建磁盘表,ttl_type=latest,ttl=4,insert 10
+ mode: request-unsupport
+ inputs:
+ -
+ columns : ["c1 string","c2 int","c3 bigint","c4 timestamp"]
+ indexs: ["index1:c1:c4:4:latest"]
+ storage: SSD
+ rows:
+ - ["bb", 2, 3, 1590738989000]
+ - ["bb", 4, 5, 1590738990000]
+ - ["bb", 6, 7, 1590738991000]
+ - ["bb", 8, 9, 1590738992000]
+ - ["bb", 10, 11, 1590738993000]
+ - ["bb", 12, 13, 1590738994000]
+ - ["bb", 14, 15, 1590738995000]
+ - ["bb", 16, 17, 1590738996000]
+ - ["bb", 18, 19, 1590738997000]
+ - ["bb", 20, 21, 1590738998000]
+ sql: select c1,c2,c3 from {0};
+ expect:
+ columns: ["c1 string","c2 int","c3 bigint"]
+ rows:
+ - ["bb", 20, 21]
+ - ["bb", 18, 19]
+ - ["bb", 16, 17]
+ - ["bb", 14, 15]
+
+ - id: 16
+ desc: 创建磁盘表,ttl_type=absolute,ttl=10m, insert 10
+ mode: request-unsupport
+ inputs:
+ -
+ columns : ["c1 string","c2 int","c3 bigint","c4 timestamp"]
+ indexs: ["index1:c1:c4:10m:absolute"]
+ storage: hdd
+ rows:
+ - ["bb", 2, 3, "{currentTime}-100"]
+ - ["bb", 4, 5, "{currentTime}-200"]
+ - ["bb", 6, 7, "{currentTime}-599000"]
+ - ["bb", 8, 9, "{currentTime}-600000"]
+ - ["bb", 10, 11, "{currentTime}-600005"]
+ - ["bb", 12, 13, "{currentTime}-600006"]
+ - ["bb", 14, 15, "{currentTime}-600007"]
+ - ["bb", 16, 17, "{currentTime}-600008"]
+ - ["bb", 18, 19, "{currentTime}-600009"]
+ - ["bb", 20, 21, "{currentTime}-600010"]
+ sql: select c1,c2,c3 from {0};
+ expect:
+ columns: ["c1 string","c2 int","c3 bigint"]
+ rows:
+ - ["bb", 2, 3]
+ - ["bb", 4, 5]
+ - ["bb", 6, 7]
+
+ - id: 17
+ desc: 创建磁盘表,有两个索引,分别为latest和absolute,insert=10
+ mode: request-unsupport
+ inputs:
+ -
+ columns : ["c1 string","c2 int","c3 bigint","c4 timestamp"]
+ indexs: ["index2:c2:c4:4:latest","index1:c1:c4:10m:absolute"]
+ storage: hdd
+ rows:
+ - ["bb", 2, 3, "{currentTime}-100"]
+ - ["bb", 2, 5, "{currentTime}-200"]
+ - ["bb", 2, 7, "{currentTime}-59"]
+ - ["bb", 2, 9, "{currentTime}-600"]
+ - ["bb", 2, 11, "{currentTime}-602"]
+ - ["bb", 2, 13, "{currentTime}-600006"]
+ - ["bb", 2, 15, "{currentTime}-600007"]
+ - ["bb", 2, 17, "{currentTime}-600008"]
+ - ["bb", 2, 19, "{currentTime}-600009"]
+ - ["bb", 2, 21, "{currentTime}-600010"]
+ sql: select c1,c2,c3 from {0};
+ expect:
+ columns: ["c1 string","c2 int","c3 bigint"]
+ rows:
+ - ["bb", 2, 7]
+ - ["bb", 2, 3]
+ - ["bb", 2, 5]
+ - ["bb", 2, 9]
+
+ - id: 18
+    desc: create disk table with two indexes, latest and absolute, insert 10 rows, with where condition
+ mode: request-unsupport
+ inputs:
+ -
+ columns : ["c1 string","c2 int","c3 bigint","c4 timestamp"]
+ indexs: ["index2:c2:c4:4:latest","index1:c1:c4:10m:absolute"]
+ storage: hdd
+ rows:
+ - ["bb", 2, 3, "{currentTime}-100"]
+ - ["bb", 2, 5, "{currentTime}-200"]
+ - ["bb", 2, 7, "{currentTime}-59"]
+ - ["bb", 2, 9, "{currentTime}-600"]
+ - ["bb", 2, 11, "{currentTime}-602"]
+ - ["bb", 2, 13, "{currentTime}-600006"]
+ - ["bb", 2, 15, "{currentTime}-600007"]
+ - ["bb", 2, 17, "{currentTime}-600008"]
+ - ["bb", 2, 19, "{currentTime}-600009"]
+ - ["bb", 2, 21, "{currentTime}-600010"]
+ sql: select c1,c2,c3 from {0} where c1 = "bb";
+ expect:
+ columns: ["c1 string","c2 int","c3 bigint"]
+ rows:
+ - ["bb", 2, 7]
+ - ["bb", 2, 3]
+ - ["bb", 2, 5]
+ - ["bb", 2, 9]
+ - ["bb", 2, 11]
diff --git a/cases/function/dml/multi_insert.yaml b/cases/function/dml/multi_insert.yaml
index a846b0c2014..1f606089abe 100644
--- a/cases/function/dml/multi_insert.yaml
+++ b/cases/function/dml/multi_insert.yaml
@@ -13,6 +13,7 @@
# limitations under the License.
db: multi_insert_db
debugs: []
+version: 0.5.0
cases:
- id: 0
desc: 简单INSERT
diff --git a/cases/function/dml/test_delete.yaml b/cases/function/dml/test_delete.yaml
new file mode 100644
index 00000000000..51e0a39736f
--- /dev/null
+++ b/cases/function/dml/test_delete.yaml
@@ -0,0 +1,597 @@
+# Copyright 2021 4Paradigm
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+db: test_zw
+debugs: []
+version: 0.5.0
+cases:
+ -
+ id: 0
+    desc: delete a single key
+ inputs:
+ -
+ columns: ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"]
+ indexs: ["index1:c1:c7"]
+ rows:
+ - [1,"aa",1,2,3,1.1,2.1,1590738989000,"2020-05-01",true]
+ - [2,"bb",1,2,3,1.1,2.1,1590738989000,"2020-05-01",true]
+ - [3,"aa",1,2,3,1.1,2.1,1590738990000,"2020-05-01",true]
+ sqls:
+ - delete from {0} where c1='aa';
+ - select * from {0};
+ expect:
+ columns: ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"]
+ rows:
+ - [2,"bb",1,2,3,1.1,2.1,1590738989000,"2020-05-01",true]
+ -
+ id: 1
+    desc: delete on a composite index
+ inputs:
+ -
+ columns: ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"]
+ indexs: ["index1:c1|c2:c7"]
+ rows:
+ - [1,"aa",1,2,3,1.1,2.1,1590738989000,"2020-05-01",true]
+ - [2,"bb",1,2,3,1.1,2.1,1590738989000,"2020-05-01",true]
+ - [3,"aa",1,2,3,1.1,2.1,1590738990000,"2020-05-01",true]
+ - [4,"aa",2,2,3,1.1,2.1,1590738991000,"2020-05-01",true]
+ sqls:
+ - delete from {0} where c1='aa' and c2=1;
+ - select * from {0};
+ expect:
+ columns: ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"]
+ order: id
+ rows:
+ - [2,"bb",1,2,3,1.1,2.1,1590738989000,"2020-05-01",true]
+ - [4,"aa",2,2,3,1.1,2.1,1590738991000,"2020-05-01",true]
+ -
+ id: 2
+    desc: delete two keys of one index
+ inputs:
+ -
+ columns: ["c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"]
+ indexs: ["index1:c1:c7"]
+ rows:
+ - ["aa",1,2,3,1.1,2.1,1590738989000,"2020-05-01",true]
+ - ["bb",1,2,3,1.1,2.1,1590738989000,"2020-05-01",true]
+ - ["aa",1,2,3,1.1,2.1,1590738990000,"2020-05-01",true]
+ - ["cc",2,2,3,1.1,2.1,1590738991000,"2020-05-01",true]
+ sqls:
+ - delete from {0} where c1='aa' or c1='cc';
+ expect:
+ success: false
+ msg: fail
+ -
+ id: 3
+    desc: delete keys of two different indexes
+ inputs:
+ -
+ columns: ["c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"]
+ indexs: ["index1:c1:c7","index2:c2:c7"]
+ rows:
+ - ["aa",1,2,3,1.1,2.1,1590738989000,"2020-05-01",true]
+ - ["bb",1,2,3,1.1,2.1,1590738989000,"2020-05-01",true]
+ - ["aa",1,2,3,1.1,2.1,1590738990000,"2020-05-01",true]
+ - ["cc",2,2,3,1.1,2.1,1590738991000,"2020-05-01",true]
+ sqls:
+ - delete from {0} where c1='aa' or c2=1;
+ expect:
+ success: false
+ msg: fail
+ -
+ id: 4
+    desc: two indexes, delete by one of them
+ mode: cluster-unsupport
+ inputs:
+ -
+ columns: ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"]
+ indexs: ["index1:c1:c7","index2:c2:c7"]
+ rows:
+ - [1,"aa",1,2,3,1.1,2.1,1590738989000,"2020-05-01",true]
+ - [2,"bb",1,2,3,1.1,2.1,1590738989000,"2020-05-01",true]
+ - [3,"aa",2,2,3,1.1,2.1,1590738990000,"2020-05-01",true]
+ - [4,"aa",1,2,3,1.1,2.1,1590738991000,"2020-05-01",true]
+ sqls:
+ - delete from {0} where c2=2;
+ sql: SELECT id, c2, count(c4) OVER w1 as w1_c4_count FROM {0} WINDOW w1 AS (PARTITION BY {0}.c2 ORDER BY {0}.c7 ROWS BETWEEN 2 PRECEDING AND CURRENT ROW);
+ expect:
+ columns: ["id int","c2 smallint","w1_c4_count bigint"]
+ order: id
+ rows:
+ - [1,1,1]
+ - [2,1,2]
+ - [4,1,3]
+ -
+ id: 5
+    desc: delete on a non-index column
+ inputs:
+ -
+ columns: ["c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"]
+ indexs: ["index1:c1:c7"]
+ rows:
+ - ["aa",1,2,3,1.1,2.1,1590738989000,"2020-05-01",true]
+ - ["bb",1,2,3,1.1,2.1,1590738989000,"2020-05-01",true]
+ - ["aa",1,2,3,1.1,2.1,1590738990000,"2020-05-01",true]
+ - ["cc",2,2,3,1.1,2.1,1590738991000,"2020-05-01",true]
+ sqls:
+ - delete from {0} where c2=1;
+ expect:
+ success: false
+ msg: fail
+ -
+ id: 6
+    desc: delete a key that does not exist
+ inputs:
+ -
+ columns: ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"]
+ indexs: ["index1:c1:c7"]
+ rows:
+ - [1,"aa",1,2,3,1.1,2.1,1590738989000,"2020-05-01",true]
+ - [2,"bb",1,2,3,1.1,2.1,1590738989000,"2020-05-01",true]
+ - [3,"aa",1,2,3,1.1,2.1,1590738990000,"2020-05-01",true]
+ sqls:
+ - delete from {0} where c1='cc';
+ - select * from {0};
+ expect:
+ columns: ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"]
+ order: id
+ rows:
+ - [1,"aa",1,2,3,1.1,2.1,1590738989000,"2020-05-01",true]
+ - [2,"bb",1,2,3,1.1,2.1,1590738989000,"2020-05-01",true]
+ - [3,"aa",1,2,3,1.1,2.1,1590738990000,"2020-05-01",true]
+ -
+ id: 7
+ desc: delete null
+ inputs:
+ -
+ columns: ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"]
+ indexs: ["index1:c1:c7"]
+ rows:
+ - [1,null,1,2,3,1.1,2.1,1590738989000,"2020-05-01",true]
+ - [2,"bb",1,2,3,1.1,2.1,1590738989000,"2020-05-01",true]
+ - [3,null,1,2,3,1.1,2.1,1590738990000,"2020-05-01",true]
+ sqls:
+ - delete from {0} where c1=null;
+ - select * from {0};
+ expect:
+ columns: ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"]
+ rows:
+ - [2,"bb",1,2,3,1.1,2.1,1590738989000,"2020-05-01",true]
+ -
+ id: 8
+    desc: delete an empty-string key
+ inputs:
+ -
+ columns: ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"]
+ indexs: ["index1:c1:c7"]
+ rows:
+ - [1,"",1,2,3,1.1,2.1,1590738989000,"2020-05-01",true]
+ - [2,"bb",1,2,3,1.1,2.1,1590738989000,"2020-05-01",true]
+ - [3,"",1,2,3,1.1,2.1,1590738990000,"2020-05-01",true]
+ sqls:
+ - delete from {0} where c1='';
+ - select * from {0};
+ expect:
+ columns: ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"]
+ rows:
+ - [2,"bb",1,2,3,1.1,2.1,1590738989000,"2020-05-01",true]
+ -
+ id: 10
+ desc: delete int
+ inputs:
+ -
+ columns: ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"]
+ indexs: ["index1:c3:c7"]
+ rows:
+ - [1,"aa",2,2,3,1.1,2.1,1590738989000,"2020-05-01",true]
+ - [2,"bb",1,2,3,1.1,2.1,1590738989000,"2020-05-01",true]
+ - [3,"aa",1,3,3,1.1,2.1,1590738990000,"2020-05-01",true]
+ sqls:
+ - delete from {0} where c3=3;
+ - select * from {0};
+ expect:
+ columns: ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"]
+ order: id
+ rows:
+ - [1,"aa",2,2,3,1.1,2.1,1590738989000,"2020-05-01",true]
+ - [2,"bb",1,2,3,1.1,2.1,1590738989000,"2020-05-01",true]
+ -
+ id: 11
+ desc: delete smallint
+ inputs:
+ -
+ columns: ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"]
+ indexs: ["index1:c2:c7"]
+ rows:
+ - [1,"aa",1,2,3,1.1,2.1,1590738989000,"2020-05-01",true]
+ - [2,"bb",2,2,3,1.1,2.1,1590738989000,"2020-05-01",true]
+ - [3,"aa",1,2,3,1.1,2.1,1590738990000,"2020-05-01",true]
+ sqls:
+ - delete from {0} where c2=1;
+ - select * from {0};
+ expect:
+ columns: ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"]
+ rows:
+ - [2,"bb",2,2,3,1.1,2.1,1590738989000,"2020-05-01",true]
+ -
+ id: 12
+ desc: delete bigint
+ inputs:
+ -
+ columns: ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"]
+ indexs: ["index1:c4:c7"]
+ rows:
+ - [1,"aa",1,2,4,1.1,2.1,1590738989000,"2020-05-01",true]
+ - [2,"bb",1,2,4,1.1,2.1,1590738989000,"2020-05-01",true]
+ - [3,"aa",1,2,3,1.1,2.1,1590738990000,"2020-05-01",true]
+ sqls:
+ - delete from {0} where c4=4;
+ - select * from {0};
+ expect:
+ columns: ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"]
+ rows:
+ - [3,"aa",1,2,3,1.1,2.1,1590738990000,"2020-05-01",true]
+ -
+ id: 13
+ desc: delete date
+ inputs:
+ -
+ columns: ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"]
+ indexs: ["index1:c8:c7"]
+ rows:
+ - [1,"aa",1,2,3,1.1,2.1,1590738989000,"2020-05-01",true]
+ - [2,"bb",1,2,3,1.1,2.1,1590738989000,"2020-05-02",true]
+ - [3,"aa",1,2,3,1.1,2.1,1590738990000,"2020-05-01",true]
+ sqls:
+ - delete from {0} where c8='2020-05-02';
+ - select * from {0};
+ expect:
+ columns: ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"]
+ order: id
+ rows:
+ - [1,"aa",1,2,3,1.1,2.1,1590738989000,"2020-05-01",true]
+ - [3,"aa",1,2,3,1.1,2.1,1590738990000,"2020-05-01",true]
+ -
+ id: 14
+ desc: delete timestamp
+ inputs:
+ -
+ columns: ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"]
+ indexs: ["index1:c7:c7"]
+ rows:
+ - [1,"aa",1,2,3,1.1,2.1,1590738989000,"2020-05-01",true]
+ - [2,"bb",1,2,3,1.1,2.1,1590738989000,"2020-05-01",true]
+ - [3,"aa",1,2,3,1.1,2.1,1590738990000,"2020-05-01",true]
+ sqls:
+ - delete from {0} where c7=1590738989000;
+ - select * from {0};
+ expect:
+ columns: ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"]
+ rows:
+ - [3,"aa",1,2,3,1.1,2.1,1590738990000,"2020-05-01",true]
+ -
+ id: 15
+ desc: delete bool
+ inputs:
+ -
+ columns: ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"]
+ indexs: ["index1:c9:c7"]
+ rows:
+ - [1,"aa",1,2,3,1.1,2.1,1590738989000,"2020-05-01",false]
+ - [2,"bb",1,2,3,1.1,2.1,1590738989000,"2020-05-01",true]
+ - [3,"aa",1,2,3,1.1,2.1,1590738990000,"2020-05-01",true]
+ sqls:
+ - delete from {0} where c9=true;
+ - select * from {0};
+ expect:
+ columns: ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"]
+ rows:
+ - [1,"aa",1,2,3,1.1,2.1,1590738989000,"2020-05-01",false]
+ -
+ id: 16
+    desc: delete twice on the same index with different keys
+ inputs:
+ -
+ columns: ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"]
+ indexs: ["index1:c1:c7"]
+ rows:
+ - [1,"aa",1,2,3,1.1,2.1,1590738989000,"2020-05-01",true]
+ - [2,"bb",1,2,3,1.1,2.1,1590738989000,"2020-05-01",true]
+ - [3,"cc",1,2,3,1.1,2.1,1590738990000,"2020-05-01",true]
+ sqls:
+ - delete from {0} where c1='aa';
+ - delete from {0} where c1='cc';
+ - select * from {0};
+ expect:
+ columns: ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"]
+ rows:
+ - [2,"bb",1,2,3,1.1,2.1,1590738989000,"2020-05-01",true]
+ -
+ id: 17
+    desc: delete twice on different indexes
+ mode: cluster-unsupport
+ inputs:
+ -
+ columns: ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"]
+ indexs: ["index1:c1:c7","index1:c2:c7"]
+ rows:
+ - [1,"aa",1,2,3,1.1,2.1,1590738989000,"2020-05-01",true]
+ - [2,"bb",1,2,3,1.1,2.1,1590738989000,"2020-05-01",true]
+ - [3,"aa",2,2,3,1.1,2.1,1590738990000,"2020-05-01",true]
+ - [4,"cc",2,2,3,1.1,2.1,1590738991000,"2020-05-01",true]
+ sqls:
+ - delete from {0} where c1='aa';
+ - delete from {0} where c2=2;
+ sql: |
+ SELECT id, c2, count(c4) OVER w1 as w1_c4_count, count(c5) OVER w2 as w2_c5_count FROM {0}
+ WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS BETWEEN 2 PRECEDING AND CURRENT ROW),
+ w2 AS (PARTITION BY {0}.c2 ORDER BY {0}.c7 ROWS BETWEEN 2 PRECEDING AND CURRENT ROW);
+ expect:
+ columns: ["id int","c2 smallint","w1_c4_count bigint","w2_c5_count bigint"]
+ order: id
+ rows:
+ - [1,1,1,1]
+ - [2,1,1,2]
+ -
+ id: 18
+    desc: delete expired data
+ inputs:
+ -
+ columns: ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"]
+ indexs: ["index1:c1:c7:1:latest"]
+ rows:
+ - [1,"aa",1,2,3,1.1,2.1,1590738989000,"2020-05-01",true]
+ - [2,"bb",1,2,3,1.1,2.1,1590738989000,"2020-05-01",true]
+ - [3,"aa",1,2,3,1.1,2.1,1590738990000,"2020-05-01",true]
+ sqls:
+ - delete from {0} where c1='aa';
+ - select * from {0};
+ expect:
+ columns: ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"]
+ rows:
+ - [2,"bb",1,2,3,1.1,2.1,1590738989000,"2020-05-01",true]
+ -
+ id: 19
+    desc: delete from a non-existent table
+ inputs:
+ -
+ columns: ["c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"]
+ indexs: ["index1:c1:c7"]
+ rows:
+ - ["aa",1,2,3,1.1,2.1,1590738989000,"2020-05-01",true]
+ - ["bb",1,2,3,1.1,2.1,1590738989000,"2020-05-01",true]
+ - ["aa",1,2,3,1.1,2.1,1590738990000,"2020-05-01",true]
+ - ["cc",2,2,3,1.1,2.1,1590738991000,"2020-05-01",true]
+ sql: delete from {0}1 where c1='aa';
+ expect:
+ success: false
+ msg: fail
+ -
+ id: 20
+    desc: delete on a non-existent column
+ inputs:
+ -
+ columns: ["c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"]
+ indexs: ["index1:c1:c7"]
+ rows:
+ - ["aa",1,2,3,1.1,2.1,1590738989000,"2020-05-01",true]
+ - ["bb",1,2,3,1.1,2.1,1590738989000,"2020-05-01",true]
+ - ["aa",1,2,3,1.1,2.1,1590738990000,"2020-05-01",true]
+ - ["cc",2,2,3,1.1,2.1,1590738991000,"2020-05-01",true]
+ sqls:
+ - delete from {0} where c11=1;
+ expect:
+ success: false
+ msg: fail
+ -
+ id: 21
+    desc: delete data in another database
+ inputs:
+ -
+ db: d1
+ columns: ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"]
+ indexs: ["index1:c1:c7"]
+ rows:
+ - [1,"aa",1,2,3,1.1,2.1,1590738989000,"2020-05-01",true]
+ - [2,"bb",1,2,3,1.1,2.1,1590738989000,"2020-05-01",true]
+ - [3,"aa",1,2,3,1.1,2.1,1590738990000,"2020-05-01",true]
+ sqls:
+ - delete from d1.{0} where c1='aa';
+ - select * from d1.{0};
+ expect:
+ columns: ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"]
+ rows:
+ - [2,"bb",1,2,3,1.1,2.1,1590738989000,"2020-05-01",true]
+ -
+ id: 22
+    desc: two indexes share the same key column, delete one key
+ inputs:
+ -
+ columns: ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"]
+ indexs: ["index1:c1:c7","index2:c1:c4:1:latest"]
+ rows:
+ - [1,"aa",1,2,3,1.1,2.1,1590738989000,"2020-05-01",true]
+ - [2,"bb",1,2,3,1.1,2.1,1590738989000,"2020-05-01",true]
+ - [3,"aa",1,2,3,1.1,2.1,1590738990000,"2020-05-01",true]
+ - [4,"cc",2,2,3,1.1,2.1,1590738990000,"2020-05-01",true]
+ - [5,"cc",2,2,3,1.1,2.1,1590738991000,"2020-05-01",true]
+ sqls:
+ - delete from {0} where c1='aa';
+ - select * from {0};
+ expect:
+ columns: ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"]
+ order: id
+ rows:
+ - [2,"bb",1,2,3,1.1,2.1,1590738989000,"2020-05-01",true]
+ - [4,"cc",2,2,3,1.1,2.1,1590738990000,"2020-05-01",true]
+ - [5,"cc",2,2,3,1.1,2.1,1590738991000,"2020-05-01",true]
+ -
+ id: 23
+    desc: delete all data
+ inputs:
+ -
+ columns: ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"]
+ indexs: ["index1:c2:c7"]
+ rows:
+ - [1,"aa",1,2,3,1.1,2.1,1590738989000,"2020-05-01",true]
+ - [2,"bb",1,2,3,1.1,2.1,1590738989000,"2020-05-01",true]
+ - [3,"aa",1,2,3,1.1,2.1,1590738990000,"2020-05-01",true]
+ sqls:
+ - delete from {0} where c2=1;
+ - select * from {0};
+ expect:
+ count: 0
+ -
+ id: 24
+    desc: two indexes, data expired under one index, delete by the other index
+ mode: cluster-unsupport
+ inputs:
+ -
+ columns: ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"]
+ indexs: ["index1:c1:c7:1:latest","index2:c2:c7"]
+ rows:
+ - [1,"aa",1,2,3,1.1,2.1,1590738989000,"2020-05-01",true]
+ - [2,"bb",1,2,3,1.1,2.1,1590738989000,"2020-05-01",true]
+ - [3,"aa",1,2,3,1.1,2.1,1590738990000,"2020-05-01",true]
+ - [4,"cc",2,2,3,1.1,2.1,1590738990000,"2020-05-01",true]
+ - [5,"cc",2,2,3,1.1,2.1,1590738991000,"2020-05-01",true]
+ sqls:
+ - delete from {0} where c2=1;
+ sql: SELECT id, c2, count(c4) OVER w1 as w1_c4_count FROM {0} WINDOW w1 AS (PARTITION BY {0}.c2 ORDER BY {0}.c7 ROWS BETWEEN 2 PRECEDING AND CURRENT ROW);
+ expect:
+ columns: ["id int","c2 smallint","w1_c4_count bigint"]
+ order: id
+ rows:
+ - [4,2,1]
+ - [5,2,2]
+ -
+ id: 25
+    desc: data expired, delete a different pk
+ inputs:
+ -
+ columns: ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"]
+ indexs: ["index1:c1:c7:1:latest"]
+ rows:
+ - [1,"aa",1,2,3,1.1,2.1,1590738989000,"2020-05-01",true]
+ - [2,"bb",1,2,3,1.1,2.1,1590738989000,"2020-05-01",true]
+ - [3,"aa",1,2,3,1.1,2.1,1590738990000,"2020-05-01",true]
+ sqls:
+ - delete from {0} where c1='bb';
+ - select * from {0};
+ expect:
+ columns: ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"]
+ rows:
+ - [3,"aa",1,2,3,1.1,2.1,1590738990000,"2020-05-01",true]
+ -
+ id: 26
+    desc: delete with an inequality (!=) condition
+ inputs:
+ -
+ columns: ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"]
+ indexs: ["index1:c1:c7"]
+ rows:
+ - [1,"aa",1,2,3,1.1,2.1,1590738989000,"2020-05-01",true]
+ - [2,"bb",1,2,3,1.1,2.1,1590738989000,"2020-05-01",true]
+ - [3,"cc",1,2,3,1.1,2.1,1590738990000,"2020-05-01",true]
+ sqls:
+ - delete from {0} where c1!='cc';
+ expect:
+ success: false
+ msg: fail
+ -
+ id: 27
+    desc: delete with a comparison operator
+ inputs:
+ -
+ columns: ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"]
+ indexs: ["index1:c2:c7"]
+ rows:
+ - [1,"aa",1,2,3,1.1,2.1,1590738989000,"2020-05-01",true]
+ - [2,"bb",2,2,3,1.1,2.1,1590738989000,"2020-05-01",true]
+ - [3,"aa",3,2,3,1.1,2.1,1590738990000,"2020-05-01",true]
+ sqls:
+ - delete from {0} where c2>=2;
+ expect:
+ success: false
+ msg: fail
+ -
+ id: 28
+    desc: delete on a table named job
+ inputs:
+ -
+ name: job
+ columns: ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"]
+ indexs: ["index1:c1:c7"]
+ rows:
+ - [1,"aa",1,2,3,1.1,2.1,1590738989000,"2020-05-01",true]
+ - [2,"bb",2,2,3,1.1,2.1,1590738989000,"2020-05-01",true]
+ - [3,"aa",3,2,3,1.1,2.1,1590738990000,"2020-05-01",true]
+ sqls:
+ - delete from {0} where c1='aa';
+ - select * from {0};
+ expect:
+ columns: ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"]
+ rows:
+ - [2,"bb",2,2,3,1.1,2.1,1590738989000,"2020-05-01",true]
+ -
+ id: 29
+    desc: delete from an empty table
+ inputs:
+ -
+ name: job
+ columns: ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"]
+ indexs: ["index1:c1:c7"]
+ sqls:
+ - delete from {0} where c1='aa';
+ expect:
+ success: true
+ -
+ id: 30
+    desc: one column of the composite key is null
+ inputs:
+ -
+ columns: ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"]
+ indexs: ["index1:c1|c2:c7"]
+ rows:
+ - [1,null,2,2,3,1.1,2.1,1590738989000,"2020-05-01",true]
+ - [2,"bb",1,2,3,1.1,2.1,1590738989000,"2020-05-01",true]
+ - [3,null,1,2,3,1.1,2.1,1590738990000,"2020-05-01",true]
+ sqls:
+ - delete from {0} where c1=null and c2=2;
+ - select * from {0};
+ expect:
+ columns: ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"]
+ rows:
+ - [2,"bb",1,2,3,1.1,2.1,1590738989000,"2020-05-01",true]
+ - [3,null,1,2,3,1.1,2.1,1590738990000,"2020-05-01",true]
+ -
+ id: 31
+    desc: one column of the composite key is an empty string
+ inputs:
+ -
+ columns: ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"]
+ indexs: ["index1:c1|c2:c7"]
+ rows:
+ - [1,"",1,2,3,1.1,2.1,1590738989000,"2020-05-01",true]
+ - [2,"bb",1,2,3,1.1,2.1,1590738989000,"2020-05-01",true]
+ - [3,"",2,2,3,1.1,2.1,1590738990000,"2020-05-01",true]
+ sqls:
+ - delete from {0} where c1='' and c2=2;
+ - select * from {0};
+ expect:
+ columns: ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"]
+ rows:
+ - [1,"",1,2,3,1.1,2.1,1590738989000,"2020-05-01",true]
+ - [2,"bb",1,2,3,1.1,2.1,1590738989000,"2020-05-01",true]
+
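
Taken together, the cases above pin down the supported DELETE shape: an equality predicate on the key column(s) of an index, covering every key column of a composite index (case 1), while OR, !=, comparison operators and non-index columns are all expected to fail. A minimal sketch, assuming a hypothetical table t1 indexed on c1 and on (c1, c2):

    -- Supported per cases 0 and 1: exact match on an index key.
    delete from t1 where c1 = 'aa';
    delete from t1 where c1 = 'aa' and c2 = 1;
    -- Expected to fail per cases 2, 5, 26 and 27:
    -- delete from t1 where c1 = 'aa' or c1 = 'cc';   -- OR across keys
    -- delete from t1 where c3 = 1;                   -- non-index column
    -- delete from t1 where c2 >= 2;                  -- range predicate
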
diff --git a/cases/function/dml/test_insert.yaml b/cases/function/dml/test_insert.yaml
index fb93c8b2c0c..36ae56ca82b 100644
--- a/cases/function/dml/test_insert.yaml
+++ b/cases/function/dml/test_insert.yaml
@@ -14,6 +14,7 @@
db: test_zw
debugs: []
+version: 0.5.0
cases:
-
id: 0
@@ -153,6 +154,7 @@ cases:
-
id: 10
desc: 相同时间戳数据
+ mode: disk-unsupport
inputs:
-
columns : ["c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"]
diff --git a/cases/function/dml/test_insert_prepared.yaml b/cases/function/dml/test_insert_prepared.yaml
index b6fce126821..f43f5662094 100644
--- a/cases/function/dml/test_insert_prepared.yaml
+++ b/cases/function/dml/test_insert_prepared.yaml
@@ -14,6 +14,7 @@
db: test_zw
debugs: []
+version: 0.5.0
cases:
-
id: 0
@@ -100,6 +101,7 @@ cases:
-
id: 5
desc: 相同时间戳数据
+ mode: disk-unsupport
inputs:
-
columns : ["c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"]
diff --git a/cases/function/expression/test_arithmetic.yaml b/cases/function/expression/test_arithmetic.yaml
index 13627c7d732..bbae76d35de 100644
--- a/cases/function/expression/test_arithmetic.yaml
+++ b/cases/function/expression/test_arithmetic.yaml
@@ -14,6 +14,7 @@
db: test_zw
debugs: []
+version: 0.5.0
sqlDialect: ["HybridSQL"]
cases:
- id: 0
@@ -31,16 +32,24 @@ cases:
- [1,"bb",0,20,30,11.1,12.1,1590738989001,"2020-05-02",true]
dataProvider:
- ["%","MOD","*","-","/"]
- sql: select {0}.c2 d[0] {1}.c2 as b2,{0}.c2 d[0] {1}.c3 as b3,{0}.c2 d[0] {1}.c4 as b4,{0}.c2 d[0] {1}.c5 as b5,{0}.c2 d[0] {1}.c6 as b6,{0}.c2 d[0] {1}.c9 as b9 from {0} last join {1} ORDER BY {1}.c7 on {0}.id={1}.id;
+ sql: |
+ select
+ {0}.c2 d[0] {1}.c2 as b2,
+ {0}.c2 d[0] {1}.c3 as b3,
+ {0}.c2 d[0] {1}.c4 as b4,
+ {0}.c2 d[0] {1}.c5 as b5,
+ {0}.c2 d[0] {1}.c6 as b6,
+ {0}.c2 d[0] {1}.c9 as b9
+ FROM {0} last join {1} ORDER BY {1}.c7 on {0}.id={1}.id;
expect:
columns: ["b2 smallint","b3 int","b4 bigint","b5 float","b6 double","b9 smallint"]
expectProvider:
0:
rows:
- - [0,10,0,7.8,5.8,0]
+ - [NULL,10,0,7.8,5.8,0]
1:
rows:
- - [0,10,0,7.8,5.8,0]
+ - [NULL,10,0,7.8,5.8,0]
2:
rows:
- [0,600,900,333,363,30]
@@ -50,7 +59,7 @@ cases:
4:
columns: ["b2 double","b3 double","b4 double","b5 double","b6 double","b9 double"]
rows:
- - [Infinity,1.5,1.0,2.7027026098198896,2.479338842975207,30.0]
+ - [NULL,1.5,1.0,2.7027026098198896,2.479338842975207,30.0]
- id: 1
desc: "int_算术运算_整型_正确"
inputs:
@@ -72,10 +81,10 @@ cases:
expectProvider:
0:
rows:
- - [0,10,0,7.8,5.8,0]
+ - [NULL,10,0,7.8,5.8,0]
1:
rows:
- - [0,10,0,7.8,5.8,0]
+ - [NULL,10,0,7.8,5.8,0]
2:
rows:
- [0,600,900,333,363,30]
@@ -85,7 +94,7 @@ cases:
4:
columns: ["b2 double","b3 double","b4 double","b5 double","b6 double","b9 double"]
rows:
- - [Infinity,1.5,1.0,2.7027026098198896,2.479338842975207,30.0]
+ - [NULL,1.5,1.0,2.7027026098198896,2.479338842975207,30.0]
- id: 2
desc: "bigint_算术运算_整型_正确"
inputs:
@@ -107,10 +116,10 @@ cases:
expectProvider:
0:
rows:
- - [0,10,0,7.8,5.8,0]
+ - [NULL,10,0,7.8,5.8,0]
1:
rows:
- - [0,10,0,7.8,5.8,0]
+ - [NULL,10,0,7.8,5.8,0]
2:
rows:
- [0,600,900,333,363,30]
@@ -120,7 +129,7 @@ cases:
4:
columns: ["b2 double","b3 double","b4 double","b5 double","b6 double","b9 double"]
rows:
- - [Infinity,1.5,1.0,2.7027026098198896,2.479338842975207,30.0]
+ - [NULL,1.5,1.0,2.7027026098198896,2.479338842975207,30.0]
- id: 3
desc: "float_算术运算_整型_正确"
inputs:
@@ -142,10 +151,10 @@ cases:
expectProvider:
0:
rows:
- - [NAN,10,0,7.8,5.8,0]
+ - [NULL,10,0,7.8,5.8,0]
1:
rows:
- - [NAN,10,0,7.8,5.8,0]
+ - [NULL,10,0,7.8,5.8,0]
2:
rows:
- [0,600,900,333,363,30]
@@ -155,7 +164,7 @@ cases:
4:
columns: ["b2 double","b3 double","b4 double","b5 double","b6 double","b9 double"]
rows:
- - [Infinity,1.5,1.0,2.7027026098198896,2.479338842975207,30.0]
+ - [NULL,1.5,1.0,2.7027026098198896,2.479338842975207,30.0]
- id: 4
desc: "double_算术运算_整型_正确"
inputs:
@@ -177,10 +186,10 @@ cases:
expectProvider:
0:
rows:
- - [NAN,10,0,7.7999992370605469,5.8,0]
+ - [NULL,10,0,7.7999992370605469,5.8,0]
1:
rows:
- - [NAN,10,0,7.7999992370605469,5.8,0]
+ - [NULL,10,0,7.7999992370605469,5.8,0]
2:
rows:
- [0,600,900,333.0000114440918,363,30]
@@ -190,7 +199,7 @@ cases:
4:
columns: ["b2 double","b3 double","b4 double","b5 double","b6 double","b9 double"]
rows:
- - [Infinity,1.5,1.0,2.7027026098198896,2.479338842975207,30.0]
+ - [NULL,1.5,1.0,2.7027026098198896,2.479338842975207,30.0]
- id: 5
desc: "+_正确"
inputs:
@@ -450,7 +459,6 @@ cases:
success: false
- id: 17
desc: "int_DIV_int_正确"
- tags: ["TODO","bug,@baoxinqi,DIV 0有问题"]
inputs:
-
columns : ["id bigint","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"]
@@ -464,24 +472,31 @@ cases:
- [1,"bb",0,20,30,11.1,12.1,1590738989001,"2020-05-02",false]
dataProvider:
- ["{0}.c2","{0}.c3","{0}.c4","{0}.c9"]
- sql: select d[0] DIV {1}.c2 as b2,d[0] DIV {1}.c3 as b3,d[0] DIV {1}.c4 as b4,d[0] DIV {1}.c9 as b9 from {0} last join {1} ORDER BY {1}.c7 on {0}.id={1}.id;
+ sql: |
+ select
+ d[0] DIV {1}.c2 as b2,
+ d[0] DIV {1}.c3 as b3,
+ d[0] DIV {1}.c4 as b4,
+ d[0] DIV {1}.c9 as b9
+ FROM {0} last join {1} ORDER BY {1}.c7 on {0}.id={1}.id;
expectProvider:
0:
columns: ["b2 smallint","b3 int","b4 bigint","b9 smallint"]
rows:
- - [Infinity,1,1,Infinity]
+ - [NULL,1,1,NULL]
1:
columns: ["b2 int","b3 int","b4 bigint","b9 int"]
rows:
- - [Infinity,1,1,Infinity]
+ - [NULL,1,1,NULL]
2:
columns: ["b2 bigint","b3 bigint","b4 bigint","b9 bigint"]
rows:
- - [Infinity,0,0,Infinity]
+ - [NULL,1,1,NULL]
3:
- columns: ["b2 smallint","b3 int","b4 bigint","b9 smallint"]
+ # bool: false -> 0, true -> 1
+ columns: ["b2 smallint","b3 int","b4 bigint","b9 bool"]
rows:
- - [Infinity,1,1,Infinity]
+ - [NULL,0,0,NULL]
- id: 18
desc: "int_DIV_各种类型_错误"
level: 5
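
The expectation changes in this file replace Infinity/NaN with NULL whenever the right-hand operand is zero, for %, MOD, / and DIV alike (cases 0-4 and 17). A sketch of case 0's shape with the operator fixed to %; in that case's data {0}.c2 is 30 and {1}.c2 is 0:

    -- Per the updated expectations, a zero divisor yields NULL instead of
    -- Infinity/NaN; non-zero divisors still return a value.
    select {0}.c2 %   {1}.c2 as b2,   -- NULL  (divisor is 0)
           {0}.c2 %   {1}.c3 as b3,   -- 10    (30 % 20)
           {0}.c2 DIV {1}.c2 as b4    -- NULL  (integer DIV by 0, as in case 17)
    from {0} last join {1} ORDER BY {1}.c7 on {0}.id={1}.id;
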
diff --git a/cases/function/expression/test_condition.yaml b/cases/function/expression/test_condition.yaml
index 51c5741a0c2..54d1dd4ad4d 100644
--- a/cases/function/expression/test_condition.yaml
+++ b/cases/function/expression/test_condition.yaml
@@ -14,6 +14,7 @@
db: test_zw
debugs: []
+version: 0.5.0
cases:
- id: 0
desc: SIMPLE CASE WHEN 表达式
diff --git a/cases/function/v040/test_like.yaml b/cases/function/expression/test_like.yaml
similarity index 99%
rename from cases/function/v040/test_like.yaml
rename to cases/function/expression/test_like.yaml
index 7cd6d2bfe07..d47bb57b616 100644
--- a/cases/function/v040/test_like.yaml
+++ b/cases/function/expression/test_like.yaml
@@ -15,6 +15,7 @@
db: test_zw
debugs: []
sqlDialect: ["HybridSQL"]
+version: 0.5.0
cases:
- id: 0
desc: "使用_"
diff --git a/cases/function/expression/test_logic.yaml b/cases/function/expression/test_logic.yaml
index 238f3bb0ce5..d1ce41b7825 100644
--- a/cases/function/expression/test_logic.yaml
+++ b/cases/function/expression/test_logic.yaml
@@ -14,6 +14,7 @@
db: test_zw
debugs: []
+version: 0.5.0
sqlDialect: ["HybridSQL"]
cases:
- id: 0
diff --git a/cases/function/expression/test_predicate.yaml b/cases/function/expression/test_predicate.yaml
index 773fe215c78..aafa8e1adf1 100644
--- a/cases/function/expression/test_predicate.yaml
+++ b/cases/function/expression/test_predicate.yaml
@@ -14,6 +14,7 @@
db: test_zw
debugs: []
+version: 0.5.0
sqlDialect: ["HybridSQL"]
cases:
- id: 0
@@ -775,3 +776,67 @@ cases:
- [4, "Ta_sub"]
- [5, "lamrb"]
- [6, null]
+ - id: rlike_predicate_1
+ desc: rlike predicate
+ inputs:
+ - columns: ["id int", "std_ts timestamp"]
+ indexs: ["index1:id:std_ts"]
+ rows:
+ - [1, 1590115420000 ]
+ - [2, 1590115430000 ]
+ - [3, 1590115440000 ]
+ - [4, 1590115450000 ]
+ - [5, 1590115460000 ]
+ - [6, 1590115470000 ]
+ - columns: ["id int", "ts timestamp", "col2 string"]
+ indexs: ["idx:id:ts"]
+ rows:
+ - [1, 1590115420000, John]
+ - [2, 1590115430000, Mary]
+ - [3, 1590115440000, mike]
+ - [4, 1590115450000, Dan]
+ - [5, 1590115460000, Evan_W]
+ - [6, 1590115470000, M]
+ dataProvider:
+ - ["RLIKE", "NOT RLIKE"] # RLIKE / NOT RLIKE
+ - ["m[A-za-z]+", "M.ry" ] # match pattern
+ sql: |
+ select {0}.id, col2 from {0} last join {1} ON {0}.id = {1}.id AND col2 d[0] 'd[1]';
+ expect:
+ columns: ["id int", "col2 string"]
+ order: id
+ expectProvider:
+ 0:
+ 0:
+ rows:
+ - [1, null]
+ - [2, null]
+ - [3, mike]
+ - [4, null]
+ - [5, null]
+ - [6, null]
+ 1:
+ rows:
+ - [1, null]
+ - [2, Mary]
+ - [3, null]
+ - [4, null]
+ - [5, null]
+ - [6, null]
+ 1:
+ 0:
+ rows:
+ - [1, John]
+ - [2, Mary]
+ - [3, null]
+ - [4, Dan]
+ - [5, Evan_W]
+ - [6, M]
+ 1:
+ rows:
+ - [1, John]
+ - [2, null]
+ - [3, mike]
+ - [4, Dan]
+ - [5, Evan_W]
+ - [6, M]
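
rlike_predicate_1 drives RLIKE and NOT RLIKE through a LAST JOIN condition, substituting the operator and the pattern from the two dataProvider lists. Expanded for the first combination (RLIKE with 'm[A-za-z]+'), the query reads as below; only 'mike' matches that pattern, so every other id joins to NULL, which is what expectProvider 0:0 encodes:

    select {0}.id, col2
    from {0} last join {1}
      ON {0}.id = {1}.id AND col2 RLIKE 'm[A-za-z]+';
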
diff --git a/cases/function/expression/test_type.yaml b/cases/function/expression/test_type.yaml
index ae909e66f26..45aac74cf8b 100644
--- a/cases/function/expression/test_type.yaml
+++ b/cases/function/expression/test_type.yaml
@@ -14,6 +14,7 @@
db: test_zw
debugs: []
+version: 0.5.0
sqlDialect: ["HybridSQL"]
cases:
- id: 0
diff --git a/cases/function/function/test_calculate.yaml b/cases/function/function/test_calculate.yaml
index a0955c3499d..7e4b5f5a3c9 100644
--- a/cases/function/function/test_calculate.yaml
+++ b/cases/function/function/test_calculate.yaml
@@ -15,6 +15,7 @@
db: test_zw
debugs: []
sqlDialect: ["HybridSQL"]
+version: 0.5.0
cases:
- id: 0
desc: abs-normal
diff --git a/cases/function/function/test_date.yaml b/cases/function/function/test_date.yaml
index f280304c629..66e1ce9cbbd 100644
--- a/cases/function/function/test_date.yaml
+++ b/cases/function/function/test_date.yaml
@@ -15,6 +15,7 @@
db: test_zw
debugs: []
sqlDialect: ["HybridSQL"]
+version: 0.5.0
cases:
- id: 0
desc: date_format-normal
diff --git a/cases/function/v040/test_like_match.yaml b/cases/function/function/test_like_match.yaml
similarity index 99%
rename from cases/function/v040/test_like_match.yaml
rename to cases/function/function/test_like_match.yaml
index 760fb9d4401..5300a4f85e5 100644
--- a/cases/function/v040/test_like_match.yaml
+++ b/cases/function/function/test_like_match.yaml
@@ -15,6 +15,7 @@
db: test_zw
debugs: []
sqlDialect: ["HybridSQL"]
+version: 0.5.0
cases:
- id: 0
desc: "使用_"
diff --git a/cases/function/function/test_string.yaml b/cases/function/function/test_string.yaml
index 393052a390e..4b9220122f0 100644
--- a/cases/function/function/test_string.yaml
+++ b/cases/function/function/test_string.yaml
@@ -14,6 +14,7 @@
db: test_zw
debugs: []
+version: 0.5.0
cases:
- id: 0
desc: "concat_各种类型组合"
diff --git a/cases/function/function/test_udaf_function.yaml b/cases/function/function/test_udaf_function.yaml
index f6f5d418695..7641f73a648 100644
--- a/cases/function/function/test_udaf_function.yaml
+++ b/cases/function/function/test_udaf_function.yaml
@@ -14,6 +14,7 @@
db: test_zw
debugs: []
+version: 0.5.0
cases:
-
id: 0
@@ -113,6 +114,7 @@ cases:
-
id: 4
desc: avg
+ version: 0.6.0
sqlDialect: ["HybridSQL"]
inputs:
-
@@ -136,27 +138,27 @@ cases:
expect:
order: id
columns: ["id int","c1 string","m2 double","m3 double","m4 double","m5 double","m6 double", "m7 double"]
- data: |
- 1, aa, 1, NULL, 30, 1.100000023841858,2.1, NULL
- 2, aa, 2.5, 4.0, 31.5, 1.25, 2.25, 5.0
- 3, aa, 2, 2.5, 32, 1.200000007947286,2.1999999999999997,3.5
- 4, aa, 2.5, 2.5, 33, 1.25, 2.25, 3.5
+ rows:
+ - [1, aa, 1, NULL, 30, 1.100000023841858,2.1, NULL]
+ - [2, aa, 2.5, 4.0, 31.5, 1.25, 2.25, 5.0]
+ - [3, aa, 2, 2.5, 32, 1.200000007947286,2.1999999999999997,3.5]
+ - [4, aa, 2.5, 2.5, 33, 1.25, 2.25, 3.5]
-
id: 5
desc: distinct_count
sqlDialect: ["HybridSQL"]
inputs:
-
- columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 string","c10 bool"]
- indexs: ["index1:c1:c7"]
+ columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 string","c10 bool","ts timestamp"]
+ indexs: ["index1:c1:ts"]
rows:
- - [1,"aa",1,1,30,1.1,2.1,1590738990000,"2020-05-01","a",true]
- - [2,"aa",4,4,33,1.4,2.4,1590738991000,"2020-05-02","c",false]
- - [3,"aa",4,4,33,1.4,2.4,1590738991000,"2020-05-02","c",true]
- - [4,"aa",NULL,NULL,NULL,NULL,NULL,1590738993000,NULL,NULL,NULL]
+ - [1,"aa",1,1,30,1.1,2.1,1590738990000,"2020-05-01","a",true,1590738990000]
+ - [2,"aa",4,4,33,1.4,2.4,1590738991000,"2020-05-02","c",false,1590738991000]
+ - [3,"aa",4,4,33,1.4,2.4,1590738991000,"2020-05-02","c",true,1590738992000]
+ - [4,"aa",NULL,NULL,NULL,NULL,NULL,1590738993000,NULL,NULL,NULL,1590738993000]
sql: |
SELECT {0}.id, c1, distinct_count(c2) OVER w1 as m2,distinct_count(c3) OVER w1 as m3,distinct_count(c4) OVER w1 as m4,distinct_count(c5) OVER w1 as m5,distinct_count(c6) OVER w1 as m6,distinct_count(c7) OVER w1 as m7,distinct_count(c8) OVER w1 as m8,distinct_count(c9) OVER w1 as m9 FROM {0} WINDOW
- w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS BETWEEN 2 PRECEDING AND CURRENT ROW);
+ w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.ts ROWS BETWEEN 2 PRECEDING AND CURRENT ROW);
expect:
order: id
columns: ["id int","c1 string","m2 bigint","m3 bigint","m4 bigint","m5 bigint","m6 bigint","m7 bigint","m8 bigint","m9 bigint"]
@@ -359,6 +361,7 @@ cases:
id: 15
desc: SUM_WHERE-normal
sqlDialect: ["HybridSQL"]
+ version: 0.6.0
inputs:
-
columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 string","c10 bool"]
@@ -390,15 +393,16 @@ cases:
id: 16
desc: AVG_WHERE-normal
sqlDialect: ["HybridSQL"]
+ version: 0.6.0
inputs:
-
columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 string","c10 bool"]
indexs: ["index1:c1:c7"]
- data: |
- 1, aa, 1, 1, 30, NULL,2.1, 1590738990000, 2020-05-01, a, true
- 2, aa, 4, 4, NULL,1.4, 2.4, 1590738991000, 2020-05-03, c, false
- 3, aa, 3, NULL,32, 1.3, 2.3, 1590738992000, 2020-05-02, b, true
- 4, aa, NULL,3, 33, 1.1, NULL,1590738993000, NULL, NULL,NULL
+ rows:
+ - [1, "aa", 1, 1, 30, NULL,2.1, 1590738990000, "2020-05-01", "a", true]
+ - [2, "aa", 4, 4, NULL,1.4, 2.4, 1590738991000, "2020-05-03", "c", false]
+ - [3, "aa", 3, NULL,32, 1.3, 2.3, 1590738992000, "2020-05-02", "b", true]
+ - [4, "aa", NULL,3, 33, 1.1, NULL,1590738993000, NULL, NULL,NULL]
sql: |
SELECT {0}.id, c1,
avg_where(c2, c2<4) OVER w1 as m2,
@@ -412,11 +416,11 @@ cases:
expect:
order: id
columns: ["id int","c1 string","m2 double","m3 double","m4 double","m5 double","m6 double", "m7 double"]
- data: |
- 1, aa, 1, 1, 30, NULL, 2.1, NULL
- 2, aa, 1, 1, 30, NULL, 2.1, NULL
- 3, aa, 2, 1, 31, 1.2999999523162842, 2.2, NULL
- 4, aa, 3, 3, 32, 1.199999988079071, 2.3, NULL
+ rows:
+ - [1, aa, 1, 1, 30, NULL, 2.1, NULL]
+ - [2, aa, 1, 1, 30, NULL, 2.1, NULL]
+ - [3, aa, 2, 1, 31, 1.2999999523162842, 2.2, NULL]
+ - [4, aa, 3, 3, 32, 1.199999988079071, 2.3, NULL]
-
id: 17
desc: COUNT_WHERE-normal
@@ -431,17 +435,28 @@ cases:
- [3,"aa",3,3,32,1.3,2.3,1590738992000,"2020-05-02","b",true]
- [4,"aa",NULL,NULL,NULL,NULL,NULL,1590738993000,NULL,NULL,NULL]
sql: |
- SELECT {0}.id, c1, count_where(c2,c2<4) OVER w1 as m2,count_where(c3,c3<4) OVER w1 as m3,count_where(c4,c4<33) OVER w1 as m4,count_where(c5,c5<=1.3) OVER w1 as m5,count_where(c6,c10) OVER w1 as m6,
- count_where(c7,c10) OVER w1 as m7,count_where(c8,c10) OVER w1 as m8,count_where(c9,c10) OVER w1 as m9, count_where(*,c3<4) over w1 as m10 FROM {0} WINDOW
- w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS BETWEEN 2 PRECEDING AND CURRENT ROW);
+ SELECT
+ {0}.id, c1,
+ count_where(c2,c2<4) OVER w1 as m2,
+ count_where(c3,c3<4) OVER w1 as m3,
+ count_where(c4,c4<33) OVER w1 as m4,
+ count_where(c5,c5<=1.3) OVER w1 as m5,
+ count_where(c6,c10) OVER w1 as m6,
+ count_where(c7,c10) OVER w1 as m7,
+ count_where(c8,c10) OVER w1 as m8,
+ count_where(c9,c10) OVER w1 as m9,
+ count_where(c10,c3<4) over w1 as m10,
+ count_where(*,c3<4) over w1 as m11
+ FROM {0} WINDOW
+ w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS BETWEEN 2 PRECEDING AND CURRENT ROW);
expect:
order: id
- columns: ["id int","c1 string","m2 bigint","m3 bigint","m4 bigint","m5 bigint","m6 bigint","m7 bigint","m8 bigint","m9 bigint","m10 bigint"]
+ columns: ["id int","c1 string","m2 bigint","m3 bigint","m4 bigint","m5 bigint","m6 bigint","m7 bigint","m8 bigint","m9 bigint","m10 bigint", "m11 bigint"]
rows:
- - [1,"aa",1,1,1,1,1,1,1,1,1]
- - [2,"aa",1,1,1,1,1,1,1,1,1]
- - [3,"aa",2,2,2,2,2,2,2,2,2]
- - [4,"aa",1,1,1,1,1,1,1,1,1]
+ - [1, "aa", 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
+ - [2, "aa", 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
+ - [3, "aa", 2, 2, 2, 2, 2, 2, 2, 2, 2, 2]
+ - [4, "aa", 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
-
id: 18
desc: AVG_WHERE/MAX_WHERE/MIN_WHERE/SUM_WHERE-fail
@@ -464,28 +479,6 @@ cases:
w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS BETWEEN 2 PRECEDING AND CURRENT ROW);
expect:
success: false
- -
- id: 19
- desc: COUNT_WHERE-fail
- sqlDialect: ["HybridSQL"]
- level: 5
- inputs:
- -
- columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 string","c10 bool"]
- indexs: ["index1:c1:c7"]
- rows:
- - [1,"aa",1,1,30,1.1,2.1,1590738990000,"2020-05-01","a",true]
- - [2,"aa",4,4,33,1.4,2.4,1590738991000,"2020-05-03","c",false]
- - [3,"aa",3,3,32,1.3,2.3,1590738992000,"2020-05-02","b",true]
- - [4,"aa",NULL,NULL,NULL,NULL,NULL,1590738993000,NULL,NULL,NULL]
- dataProvider:
- - ["count_where"]
- - ["c10"]
- sql: |
- SELECT {0}.id, c1, d[0](d[1],c10) OVER w1 as m2 FROM {0} WINDOW
- w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS BETWEEN 2 PRECEDING AND CURRENT ROW);
- expect:
- success: false
-
id: 20
desc: max_cate-normal
@@ -2181,6 +2174,7 @@ cases:
- id: 52
desc: 多个可合并窗口上的多个聚合函数计算
sqlDialect: ["HybridSQL"]
+ version: 0.6.0
sql: |
SELECT {0}.id, pk, col1, std_ts,
distinct_count(col1) OVER w1 as a1,
@@ -2236,6 +2230,7 @@ cases:
- id: 53
desc: 同窗口下多类聚合函数
sqlDialect: ["HybridSQL"]
+ version: 0.6.0
sql: |
SELECT {0}.id, pk, col1, std_ts,
sum(col1 + count(col1)) OVER w as a1,
@@ -2403,15 +2398,15 @@ cases:
expect:
columns: ["id int", "val1 int", "agg1 int", "agg2 int", "agg3 int"]
order: id
- data: |
- 1, 1, 1, NULL, NULL
- 2, 2, 2, 1, NULL
- 3, 3, 3, 2, NULL
- 4, 4, 4, 3, 1
- 5, 5, 5, 4, 2
- 6, 4, 4, NULL, NULL
- 7, 3, 3, 4, NULL
- 8, 2, 2, 3, NULL
+ rows:
+ - [1, 1, 1, NULL, NULL]
+ - [2, 2, 2, 1, NULL]
+ - [3, 3, 3, 2, NULL]
+ - [4, 4, 4, 3, 1]
+ - [5, 5, 5, 4, 2]
+ - [6, 4, 4, NULL, NULL]
+ - [7, 3, 3, 4, NULL]
+ - [8, 2, 2, 3, NULL]
- id: 58
desc: |
@@ -2442,15 +2437,15 @@ cases:
expect:
columns: ["id int", "val1 int", "agg1 int", "agg2 int", "agg3 int"]
order: id
- data: |
- 1, 1, 1, NULL, NULL
- 2, 2, 2, NULL, 1
- 3, 3, 3, NULL, 2
- 4, 4, 4, 1, 3
- 5, 5, 5, 2, 4
- 6, 4, 4, NULL, NULL
- 7, 3, 3, NULL, 4
- 8, 2, 2, NULL, 3
+ rows:
+ - [1, 1, 1, NULL, NULL]
+ - [2, 2, 2, NULL, 1]
+ - [3, 3, 3, NULL, 2]
+ - [4, 4, 4, 1, 3]
+ - [5, 5, 5, 2, 4]
+ - [6, 4, 4, NULL, NULL]
+ - [7, 3, 3, NULL, 4]
+ - [8, 2, 2, NULL, 3]
- id: 59
desc: |
@@ -2482,15 +2477,15 @@ cases:
expect:
columns: ["id int", "val1 int", "agg1 int", "agg2 int", "agg3 int"]
order: id
- data: |
- 1, 1, 1, NULL, NULL
- 2, 2, 2, 1, NULL
- 3, 3, 3, 2, NULL
- 4, 4, 4, 3, 1
- 5, 5, 5, 4, 2
- 6, 4, 4, NULL, NULL
- 7, 3, 3, 4, NULL
- 8, 2, 2, 3, NULL
+ rows:
+ - [1, 1, 1, NULL, NULL]
+ - [2, 2, 2, 1, NULL]
+ - [3, 3, 3, 2, NULL]
+ - [4, 4, 4, 3, 1]
+ - [5, 5, 5, 4, 2]
+ - [6, 4, 4, NULL, NULL]
+ - [7, 3, 3, 4, NULL]
+ - [8, 2, 2, 3, NULL]
- id: 60
desc: |
@@ -2521,19 +2516,20 @@ cases:
expect:
columns: ["id int", "val1 int", "agg1 int", "agg2 int", "agg3 int"]
order: id
- data: |
- 1, 1, 1, NULL, NULL
- 2, 2, 2, NULL, 1
- 3, 3, 3, NULL, 2
- 4, 4, 4, 1, 3
- 5, 5, 5, 2, 4
- 6, 4, 4, NULL, NULL
- 7, 3, 3, NULL, 4
- 8, 2, 2, NULL, 3
+ rows:
+ - [1, 1, 1, NULL, NULL]
+ - [2, 2, 2, NULL, 1]
+ - [3, 3, 3, NULL, 2]
+ - [4, 4, 4, 1, 3]
+ - [5, 5, 5, 2, 4]
+ - [6, 4, 4, NULL, NULL]
+ - [7, 3, 3, NULL, 4]
+ - [8, 2, 2, NULL, 3]
- id: 61
desc: median
sqlDialect: ["HybridSQL"]
+ version: 0.6.0
inputs:
-
columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 string","c10 bool"]
diff --git a/cases/function/function/test_udf_function.yaml b/cases/function/function/test_udf_function.yaml
index 8e985742376..aed881767f1 100644
--- a/cases/function/function/test_udf_function.yaml
+++ b/cases/function/function/test_udf_function.yaml
@@ -15,6 +15,7 @@
db: test_zw
debugs: []
sqlDialect: ["HybridSQL"]
+version: 0.5.0
cases:
- id: 0
desc: 默认udf null处理逻辑:返回null
@@ -82,7 +83,171 @@ cases:
rows:
- [1, 1, 1, 1, 0]
+ - id: 3
+ desc: udf regexp_like
+ inputs:
+ - columns: ["id int", "std_ts timestamp"]
+ indexs: ["index1:id:std_ts"]
+ rows:
+ - [1, 1590115420000 ]
+ - [2, 1590115430000 ]
+ - [3, 1590115440000 ]
+ - [4, 1590115450000 ]
+ - [5, 1590115460000 ]
+ - [6, 1590115470000 ]
+ - columns: ["id int", "ts timestamp", "col2 string"]
+ indexs: ["index1:id:ts"]
+ rows:
+ - [1, 1590115420000, contact@openmldb.ai]
+ - [2, 1590115430000, contact@opfnmldb.ai]
+ - [3, 1590115440000, contact@opgnmldb.ai]
+ - [4, 1590115450000, contact@ophnmldb.ai]
+ - [5, 1590115460000, contact@dropmldb.ai]
+ - [6, 1590115470000, contact@closemldb.ai]
+ dataProvider:
+ - ["regexp_like", "NOT regexp_like"] # regexp_like / NOT regexp_like
+ - ["[A-Za-z0-9+_.-]+@openmldb[A-Za-z0-9+_.-]+"] # match pattern
+ sql: |
+ select {0}.id, col2 from {0} last join {1} ON {0}.id = {1}.id and d[0](col2,'d[1]');
+ expect:
+ columns: ["id int", "col2 string"]
+ order: id
+ expectProvider:
+ 0:
+ 0:
+ rows:
+ - [1, contact@openmldb.ai]
+ - [2, null]
+ - [3, null]
+ - [4, null]
+ - [5, null]
+ - [6, null]
+ 1:
+ 0:
+ rows:
+ - [1, null]
+ - [2, contact@opfnmldb.ai]
+ - [3, contact@opgnmldb.ai]
+ - [4, contact@ophnmldb.ai]
+ - [5, contact@dropmldb.ai]
+ - [6, contact@closemldb.ai]
-
-
-
+# reserved case
+# For more details, please check out https://github.com/4paradigm/OpenMLDB/pull/2187
+# - id: 4
+# desc: udf regexp_like with flags
+# inputs:
+# - columns: ["id int", "ts timestamp"]
+# indexs: ["index1:id:ts"]
+# rows:
+# - [1, 1590115420000]
+# - [2, 1590115420001]
+# - [3, 1590115420002]
+# - columns: ["id int", "ts timestamp", "col2 string"]
+# indexs: ["index1:id:ts"]
+# rows:
+# - [1, 1590115420000, "the Lord of the Rings"]
+# - [2, 1590115420001, "The Lord of the Rings"]
+# - [3, 1590115420002, "The Lord of the Rings\nJ. R. R. Tolkien"]
+# dataProvider:
+# - ["The Lord of the Rings", "The Lord of the Rings.J\\\\\\\\. R\\\\\\\\. R\\\\\\\\. Tolkien", "^The Lord of the Rings$.J\\\\\\\\. R\\\\\\\\. R\\\\\\\\. Tolkien"] # match pattern
+# - ["i", "s", "m", "smi", "c", ""] # flags
+# sql: |
+# select {0}.id, {1}.col2 from {0} last join {1} on {0}.id = {1}.id and regexp_like(col2, "d[0]", "d[1]");
+# expect:
+# columns: ["id int", "col2 string"]
+# order: id
+# expectProvider:
+# 0:
+# 0:
+# rows:
+# - [1, "the Lord of the Rings"]
+# - [2, "The Lord of the Rings"]
+# - [3, null]
+# 1:
+# rows:
+# - [1, null]
+# - [2, "The Lord of the Rings"]
+# - [3, null]
+# 2:
+# rows:
+# - [1, null]
+# - [2, "The Lord of the Rings"]
+# - [3, null]
+# 3:
+# rows:
+# - [1, "the Lord of the Rings"]
+# - [2, "The Lord of the Rings"]
+# - [3, null]
+# 4:
+# rows:
+# - [1, null]
+# - [2, "The Lord of the Rings"]
+# - [3, null]
+# 5:
+# rows:
+# - [1, null]
+# - [2, "The Lord of the Rings"]
+# - [3, null]
+# 1:
+# 0:
+# rows:
+# - [1, null]
+# - [2, null]
+# - [3, null]
+# 1:
+# rows:
+# - [1, null]
+# - [2, null]
+# - [3, "The Lord of the Rings\nJ. R. R. Tolkien"]
+# 2:
+# rows:
+# - [1, null]
+# - [2, null]
+# - [3, null]
+# 3:
+# rows:
+# - [1, null]
+# - [2, null]
+# - [3, "The Lord of the Rings\nJ. R. R. Tolkien"]
+# 4:
+# rows:
+# - [1, null]
+# - [2, null]
+# - [3, null]
+# 5:
+# rows:
+# - [1, null]
+# - [2, null]
+# - [3, null]
+# 2:
+# 0:
+# rows:
+# - [1, null]
+# - [2, null]
+# - [3, null]
+# 1:
+# rows:
+# - [1, null]
+# - [2, null]
+# - [3, null]
+# 2:
+# rows:
+# - [1, null]
+# - [2, null]
+# - [3, null]
+# 3:
+# rows:
+# - [1, null]
+# - [2, null]
+# - [3, "The Lord of the Rings\nJ. R. R. Tolkien"]
+# 4:
+# rows:
+# - [1, null]
+# - [2, null]
+# - [3, null]
+# 5:
+# rows:
+# - [1, null]
+# - [2, null]
+# - [3, null]
\ No newline at end of file
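
Case 3 above checks regexp_like as a join predicate (the commented-out case 4 reserves the flag-taking variant; see the linked PR). Expanded for the positive branch of the dataProvider, the query is:

    -- Only 'contact@openmldb.ai' matches the pattern, so ids 2-6 join to NULL
    -- (expectProvider 0:0); the NOT form returns the complement (1:0).
    select {0}.id, col2
    from {0} last join {1}
      ON {0}.id = {1}.id and regexp_like(col2, '[A-Za-z0-9+_.-]+@openmldb[A-Za-z0-9+_.-]+');
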
diff --git a/cases/function/fz_ddl/test_bank.yaml b/cases/function/fz_ddl/test_bank.yaml
index 6d71e4d3bca..4b725afd22c 100644
--- a/cases/function/fz_ddl/test_bank.yaml
+++ b/cases/function/fz_ddl/test_bank.yaml
@@ -1,4 +1,19 @@
+# Copyright 2021 4Paradigm
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
db: bank
+version: 0.5.0
cases:
- desc: bank test
id: 0
diff --git a/cases/function/fz_ddl/test_luoji.yaml b/cases/function/fz_ddl/test_luoji.yaml
index c1673497e22..65b8056909f 100644
--- a/cases/function/fz_ddl/test_luoji.yaml
+++ b/cases/function/fz_ddl/test_luoji.yaml
@@ -13,6 +13,7 @@
# limitations under the License.
db: luoji
+version: 0.5.0
cases:
- id: 0
desc: luoji test
diff --git a/cases/function/fz_ddl/test_myhug.yaml b/cases/function/fz_ddl/test_myhug.yaml
index 7ed43b3315f..02d0f971040 100644
--- a/cases/function/fz_ddl/test_myhug.yaml
+++ b/cases/function/fz_ddl/test_myhug.yaml
@@ -13,6 +13,7 @@
# limitations under the License.
db: mybug
+version: 0.5.0
cases:
- id: 0
desc: mybug test
diff --git a/cases/function/join/test_lastjoin_complex.yaml b/cases/function/join/test_lastjoin_complex.yaml
index d93887d55b5..07b65aec95c 100644
--- a/cases/function/join/test_lastjoin_complex.yaml
+++ b/cases/function/join/test_lastjoin_complex.yaml
@@ -14,6 +14,7 @@
db: test_zw
debugs: []
+version: 0.5.0
cases:
- id: 0
desc: lastjoin+窗口
@@ -57,6 +58,7 @@ cases:
- [5,"bb",24,34,68]
- id: 1
desc: lastjoin+窗口-没有匹配的列
+ version: 0.6.0
inputs:
-
columns : ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"]
@@ -946,7 +948,7 @@ cases:
success: true
columns: [ "id int", "c8 date", "c3 int", "c4 bigint" ]
rows:
- - [ 1, 2020-05-01, 20, 30 ]
+ - [ 1, '2020-05-01', 20, 30 ]
- id: 17-2
desc: 两个子查询lastjoin,order不是主表的ts-离线支持
@@ -998,7 +1000,7 @@ cases:
success: true
columns: [ "id int", "c8 date", "c3 int", "c4 bigint" ]
rows:
- - [ 1, 2020-05-01, 20, 30 ]
+ - [ 1, '2020-05-01', 20, 30 ]
- id: 18-2
desc: 两个子查询lastjoin,拼接条件不是主表的索引-不带orderby-离线支持
@@ -1026,7 +1028,7 @@ cases:
success: true
columns: [ "id int", "c8 date", "c3 int", "c4 bigint" ]
rows:
- - [ 1, 2020-05-01, 20, 30 ]
+ - [ 1, '2020-05-01', 20, 30 ]
- id: 19-1
desc: 两个子查询lastjoin-子查询带窗口特征-没有使用索引-不带orderby
diff --git a/cases/function/join/test_lastjoin_simple.yaml b/cases/function/join/test_lastjoin_simple.yaml
index 9b1936f4014..4d23b312ef2 100644
--- a/cases/function/join/test_lastjoin_simple.yaml
+++ b/cases/function/join/test_lastjoin_simple.yaml
@@ -13,7 +13,8 @@
# limitations under the License.
db: test_zw
-debugs: ["正常拼接"]
+debugs: []
+version: 0.5.0
cases:
- id: 1
desc: 正常拼接
@@ -1020,4 +1021,50 @@ cases:
order: c1
rows:
- [ "aa", 2, 13, 1590738989000 ]
- - [ "bb", 21, 131, 1590738990000 ]
\ No newline at end of file
+ - [ "bb", 21, 131, 1590738990000 ]
+ -
+ id: 12
+    desc: last join without an index
+ tags: ["TODO","cpp ut失败"]
+ inputs:
+ - columns: [ "c1 string","c2 int","c3 bigint","c4 timestamp" ]
+ rows:
+ - [ "aa",2,3,1590738989000 ]
+ - [ "bb",21,31,1590738990000 ]
+ - [ "dd",41,51,1590738990000 ]
+ - columns: [ "c1 string","c2 int","c3 bigint","c4 timestamp" ]
+ rows:
+ - [ "aa",2,13,1590738989000 ]
+ - [ "bb",21,131,1590738990000 ]
+ - [ "cc",41,121,1590738991000 ]
+ sql: select {0}.c1,{0}.c2,{1}.c3,{1}.c4 from {0} last join {1} on {0}.c1={1}.c1;
+ expect:
+ columns: [ "c1 string","c2 int","c3 bigint","c4 timestamp" ]
+ order: c1
+ rows:
+ - [ "aa",2,13,1590738989000 ]
+ - [ "bb",21,131,1590738990000 ]
+ - [ "dd", 41, NULL, NULL ]
+ -
+ id: 13
+    desc: last join without an index, multiple matching rows
+ tags: ["TODO","cpp ut失败"]
+ inputs:
+ - columns: [ "c1 string","c2 int","c3 bigint","c4 timestamp" ]
+ rows:
+ - [ "aa",2,3,1590738989000 ]
+ - [ "bb",21,31,1590738990000 ]
+ - [ "dd",41,51,1590738990000 ]
+ - columns: [ "c1 string","c2 int","c3 bigint","c4 timestamp" ]
+ rows:
+ - [ "aa",2,13,1590738989000 ]
+ - [ "aa",21,131,1590738990000 ]
+ - [ "cc",41,121,1590738991000 ]
+ sql: select {0}.c1,{0}.c2,{1}.c3,{1}.c4 from {0} last join {1} on {0}.c1={1}.c1;
+ expect:
+ columns: [ "c1 string","c2 int","c3 bigint","c4 timestamp" ]
+ order: c1
+ rows:
+ - [ "aa",2,131,1590738990000 ]
+ - [ "bb",21,NULL,NULL ]
+ - [ "dd", 41, NULL, NULL ]
\ No newline at end of file
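
Cases 12 and 13 (still tagged TODO because the cpp unit test fails) cover LAST JOIN when neither table declares an index: left rows without a match keep NULL right-side columns, and when several right rows share the join key the expectation keeps the later one (c3=131 for 'aa' in case 13). The join itself is simply:

    -- No index on either side; the join works purely off the equality condition.
    select {0}.c1,{0}.c2,{1}.c3,{1}.c4 from {0} last join {1} on {0}.c1={1}.c1;
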
diff --git a/cases/function/long_window/long_window.yaml b/cases/function/long_window/long_window.yaml
new file mode 100644
index 00000000000..7344aca2cce
--- /dev/null
+++ b/cases/function/long_window/long_window.yaml
@@ -0,0 +1,357 @@
+# Copyright 2021 4Paradigm
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+db: test_zw
+debugs: ["options(long_window='w1:2h')"]
+cases:
+ -
+ id: 0
+ desc: options(long_window='w1:2')
+ inputs:
+ -
+ columns : ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"]
+ indexs: ["index1:c1:c7"]
+ rows:
+ - [1,"aa",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"aa",21,31,1.2,2.2,1590738990001,"2020-05-02"]
+ - [3,"aa",22,32,1.3,2.3,1590738990002,"2020-05-03"]
+ - [4,"aa",23,33,1.4,2.4,1590738990003,"2020-05-04"]
+ - [5,"aa",24,34,1.5,2.5,1590738990004,"2020-05-05"]
+ dataProvider:
+ - ["ROWS","ROWS_RANGE"]
+ sql: |
+ deploy {0} options(long_windows='w1:2') SELECT id, c1, sum(c4) OVER w1 as w1_c4_sum FROM {0} WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 d[0] BETWEEN 6 PRECEDING AND CURRENT ROW);
+ expect:
+ success: true
+ -
+ id: 1
+ desc: options(long_window='w1:2d')
+ inputs:
+ -
+ columns : ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"]
+ indexs: ["index1:c1:c7::latest"]
+ rows:
+ - [1,"aa",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"aa",21,31,1.2,2.2,1590738990001,"2020-05-02"]
+ - [3,"aa",22,32,1.3,2.3,1590738990002,"2020-05-03"]
+ - [4,"aa",23,33,1.4,2.4,1590738990003,"2020-05-04"]
+ - [5,"aa",24,34,1.5,2.5,1590738990004,"2020-05-05"]
+ dataProvider:
+ - ["ROWS","ROWS_RANGE"]
+ sql: |
+ deploy {0} options(long_windows='w1:2d') SELECT id, c1, sum(c4) OVER w1 as w1_c4_sum FROM {0} WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 d[0] BETWEEN 2 PRECEDING AND CURRENT ROW);
+ expect:
+ success: true
+ -
+ id: 2
+ desc: options(long_window='w1:2h')
+ inputs:
+ -
+ columns : ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"]
+ indexs: ["index1:c1:c7::latest"]
+# rows:
+# - [1,"aa",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+# - [2,"aa",21,31,1.2,2.2,1590738990001,"2020-05-02"]
+# - [3,"aa",22,32,1.3,2.3,1590738990002,"2020-05-03"]
+# - [4,"aa",23,33,1.4,2.4,1590738990003,"2020-05-04"]
+# - [5,"aa",24,34,1.5,2.5,1590738990004,"2020-05-05"]
+ dataProvider:
+ - ["ROWS","ROWS_RANGE"]
+ sqls:
+ - deploy deploy_{0} options(long_windows='w1:2d') SELECT id, c1, sum(c4) OVER w1 as w1_c4_sum FROM {0} WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 d[0] BETWEEN 6 PRECEDING AND CURRENT ROW);
+ - show deployment deploy_{0};
+ expect:
+ deployment :
+ name: deploy_{0}
+ dbName: test_zw
+ sql: |
+ DEPLOY {0} SELECT
+ id,
+ c1,
+ sum(c4) OVER (w1) AS w1_c4_sum
+ FROM
+ {0}
+ WINDOW w1 AS (PARTITION BY {0}.c1
+ ORDER BY {0}.c7 ROWS BETWEEN 2 PRECEDING AND CURRENT ROW)
+ ;
+ inColumns:
+ - 1,id,kInt32,NO
+ - 2,c1,kVarchar,NO
+ - 3,c3,kInt32,NO
+ - 4,c4,kInt64,NO
+ - 5,c5,kFloat,NO
+ - 6,c6,kDouble,NO
+ - 7,c7,kTimestamp,NO
+ - 8,c8,kDate,NO
+ outColumns:
+ - 1,id,kInt32,NO
+ - 2,c1,kVarchar,NO
+ - 3,w1_c4_sum,kInt64,NO
+ -
+ id: 3
+ desc: options(long_window='w1:2m')
+ inputs:
+ -
+ columns : ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"]
+ indexs: ["index1:c1:c7"]
+ rows:
+ - [1,"aa",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"aa",21,31,1.2,2.2,1590738990001,"2020-05-02"]
+ - [3,"aa",22,32,1.3,2.3,1590738990002,"2020-05-03"]
+ - [4,"aa",23,33,1.4,2.4,1590738990003,"2020-05-04"]
+ - [5,"aa",24,34,1.5,2.5,1590738990004,"2020-05-05"]
+ dataProvider:
+ - ["ROWS","ROWS_RANGE"]
+ sql: |
+ deploy {0} options(long_windows='w1:2m') SELECT id, c1, sum(c4) OVER w1 as w1_c4_sum FROM {0} WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 d[0] BETWEEN 6 PRECEDING AND CURRENT ROW);
+ expect:
+ success: true
+ -
+ id: 4
+ desc: options(long_window='w1:2s')
+ inputs:
+ -
+ columns : ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"]
+ indexs: ["index1:c1:c7"]
+ rows:
+ - [1,"aa",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"aa",21,31,1.2,2.2,1590738990001,"2020-05-02"]
+ - [3,"aa",22,32,1.3,2.3,1590738990002,"2020-05-03"]
+ - [4,"aa",23,33,1.4,2.4,1590738990003,"2020-05-04"]
+ - [5,"aa",24,34,1.5,2.5,1590738990004,"2020-05-05"]
+ dataProvider:
+ - ["ROWS","ROWS_RANGE"]
+ sql: |
+ deploy {0} options(long_windows='w1:2s') SELECT id, c1, sum(c4) OVER w1 as w1_c4_sum FROM {0} WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 d[0] BETWEEN 6 PRECEDING AND CURRENT ROW);
+ expect:
+ success: true
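
Cases 0-4 above only assert that DEPLOY accepts a long_windows option whose bucket is given as a bare row count or with a d/h/m/s unit (case 2 additionally checks the stored deployment via show deployment). The statement shape, shown here with the dataProvider choice fixed to ROWS and with the value after the colon presumably being the bucket size for window w1 (an assumption drawn only from the option strings used in these cases):

    -- {0} is the framework placeholder for the auto-created table.
    deploy {0} options(long_windows='w1:2d') SELECT id, c1, sum(c4) OVER w1 as w1_c4_sum
    FROM {0} WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS BETWEEN 2 PRECEDING AND CURRENT ROW);
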
+ -
+ id: 5
+    desc: avg operator (smallint, int, bigint, float, double, string)
+ inputs:
+ -
+ columns : ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"]
+ indexs: ["index1:c1:c7"]
+ rows:
+ - [1,"aa",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"aa",21,31,1.2,2.2,1590738990001,"2020-05-02"]
+ - [3,"aa",22,32,1.3,2.3,1590738990002,"2020-05-03"]
+ - [4,"aa",23,33,1.4,2.4,1590738990003,"2020-05-04"]
+ - [5,"aa",24,34,1.5,2.5,1590738990004,"2020-05-05"]
+ dataProvider:
+ - ["ROWS","ROWS_RANGE"]
+ sql: |
+ deploy {0} options(long_windows='w1:2') SELECT id, c1, avg(c4) OVER w1 as w1_c4_avg FROM {0} WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 d[0] BETWEEN 2 PRECEDING AND CURRENT ROW);
+ expect:
+ success: true
+ -
+ id: 6
+ desc: min算子(smallint, int, bigint, float, double, string)
+ inputs:
+ -
+ columns : ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"]
+ indexs: ["index1:c1:c7"]
+ rows:
+ - [1,"aa",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"aa",21,31,1.2,2.2,1590738990001,"2020-05-02"]
+ - [3,"aa",22,32,1.3,2.3,1590738990002,"2020-05-03"]
+ - [4,"aa",23,33,1.4,2.4,1590738990003,"2020-05-04"]
+ - [5,"aa",24,34,1.5,2.5,1590738990004,"2020-05-05"]
+ dataProvider:
+ - ["ROWS","ROWS_RANGE"]
+ sql: |
+ deploy {0} options(long_windows='w1:2d') SELECT id, c1, min(c4) OVER w1 as w1_c4_min FROM {0} WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 d[0] BETWEEN 2 PRECEDING AND CURRENT ROW);
+ expect:
+ success: true
+ -
+ id: 7
+ desc: max算子(smallint, int, bigint, float, double, string)
+ inputs:
+ -
+ columns : ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"]
+ indexs: ["index1:c1:c7"]
+ rows:
+ - [1,"aa",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"aa",21,31,1.2,2.2,1590738990001,"2020-05-02"]
+ - [3,"aa",22,32,1.3,2.3,1590738990002,"2020-05-03"]
+ - [4,"aa",23,33,1.4,2.4,1590738990003,"2020-05-04"]
+ - [5,"aa",24,34,1.5,2.5,1590738990004,"2020-05-05"]
+ dataProvider:
+ - ["ROWS","ROWS_RANGE"]
+ sql: |
+ deploy {0} options(long_windows='w1:2h') SELECT id, c1, max(c4) OVER w1 as w1_c4_max FROM {0} WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 d[0] BETWEEN 2 PRECEDING AND CURRENT ROW);
+ expect:
+ success: true
+ -
+ id: 8
+ desc: count算子(smallint, int, bigint, float, double, string)
+ inputs:
+ -
+ columns : ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"]
+ indexs: ["index1:c1:c7"]
+ rows:
+ - [1,"aa",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"aa",21,31,1.2,2.2,1590738990001,"2020-05-02"]
+ - [3,"aa",22,32,1.3,2.3,1590738990002,"2020-05-03"]
+ - [4,"aa",23,33,1.4,2.4,1590738990003,"2020-05-04"]
+ - [5,"aa",24,34,1.5,2.5,1590738990004,"2020-05-05"]
+ dataProvider:
+ - ["ROWS","ROWS_RANGE"]
+ sql: |
+ deploy {0} options(long_windows='w1:2m') SELECT id, c1, count(c4) OVER w1 as w1_c4_count FROM {0} WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 d[0] BETWEEN 2 PRECEDING AND CURRENT ROW);
+ expect:
+ success: true
+ -
+ id: 9
+ desc: 相同的PARTITION BY和ORDER BY,长窗口和短窗口可合并
+ inputs:
+ -
+ columns : ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"]
+ indexs: ["index1:c1:c7"]
+ rows:
+ - [1,"aa",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"aa",21,31,1.2,2.2,1590738990001,"2020-05-02"]
+ - [3,"aa",22,32,1.3,2.3,1590738990002,"2020-05-03"]
+ - [4,"aa",23,33,1.4,2.4,1590738990003,"2020-05-04"]
+ - [5,"aa",24,34,1.5,2.5,1590738990004,"2020-05-05"]
+ dataProvider:
+ - ["ROWS","ROWS_RANGE"]
+ sql: |
+ deploy {0} options(long_windows='w1:2') SELECT id, c1, sum(c4) OVER w1 as w1_c4_sum, avg(c4) OVER w1 as w1_c4_avg from {0}
+ WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 d[0] BETWEEN 2 PRECEDING AND CURRENT ROW),
+ w2 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 d[0] BETWEEN 2 PRECEDING AND CURRENT ROW);
+ expect:
+ success: true
+ -
+ id: 10
+ desc: 相同的PARTITION BY和ORDER BY,长窗口之间可合并
+ inputs:
+ -
+ columns : ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"]
+ indexs: ["index1:c1:c7"]
+ rows:
+ - [1,"aa",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"aa",21,31,1.2,2.2,1590738990001,"2020-05-02"]
+ - [3,"aa",22,32,1.3,2.3,1590738990002,"2020-05-03"]
+ - [4,"aa",23,33,1.4,2.4,1590738990003,"2020-05-04"]
+ - [5,"aa",24,34,1.5,2.5,1590738990004,"2020-05-05"]
+ dataProvider:
+ - ["ROWS","ROWS_RANGE"]
+ sql: |
+ deploy {0} options(long_windows='w1:2,w2:2') SELECT id, c1, sum(c3) OVER w1 as w1_c3_sum, avg(c3) OVER w2 as w2_c3_avg from {0}
+ WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS BETWEEN 2 PRECEDING AND CURRENT ROW),
+ w2 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS BETWEEN 2 PRECEDING AND CURRENT ROW);
+ expect:
+ success: true
+ -
+ id: 11
+ desc: 相同的PARTITION BY和ORDER BY,-短窗口之间可合并(三个窗口 一个长窗口,俩个短窗口)
+ inputs:
+ -
+ columns : ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"]
+ indexs: ["index1:c1:c7"]
+ rows:
+ - [1,"aa",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"aa",21,31,1.2,2.2,1590738990001,"2020-05-02"]
+ - [3,"aa",22,32,1.3,2.3,1590738990002,"2020-05-03"]
+ - [4,"aa",23,33,1.4,2.4,1590738990003,"2020-05-04"]
+ - [5,"aa",24,34,1.5,2.5,1590738990004,"2020-05-05"]
+ dataProvider:
+ - ["ROWS","ROWS_RANGE"]
+ sql: |
+ deploy {0} options(long_windows='w1:2') SELECT id, c1, sum(c3) OVER w1 as w1_c3_sum, avg(c3) OVER w2 as w2_c3_avg, count(c3) OVER w3 as w3_c3_count from {0}
+ WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS BETWEEN 2 PRECEDING AND CURRENT ROW),
+ w2 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS BETWEEN 2 PRECEDING AND CURRENT ROW),
+ w3 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS BETWEEN 2 PRECEDING AND CURRENT ROW);
+ expect:
+ success: true
+ -
+ id: 12
+ desc: 不同的PARTITION BY和ORDER BY,长窗口和短窗口混合-不可合并窗口
+ inputs:
+ -
+ columns : ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"]
+ indexs: ["index1:c1:c7"]
+ rows:
+ - [1,"aa",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"aa",21,31,1.2,2.2,1590738990001,"2020-05-02"]
+ - [3,"aa",22,32,1.3,2.3,1590738990002,"2020-05-03"]
+ - [4,"aa",23,33,1.4,2.4,1590738990003,"2020-05-04"]
+ - [5,"aa",24,34,1.5,2.5,1590738990004,"2020-05-05"]
+ dataProvider:
+ - ["ROWS","ROWS_RANGE"]
+ sql: |
+ deploy {0} options(long_windows='w1:2') SELECT id, c1, sum(c5) OVER w1 as w1_c5_sum,
+ avg(c5) OVER w2 as w2_c5_avg from {0}
+ WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS BETWEEN 2 PRECEDING AND CURRENT ROW),
+ w2 AS (PARTITION BY {0}.c3 ORDER BY {0}.c7 ROWS BETWEEN 2 PRECEDING AND CURRENT ROW);
+ expect:
+ success: true
+ -
+ id: 13
+ desc: 窗口名不存在
+ inputs:
+ -
+ columns : ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"]
+ indexs: ["index1:c1:c7"]
+ rows:
+ - [1,"aa",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"aa",21,31,1.2,2.2,1590738990001,"2020-05-02"]
+ - [3,"aa",22,32,1.3,2.3,1590738990002,"2020-05-03"]
+ - [4,"aa",23,33,1.4,2.4,1590738990003,"2020-05-04"]
+ - [5,"aa",24,34,1.5,2.5,1590738990004,"2020-05-05"]
+ dataProvider:
+ - ["ROWS","ROWS_RANGE"]
+ sql: |
+ deploy {0} options(long_windows='w9090:2') SELECT id, c1, sum(c4) OVER w1 as w1_c4_sum FROM {0} WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS BETWEEN 2 PRECEDING AND CURRENT ROW);
+ expect:
+ success: true
+ -
+ id: 14
+ desc: options(long_window='w1:2y')
+ inputs:
+ -
+ columns : ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"]
+ indexs: ["index1:c1:c7"]
+ rows:
+ - [1,"aa",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"aa",21,31,1.2,2.2,1590738990001,"2020-05-02"]
+ - [3,"aa",22,32,1.3,2.3,1590738990002,"2020-05-03"]
+ - [4,"aa",23,33,1.4,2.4,1590738990003,"2020-05-04"]
+ - [5,"aa",24,34,1.5,2.5,1590738990004,"2020-05-05"]
+ dataProvider:
+ - ["ROWS","ROWS_RANGE"]
+ sql: |
+      deploy {0} options(long_windows='w1:2y') SELECT id, c1, sum(c4) OVER w1 as w1_c4_sum FROM {0} WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS BETWEEN 2 PRECEDING AND CURRENT ROW);
+ expect:
+ success: true
+ -
+ id: 15
+ desc: options格式错误
+ inputs:
+ -
+ columns : ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"]
+ indexs: ["index1:c1:c7"]
+ rows:
+ - [1,"aa",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"aa",21,31,1.2,2.2,1590738990001,"2020-05-02"]
+ - [3,"aa",22,32,1.3,2.3,1590738990002,"2020-05-03"]
+ - [4,"aa",23,33,1.4,2.4,1590738990003,"2020-05-04"]
+ - [5,"aa",24,34,1.5,2.5,1590738990004,"2020-05-05"]
+ dataProvider:
+ - ["ROWS","ROWS_RANGE"]
+ sql: |
+ deploy {0} options(long_windows='w1:100') SELECT id, c1, avg(c5) OVER w1 as w1_c4_avg FROM {0} WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS_RANGE BETWEEN 2 PRECEDING AND CURRENT ROW);
+ expect:
+ success: true
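+
+# Note on the cases above: they exercise the long_windows deploy option, whose value is a
+# comma-separated list of '<window name>:<bucket size>' pairs. The bucket size is given either as
+# a plain row count (w1:2, or 'w1:2,w2:2' for two windows in case 10) or as a time length with a
+# unit suffix (w1:2s / 2m / 2h / 2d); case 13 uses a window name that does not appear in the query
+# and case 14 probes the 2y suffix. A minimal sketch of the pattern, adapted from case 10 (the
+# deployment name demo and table name t1 are illustrative, not taken from the cases):
+#   deploy demo options(long_windows='w1:2,w2:2')
+#     SELECT id, c1, sum(c3) OVER w1 AS w1_c3_sum, avg(c3) OVER w2 AS w2_c3_avg FROM t1
+#     WINDOW w1 AS (PARTITION BY c1 ORDER BY c7 ROWS BETWEEN 2 PRECEDING AND CURRENT ROW),
+#            w2 AS (PARTITION BY c1 ORDER BY c7 ROWS BETWEEN 2 PRECEDING AND CURRENT ROW);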
diff --git a/cases/function/long_window/test_count_where.yaml b/cases/function/long_window/test_count_where.yaml
new file mode 100644
index 00000000000..84740eaa889
--- /dev/null
+++ b/cases/function/long_window/test_count_where.yaml
@@ -0,0 +1,540 @@
+# Copyright 2021 4Paradigm
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+db: test_zw
+debugs: ["长窗口count_where,date类型","长窗口count_where,rows"]
+cases:
+ -
+ id: 0
+ desc: 长窗口count_where,date类型
+ longWindow: w1:2
+ inputs:
+ -
+ columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"]
+ index: ["index1:c1:c7"]
+ rows:
+ - [1,"aa",1,1,30,1.1,2.1,1590738990000,"2020-05-01",true]
+ - [2,"aa",2,2,31,1.2,2.2,1590738990001,"2020-05-02",true]
+ - [3,"aa",3,3,32,1.3,2.3,1590738990002,"2020-05-03",true]
+ - [4,"aa",4,4,33,1.4,2.4,1590738990003,"2020-05-04",true]
+ - [5,"aa",5,5,34,1.5,2.5,1590738990004,"2020-05-05",false]
+ sql: |
+ SELECT id, c1, count_where(c8,c2<4) OVER w1 as w1_count FROM {0} WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS_RANGE BETWEEN 2 PRECEDING AND CURRENT ROW);
+ expect:
+ order: id
+ columns: ["id int","c1 string","w1_count bigint"]
+ rows:
+ - [1,"aa",1]
+ - [2,"aa",2]
+ - [3,"aa",3]
+ - [4,"aa",2]
+ - [5,"aa",1]
+ -
+ id: 0-1
+ desc: 长窗口count_where,rows
+ longWindow: w1:2
+ inputs:
+ -
+ columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"]
+ index: ["index1:c1:c7:0:latest"]
+ rows:
+ - [1,"aa",1,20,30,1.1,2.1,1590738990000,"2020-05-01",true]
+ - [2,"aa",2,21,31,1.2,2.2,1590738990001,"2020-05-02",true]
+ - [3,"aa",3,22,32,1.3,2.3,1590738990002,"2020-05-03",true]
+ - [4,"aa",4,23,33,1.4,2.4,1590738990003,"2020-05-04",true]
+ - [5,"aa",5,24,34,1.5,2.5,1590738990004,"2020-05-05",false]
+ sql: |
+ SELECT id, c1, count_where(c8,c2<4) OVER w1 as w1_count FROM {0} WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS BETWEEN 2 PRECEDING AND CURRENT ROW);
+ expect:
+ order: id
+ columns: ["id int","c1 string","w1_count bigint"]
+ rows:
+ - [1,"aa",1]
+ - [2,"aa",2]
+ - [3,"aa",3]
+ - [4,"aa",2]
+ - [5,"aa",1]
+ -
+ id: 1
+ desc: 长窗口count_where,smallint类型
+ longWindow: w1:2
+ inputs:
+ -
+ columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"]
+ rows:
+ - [1,"aa",1,20,30,1.1,2.1,1590738990000,"2020-05-01",true]
+ - [2,"aa",2,21,31,1.2,2.2,1590738990001,"2020-05-02",true]
+ - [3,"aa",3,22,32,1.3,2.3,1590738990002,"2020-05-03",true]
+ - [4,"aa",4,23,33,1.4,2.4,1590738990003,"2020-05-04",true]
+ - [5,"aa",5,24,34,1.5,2.5,1590738990004,"2020-05-05",false]
+ dataProvider:
+ - ["ROWS","ROWS_RANGE"]
+ sql: |
+ SELECT id, c1, count_where(c2,c2<4) OVER w1 as w1_count FROM {0} WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 d[0] BETWEEN 2 PRECEDING AND CURRENT ROW);
+ expect:
+ order: id
+ columns: ["id int","c1 string","w1_count bigint"]
+ rows:
+ - [1,"aa",1]
+ - [2,"aa",2]
+ - [3,"aa",3]
+ - [4,"aa",2]
+ - [5,"aa",1]
+ -
+ id: 2
+ desc: 长窗口count_where,int类型
+ longWindow: w1:2
+ inputs:
+ -
+ columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"]
+ rows:
+ - [1,"aa",1,20,30,1.1,2.1,1590738990000,"2020-05-01",true]
+ - [2,"aa",2,21,31,1.2,2.2,1590738990001,"2020-05-02",true]
+ - [3,"aa",3,22,32,1.3,2.3,1590738990002,"2020-05-03",true]
+ - [4,"aa",4,23,33,1.4,2.4,1590738990003,"2020-05-04",true]
+ - [5,"aa",5,24,34,1.5,2.5,1590738990004,"2020-05-05",false]
+ dataProvider:
+ - ["ROWS","ROWS_RANGE"]
+ sql: |
+ SELECT id, c1, count_where(c3,c2<4) OVER w1 as w1_count FROM {0} WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 d[0] BETWEEN 2 PRECEDING AND CURRENT ROW);
+ expect:
+ order: id
+ columns: ["id int","c1 string","w1_count bigint"]
+ rows:
+ - [1,"aa",1]
+ - [2,"aa",2]
+ - [3,"aa",3]
+ - [4,"aa",2]
+ - [5,"aa",1]
+ -
+ id: 3
+ desc: 长窗口count_where,bigint类型
+ longWindow: w1:2
+ inputs:
+ -
+ columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"]
+ rows:
+ - [1,"aa",1,20,30,1.1,2.1,1590738990000,"2020-05-01",true]
+ - [2,"aa",2,21,31,1.2,2.2,1590738990001,"2020-05-02",true]
+ - [3,"aa",3,22,32,1.3,2.3,1590738990002,"2020-05-03",true]
+ - [4,"aa",4,23,33,1.4,2.4,1590738990003,"2020-05-04",true]
+ - [5,"aa",5,24,34,1.5,2.5,1590738990004,"2020-05-05",false]
+ dataProvider:
+ - ["ROWS","ROWS_RANGE"]
+ sql: |
+ SELECT id, c1, count_where(c4,c2<4) OVER w1 as w1_count FROM {0} WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 d[0] BETWEEN 2 PRECEDING AND CURRENT ROW);
+ expect:
+ order: id
+ columns: ["id int","c1 string","w1_count bigint"]
+ rows:
+ - [1,"aa",1]
+ - [2,"aa",2]
+ - [3,"aa",3]
+ - [4,"aa",2]
+ - [5,"aa",1]
+ -
+ id: 4
+ desc: 长窗口count_where,string类型
+ longWindow: w1:2
+ inputs:
+ -
+ columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"]
+ rows:
+ - [1,"aa",1,20,30,1.1,2.1,1590738990000,"2020-05-01",true]
+ - [2,"aa",2,21,31,1.2,2.2,1590738990001,"2020-05-02",true]
+ - [3,"aa",3,22,32,1.3,2.3,1590738990002,"2020-05-03",true]
+ - [4,"aa",4,23,33,1.4,2.4,1590738990003,"2020-05-04",true]
+ - [5,"aa",5,24,34,1.5,2.5,1590738990004,"2020-05-05",false]
+ dataProvider:
+ - ["ROWS","ROWS_RANGE"]
+ sql: |
+ SELECT id, c1, count_where(c1,c2<4) OVER w1 as w1_count FROM {0} WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 d[0] BETWEEN 2 PRECEDING AND CURRENT ROW);
+ expect:
+ order: id
+ columns: ["id int","c1 string","w1_count bigint"]
+ rows:
+ - [1,"aa",1]
+ - [2,"aa",2]
+ - [3,"aa",3]
+ - [4,"aa",2]
+ - [5,"aa",1]
+ -
+ id: 5
+ desc: 长窗口count_where,timestamp类型
+ longWindow: w1:2
+ inputs:
+ -
+ columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"]
+ rows:
+ - [1,"aa",1,20,30,1.1,2.1,1590738990000,"2020-05-01",true]
+ - [2,"aa",2,21,31,1.2,2.2,1590738990001,"2020-05-02",true]
+ - [3,"aa",3,22,32,1.3,2.3,1590738990002,"2020-05-03",true]
+ - [4,"aa",4,23,33,1.4,2.4,1590738990003,"2020-05-04",true]
+ - [5,"aa",5,24,34,1.5,2.5,1590738990004,"2020-05-05",false]
+ dataProvider:
+ - ["ROWS","ROWS_RANGE"]
+ sql: |
+ SELECT id, c1, count_where(c7,c2<4) OVER w1 as w1_count FROM {0} WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 d[0] BETWEEN 2 PRECEDING AND CURRENT ROW);
+ expect:
+ order: id
+ columns: ["id int","c1 string","w1_count bigint"]
+ rows:
+ - [1,"aa",1]
+ - [2,"aa",2]
+ - [3,"aa",3]
+ - [4,"aa",2]
+ - [5,"aa",1]
+ -
+ id: 6
+ desc: 长窗口count_where,row类型
+ longWindow: w1:2
+ inputs:
+ -
+ columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"]
+ rows:
+ - [1,"aa",1,20,30,1.1,2.1,1590738990000,"2020-05-01",true]
+ - [2,"aa",2,21,31,1.2,2.2,1590738990001,"2020-05-02",true]
+ - [3,"aa",3,22,32,1.3,2.3,1590738990002,"2020-05-03",true]
+ - [4,"aa",4,23,33,1.4,2.4,1590738990003,"2020-05-04",true]
+ - [5,"aa",5,24,34,1.5,2.5,1590738990004,"2020-05-05",false]
+ dataProvider:
+ - ["ROWS","ROWS_RANGE"]
+ sql: |
+ SELECT id, c1, count_where(*,c2<4) OVER w1 as w1_count FROM {0} WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 d[0] BETWEEN 2 PRECEDING AND CURRENT ROW);
+ expect:
+ order: id
+ columns: ["id int","c1 string","w1_count bigint"]
+ rows:
+ - [1,"aa",1]
+ - [2,"aa",2]
+ - [3,"aa",3]
+ - [4,"aa",2]
+ - [5,"aa",1]
+ -
+ id: 7
+ desc: 长窗口count_where,bool类型
+ longWindow: w1:2
+ inputs:
+ -
+ columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"]
+ rows:
+ - [1,"aa",1,20,30,1.1,2.1,1590738990000,"2020-05-01",true]
+ - [2,"aa",2,21,31,1.2,2.2,1590738990001,"2020-05-02",true]
+ - [3,"aa",3,22,32,1.3,2.3,1590738990002,"2020-05-03",true]
+ - [4,"aa",4,23,33,1.4,2.4,1590738990003,"2020-05-04",true]
+ - [5,"aa",5,24,34,1.5,2.5,1590738990004,"2020-05-05",false]
+ dataProvider:
+ - ["ROWS","ROWS_RANGE"]
+ sql: |
+ SELECT id, c1, count_where(c9,c2<4) OVER w1 as w1_count FROM {0} WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 d[0] BETWEEN 2 PRECEDING AND CURRENT ROW);
+ expect:
+ order: id
+ columns: ["id int","c1 string","w1_count bigint"]
+ rows:
+ - [1,"aa",1]
+ - [2,"aa",2]
+ - [3,"aa",3]
+ - [4,"aa",2]
+ - [5,"aa",1]
+ -
+ id: 8
+ desc: 长窗口count_where,float类型
+ longWindow: w1:2
+ inputs:
+ -
+ columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"]
+ rows:
+ - [1,"aa",1,20,30,1.1,2.1,1590738990000,"2020-05-01",true]
+ - [2,"aa",2,21,31,1.2,2.2,1590738990001,"2020-05-02",true]
+ - [3,"aa",3,22,32,1.3,2.3,1590738990002,"2020-05-03",true]
+ - [4,"aa",4,23,33,1.4,2.4,1590738990003,"2020-05-04",true]
+ - [5,"aa",5,24,34,1.5,2.5,1590738990004,"2020-05-05",false]
+ dataProvider:
+ - ["ROWS","ROWS_RANGE"]
+ sql: |
+ SELECT id, c1, count_where(c5,c2<4) OVER w1 as w1_count FROM {0} WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 d[0] BETWEEN 2 PRECEDING AND CURRENT ROW);
+ expect:
+ order: id
+ columns: ["id int","c1 string","w1_count bigint"]
+ rows:
+ - [1,"aa",1]
+ - [2,"aa",2]
+ - [3,"aa",3]
+ - [4,"aa",2]
+ - [5,"aa",1]
+ -
+ id: 9
+ desc: 长窗口count_where,double类型
+ longWindow: w1:2
+ inputs:
+ -
+ columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"]
+ rows:
+ - [1,"aa",1,20,30,1.1,2.1,1590738990000,"2020-05-01",true]
+ - [2,"aa",2,21,31,1.2,2.2,1590738990001,"2020-05-02",true]
+ - [3,"aa",3,22,32,1.3,2.3,1590738990002,"2020-05-03",true]
+ - [4,"aa",4,23,33,1.4,2.4,1590738990003,"2020-05-04",true]
+ - [5,"aa",5,24,34,1.5,2.5,1590738990004,"2020-05-05",false]
+ dataProvider:
+ - ["ROWS","ROWS_RANGE"]
+ sql: |
+ SELECT id, c1, count_where(c6,c2<4) OVER w1 as w1_count FROM {0} WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 d[0] BETWEEN 2 PRECEDING AND CURRENT ROW);
+ expect:
+ order: id
+ columns: ["id int","c1 string","w1_count bigint"]
+ rows:
+ - [1,"aa",1]
+ - [2,"aa",2]
+ - [3,"aa",3]
+ - [4,"aa",2]
+ - [5,"aa",1]
+ -
+ id: 10
+ desc: 长窗口count_where,第二个参数使用bool列
+ longWindow: w1:2
+ inputs:
+ -
+ columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"]
+ rows:
+ - [1,"aa",1,20,30,1.1,2.1,1590738990000,"2020-05-01",true]
+ - [2,"aa",2,21,31,1.2,2.2,1590738990001,"2020-05-02",true]
+ - [3,"aa",3,22,32,1.3,2.3,1590738990002,"2020-05-03",true]
+ - [4,"aa",4,23,33,1.4,2.4,1590738990003,"2020-05-04",true]
+ - [5,"aa",5,24,34,1.5,2.5,1590738990004,"2020-05-05",false]
+ dataProvider:
+ - ["ROWS","ROWS_RANGE"]
+ sql: |
+ SELECT id, c1, count_where(c8,c9) OVER w1 as w1_count FROM {0} WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 d[0] BETWEEN 2 PRECEDING AND CURRENT ROW);
+ expect:
+ success: false
+ msg: fail
+ -
+ id: 11
+ desc: 长窗口count_where,第二个参数使用=
+ longWindow: w1:2
+ inputs:
+ -
+ columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"]
+ rows:
+ - [1,"aa",1,20,30,1.1,2.1,1590738990000,"2020-05-01",true]
+ - [2,"aa",2,21,31,1.2,2.2,1590738990001,"2020-05-02",true]
+ - [3,"aa",3,22,32,1.3,2.3,1590738990002,"2020-05-03",true]
+ - [4,"aa",4,23,33,1.4,2.4,1590738990003,"2020-05-04",true]
+ - [5,"aa",5,24,34,1.5,2.5,1590738990004,"2020-05-05",false]
+ dataProvider:
+ - ["ROWS","ROWS_RANGE"]
+ sql: |
+ SELECT id, c1, count_where(c8,c2=4) OVER w1 as w1_count FROM {0} WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 d[0] BETWEEN 2 PRECEDING AND CURRENT ROW);
+ expect:
+ order: id
+ columns: ["id int","c1 string","w1_count bigint"]
+ rows:
+ - [1,"aa",0]
+ - [2,"aa",0]
+ - [3,"aa",0]
+ - [4,"aa",1]
+ - [5,"aa",1]
+ -
+ id: 12
+ desc: 长窗口count_where,第二个参数使用!=
+ longWindow: w1:2
+ inputs:
+ -
+ columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"]
+ rows:
+ - [1,"aa",1,20,30,1.1,2.1,1590738990000,"2020-05-01",true]
+ - [2,"aa",2,21,31,1.2,2.2,1590738990001,"2020-05-02",true]
+ - [3,"aa",3,22,32,1.3,2.3,1590738990002,"2020-05-03",true]
+ - [4,"aa",4,23,33,1.4,2.4,1590738990003,"2020-05-04",true]
+ - [5,"aa",5,24,34,1.5,2.5,1590738990004,"2020-05-05",false]
+ dataProvider:
+ - ["ROWS","ROWS_RANGE"]
+ sql: |
+ SELECT id, c1, count_where(c8,c2!=4) OVER w1 as w1_count FROM {0} WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 d[0] BETWEEN 2 PRECEDING AND CURRENT ROW);
+ expect:
+ order: id
+ columns: ["id int","c1 string","w1_count bigint"]
+ rows:
+ - [1,"aa",1]
+ - [2,"aa",2]
+ - [3,"aa",3]
+ - [4,"aa",2]
+ - [5,"aa",2]
+ -
+ id: 13
+ desc: 长窗口count_where,第二个参数使用>=
+ longWindow: w1:2
+ inputs:
+ -
+ columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"]
+ rows:
+ - [1,"aa",1,20,30,1.1,2.1,1590738990000,"2020-05-01",true]
+ - [2,"aa",2,21,31,1.2,2.2,1590738990001,"2020-05-02",true]
+ - [3,"aa",3,22,32,1.3,2.3,1590738990002,"2020-05-03",true]
+ - [4,"aa",4,23,33,1.4,2.4,1590738990003,"2020-05-04",true]
+ - [5,"aa",5,24,34,1.5,2.5,1590738990004,"2020-05-05",false]
+ dataProvider:
+ - ["ROWS","ROWS_RANGE"]
+ sql: |
+ SELECT id, c1, count_where(c8,c2>=2) OVER w1 as w1_count FROM {0} WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 d[0] BETWEEN 2 PRECEDING AND CURRENT ROW);
+ expect:
+ order: id
+ columns: ["id int","c1 string","w1_count bigint"]
+ rows:
+ - [1,"aa",0]
+ - [2,"aa",1]
+ - [3,"aa",2]
+ - [4,"aa",3]
+ - [5,"aa",3]
+ -
+ id: 14
+ desc: 长窗口count_where,第二个参数使用<=
+ longWindow: w1:2
+ inputs:
+ -
+ columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"]
+ rows:
+ - [1,"aa",1,20,30,1.1,2.1,1590738990000,"2020-05-01",true]
+ - [2,"aa",2,21,31,1.2,2.2,1590738990001,"2020-05-02",true]
+ - [3,"aa",3,22,32,1.3,2.3,1590738990002,"2020-05-03",true]
+ - [4,"aa",4,23,33,1.4,2.4,1590738990003,"2020-05-04",true]
+ - [5,"aa",5,24,34,1.5,2.5,1590738990004,"2020-05-05",false]
+ dataProvider:
+ - ["ROWS","ROWS_RANGE"]
+ sql: |
+ SELECT id, c1, count_where(c8,c2<=3) OVER w1 as w1_count FROM {0} WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 d[0] BETWEEN 2 PRECEDING AND CURRENT ROW);
+ expect:
+ order: id
+ columns: ["id int","c1 string","w1_count bigint"]
+ rows:
+ - [1,"aa",1]
+ - [2,"aa",2]
+ - [3,"aa",3]
+ - [4,"aa",2]
+ - [5,"aa",1]
+ -
+ id: 15
+ desc: 长窗口count_where,第二个参数使用>
+ longWindow: w1:2
+ inputs:
+ -
+ columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"]
+ rows:
+ - [1,"aa",1,20,30,1.1,2.1,1590738990000,"2020-05-01",true]
+ - [2,"aa",2,21,31,1.2,2.2,1590738990001,"2020-05-02",true]
+ - [3,"aa",3,22,32,1.3,2.3,1590738990002,"2020-05-03",true]
+ - [4,"aa",4,23,33,1.4,2.4,1590738990003,"2020-05-04",true]
+ - [5,"aa",5,24,34,1.5,2.5,1590738990004,"2020-05-05",false]
+ dataProvider:
+ - ["ROWS","ROWS_RANGE"]
+ sql: |
+ SELECT id, c1, count_where(c8,c2>1) OVER w1 as w1_count FROM {0} WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 d[0] BETWEEN 2 PRECEDING AND CURRENT ROW);
+ expect:
+ order: id
+ columns: ["id int","c1 string","w1_count bigint"]
+ rows:
+ - [1,"aa",0]
+ - [2,"aa",1]
+ - [3,"aa",2]
+ - [4,"aa",3]
+ - [5,"aa",3]
+ -
+ id: 17
+ desc: 长窗口count_where,第二个参数使用and
+ longWindow: w1:2
+ inputs:
+ -
+ columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"]
+ rows:
+ - [1,"aa",1,20,30,1.1,2.1,1590738990000,"2020-05-01",true]
+ - [2,"aa",2,21,31,1.2,2.2,1590738990001,"2020-05-02",true]
+ - [3,"aa",3,22,32,1.3,2.3,1590738990002,"2020-05-03",true]
+ - [4,"aa",4,23,33,1.4,2.4,1590738990003,"2020-05-04",true]
+ - [5,"aa",5,24,34,1.5,2.5,1590738990004,"2020-05-05",false]
+ dataProvider:
+ - ["ROWS","ROWS_RANGE"]
+ sql: |
+ SELECT id, c1, count_where(c8,c2<4 and c2>1) OVER w1 as w1_count FROM {0} WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 d[0] BETWEEN 2 PRECEDING AND CURRENT ROW);
+ expect:
+ success: false
+ msg: fail
+ -
+ id: 18
+ desc: 长窗口count_where,第二个参数使用两个列
+ longWindow: w1:2
+ inputs:
+ -
+ columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"]
+ rows:
+ - [1,"aa",1,20,30,1.1,2.1,1590738990000,"2020-05-01",true]
+ - [2,"aa",2,21,31,1.2,2.2,1590738990001,"2020-05-02",true]
+ - [3,"aa",3,22,32,1.3,2.3,1590738990002,"2020-05-03",true]
+ - [4,"aa",4,23,33,1.4,2.4,1590738990003,"2020-05-04",true]
+ - [5,"aa",5,24,34,1.5,2.5,1590738990004,"2020-05-05",false]
+ dataProvider:
+ - ["ROWS","ROWS_RANGE"]
+ sql: |
+ SELECT id, c1, count_where(c8,c3>c2) OVER w1 as w1_count FROM {0} WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 d[0] BETWEEN 2 PRECEDING AND CURRENT ROW);
+ expect:
+ success: false
+ msg: fail
+ -
+ id: 19
+ desc: 长窗口count_where,第二个参数使用嵌套
+ longWindow: w1:2
+ inputs:
+ -
+ columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"]
+ rows:
+ - [1,"aa",1,20,30,1.1,2.1,1590738990000,"2020-05-01",true]
+ - [2,"aa",2,21,31,1.2,2.2,1590738990001,"2020-05-02",true]
+ - [3,"aa",3,22,32,1.3,2.3,1590738990002,"2020-05-03",true]
+ - [4,"aa",4,23,33,1.4,2.4,1590738990003,"2020-05-04",true]
+ - [5,"aa",5,24,34,1.5,2.5,1590738990004,"2020-05-05",false]
+ dataProvider:
+ - ["ROWS","ROWS_RANGE"]
+ sql: |
+ SELECT id, c1, count_where(c8,if_null(c2,0)>4) OVER w1 as w1_count FROM {0} WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 d[0] BETWEEN 2 PRECEDING AND CURRENT ROW);
+ expect:
+ success: false
+ msg: fail
+ -
+ id: 20
+ desc: 长窗口count_where,第二个参数常量在前
+ longWindow: w1:2
+ inputs:
+ -
+ columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"]
+ rows:
+ - [1,"aa",1,20,30,1.1,2.1,1590738990000,"2020-05-01",true]
+ - [2,"aa",2,21,31,1.2,2.2,1590738990001,"2020-05-02",true]
+ - [3,"aa",3,22,32,1.3,2.3,1590738990002,"2020-05-03",true]
+ - [4,"aa",4,23,33,1.4,2.4,1590738990003,"2020-05-04",true]
+ - [5,"aa",5,24,34,1.5,2.5,1590738990004,"2020-05-05",false]
+ dataProvider:
+ - ["ROWS","ROWS_RANGE"]
+ sql: |
+ SELECT id, c1, count_where(c8,4>c2) OVER w1 as w1_count FROM {0} WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 d[0] BETWEEN 2 PRECEDING AND CURRENT ROW);
+ expect:
+ order: id
+ columns: ["id int","c1 string","w1_count bigint"]
+ rows:
+ - [1,"aa",1]
+ - [2,"aa",2]
+ - [3,"aa",3]
+ - [4,"aa",2]
+ - [5,"aa",1]
+
+
+
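+# Taken together, the negative cases above pin down the shape the filter argument of count_where
+# is expected to take in these tests: a single comparison between one column and a constant.
+# A bare bool column (case 10), AND-combined predicates (case 17), a column-to-column comparison
+# (case 18) and a nested expression (case 19) all expect success: false, while the constant may
+# sit on either side of the comparison (case 20). The contrast, echoing those cases:
+#   count_where(c8, c2 < 4)             -- accepted (case 0)
+#   count_where(c8, 4 > c2)             -- accepted (case 20)
+#   count_where(c8, c2 < 4 and c2 > 1)  -- expected to fail (case 17)
+#   count_where(c8, c3 > c2)            -- expected to fail (case 18)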
diff --git a/cases/function/multiple_databases/test_multiple_databases.yaml b/cases/function/multiple_databases/test_multiple_databases.yaml
index 9145e2219f0..208270b4ae5 100644
--- a/cases/function/multiple_databases/test_multiple_databases.yaml
+++ b/cases/function/multiple_databases/test_multiple_databases.yaml
@@ -13,6 +13,7 @@
# limitations under the License.
debugs: []
+version: 0.5.0
cases:
- id: 0
desc: Last Join tables from two databases 1 - default db is db1
@@ -32,7 +33,7 @@ cases:
- [ "aa",2,13,1590738989000 ]
- [ "bb",21,131,1590738990000 ]
- [ "cc",41,151,1590738992000 ]
- sql: select {0}.c1,{0}.c2,db2.{1}.c3,db2.{1}.c4 from {0} last join db2.{1} ORDER BY db2.{1}.c3 on {0}.c1=db2.{1}.c1;
+ sql: select db1.{0}.c1,db1.{0}.c2,db2.{1}.c3,db2.{1}.c4 from db1.{0} last join db2.{1} ORDER BY db2.{1}.c3 on db1.{0}.c1=db2.{1}.c1;
expect:
order: c1
columns: [ "c1 string","c2 int","c3 bigint","c4 timestamp" ]
@@ -113,7 +114,7 @@ cases:
success: false
- id: 4
desc: 全部使用默认库
- db: db
+ db: test_zw
inputs:
- columns: [ "c1 string","c2 int","c3 bigint","c4 timestamp" ]
indexs: [ "index1:c1:c4" ]
@@ -137,7 +138,7 @@ cases:
- [ "cc",41,151,1590738992000 ]
- id: 5
desc: 指定当前库查询
- db: db
+ db: test_zw
inputs:
- columns: [ "c1 string","c2 int","c3 bigint","c4 timestamp" ]
indexs: [ "index1:c1:c4" ]
@@ -151,7 +152,7 @@ cases:
- [ "aa",2,13,1590738989000 ]
- [ "bb",21,131,1590738990000 ]
- [ "cc",41,151,1590738992000 ]
- sql: select db.{0}.c1,db.{0}.c2,db.{1}.c3,db.{1}.c4 from db.{0} last join db.{1} ORDER BY db.{1}.c3 on db.{0}.c1=db.{1}.c1;
+ sql: select {0}.c1,{0}.c2,{1}.c3,{1}.c4 from {0} last join {1} ORDER BY {1}.c3 on {0}.c1={1}.c1;
expect:
order: c1
columns: [ "c1 string","c2 int","c3 bigint","c4 timestamp" ]
@@ -161,7 +162,7 @@ cases:
- [ "cc",41,151,1590738992000 ]
- id: 6
desc: 查询使用其他库
- db: db
+ db: test_zw
inputs:
-
columns : ["c1 string","c2 int","c3 bigint","c4 timestamp"]
diff --git a/cases/function/out_in/test_out_in.yaml b/cases/function/out_in/test_out_in.yaml
index 62de26ea78d..e7ac9134dfd 100644
--- a/cases/function/out_in/test_out_in.yaml
+++ b/cases/function/out_in/test_out_in.yaml
@@ -13,7 +13,7 @@
# limitations under the License.
db: test_zw
-debugs: []
+debugs: ['数据里有null、空串、特殊字符']
cases:
-
id: 0
diff --git a/cases/function/select/test_select_sample.yaml b/cases/function/select/test_select_sample.yaml
index 6b1bfe9892f..10a14bf3707 100644
--- a/cases/function/select/test_select_sample.yaml
+++ b/cases/function/select/test_select_sample.yaml
@@ -14,6 +14,7 @@
db: test_zw
debugs: []
+version: 0.5.0
cases:
- id: 0
desc: 查询所有列
@@ -290,4 +291,18 @@ cases:
columns: ["sum_col1 int32", "cnt_col1 int64", "max_col1 int32", "min_col1 int32", "avg_col1 double"]
order: sum_col1
rows:
- - [15, 5, 5, 1, 3]
\ No newline at end of file
+ - [15, 5, 5, 1, 3]
+ -
+ id: 14
+ desc: 不指定索引,插入数据,可查询
+ tags: ["TODO","CPP ut不支持 id int not null 解析"]
+ inputs:
+ - columns: [ "id int not null","c1 int not null","c2 smallint not null","c3 float not null","c4 double not null","c5 bigint not null","c6 string not null","c7 timestamp not null","c8 date not null","c9 bool not null" ]
+ rows:
+ - [ 1, 1, 2, 3.3, 4.4, 5, "aa", 12345678, "2020-05-21", true ]
+ sql: select * from {0};
+ expect:
+ columns : ["id int","c1 int","c2 smallint","c3 float","c4 double","c5 bigint","c6 string","c7 timestamp","c8 date","c9 bool"]
+ order: id
+ rows:
+ - [1,1,2,3.3,4.4,5,"aa",12345678,"2020-05-21",true]
\ No newline at end of file
diff --git a/cases/function/select/test_sub_select.yaml b/cases/function/select/test_sub_select.yaml
index 381f7cae058..2956df1fc9b 100644
--- a/cases/function/select/test_sub_select.yaml
+++ b/cases/function/select/test_sub_select.yaml
@@ -14,6 +14,7 @@
db: test_zw
debugs: []
+version: 0.5.0
cases:
-
id: 0
@@ -343,3 +344,17 @@ cases:
sql: select id,v2,v2 from (select id,c2+1 as v2,c3+1 as v2 from {0});
expect:
success: false
+ -
+ id: 15
+ desc: 不指定索引,进行子查询操作
+ tags: ["TODO","CPP ut不支持 id int not null 解析"]
+ inputs:
+ - columns: [ "id int not null","c1 int not null","c2 smallint not null","c3 float not null","c4 double not null","c5 bigint not null","c6 string not null","c7 timestamp not null","c8 date not null","c9 bool not null" ]
+ rows:
+ - [ 1, 1, 2, 3.3, 4.4, 5, "aa", 12345678, "2020-05-21", true ]
+ sql: select c1,c2 from (select id as c1,c1 as c2,c7 as c3 from {0});
+ expect:
+ columns : ["c1 int","c2 int"]
+      order: c1
+ rows:
+ - [1,1]
diff --git a/cases/function/select/test_where.yaml b/cases/function/select/test_where.yaml
index 427edcfc29d..8705209bdea 100644
--- a/cases/function/select/test_where.yaml
+++ b/cases/function/select/test_where.yaml
@@ -13,6 +13,7 @@
# limitations under the License.
sqlDialect: ["HybridSQL"]
debugs: []
+version: 0.5.0
cases:
- id: 0
desc: Where条件命中索引
@@ -21,8 +22,8 @@ cases:
sql: |
SELECT col0, col1, col2, col3, col4, col5, col6 FROM {0} where col2=5;
inputs:
- - schema: col0:string, col1:int32, col2:int16, col3:float, col4:double, col5:int64, col6:string
- index: index1:col2:col5
+ - columns: ["col0 string", "col1 int32", "col2 int16", "col3 float", "col4 double", "col5 int64", "col6 string"]
+ indexs: ["index1:col2:col5"]
data: |
0, 1, 5, 1.1, 11.1, 1, 1
0, 2, 5, 2.2, 22.2, 2, 22
@@ -140,8 +141,8 @@ cases:
sql: |
SELECT col0, col1, col2, col3, col4, col5, col6 FROM {0} where col2=col3 and col1 < 2;
inputs:
- - schema: col0:string, col1:int32, col2:int16, col3:float, col4:double, col5:int64, col6:string
- index: index1:col2:col5
+ - columns: ["col0 string", "col1 int32", "col2 int16", "col3 float", "col4 double", "col5 int64", "col6 string"]
+ indexs: ["index1:col2:col5"]
data: |
0, 1, 5, 1.1, 11.1, 1, 1
0, 2, 5, 2.2, 22.2, 2, 22
@@ -249,3 +250,4 @@ cases:
order: sum_col1
rows:
- [3, 2, 2, 1, 1.5]
+
diff --git a/cases/function/test_batch_request.yaml b/cases/function/test_batch_request.yaml
index c333ac68b92..9f3134806e1 100644
--- a/cases/function/test_batch_request.yaml
+++ b/cases/function/test_batch_request.yaml
@@ -14,6 +14,7 @@
db: test_zw
debugs: []
+version: 0.5.0
cases:
- id: 0
desc: batch request without common column
@@ -252,6 +253,7 @@ cases:
- [7,"a",15,100,4.2,7.2,1590738996000,"2020-05-07","e"]
- id: 6
desc: batch request with one common window and one non-common window
+ mode: disk-unsupport
inputs:
-
columns: ["id int","k1 bigint","k2 bigint","k3 timestamp", "k4 timestamp",
@@ -289,6 +291,7 @@ cases:
- id: 7
desc: batch request with common window and common and non-common aggregations, window is small
+ mode: disk-unsupport
inputs:
-
columns: ["id int","k1 bigint","k2 timestamp",
@@ -324,6 +327,7 @@ cases:
- id: 8
desc: batch request with one common window and one non-common window, current time == history time
+ mode: disk-unsupport
inputs:
-
columns: ["id int","k1 bigint","k2 bigint","k3 timestamp", "k4 timestamp",
diff --git a/cases/function/test_index_optimized.yaml b/cases/function/test_index_optimized.yaml
index a42d66cd0a9..78e05a96131 100644
--- a/cases/function/test_index_optimized.yaml
+++ b/cases/function/test_index_optimized.yaml
@@ -13,7 +13,8 @@
# limitations under the License.
db: test_zw
-debugs: [ ]
+debugs: []
+version: 0.5.0
cases:
- id: 0
desc: window optimized one key one ts
diff --git a/cases/function/tmp/test_current_time.yaml b/cases/function/tmp/test_current_time.yaml
new file mode 100644
index 00000000000..528113cf3e5
--- /dev/null
+++ b/cases/function/tmp/test_current_time.yaml
@@ -0,0 +1,106 @@
+# Copyright 2021 4Paradigm
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+db: test_zw
+debugs: []
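+# These temporary cases probe EXCLUDE CURRENT_TIME when the order-by value is 0 or negative.
+# Judging from the expected rows of case 0, EXCLUDE CURRENT_TIME drops other rows that share the
+# current row's timestamp but keeps the current row itself, so each of the three ts=0 rows
+# aggregates only its own c4 (30, 31, 32). Case 1 adds an upper bound of `0s OPEN PRECEDING`,
+# which also removes the current row's timestamp from the window: only the row at 1590738993000
+# still finds an in-range earlier row (w1_c4_sum = 32) and every other row is NULL.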
+cases:
+ - id: 0
+ desc: ts列的值为0
+ inputs:
+ - columns: [ "c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date" ]
+ indexs: [ "index1:c1:c7" ]
+ rows:
+ - [ "aa",20,30,1.1,2.1,0,"2020-05-01" ]
+ - [ "aa",21,31,1.2,2.2,0,"2020-05-02" ]
+ - [ "aa",22,32,1.3,2.3,0,"2020-05-03" ]
+ - [ "aa",23,33,1.4,2.4,1590738993000,"2020-05-04" ]
+ - [ "bb",24,34,1.5,2.5,1590738994000,"2020-05-05" ]
+ sql: |
+ SELECT c1, c3, sum(c4) OVER w1 as w1_c4_sum FROM {0} WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS_RANGE BETWEEN 2s PRECEDING AND CURRENT ROW EXCLUDE CURRENT_TIME);
+ expect:
+ order: c3
+ columns: [ "c1 string","c3 int","w1_c4_sum bigint" ]
+ rows:
+ - [ "aa",20,30 ]
+ - [ "aa",21,31 ]
+ - [ "aa",22,32 ]
+ - [ "aa",23,33 ]
+ - [ "bb",24,34 ]
+ - id: 1
+ desc: ts列的值为0
+ inputs:
+ - columns: [ "c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date" ]
+ indexs: [ "index1:c1:c7" ]
+ rows:
+ - [ "aa",20,30,1.1,2.1,0,"2020-05-01" ]
+ - [ "aa",21,31,1.2,2.2,0,"2020-05-02" ]
+ - [ "aa",22,32,1.3,2.3,1590738992000,"2020-05-03" ]
+ - [ "aa",23,33,1.4,2.4,1590738993000,"2020-05-04" ]
+ - [ "bb",24,34,1.5,2.5,1590738994000,"2020-05-05" ]
+ sql: |
+ SELECT c1, c3, sum(c4) OVER w1 as w1_c4_sum FROM {0} WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS_RANGE BETWEEN 2s PRECEDING AND 0s OPEN PRECEDING EXCLUDE CURRENT_TIME);
+ expect:
+ order: c3
+ columns: [ "c1 string","c3 int","w1_c4_sum bigint" ]
+ rows:
+ - [ "aa",20,null ]
+ - [ "aa",21,null ]
+ - [ "aa",22,null ]
+ - [ "aa",23,32 ]
+ - [ "bb",24,null ]
+ - id: 2
+ desc: ts列的值为-1
+ tags: ["TODO","ts为负数有问题,带支持后再验证"]
+ inputs:
+ - columns: [ "c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 bigint","c8 date" ]
+ indexs: [ "index1:c1:c7" ]
+ rows:
+ - [ "aa",20,30,1.1,2.1,-1,"2020-05-01" ]
+ - [ "aa",21,31,1.2,2.2,-1,"2020-05-02" ]
+ - [ "aa",22,32,1.3,2.3,-1,"2020-05-03" ]
+ - [ "aa",23,33,1.4,2.4,1590738993000,"2020-05-04" ]
+ - [ "bb",24,34,1.5,2.5,1590738994000,"2020-05-05" ]
+ sql: |
+ SELECT c1, c3, sum(c4) OVER w1 as w1_c4_sum FROM {0} WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS_RANGE BETWEEN 2s PRECEDING AND CURRENT ROW EXCLUDE CURRENT_TIME);
+ expect:
+ order: c3
+ columns: [ "c1 string","c3 int","w1_c4_sum bigint" ]
+ rows:
+ - [ "aa",20,30 ]
+ - [ "aa",21,31 ]
+ - [ "aa",22,32 ]
+ - [ "aa",23,33 ]
+ - [ "bb",24,34 ]
+# - id: 2
+# desc: ts列的值为1
+# inputs:
+# - columns: [ "c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 bigint","c8 date" ]
+# indexs: [ "index1:c1:c7" ]
+# rows:
+# - [ "aa",20,30,1.1,2.1,1,"2020-05-01" ]
+# - [ "aa",21,31,1.2,2.2,1,"2020-05-02" ]
+# - [ "aa",22,32,1.3,2.3,1,"2020-05-03" ]
+# - [ "aa",23,33,1.4,2.4,1590738993000,"2020-05-04" ]
+# - [ "bb",24,34,1.5,2.5,1590738994000,"2020-05-05" ]
+# sql: |
+# SELECT c1, c3, sum(c4) OVER w1 as w1_c4_sum FROM {0} WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS_RANGE BETWEEN 2s PRECEDING AND CURRENT ROW EXCLUDE CURRENT_TIME);
+# expect:
+# order: c3
+# columns: [ "c1 string","c3 int","w1_c4_sum bigint" ]
+# rows:
+# - [ "aa",20,30 ]
+# - [ "aa",21,31 ]
+# - [ "aa",22,32 ]
+# - [ "aa",23,33 ]
+# - [ "bb",24,34 ]
diff --git a/cases/function/v040/test_groupby.yaml b/cases/function/v040/test_groupby.yaml
index a44b93e6cfb..7150588bedd 100644
--- a/cases/function/v040/test_groupby.yaml
+++ b/cases/function/v040/test_groupby.yaml
@@ -14,6 +14,7 @@
db: test_zw
debugs: []
+version: 0.5.0
sqlDialect: ["HybridSQL"]
cases:
- id: 0
@@ -31,7 +32,7 @@ cases:
sql: select c1,count(*) as v1 from {0} group by c1;
expect:
order: c1
- columns: ["c1 string","v1 int"]
+ columns: ["c1 string","v1 bigint"]
rows:
- ["aa",2]
- ["bb",2]
@@ -52,7 +53,7 @@ cases:
sql: select c1,count(*) as v1 from {0} group by c1;
expect:
order: c1
- columns: ["c1 string","v1 int"]
+ columns: ["c1 string","v1 bigint"]
rows:
- ["aa",2]
- ["bb",2]
@@ -73,7 +74,7 @@ cases:
sql: select c1,c2,count(*) as v1 from {0} group by c1,c2;
expect:
order: c1
- columns: ["c1 string","c2 int","v1 int"]
+ columns: ["c1 string","c2 int","v1 bigint"]
rows:
- ["aa",11,2]
- ["bb",11,2]
@@ -94,7 +95,7 @@ cases:
sql: select c1,count(*) as v1 from {0} group by c1;
expect:
order: c1
- columns: ["c1 string","v1 int"]
+ columns: ["c1 int","v1 bigint"]
rows:
- [11,2]
- [22,2]
@@ -114,7 +115,7 @@ cases:
sql: select c1,count(*) as v1 from {0} group by c1;
expect:
order: c1
- columns: ["c1 string","v1 int"]
+ columns: ["c1 bigint","v1 bigint"]
rows:
- [11,2]
- [22,2]
@@ -134,7 +135,7 @@ cases:
sql: select c1,count(*) as v1 from {0} group by c1;
expect:
order: c1
- columns: ["c1 string","v1 int"]
+ columns: ["c1 smallint","v1 bigint"]
rows:
- [11,2]
- [22,2]
@@ -186,7 +187,7 @@ cases:
sql: select c1,count(*) as v1 from {0} group by c1;
expect:
order: c1
- columns: ["c1 string","v1 int"]
+ columns: ["c1 date","v1 bigint"]
rows:
- ["2020-05-01",2]
- ["2020-05-02",2]
@@ -206,7 +207,7 @@ cases:
sql: select c1,count(*) as v1 from {0} group by c1;
expect:
order: c1
- columns: ["c1 string","v1 int"]
+ columns: ["c1 timestamp","v1 bigint"]
rows:
- [11,2]
- [22,2]
@@ -226,7 +227,7 @@ cases:
sql: select c1,count(*) as v1 from {0} group by c1;
expect:
order: c1
- columns: ["c1 string","v1 int"]
+ columns: ["c1 bool","v1 bigint"]
rows:
- [true,3]
- [false,2]
@@ -246,7 +247,7 @@ cases:
sql: select c1,count(*) as v1 from {0} group by c1;
expect:
order: c1
- columns: ["c1 string","v1 int"]
+ columns: ["c1 string","v1 bigint"]
rows:
- ["",2]
- [null,2]
@@ -267,7 +268,7 @@ cases:
sql: select c1,c2,count(*) as v1 from {0} group by c1,c2;
expect:
order: c1
- columns: ["c1 string","c2 int","v1 int"]
+ columns: ["c1 string","c2 int","v1 bigint"]
rows:
- ["aa",11,2]
- ["bb",11,2]
@@ -288,7 +289,7 @@ cases:
- [6,"aa",11,1590738995000]
sql: select c1,c2,count(*) as v1 from {0} group by c1,c2;
expect:
- columns: ["c1 string","c2 int","v1 int"]
+ columns: ["c1 string","c2 int","v1 bigint"]
rows:
- ["aa",12,1]
- ["bb",11,2]
@@ -326,7 +327,7 @@ cases:
sql: select c1,count(c2) as v1,max(c2) as v2,min(c2) as v3,avg(c2) as v4,sum(c2) as v5 from {0} group by c1;
expect:
order: c1
- columns: ["c1 string","v1 int","v2 int","v3 int","v4 double","v5 bigint"]
+ columns: ["c1 string","v1 bigint","v2 int","v3 int","v4 double","v5 int"]
rows:
- ["aa",3,6,1,3.333333,10]
- ["bb",2,5,2,3.5,7]
@@ -362,7 +363,7 @@ cases:
sql: select c1,count(c2) as v1 from {0} group by c1 having count(c2)>1;
expect:
order: c1
- columns: ["c1 string","v1 int"]
+ columns: ["c1 string","v1 bigint"]
rows:
- ["aa",3]
- ["bb",2]
@@ -455,7 +456,7 @@ cases:
sql: select t1.c1,t1.v1,t2.v1 from (select c1,sum(c2) as v1 from {0} group by c1) as t1 last join (select c1,sum(c2) as v1 from {1} group by c1) as t2 on t1.c1=t2.c1;
expect:
order: c1
- columns: [ "c1 string","v1 bigint","v1 bigint"]
+ columns: [ "c1 string","v1 int","v1 int"]
rows:
- [ "aa",23,2 ]
- [ "cc",41,62 ]
@@ -491,7 +492,7 @@ cases:
sql: select c1,count(*) as v1 from (select * from {0}) as t group by c1;
expect:
order: c1
- columns: ["c1 string","v1 int"]
+ columns: ["c1 string","v1 bigint"]
rows:
- ["aa",2]
- ["bb",2]
@@ -511,7 +512,7 @@ cases:
sql: select * from (select c1,count(*) as v1 from {0} group by c1);
expect:
order: c1
- columns: ["c1 string","v1 int"]
+ columns: ["c1 string","v1 bigint"]
rows:
- ["aa",2]
- ["bb",2]
@@ -548,7 +549,7 @@ cases:
sql: select * from (select c1,count(*) as v1 from {0} group by c1) where v1=2;
expect:
order: c1
- columns: ["c1 string","v1 int"]
+ columns: ["c1 string","v1 bigint"]
rows:
- ["aa",2]
- ["bb",2]
diff --git a/cases/function/v040/test_udaf.yaml b/cases/function/v040/test_udaf.yaml
index ba325e33fdb..fee7f58b800 100644
--- a/cases/function/v040/test_udaf.yaml
+++ b/cases/function/v040/test_udaf.yaml
@@ -30,7 +30,7 @@ cases:
- [5,"bb",1590738994000]
sql: select count(*) as v1 from {0};
expect:
- columns: ["v1 int"]
+ columns: ["v1 bigint"]
rows:
- [5]
- id: 1
@@ -64,7 +64,7 @@ cases:
sql: select count(c2) as v1,max(c2) as v2,min(c2) as v3,avg(c2) as v4,sum(c2) as v5 from {0};
expect:
order: c1
- columns: ["v1 int","v2 int","v3 int","v4 double","v5 bigint"]
+ columns: ["v1 bigint","v2 int","v3 int","v4 double","v5 int"]
rows:
- [6,6,1,3.5,21]
- id: 3
@@ -77,7 +77,7 @@ cases:
sql: select count(c2) as v1,max(c2) as v2,min(c2) as v3,avg(c2) as v4,sum(c2) as v5 from {0};
expect:
order: c1
- columns: ["v1 int","v2 int","v3 int","v4 double","v5 bigint"]
+ columns: ["v1 int","v2 int","v3 int","v4 double","v5 int"]
rows:
- [0,0,0,0,0]
- id: 4
@@ -96,7 +96,7 @@ cases:
sql: select count(c1) as v1,max(c2) as v2,min(c2) as v3,avg(c2) as v4,sum(c2) as v5 from {0};
expect:
order: c1
- columns: ["v1 int","v2 int","v3 int","v4 double","v5 bigint"]
+ columns: ["v1 bigint","v2 int","v3 int","v4 double","v5 int"]
rows:
- [5,6,1,3.6,18]
diff --git a/cases/function/window/error_window.yaml b/cases/function/window/error_window.yaml
index 82b16fee5e6..9e9419bc74f 100644
--- a/cases/function/window/error_window.yaml
+++ b/cases/function/window/error_window.yaml
@@ -14,6 +14,7 @@
db: test_zw
debugs: []
+version: 0.5.0
cases:
- id: 0
desc: no order by
diff --git a/cases/function/window/test_current_row.yaml b/cases/function/window/test_current_row.yaml
new file mode 100644
index 00000000000..4c0b5d7ba3f
--- /dev/null
+++ b/cases/function/window/test_current_row.yaml
@@ -0,0 +1,768 @@
+# Copyright 2021 4Paradigm
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+db: test_zw
+debugs: []
+version: 0.6.0
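+# These cases exercise EXCLUDE CURRENT_ROW: the aggregate is computed over the window with the
+# current row left out. Worked through case 0 below: the first "aa" row has no preceding rows and
+# yields NULL, the row with c3=22 sums only the two preceding c4 values (30 + 31 = 61), and c3=23
+# gets 31 + 32 = 63. Later cases combine the flag with ROWS_RANGE, MAXSIZE, OPEN PRECEDING and
+# EXCLUDE CURRENT_TIME; the TODO-tagged cases are parked until negative timestamps and the noted
+# bug are supported.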
+cases:
+ - id: 0
+ desc: rows-current_row
+ inputs:
+ - columns: [ "c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date" ]
+ indexs: [ "index1:c1:c7" ]
+ rows:
+ - [ "aa",20,30,1.1,2.1,1590738990000,"2020-05-01" ]
+ - [ "aa",21,31,1.2,2.2,1590738991000,"2020-05-02" ]
+ - [ "aa",22,32,1.3,2.3,1590738992000,"2020-05-03" ]
+ - [ "aa",23,33,1.4,2.4,1590738993000,"2020-05-04" ]
+ - [ "bb",24,34,1.5,2.5,1590738994000,"2020-05-05" ]
+ sql: |
+ SELECT c1, c3, sum(c4) OVER w1 as w1_c4_sum FROM {0} WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS BETWEEN 2 PRECEDING AND CURRENT ROW EXCLUDE CURRENT_ROW);
+ expect:
+ order: c3
+ columns: [ "c1 string","c3 int","w1_c4_sum bigint" ]
+ rows:
+ - [ "aa",20,null ]
+ - [ "aa",21,30 ]
+ - [ "aa",22,61 ]
+ - [ "aa",23,63 ]
+ - [ "bb",24,null ]
+ - id: 1
+ desc: rows_range-current_row
+ inputs:
+ - columns: [ "c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date" ]
+ indexs: [ "index1:c1:c7" ]
+ rows:
+ - [ "aa",20,30,1.1,2.1,1590738990000,"2020-05-01" ]
+ - [ "aa",21,31,1.2,2.2,1590738991000,"2020-05-02" ]
+ - [ "aa",22,32,1.3,2.3,1590738992000,"2020-05-03" ]
+ - [ "aa",23,33,1.4,2.4,1590738993000,"2020-05-04" ]
+ - [ "bb",24,34,1.5,2.5,1590738994000,"2020-05-05" ]
+ sql: |
+ SELECT c1, c3, sum(c4) OVER w1 as w1_c4_sum FROM {0} WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS_RANGE BETWEEN 2s PRECEDING AND CURRENT ROW EXCLUDE CURRENT_ROW);
+ expect:
+ order: c3
+ columns: [ "c1 string","c3 int","w1_c4_sum bigint" ]
+ rows:
+ - [ "aa",20,null ]
+ - [ "aa",21,30 ]
+ - [ "aa",22,61 ]
+ - [ "aa",23,63 ]
+ - [ "bb",24,null ]
+ - id: 2
+ desc: rows-current_row-有和当前行ts一致的数据
+ mode: disk-unsupport
+ inputs:
+ - columns: [ "c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date" ]
+ indexs: [ "index1:c1:c7" ]
+ rows:
+ - [ "aa",20,30,1.1,2.1,1590738990000,"2020-05-01" ]
+ - [ "aa",21,31,1.2,2.2,1590738990000,"2020-05-02" ]
+ - [ "aa",22,32,1.3,2.3,1590738992000,"2020-05-03" ]
+ - [ "aa",23,33,1.4,2.4,1590738993000,"2020-05-04" ]
+ - [ "bb",24,34,1.5,2.5,1590738994000,"2020-05-05" ]
+ sql: |
+ SELECT c1, c3, sum(c4) OVER w1 as w1_c4_sum FROM {0} WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS BETWEEN 2 PRECEDING AND CURRENT ROW EXCLUDE CURRENT_ROW);
+ expect:
+ order: c3
+ columns: [ "c1 string","c3 int","w1_c4_sum bigint" ]
+ rows:
+ - [ "aa",20,null ]
+ - [ "aa",21,30 ]
+ - [ "aa",22,61 ]
+ - [ "aa",23,63 ]
+ - [ "bb",24,null ]
+ - id: 3
+ desc: rows_range-current_row-有和当前行ts一致的数据
+ mode: disk-unsupport
+ inputs:
+ - columns: [ "c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date" ]
+ indexs: [ "index1:c1:c7" ]
+ rows:
+ - [ "aa",20,30,1.1,2.1,1590738990000,"2020-05-01" ]
+ - [ "aa",21,31,1.2,2.2,1590738990000,"2020-05-02" ]
+ - [ "aa",22,32,1.3,2.3,1590738991000,"2020-05-03" ]
+ - [ "aa",23,33,1.4,2.4,1590738993000,"2020-05-04" ]
+ - [ "bb",24,34,1.5,2.5,1590738994000,"2020-05-05" ]
+ sql: |
+ SELECT c1, c3, sum(c4) OVER w1 as w1_c4_sum FROM {0} WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS_RANGE BETWEEN 2s PRECEDING AND CURRENT ROW EXCLUDE CURRENT_ROW);
+ expect:
+ order: c3
+ columns: [ "c1 string","c3 int","w1_c4_sum bigint" ]
+ rows:
+ - [ "aa",20,null ]
+ - [ "aa",21,30 ]
+ - [ "aa",22,61 ]
+ - [ "aa",23,32 ]
+ - [ "bb",24,null ]
+ - id: 4
+ desc: rows-纯历史窗口-current_row
+ inputs:
+ - columns: [ "c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date" ]
+ indexs: [ "index1:c1:c7" ]
+ rows:
+ - [ "aa",20,30,1.1,2.1,1590738990000,"2020-05-01" ]
+ - [ "aa",21,31,1.2,2.2,1590738991000,"2020-05-02" ]
+ - [ "aa",22,32,1.3,2.3,1590738992000,"2020-05-03" ]
+ - [ "aa",23,33,1.4,2.4,1590738993000,"2020-05-04" ]
+ - [ "bb",24,34,1.5,2.5,1590738994000,"2020-05-05" ]
+ sql: |
+ SELECT c1, c3, sum(c4) OVER w1 as w1_c4_sum FROM {0} WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS BETWEEN 2 PRECEDING AND 1 PRECEDING EXCLUDE CURRENT_ROW);
+ expect:
+ order: c3
+ columns: [ "c1 string","c3 int","w1_c4_sum bigint" ]
+ rows:
+ - [ "aa",20,null ]
+ - [ "aa",21,30 ]
+ - [ "aa",22,61 ]
+ - [ "aa",23,63 ]
+ - [ "bb",24,null ]
+ - id: 5
+ desc: rows_range-纯历史窗口-current_row
+ inputs:
+ - columns: [ "c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date" ]
+ indexs: [ "index1:c1:c7" ]
+ rows:
+ - [ "aa",20,30,1.1,2.1,1590738990000,"2020-05-01" ]
+ - [ "aa",21,31,1.2,2.2,1590738991000,"2020-05-02" ]
+ - [ "aa",22,32,1.3,2.3,1590738992000,"2020-05-03" ]
+ - [ "aa",23,33,1.4,2.4,1590738993000,"2020-05-04" ]
+ - [ "bb",24,34,1.5,2.5,1590738994000,"2020-05-05" ]
+ sql: |
+ SELECT c1, c3, sum(c4) OVER w1 as w1_c4_sum FROM {0} WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS_RANGE BETWEEN 2s PRECEDING AND 1s PRECEDING EXCLUDE CURRENT_ROW);
+ expect:
+ order: c3
+ columns: [ "c1 string","c3 int","w1_c4_sum bigint" ]
+ rows:
+ - [ "aa",20,null ]
+ - [ "aa",21,30 ]
+ - [ "aa",22,61 ]
+ - [ "aa",23,63 ]
+ - [ "bb",24,null ]
+ - id: 6
+ desc: rows-current_row-ts=0
+ mode: disk-unsupport
+ inputs:
+ - columns: [ "c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date" ]
+ indexs: [ "index1:c1:c7" ]
+ rows:
+ - [ "aa",20,30,1.1,2.1,0,"2020-05-01" ]
+ - [ "aa",21,31,1.2,2.2,0,"2020-05-02" ]
+ - [ "aa",22,32,1.3,2.3,1590738992000,"2020-05-03" ]
+ - [ "aa",23,33,1.4,2.4,1590738993000,"2020-05-04" ]
+ - [ "bb",24,34,1.5,2.5,1590738994000,"2020-05-05" ]
+ sql: |
+ SELECT c1, c3, sum(c4) OVER w1 as w1_c4_sum FROM {0} WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS BETWEEN 2 PRECEDING AND CURRENT ROW EXCLUDE CURRENT_ROW);
+ expect:
+ order: c3
+ columns: [ "c1 string","c3 int","w1_c4_sum bigint" ]
+ rows:
+ - [ "aa",20,null ]
+ - [ "aa",21,30 ]
+ - [ "aa",22,61 ]
+ - [ "aa",23,63 ]
+ - [ "bb",24,null ]
+ - id: 7
+ desc: rows_range-current_row-ts=0
+ inputs:
+ - columns: [ "c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date" ]
+ indexs: [ "index1:c1:c7" ]
+ rows:
+ - [ "aa",20,30,1.1,2.1,0,"2020-05-01" ]
+ - [ "aa",21,31,1.2,2.2,0,"2020-05-02" ]
+ - [ "aa",22,32,1.3,2.3,1590738992000,"2020-05-03" ]
+ - [ "aa",23,33,1.4,2.4,1590738993000,"2020-05-04" ]
+ - [ "bb",24,34,1.5,2.5,1590738994000,"2020-05-05" ]
+ sql: |
+ SELECT c1, c3, sum(c4) OVER w1 as w1_c4_sum FROM {0} WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS_RANGE BETWEEN 2s PRECEDING AND CURRENT ROW EXCLUDE CURRENT_ROW);
+ expect:
+ order: c3
+ columns: [ "c1 string","c3 int","w1_c4_sum bigint" ]
+ rows:
+ - [ "aa",20,null ]
+ - [ "aa",21,30 ]
+ - [ "aa",22,null ]
+ - [ "aa",23,32 ]
+ - [ "bb",24,null ]
+ - id: 8
+ desc: rows-current_row-ts=-1
+ tags: ["TODO","ts为负数有问题,带支持后再验证"]
+ inputs:
+ - columns: [ "c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 bigint","c8 date" ]
+ indexs: [ "index1:c1:c7" ]
+ rows:
+ - [ "aa",20,30,1.1,2.1,-1,"2020-05-01" ]
+ - [ "aa",21,31,1.2,2.2,-1,"2020-05-02" ]
+ - [ "aa",22,32,1.3,2.3,1590738992000,"2020-05-03" ]
+ - [ "aa",23,33,1.4,2.4,1590738993000,"2020-05-04" ]
+ - [ "bb",24,34,1.5,2.5,1590738994000,"2020-05-05" ]
+ sql: |
+ SELECT c1, c3, sum(c4) OVER w1 as w1_c4_sum FROM {0} WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS BETWEEN 2 PRECEDING AND CURRENT ROW EXCLUDE CURRENT_ROW);
+ expect:
+ order: c3
+ columns: [ "c1 string","c3 int","w1_c4_sum bigint" ]
+ rows:
+ - [ "aa",20,null ]
+ - [ "aa",21,30 ]
+ - [ "aa",22,61 ]
+ - [ "aa",23,63 ]
+ - [ "bb",24,null ]
+ - id: 9
+ desc: rows_range-current_row-ts=-1
+ tags: ["TODO","ts为负数有问题,带支持后再验证"]
+ inputs:
+ - columns: [ "c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 bigint","c8 date" ]
+ indexs: [ "index1:c1:c7" ]
+ rows:
+ - [ "aa",20,30,1.1,2.1,-1,"2020-05-01" ]
+ - [ "aa",21,31,1.2,2.2,-1,"2020-05-02" ]
+ - [ "aa",22,32,1.3,2.3,1590738992000,"2020-05-03" ]
+ - [ "aa",23,33,1.4,2.4,1590738993000,"2020-05-04" ]
+ - [ "bb",24,34,1.5,2.5,1590738994000,"2020-05-05" ]
+ sql: |
+ SELECT c1, c3, sum(c4) OVER w1 as w1_c4_sum FROM {0} WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS_RANGE BETWEEN 2s PRECEDING AND CURRENT ROW EXCLUDE CURRENT_ROW);
+ expect:
+ order: c3
+ columns: [ "c1 string","c3 int","w1_c4_sum bigint" ]
+ rows:
+ - [ "aa",20,null ]
+ - [ "aa",21,30 ]
+ - [ "aa",22,null ]
+ - [ "aa",23,32 ]
+ - [ "bb",24,null ]
+ - id: 10
+ desc: rows-current_row-ts=负数和0
+ tags: ["TODO","ts为负数有问题,带支持后再验证"]
+ inputs:
+ - columns: [ "c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 bigint","c8 date" ]
+ indexs: [ "index1:c1:c7" ]
+ rows:
+ - [ "aa",20,30,1.1,2.1,-1000,"2020-05-01" ]
+ - [ "aa",21,31,1.2,2.2,0,"2020-05-02" ]
+ - [ "aa",22,32,1.3,2.3,1590738992000,"2020-05-03" ]
+ - [ "aa",23,33,1.4,2.4,1590738993000,"2020-05-04" ]
+ - [ "bb",24,34,1.5,2.5,1590738994000,"2020-05-05" ]
+ sql: |
+ SELECT c1, c3, sum(c4) OVER w1 as w1_c4_sum FROM {0} WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS BETWEEN 2 PRECEDING AND CURRENT ROW EXCLUDE CURRENT_ROW);
+ expect:
+ order: c3
+ columns: [ "c1 string","c3 int","w1_c4_sum bigint" ]
+ rows:
+ - [ "aa",20,null ]
+ - [ "aa",21,30 ]
+ - [ "aa",22,61 ]
+ - [ "aa",23,63 ]
+ - [ "bb",24,null ]
+ - id: 11
+ desc: rows_range-current_row-ts=负数和0
+ tags: ["TODO","ts为负数有问题,带支持后再验证"]
+ inputs:
+ - columns: [ "c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 bigint","c8 date" ]
+ indexs: [ "index1:c1:c7" ]
+ rows:
+ - [ "aa",20,30,1.1,2.1,-1000,"2020-05-01" ]
+ - [ "aa",21,31,1.2,2.2,0,"2020-05-02" ]
+ - [ "aa",22,32,1.3,2.3,1590738992000,"2020-05-03" ]
+ - [ "aa",23,33,1.4,2.4,1590738993000,"2020-05-04" ]
+ - [ "bb",24,34,1.5,2.5,1590738994000,"2020-05-05" ]
+ sql: |
+ SELECT c1, c3, sum(c4) OVER w1 as w1_c4_sum FROM {0} WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS_RANGE BETWEEN 2s PRECEDING AND CURRENT ROW EXCLUDE CURRENT_ROW);
+ expect:
+ order: c3
+ columns: [ "c1 string","c3 int","w1_c4_sum bigint" ]
+ rows:
+ - [ "aa",20,null ]
+ - [ "aa",21,30 ]
+ - [ "aa",22,null ]
+ - [ "aa",23,32 ]
+ - [ "bb",24,null ]
+ - id: 12
+ desc: rows-open-current_row
+ mode: disk-unsupport
+ inputs:
+ - columns: [ "c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date" ]
+ indexs: [ "index1:c1:c7" ]
+ rows:
+ - [ "aa",20,30,1.1,2.1,0,"2020-05-01" ]
+ - [ "aa",21,31,1.2,2.2,0,"2020-05-02" ]
+ - [ "aa",22,32,1.3,2.3,1590738992000,"2020-05-03" ]
+ - [ "aa",23,33,1.4,2.4,1590738993000,"2020-05-04" ]
+ - [ "bb",24,34,1.5,2.5,1590738994000,"2020-05-05" ]
+ sql: |
+ SELECT c1, c3, sum(c4) OVER w1 as w1_c4_sum FROM {0} WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS BETWEEN 2 PRECEDING AND 0 OPEN PRECEDING EXCLUDE CURRENT_ROW);
+ expect:
+ order: c3
+ columns: [ "c1 string","c3 int","w1_c4_sum bigint" ]
+ rows:
+ - [ "aa",20,null ]
+ - [ "aa",21,30 ]
+ - [ "aa",22,61 ]
+ - [ "aa",23,63 ]
+ - [ "bb",24,null ]
+ - id: 13
+ desc: rows_range-open-current_row
+ tags: ["TODO","bug,修复后验证"]
+ inputs:
+ - columns: [ "c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date" ]
+ indexs: [ "index1:c1:c7" ]
+ rows:
+ - [ "aa",20,30,1.1,2.1,0,"2020-05-01" ]
+ - [ "aa",21,31,1.2,2.2,0,"2020-05-02" ]
+ - [ "aa",22,32,1.3,2.3,1590738992000,"2020-05-03" ]
+ - [ "aa",23,33,1.4,2.4,1590738993000,"2020-05-04" ]
+ - [ "aa",24,34,1.5,2.5,1590738993000,"2020-05-05" ]
+ sql: |
+ SELECT c1, c3, sum(c4) OVER w1 as w1_c4_sum FROM {0} WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS_RANGE BETWEEN 2s PRECEDING AND 0s OPEN PRECEDING EXCLUDE CURRENT_ROW);
+ expect:
+ order: c3
+ columns: [ "c1 string","c3 int","w1_c4_sum bigint" ]
+ rows:
+ - [ "aa",20,null ]
+ - [ "aa",21,null ]
+ - [ "aa",22,null ]
+ - [ "aa",23,32 ]
+ - [ "aa",24,32 ]
+ - id: 14
+ desc: rows_range-current_row-maxsize小于窗口
+ inputs:
+ - columns: [ "c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date" ]
+ indexs: [ "index1:c1:c7" ]
+ rows:
+ - [ "aa",20,30,1.1,2.1,1590738990000,"2020-05-01" ]
+ - [ "aa",21,31,1.2,2.2,1590738991000,"2020-05-02" ]
+ - [ "aa",22,32,1.3,2.3,1590738992000,"2020-05-03" ]
+ - [ "aa",23,33,1.4,2.4,1590738993000,"2020-05-04" ]
+ - [ "bb",24,34,1.5,2.5,1590738994000,"2020-05-05" ]
+ sql: |
+ SELECT c1, c3, sum(c4) OVER w1 as w1_c4_sum FROM {0} WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS_RANGE BETWEEN 3s PRECEDING AND CURRENT ROW MAXSIZE 2 EXCLUDE CURRENT_ROW);
+ expect:
+ order: c3
+ columns: [ "c1 string","c3 int","w1_c4_sum bigint" ]
+ rows:
+ - [ "aa",20,null ]
+ - [ "aa",21,30 ]
+ - [ "aa",22,61 ]
+ - [ "aa",23,63 ]
+ - [ "bb",24,null ]
+ - id: 15
+ desc: rows_range-current_row-maxsize大于窗口
+ inputs:
+ - columns: [ "c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date" ]
+ indexs: [ "index1:c1:c7" ]
+ rows:
+ - [ "aa",20,30,1.1,2.1,1590738990000,"2020-05-01" ]
+ - [ "aa",21,31,1.2,2.2,1590738991000,"2020-05-02" ]
+ - [ "aa",22,32,1.3,2.3,1590738992000,"2020-05-03" ]
+ - [ "aa",23,33,1.4,2.4,1590738993000,"2020-05-04" ]
+ - [ "bb",24,34,1.5,2.5,1590738994000,"2020-05-05" ]
+ sql: |
+ SELECT c1, c3, sum(c4) OVER w1 as w1_c4_sum FROM {0} WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS_RANGE BETWEEN 2s PRECEDING AND CURRENT ROW MAXSIZE 3 EXCLUDE CURRENT_ROW);
+ expect:
+ order: c3
+ columns: [ "c1 string","c3 int","w1_c4_sum bigint" ]
+ rows:
+ - [ "aa",20,null ]
+ - [ "aa",21,30 ]
+ - [ "aa",22,61 ]
+ - [ "aa",23,63 ]
+ - [ "bb",24,null ]
+ - id: 16
+ desc: rows-current_row-current_time
+ mode: disk-unsupport
+ inputs:
+ - columns: [ "c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date" ]
+ indexs: [ "index1:c1:c7" ]
+ rows:
+ - [ "aa",20,30,1.1,2.1,1590738990000,"2020-05-01" ]
+ - [ "aa",21,31,1.2,2.2,1590738990000,"2020-05-02" ]
+ - [ "aa",22,32,1.3,2.3,1590738992000,"2020-05-03" ]
+ - [ "aa",23,33,1.4,2.4,1590738993000,"2020-05-04" ]
+ - [ "bb",24,34,1.5,2.5,1590738994000,"2020-05-05" ]
+ sql: |
+ SELECT c1, c3, sum(c4) OVER w1 as w1_c4_sum FROM {0} WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS BETWEEN 2 PRECEDING AND CURRENT ROW EXCLUDE CURRENT_TIME EXCLUDE CURRENT_ROW);
+ expect:
+ order: c3
+ columns: [ "c1 string","c3 int","w1_c4_sum bigint" ]
+ rows:
+ - [ "aa",20,null ]
+ - [ "aa",21,null ]
+ - [ "aa",22,61 ]
+ - [ "aa",23,63 ]
+ - [ "bb",24,null ]
+ - id: 17
+ desc: rows_range-current_row-current_time
+ mode: disk-unsupport
+ inputs:
+ - columns: [ "c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date" ]
+ indexs: [ "index1:c1:c7" ]
+ rows:
+ - [ "aa",20,30,1.1,2.1,1590738990000,"2020-05-01" ]
+ - [ "aa",21,31,1.2,2.2,1590738990000,"2020-05-02" ]
+ - [ "aa",22,32,1.3,2.3,1590738991000,"2020-05-03" ]
+ - [ "aa",23,33,1.4,2.4,1590738993000,"2020-05-04" ]
+ - [ "bb",24,34,1.5,2.5,1590738994000,"2020-05-05" ]
+ sql: |
+ SELECT c1, c3, sum(c4) OVER w1 as w1_c4_sum FROM {0} WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS_RANGE BETWEEN 2s PRECEDING AND CURRENT ROW EXCLUDE CURRENT_ROW EXCLUDE CURRENT_TIME);
+ expect:
+ order: c3
+ columns: [ "c1 string","c3 int","w1_c4_sum bigint" ]
+ rows:
+ - [ "aa",20,null ]
+ - [ "aa",21,null ]
+ - [ "aa",22,61 ]
+ - [ "aa",23,32 ]
+ - [ "bb",24,null ]
+ - id: 18
+ desc: window union rows-current_row-instance_not_in_window
+ inputs:
+ - columns: ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"]
+ indexs: ["index1:c1:c7"]
+ rows:
+ - [1,"aa",20,30,1.1,2.1,1590738993000,"2020-05-01"]
+ - [4,"dd",20,33,1.4,2.4,1590738994000,"2020-05-04"]
+ - [5,"ee",21,34,1.5,2.5,1590738995000,"2020-05-05"]
+ - columns: ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"]
+ indexs: ["index1:c3:c7"]
+ rows:
+ - [2,"bb",20,31,1.2,2.2,1590738991000,"2020-05-02"]
+ - [3,"cc",20,32,1.3,2.3,1590738992000,"2020-05-03"]
+ sql: |
+ SELECT id, c1, c3, sum(c4) OVER w1 as w1_c4_sum FROM {0} WINDOW
+ w1 AS (UNION {1} PARTITION BY {0}.c3 ORDER BY {0}.c7 ROWS BETWEEN 2 PRECEDING AND CURRENT ROW EXCLUDE CURRENT_ROW INSTANCE_NOT_IN_WINDOW);
+ expect:
+ order: id
+ columns: ["id int","c1 string","c3 int","w1_c4_sum bigint"]
+ rows:
+ - [1,"aa",20,63]
+ - [4,"dd",20,63]
+ - [5,"ee",21,null]
+ - id: 19
+ desc: window union rows_range-current_row-instance_not_in_window
+ inputs:
+ - columns: ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"]
+ indexs: ["index1:c1:c7"]
+ rows:
+ - [1,"aa",20,30,1.1,2.1,1590738993000,"2020-05-01"]
+ - [4,"dd",20,33,1.4,2.4,1590738994000,"2020-05-04"]
+ - [5,"ee",21,34,1.5,2.5,1590738995000,"2020-05-05"]
+ - columns: ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"]
+ indexs: ["index1:c3:c7"]
+ rows:
+ - [2,"bb",20,31,1.2,2.2,1590738991000,"2020-05-02"]
+ - [3,"cc",20,32,1.3,2.3,1590738992000,"2020-05-03"]
+ sql: |
+ SELECT id, c1, c3, sum(c4) OVER w1 as w1_c4_sum FROM {0} WINDOW
+ w1 AS (UNION {1} PARTITION BY {0}.c3 ORDER BY {0}.c7 ROWS_RANGE BETWEEN 2s PRECEDING AND CURRENT ROW EXCLUDE CURRENT_ROW INSTANCE_NOT_IN_WINDOW);
+ expect:
+ order: id
+ columns: ["id int","c1 string","c3 int","w1_c4_sum bigint"]
+ rows:
+ - [1,"aa",20,63]
+ - [4,"dd",20,32]
+ - [5,"ee",21,null]
+ - id: 20
+ desc: window union rows-current_row
+ inputs:
+ - columns: ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"]
+ indexs: ["index1:c3:c7"]
+ rows:
+ - [1,"aa",20,30,1.1,2.1,1590738993000,"2020-05-01"]
+ - [4,"dd",20,33,1.4,2.4,1590738994000,"2020-05-04"]
+ - [5,"ee",21,34,1.5,2.5,1590738995000,"2020-05-05"]
+ - columns: ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"]
+ indexs: ["index1:c3:c7"]
+ rows:
+ - [2,"bb",20,31,1.2,2.2,1590738991000,"2020-05-02"]
+ - [3,"cc",20,32,1.3,2.3,1590738992000,"2020-05-03"]
+ sql: |
+ SELECT id, c1, c3, sum(c4) OVER w1 as w1_c4_sum FROM {0} WINDOW
+ w1 AS (UNION {1} PARTITION BY {0}.c3 ORDER BY {0}.c7 ROWS BETWEEN 2 PRECEDING AND CURRENT ROW EXCLUDE CURRENT_ROW);
+ expect:
+ order: id
+ columns: ["id int","c1 string","c3 int","w1_c4_sum bigint"]
+ rows:
+ - [1,"aa",20,63]
+ - [4,"dd",20,62]
+ - [5,"ee",21,null]
+ - id: 21
+ desc: window union rows_range-current_row
+ inputs:
+ - columns: ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"]
+ indexs: ["index1:c3:c7"]
+ rows:
+ - [1,"aa",20,30,1.1,2.1,1590738993000,"2020-05-01"]
+ - [4,"dd",20,33,1.4,2.4,1590738994000,"2020-05-04"]
+ - [5,"ee",21,34,1.5,2.5,1590738995000,"2020-05-05"]
+ - columns: ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"]
+ indexs: ["index1:c3:c7"]
+ rows:
+ - [2,"bb",20,31,1.2,2.2,1590738991000,"2020-05-02"]
+ - [3,"cc",20,32,1.3,2.3,1590738992000,"2020-05-03"]
+ sql: |
+ SELECT id, c1, c3, sum(c4) OVER w1 as w1_c4_sum FROM {0} WINDOW
+ w1 AS (UNION {1} PARTITION BY {0}.c3 ORDER BY {0}.c7 ROWS_RANGE BETWEEN 2s PRECEDING AND CURRENT ROW EXCLUDE CURRENT_ROW);
+ expect:
+ order: id
+ columns: ["id int","c1 string","c3 int","w1_c4_sum bigint"]
+ rows:
+ - [1,"aa",20,63]
+ - [4,"dd",20,62]
+ - [5,"ee",21,null]
+ - id: 22
+ desc: rows窗口包含open/maxsize/instance_not_in_window/current_time/current_row
+ inputs:
+ - columns: ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"]
+ indexs: ["index1:c1:c7"]
+ rows:
+ - [1,"aa",20,30,1.1,2.1,1590738993000,"2020-05-01"]
+ - [4,"dd",20,33,1.4,2.4,1590738994000,"2020-05-04"]
+ - [5,"ee",21,34,1.5,2.5,1590738995000,"2020-05-05"]
+ - columns: ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"]
+ indexs: ["index1:c3:c7"]
+ rows:
+ - [2,"bb",20,31,1.2,2.2,1590738991000,"2020-05-02"]
+ - [3,"cc",20,32,1.3,2.3,1590738992000,"2020-05-03"]
+ - [6,"cc",20,35,1.3,2.3,1590738993000,"2020-05-03"]
+ sql: |
+ SELECT id, c1, c3, sum(c4) OVER w1 as w1_c4_sum FROM {0} WINDOW
+ w1 AS (UNION {1} PARTITION BY {0}.c3 ORDER BY {0}.c7 ROWS BETWEEN 2 PRECEDING AND 0 OPEN PRECEDING EXCLUDE CURRENT_ROW EXCLUDE CURRENT_TIME INSTANCE_NOT_IN_WINDOW);
+ expect:
+ order: id
+ columns: ["id int","c1 string","c3 int","w1_c4_sum bigint"]
+ rows:
+ - [1,"aa",20,63]
+ - [4,"dd",20,67]
+ - [5,"ee",21,null]
+ - id: 23
+ desc: rows_range窗口包含open/maxsize/instance_not_in_window/current_time/current_row
+ inputs:
+ - columns: ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"]
+ indexs: ["index1:c1:c7"]
+ rows:
+ - [1,"aa",20,30,1.1,2.1,1590738993000,"2020-05-01"]
+ - [4,"dd",20,33,1.4,2.4,1590738994000,"2020-05-04"]
+ - [5,"ee",21,34,1.5,2.5,1590738995000,"2020-05-05"]
+ - columns: ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"]
+ indexs: ["index1:c3:c7"]
+ rows:
+ - [2,"bb",20,31,1.2,2.2,1590738991000,"2020-05-02"]
+ - [3,"cc",20,32,1.3,2.3,1590738992000,"2020-05-03"]
+ - [6,"cc",20,35,1.3,2.3,1590738993000,"2020-05-03"]
+ sql: |
+ SELECT id, c1, c3, sum(c4) OVER w1 as w1_c4_sum FROM {0} WINDOW
+ w1 AS (UNION {1} PARTITION BY {0}.c3 ORDER BY {0}.c7 ROWS_RANGE BETWEEN 2s PRECEDING AND 0s OPEN PRECEDING MAXSIZE 1 EXCLUDE CURRENT_ROW EXCLUDE CURRENT_TIME INSTANCE_NOT_IN_WINDOW);
+ expect:
+ order: id
+ columns: ["id int","c1 string","c3 int","w1_c4_sum bigint"]
+ rows:
+ - [1,"aa",20,32]
+ - [4,"dd",20,35]
+ - [5,"ee",21,null]
+ - id: 24
+ desc: rows-lag-current_row
+ inputs:
+ - columns: [ "c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date" ]
+ indexs: [ "index1:c1:c7" ]
+ rows:
+ - [ "aa",20,30,1.1,2.1,1590738990000,"2020-05-01" ]
+ - [ "aa",21,31,1.2,2.2,1590738991000,"2020-05-02" ]
+ - [ "aa",22,32,1.3,2.3,1590738992000,"2020-05-03" ]
+ - [ "aa",23,33,1.4,2.4,1590738993000,"2020-05-04" ]
+ - [ "bb",24,34,1.5,2.5,1590738994000,"2020-05-05" ]
+ sql: |
+ SELECT c1, c3, lag(c4,2) OVER w1 as w1_c4_sum FROM {0} WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS BETWEEN 2 PRECEDING AND CURRENT ROW EXCLUDE CURRENT_ROW);
+ expect:
+ order: c3
+ columns: [ "c1 string","c3 int","w1_c4_sum bigint" ]
+ rows:
+ - [ "aa",20,null ]
+ - [ "aa",21,null ]
+ - [ "aa",22,30 ]
+ - [ "aa",23,31 ]
+ - [ "bb",24,null ]
+ - id: 25
+ desc: rows_range-lag-current_row
+ inputs:
+ - columns: [ "c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date" ]
+ indexs: [ "index1:c1:c7" ]
+ rows:
+ - [ "aa",20,30,1.1,2.1,1590738990000,"2020-05-01" ]
+ - [ "aa",21,31,1.2,2.2,1590738991000,"2020-05-02" ]
+ - [ "aa",22,32,1.3,2.3,1590738992000,"2020-05-03" ]
+ - [ "aa",23,33,1.4,2.4,1590738993000,"2020-05-04" ]
+ - [ "bb",24,34,1.5,2.5,1590738994000,"2020-05-05" ]
+ sql: |
+ SELECT c1, c3, lag(c4,2) OVER w1 as w1_c4_sum FROM {0} WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS_RANGE BETWEEN 2s PRECEDING AND CURRENT ROW EXCLUDE CURRENT_ROW);
+ expect:
+ order: c3
+ columns: [ "c1 string","c3 int","w1_c4_sum bigint" ]
+ rows:
+ - [ "aa",20,null ]
+ - [ "aa",21,null ]
+ - [ "aa",22,30 ]
+ - [ "aa",23,31 ]
+ - [ "bb",24,null ]
+ - id: 26
+ desc: rows-at-current_row
+ inputs:
+ - columns: [ "c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date" ]
+ indexs: [ "index1:c1:c7" ]
+ rows:
+ - [ "aa",20,30,1.1,2.1,1590738990000,"2020-05-01" ]
+ - [ "aa",21,31,1.2,2.2,1590738991000,"2020-05-02" ]
+ - [ "aa",22,32,1.3,2.3,1590738992000,"2020-05-03" ]
+ - [ "aa",23,33,1.4,2.4,1590738993000,"2020-05-04" ]
+ - [ "bb",24,34,1.5,2.5,1590738994000,"2020-05-05" ]
+ sql: |
+ SELECT c1, c3, at(c4,2) OVER w1 as w1_c4_sum FROM {0} WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS BETWEEN 2 PRECEDING AND CURRENT ROW EXCLUDE CURRENT_ROW);
+ expect:
+ order: c3
+ columns: [ "c1 string","c3 int","w1_c4_sum bigint" ]
+ rows:
+ - [ "aa",20,null ]
+ - [ "aa",21,null ]
+ - [ "aa",22,30 ]
+ - [ "aa",23,31 ]
+ - [ "bb",24,null ]
+ - id: 27
+ desc: rows_range-at-current_row
+ inputs:
+ - columns: [ "c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date" ]
+ indexs: [ "index1:c1:c7" ]
+ rows:
+ - [ "aa",20,30,1.1,2.1,1590738990000,"2020-05-01" ]
+ - [ "aa",21,31,1.2,2.2,1590738991000,"2020-05-02" ]
+ - [ "aa",22,32,1.3,2.3,1590738992000,"2020-05-03" ]
+ - [ "aa",23,33,1.4,2.4,1590738993000,"2020-05-04" ]
+ - [ "bb",24,34,1.5,2.5,1590738994000,"2020-05-05" ]
+ sql: |
+ SELECT c1, c3, at(c4,2) OVER w1 as w1_c4_sum FROM {0} WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS_RANGE BETWEEN 2s PRECEDING AND CURRENT ROW EXCLUDE CURRENT_ROW);
+ expect:
+ order: c3
+ columns: [ "c1 string","c3 int","w1_c4_sum bigint" ]
+ rows:
+ - [ "aa",20,null ]
+ - [ "aa",21,null ]
+ - [ "aa",22,30 ]
+ - [ "aa",23,31 ]
+ - [ "bb",24,null ]
+ - id: 28
+ desc: 两个窗口,一个rows,一个rows_range,current_row
+ tags: ["TODO","bug,修复后验证"]
+ inputs:
+ - columns: [ "c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date" ]
+ indexs: [ "index1:c1:c7" ]
+ rows:
+ - [ "aa",20,30,1.1,2.1,1590738990000,"2020-05-01" ]
+ - [ "aa",21,31,1.2,2.2,1590738991000,"2020-05-02" ]
+ - [ "aa",22,32,1.3,2.3,1590738992000,"2020-05-03" ]
+ - [ "aa",23,33,1.4,2.4,1590738993000,"2020-05-04" ]
+ - [ "bb",24,34,1.5,2.5,1590738994000,"2020-05-05" ]
+ sql: |
+ SELECT c1, c3, sum(c4) OVER w1 as w1_c4_sum, count(c5) OVER w2 as w2_c5_count FROM {0}
+ WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS BETWEEN 2 PRECEDING AND CURRENT ROW EXCLUDE CURRENT_ROW),
+ w2 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 rows_range BETWEEN 2s PRECEDING AND CURRENT ROW EXCLUDE CURRENT_ROW);
+ expect:
+ order: c3
+ columns: [ "c1 string","c3 int","w1_c4_sum bigint","w2_c5_count bigint" ]
+ rows:
+ - [ "aa",20,null,0 ]
+ - [ "aa",21,30,1 ]
+ - [ "aa",22,61,2 ]
+ - [ "aa",23,63,2 ]
+ - [ "bb",24,null,0 ]
+ - id: 29
+ desc: current_row小写
+ inputs:
+ - columns: [ "c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date" ]
+ indexs: [ "index1:c1:c7" ]
+ rows:
+ - [ "aa",20,30,1.1,2.1,1590738990000,"2020-05-01" ]
+ - [ "aa",21,31,1.2,2.2,1590738991000,"2020-05-02" ]
+ - [ "aa",22,32,1.3,2.3,1590738992000,"2020-05-03" ]
+ - [ "aa",23,33,1.4,2.4,1590738993000,"2020-05-04" ]
+ - [ "bb",24,34,1.5,2.5,1590738994000,"2020-05-05" ]
+ sql: |
+ SELECT c1, c3, sum(c4) OVER w1 as w1_c4_sum FROM {0} WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS BETWEEN 2 PRECEDING AND CURRENT ROW exclude current_row);
+ expect:
+ order: c3
+ columns: [ "c1 string","c3 int","w1_c4_sum bigint" ]
+ rows:
+ - [ "aa",20,null ]
+ - [ "aa",21,30 ]
+ - [ "aa",22,61 ]
+ - [ "aa",23,63 ]
+ - [ "bb",24,null ]
+ - id: 30
+ desc: maxsize位置错误
+ inputs:
+ - columns: [ "c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date" ]
+ indexs: [ "index1:c1:c7" ]
+ rows:
+ - [ "aa",20,30,1.1,2.1,1590738990000,"2020-05-01" ]
+ - [ "aa",21,31,1.2,2.2,1590738991000,"2020-05-02" ]
+ - [ "aa",22,32,1.3,2.3,1590738992000,"2020-05-03" ]
+ - [ "aa",23,33,1.4,2.4,1590738993000,"2020-05-04" ]
+ - [ "bb",24,34,1.5,2.5,1590738994000,"2020-05-05" ]
+ sql: |
+ SELECT c1, c3, sum(c4) OVER w1 as w1_c4_sum FROM {0} WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS_RANGE BETWEEN 3s PRECEDING AND CURRENT ROW EXCLUDE CURRENT_ROW MAXSIZE 2);
+ expect:
+ success: false
+ - id: 31
+ desc: rows-纯历史窗口-current_row-ts=0
+ mode: disk-unsupport
+ inputs:
+ - columns: [ "c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date" ]
+ indexs: [ "index1:c1:c7" ]
+ rows:
+ - [ "aa",20,30,1.1,2.1,0,"2020-05-01" ]
+ - [ "aa",21,31,1.2,2.2,0,"2020-05-02" ]
+ - [ "aa",22,32,1.3,2.3,1590738992000,"2020-05-03" ]
+ - [ "aa",23,33,1.4,2.4,1590738993000,"2020-05-04" ]
+ - [ "bb",24,34,1.5,2.5,1590738994000,"2020-05-05" ]
+ sql: |
+ SELECT c1, c3, sum(c4) OVER w1 as w1_c4_sum FROM {0} WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS BETWEEN 2 PRECEDING AND 1 PRECEDING EXCLUDE CURRENT_ROW);
+ expect:
+ order: c3
+ columns: [ "c1 string","c3 int","w1_c4_sum bigint" ]
+ rows:
+ - [ "aa",20,null ]
+ - [ "aa",21,30 ]
+ - [ "aa",22,61 ]
+ - [ "aa",23,63 ]
+ - [ "bb",24,null ]
+ - id: 32
+ desc: rows_range-纯历史窗口-current_row-ts=0
+ tags: ["TODO","bug,修复后验证"]
+ inputs:
+ - columns: [ "c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date" ]
+ indexs: [ "index1:c1:c7" ]
+ rows:
+ - [ "aa",20,30,1.1,2.1,0,"2020-05-01" ]
+ - [ "aa",21,31,1.2,2.2,0,"2020-05-02" ]
+ - [ "aa",22,32,1.3,2.3,1000,"2020-05-03" ]
+ - [ "aa",23,33,1.4,2.4,2000,"2020-05-04" ]
+ - [ "bb",24,34,1.5,2.5,1590738994000,"2020-05-05" ]
+ sql: |
+ SELECT c1, c3, sum(c4) OVER w1 as w1_c4_sum FROM {0} WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS_RANGE BETWEEN 2s PRECEDING AND 1s PRECEDING EXCLUDE CURRENT_ROW);
+ expect:
+ order: c3
+ columns: [ "c1 string","c3 int","w1_c4_sum bigint" ]
+ rows:
+ - [ "aa",20,null ]
+ - [ "aa",21,null ]
+ - [ "aa",22,61 ]
+ - [ "aa",23,93 ]
+ - [ "bb",24,null ]
+
diff --git a/cases/function/window/test_maxsize.yaml b/cases/function/window/test_maxsize.yaml
index 0729b5535d6..28af076d27a 100644
--- a/cases/function/window/test_maxsize.yaml
+++ b/cases/function/window/test_maxsize.yaml
@@ -14,6 +14,7 @@
db: test_zw
debugs: []
+version: 0.5.0
cases:
-
id: 0
@@ -140,6 +141,7 @@ cases:
-
id: 6
desc: 纯历史窗口-maxsize
+ version: 0.6.0
inputs:
-
columns : ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"]
@@ -164,6 +166,7 @@ cases:
-
id: 7
desc: 没有数据进入maxsize的窗口
+ version: 0.6.0
inputs:
-
columns : ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"]
diff --git a/cases/function/window/test_window.yaml b/cases/function/window/test_window.yaml
index 5bbfe138ab8..80731888843 100644
--- a/cases/function/window/test_window.yaml
+++ b/cases/function/window/test_window.yaml
@@ -14,6 +14,7 @@
db: test_zw
debugs: []
+version: 0.5.0
cases:
-
id: 0
@@ -96,6 +97,7 @@ cases:
-
id: 3
desc: 一个pk所有数据都不在窗口内
+ version: 0.6.0
inputs:
-
columns : ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"]
@@ -112,9 +114,9 @@ cases:
order: id
columns: ["id int","c1 string","w1_c4_sum bigint"]
rows:
- - [1,"aa",0]
- - [2,"aa",0]
- - [3,"aa",0]
+ - [1,"aa",null]
+ - [2,"aa",null]
+ - [3,"aa",null]
-
id: 4
desc: 窗口只要当前行
@@ -162,6 +164,7 @@ cases:
-
id: 6
desc: 最后一行进入窗口
+ version: 0.6.0
inputs:
-
columns : ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"]
@@ -178,12 +181,13 @@ cases:
order: id
columns: ["id int","c1 string","w1_c4_sum bigint"]
rows:
- - [1,"aa",0]
- - [2,"aa",0]
+ - [1,"aa",null]
+ - [2,"aa",null]
- [3,"aa",30]
-
id: 7
desc: 纯历史窗口-滑动
+ version: 0.6.0
inputs:
-
columns : ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"]
@@ -202,7 +206,7 @@ cases:
order: id
columns: ["id int","c1 string","w1_c4_sum bigint"]
rows:
- - [1,"aa",0]
+ - [1,"aa",null]
- [2,"aa",30]
- [3,"aa",61]
- [4,"aa",63]
@@ -210,6 +214,7 @@ cases:
-
id: 8
desc: 两个pk,一个没有进入窗口,一个滑动
+ version: 0.6.0
inputs:
-
columns : ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"]
@@ -228,11 +233,11 @@ cases:
order: id
columns: ["id int","c1 string","w1_c4_sum bigint"]
rows:
- - [1,"aa",0]
+ - [1,"aa",null]
- [2,"aa",30]
- [3,"aa",61]
- [4,"aa",63]
- - [5,"bb",0]
+ - [5,"bb",null]
-
id: 9
desc: 两个pk,一个全部进入窗口,一个滑动
@@ -348,6 +353,7 @@ cases:
-
id: 13
desc: ts列相同
+ mode: disk-unsupport
inputs:
-
columns : ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"]
@@ -1050,15 +1056,15 @@ cases:
expect:
columns: ["id int", "val1 int", "agg1 int", "agg2 int"]
order: id
- data: |
- 1, 1, 1, NULL
- 2, 2, 2, 1
- 3, 3, 3, 2
- 4, 4, 4, 3
- 5, 5, 5, 4
- 6, 4, 4, NULL
- 7, 3, 3, 4
- 8, 2, 2, 3
+ rows:
+ - [1, 1, 1, NULL]
+ - [2, 2, 2, 1]
+ - [3, 3, 3, 2]
+ - [4, 4, 4, 3]
+ - [5, 5, 5, 4]
+ - [6, 4, 4, NULL]
+ - [7, 3, 3, 4]
+ - [8, 2, 2, 3]
- id: 34
desc: |
@@ -1067,15 +1073,15 @@ cases:
- columns: [ "id int","ts timestamp","group1 string","val1 int" ]
indexs: [ "index1:group1:ts" ]
name: t1
- data: |
- 1, 1612130400000, g1, 1
- 2, 1612130401000, g1, 2
- 3, 1612130402000, g1, 3
- 4, 1612130403000, g1, 4
- 5, 1612130404000, g1, 5
- 6, 1612130404000, g2, 4
- 7, 1612130405000, g2, 3
- 8, 1612130406000, g2, 2
+ rows:
+ - [1, 1612130400000, g1, 1]
+ - [2, 1612130401000, g1, 2]
+ - [3, 1612130402000, g1, 3]
+ - [4, 1612130403000, g1, 4]
+ - [5, 1612130404000, g1, 5]
+ - [6, 1612130404000, g2, 4]
+ - [7, 1612130405000, g2, 3]
+ - [8, 1612130406000, g2, 2]
sql: |
select
`id`,
@@ -1088,15 +1094,15 @@ cases:
expect:
columns: ["id int", "val1 int", "agg1 int", "agg2 int"]
order: id
- data: |
- 1, 1, 1, NULL
- 2, 2, 2, 1
- 3, 3, 3, 2
- 4, 4, 4, 3
- 5, 5, 5, 4
- 6, 4, 4, NULL
- 7, 3, 3, 4
- 8, 2, 2, 3
+ rows:
+ - [1, 1, 1, NULL]
+ - [2, 2, 2, 1]
+ - [3, 3, 3, 2]
+ - [4, 4, 4, 3]
+ - [5, 5, 5, 4]
+ - [6, 4, 4, NULL]
+ - [7, 3, 3, 4]
+ - [8, 2, 2, 3]
- id: 35
desc: |
@@ -1126,17 +1132,18 @@ cases:
expect:
columns: ["id int", "val1 int", "agg1 int", "agg2 int"]
order: id
- data: |
- 1, 1, 1, NULL
- 2, 2, 2, 1
- 3, 3, 3, 2
- 4, 4, 4, 3
- 5, 5, 5, 4
- 6, 4, 4, NULL
- 7, 3, 3, 4
- 8, 2, 2, 3
+ rows:
+ - [1, 1, 1, NULL]
+ - [2, 2, 2, 1]
+ - [3, 3, 3, 2]
+ - [4, 4, 4, 3]
+ - [5, 5, 5, 4]
+ - [6, 4, 4, NULL]
+ - [7, 3, 3, 4]
+ - [8, 2, 2, 3]
- id: 36
+ version: 0.6.0
desc: |
correctness for window functions over window whose border is open
inputs:
@@ -1176,6 +1183,7 @@ cases:
3, 2, 22, 21, 22
- id: 37
+ version: 0.6.0
desc: |
correctness for rows_range window functions over window whose border is open
inputs:
diff --git a/cases/function/window/test_window_exclude_current_time.yaml b/cases/function/window/test_window_exclude_current_time.yaml
index c890a64116c..2f00fff56e1 100644
--- a/cases/function/window/test_window_exclude_current_time.yaml
+++ b/cases/function/window/test_window_exclude_current_time.yaml
@@ -13,8 +13,10 @@
# limitations under the License.
db: test_zw
+version: 0.5.0
cases:
- id: 0
+ mode: disk-unsupport
desc: ROWS_RANGE Window OPEN PRECEDING EXCLUDE CURRENT_TIME
inputs:
- columns: [ "c1 string","c3 int","c4 double","c7 timestamp" ]
@@ -88,6 +90,7 @@ cases:
- [ "aa", 9, 1590739002000, 2.0 ]
- id: 2
desc: ROWS_RANGE Window with MaxSize 10 OPEN PRECEDING EXCLUDE CURRENT_TIME
+ mode: disk-unsupport
inputs:
- columns: [ "c1 string","c3 int","c4 double","c7 timestamp" ]
indexs: [ "index1:c1:c7" ]
@@ -125,6 +128,7 @@ cases:
- [ "aa", 9, 1590739002000, 3.0 ]
- id: 3
desc: ROWS Window OPEN PRECEDING EXCLUDE CURRENT_TIME
+ mode: disk-unsupport
inputs:
- columns: [ "c1 string","c3 int","c4 double","c7 timestamp" ]
indexs: [ "index1:c1:c7" ]
@@ -162,6 +166,7 @@ cases:
- [ "aa", 9, 1590739002000, 7.0 ]
- id: 4
desc: ROWS and ROWS Window OPEN PRECEDING EXCLUDE CURRENT_TIME
+ mode: disk-unsupport
inputs:
- columns: [ "c1 string","c3 int","c4 double","c7 timestamp" ]
indexs: [ "index1:c1:c7" ]
@@ -197,7 +202,7 @@ cases:
- [ "aa", 9, 1590739002000, 3.0, 7.0 ]
- id: 5
- mode: offline-unsupport
+ mode: offline-unsupport,disk-unsupport
desc: ROWS_RANGE Window and EXCLUDE CURRENT_TIME Window
inputs:
- columns: [ "c1 string","c3 int","c4 double","c7 timestamp" ]
@@ -234,7 +239,7 @@ cases:
- [ "aa", 9, 1590739002000, 3.0, 3.0 ]
- id: 6
desc: ROWS_RANGE Window with MaxSize 2 and EXCLUDE CURRENT_TIME Window
- mode: offline-unsupport
+ mode: offline-unsupport,disk-unsupport
inputs:
- columns: [ "c1 string","c3 int","c4 double","c7 timestamp" ]
indexs: [ "index1:c1:c7" ]
@@ -270,7 +275,7 @@ cases:
- [ "aa", 9, 1590739002000, 2.0, 2.0 ]
- id: 7
desc: ROWS_RANGE Window with MaxSize 10 and EXCLUDE CURRENT_TIME Window
- mode: offline-unsupport
+ mode: offline-unsupport,disk-unsupport
inputs:
- columns: [ "c1 string","c3 int","c4 double","c7 timestamp" ]
indexs: [ "index1:c1:c7" ]
@@ -306,7 +311,7 @@ cases:
- [ "aa", 9, 1590739002000, 3.0, 3.0 ]
- id: 8
desc: ROWS Window and EXCLUDE CURRENT_TIME Window
- mode: offline-unsupport
+ mode: offline-unsupport,disk-unsupport
inputs:
- columns: [ "c1 string","c3 int","c4 double","c7 timestamp" ]
indexs: [ "index1:c1:c7" ]
@@ -342,7 +347,7 @@ cases:
- [ "aa", 9, 1590739002000, 7.0, 7.0 ]
- id: 9
desc: ROWS and ROWS Window and EXCLUDE CURRENT_TIME Window
- mode: offline-unsupport
+ mode: offline-unsupport,disk-unsupport
inputs:
- columns: [ "c1 string","c3 int","c4 double","c7 timestamp" ]
indexs: [ "index1:c1:c7" ]
@@ -389,7 +394,7 @@ cases:
- id: 10
desc: ROWS_RANGE Window OPEN PRECEDING and EXCLUDE CURRENT_TIME Window
- mode: offline-unsupport
+ mode: offline-unsupport,disk-unsupport
inputs:
- columns: [ "c1 string","c3 int","c4 double","c7 timestamp" ]
indexs: [ "index1:c1:c7" ]
@@ -425,7 +430,7 @@ cases:
- [ "aa", 9, 1590739002000, 3.0, 3.0 ]
- id: 11
desc: ROWS_RANGE Window with MaxSize 2 OPEN PRECEDING and EXCLUDE CURRENT_TIME Window
- mode: offline-unsupport
+ mode: offline-unsupport,disk-unsupport
inputs:
- columns: [ "c1 string","c3 int","c4 double","c7 timestamp" ]
indexs: [ "index1:c1:c7" ]
@@ -461,7 +466,7 @@ cases:
- [ "aa", 9, 1590739002000, 2.0, 2.0 ]
- id: 12
desc: ROWS_RANGE Window with MaxSize 10 OPEN PRECEDING and EXCLUDE CURRENT_TIME Window
- mode: offline-unsupport
+ mode: offline-unsupport,disk-unsupport
inputs:
- columns: [ "c1 string","c3 int","c4 double","c7 timestamp" ]
indexs: [ "index1:c1:c7" ]
@@ -497,7 +502,7 @@ cases:
- [ "aa", 9, 1590739002000, 3.0, 3.0 ]
- id: 13
desc: ROWS Window OPEN PRECEDING and EXCLUDE CURRENT_TIME Window
- mode: offline-unsupport
+ mode: offline-unsupport,disk-unsupport
inputs:
- columns: [ "c1 string","c3 int","c4 double","c7 timestamp" ]
indexs: [ "index1:c1:c7" ]
@@ -533,7 +538,7 @@ cases:
- [ "aa", 9, 1590739002000, 7.0, 7.0 ]
- id: 14
desc: ROWS and ROWS Window OPEN PRECEDING and EXCLUDE CURRENT_TIME Window
- mode: offline-unsupport
+ mode: offline-unsupport,disk-unsupport
inputs:
- columns: [ "c1 string","c3 int","c4 double","c7 timestamp" ]
indexs: [ "index1:c1:c7" ]
@@ -579,7 +584,7 @@ cases:
- [ "aa", 9, 1590739002000, 3.0, 3.0, 2.0, 2.0, 7.0, 7.0 ]
- id: 16
desc: ROWS and ROWS Window 各类窗口混合
- mode: offline-unsupport
+ mode: offline-unsupport,disk-unsupport
inputs:
- columns: [ "c1 string","c3 int","c4 double","c7 timestamp" ]
indexs: [ "index1:c1:c7" ]
@@ -641,7 +646,7 @@ cases:
- [ "aa", 9, 1590739002000, 3.0, 3.0, 2.0, 2.0, 7.0, 7.0, 3.0, 3.0, 2.0, 2.0, 7.0, 7.0 ]
- id: 17
desc: ROWS Window with same timestamp
- mode: offline-unsupport
+ mode: offline-unsupport,disk-unsupport
inputs:
- columns: [ "c1 string","c3 int","c4 double","c7 timestamp" ]
indexs: [ "index1:c1:c7" ]
@@ -675,6 +680,7 @@ cases:
- [ "aa", 9, 1590738993000, 4.0]
- id: 18
desc: ROWS Window with same timestamp Exclude Current Time
+ mode: disk-unsupport
inputs:
- columns: [ "c1 string","c3 int","c4 double","c7 timestamp" ]
indexs: [ "index1:c1:c7" ]
@@ -708,7 +714,7 @@ cases:
- [ "aa", 9, 1590738993000, 4.0]
- id: 19
desc: ROWS, ROWS_RANGE Window, Normal Window, OPEN Window, EXCLUDE CURRENT TIME Window
- mode: batch-unsupport
+ mode: batch-unsupport,disk-unsupport
tags: ["@chendihao, @baoxinqi, 测试的时候spark需要保证输入数据滑入顺序"]
inputs:
- columns: [ "c1 string","c3 int","c4 double","c7 timestamp" ]
diff --git a/cases/function/window/test_window_row.yaml b/cases/function/window/test_window_row.yaml
index 93529ffe430..c4b0814f8ba 100644
--- a/cases/function/window/test_window_row.yaml
+++ b/cases/function/window/test_window_row.yaml
@@ -14,6 +14,7 @@
db: test_zw
debugs: []
+version: 0.5.0
cases:
-
id: 0
@@ -847,6 +848,7 @@ cases:
-
id: 38
desc: rows 1-2
+ version: 0.6.0
inputs:
-
columns: ["c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"]
diff --git a/cases/function/window/test_window_row_range.yaml b/cases/function/window/test_window_row_range.yaml
index c72734f4dc8..71681b7d41e 100644
--- a/cases/function/window/test_window_row_range.yaml
+++ b/cases/function/window/test_window_row_range.yaml
@@ -14,6 +14,7 @@
db: test_zw
debugs: []
+version: 0.5.0
cases:
- id: 0
desc: string为partition by
@@ -681,6 +682,7 @@ cases:
- [ "aa", 9, 1590739002000, 3.0 ]
- id: 24-1
desc: ROWS_RANGE Pure History Window
+ version: 0.6.0
inputs:
- columns: [ "c1 string","c3 int","c4 double","c7 timestamp" ]
indexs: [ "index1:c1:c7" ]
@@ -714,6 +716,7 @@ cases:
- [ "aa", 9, 1590739002000, 2.0 ]
- id: 24-2
desc: ROWS_RANGE Pure History Window With MaxSize
+ version: 0.6.0
inputs:
- columns: [ "c1 string","c3 int","c4 double","c7 timestamp" ]
indexs: [ "index1:c1:c7" ]
@@ -940,6 +943,7 @@ cases:
- id: 27-3
desc: ROWS and ROWS_RANGE Pure History Window Can't Be Merged
+ version: 0.6.0
inputs:
- columns: [ "c1 string","c3 int","c4 double","c7 timestamp" ]
indexs: [ "index1:c1:c7" ]
@@ -1371,6 +1375,7 @@ cases:
-
id: 46
desc: timestamp为order by-2s-1s
+ version: 0.6.0
inputs:
-
columns : ["c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"]
@@ -1443,6 +1448,7 @@ cases:
-
id: 49
desc: timestamp为order by-2s-1
+ version: 0.6.0
inputs:
-
columns : ["c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"]
@@ -1467,6 +1473,7 @@ cases:
-
id: 50
desc: timestamp为order by-前后单位不一样
+ version: 0.6.0
inputs:
-
columns : ["c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"]
diff --git a/cases/function/window/test_window_union.yaml b/cases/function/window/test_window_union.yaml
index 102934ff116..66e52dfe9e7 100644
--- a/cases/function/window/test_window_union.yaml
+++ b/cases/function/window/test_window_union.yaml
@@ -14,6 +14,7 @@
db: test_zw
debugs: []
+version: 0.5.0
cases:
- id: 0
desc: 正常union
@@ -119,7 +120,7 @@ cases:
- [5,"ee",21,34]
- id: 5
desc: 样本表使用索引,UNION表未命中索引
- mode: rtidb-unsupport,cli-unsupport
+ mode: rtidb-unsupport
inputs:
- columns: ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"]
indexs: ["index1:c3:c7"]
@@ -143,7 +144,7 @@ cases:
- [5,"ee",21,34]
- id: 6
desc: union表使用索引,样本表未命中索引
- mode: rtidb-unsupport,cli-unsupport
+ mode: rtidb-unsupport
inputs:
- columns: ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"]
indexs: ["index1:c1:c7"]
@@ -341,7 +342,7 @@ cases:
- [4,"dd",20,96]
- [5,"ee",21,34]
- - id: 14-1
+ - id: 14
desc: WINDOW UNION 子查询, column cast 和 const cast子查询, string cast as date
mode: offline-unsupport
inputs:
@@ -572,6 +573,7 @@ cases:
- id: 18-1
desc: |
when UNION ROWS_RANGE has the same key with original rows, original rows first then union rows
+ mode: disk-unsupport
inputs:
- name: t1
columns:
@@ -624,6 +626,7 @@ cases:
desc: |
when UNION ROWS has the same key with original rows, original rows first then union rows,
union rows filtered out first for max window size limitation
+ mode: disk-unsupport
inputs:
- name: t1
columns:
@@ -671,6 +674,7 @@ cases:
1, 3, 233, 21, 200
2, 3, 400, 21, 21
- id: 18-3
+ mode: disk-unsupport
desc: |
when UNION ROWS_RANGE MAXSIZE has the same key with original rows, original rows first then union rows
union rows filtered out for max window size first
@@ -720,6 +724,7 @@ cases:
1, 2, 200, 21, 200
2, 2, 21, 0, 21
- id: 18-4
+ mode: disk-unsupport
desc: |
when UNION ROWS_RANGE EXCLUDE CURRENT_TIME has the same key with original rows, original rows first then union rows
other rows except current row filtered out by EXCLUDE CURRENT_TIME
@@ -777,14 +782,15 @@ cases:
- mi int
- l1 int
order: id
- data: |
- 0, 1, 19, 19, NULL
- 1, 1, 18, 18, NULL
- 2, 4, 233, 18, 233
- 3, 4, 233, 5, 233
- 4, 7, 233, 5, 5
+ rows:
+ - [0, 1, 19, 19, NULL]
+ - [1, 1, 18, 18, NULL]
+ - [2, 4, 233, 18, 233]
+ - [3, 4, 233, 5, 233]
+ - [4, 7, 233, 5, 5]
- id: 18-5
+ mode: disk-unsupport
desc: |
UNION ROWS current time rows filtered out
inputs:
@@ -842,6 +848,7 @@ cases:
#
# 19-* series test case tests for this for SQL engine only, you should never rely on this behavior anyway
- id: 19-1
+ mode: disk-unsupport
desc: |
window unions multiple tables, the order for rows in union tables with same ts is explicitly as the order in SQL
inputs:
@@ -903,6 +910,7 @@ cases:
1, 6, 999, 0, 200, 233
2, 7, 10000, 0, 21, 200
- id: 19-2
+ mode: disk-unsupport
desc: |
rows order for pure history window union
inputs:
@@ -1017,15 +1025,15 @@ cases:
1, 100, 111, 200
1, 101, 111, 17
sql: |
- select
- id, count(val) over w as cnt,
- max(val) over w as mv,
- min(val) over w as mi,
- lag(val, 1) over w as l1
- from t1 window w as(
- union t2
- partition by `g` order by `ts`
- rows_range between 3s preceding and 0s preceding EXCLUDE CURRENT_ROW);
+ select
+ id, count(val) over w as cnt,
+ max(val) over w as mv,
+ min(val) over w as mi,
+ lag(val, 1) over w as l1
+ from t1 window w as(
+ union t2
+ partition by `g` order by `ts`
+ rows_range between 3s preceding and 0s preceding EXCLUDE CURRENT_ROW);
expect:
columns:
- id int
@@ -1040,7 +1048,7 @@ cases:
- id: 21
desc: |
rows_range window union with exclude current_row and exclude current_time
- mode: batch-unsupport
+ mode: batch-unsupport,disk-unsupport
inputs:
- name: t1
columns:
@@ -1084,16 +1092,16 @@ cases:
DATA_PROVIDER(request=t1)
DATA_PROVIDER(type=Partition, table=t1, index=idx)
sql: |
- select
- id, count(val) over w as cnt,
- max(val) over w as mv,
- min(val) over w as mi,
- lag(val, 1) over w as l1
- from t1 window w as(
- union t2
- partition by `g` order by `ts`
- rows_range between 3s preceding and 0s preceding
- EXCLUDE CURRENT_ROW EXCLUDE CURRENT_TIME);
+ select
+ id, count(val) over w as cnt,
+ max(val) over w as mv,
+ min(val) over w as mi,
+ lag(val, 1) over w as l1
+ from t1 window w as(
+ union t2
+ partition by `g` order by `ts`
+ rows_range between 3s preceding and 0s preceding
+ EXCLUDE CURRENT_ROW EXCLUDE CURRENT_TIME);
expect:
columns:
- id int
@@ -1152,16 +1160,16 @@ cases:
DATA_PROVIDER(request=t1)
DATA_PROVIDER(table=t1)
sql: |
- select
- id, count(val) over w as cnt,
- max(val) over w as mv,
- min(val) over w as mi,
- lag(val, 1) over w as l1
- from t1 window w as(
- union t2
- partition by `g` order by `ts`
- rows_range between 3s preceding and 0s preceding
- EXCLUDE CURRENT_ROW INSTANCE_NOT_IN_WINDOW);
+ select
+ id, count(val) over w as cnt,
+ max(val) over w as mv,
+ min(val) over w as mi,
+ lag(val, 1) over w as l1
+ from t1 window w as(
+ union t2
+ partition by `g` order by `ts`
+ rows_range between 3s preceding and 0s preceding
+ EXCLUDE CURRENT_ROW INSTANCE_NOT_IN_WINDOW);
expect:
columns:
- id int
@@ -1221,16 +1229,16 @@ cases:
DATA_PROVIDER(request=t1)
DATA_PROVIDER(table=t1)
sql: |
- select
- id, count(val) over w as cnt,
- max(val) over w as mv,
- min(val) over w as mi,
- lag(val, 1) over w as l1
- from t1 window w as(
- union t2
- partition by `g` order by `ts`
- rows_range between 3s preceding and 0s preceding
- EXCLUDE CURRENT_ROW INSTANCE_NOT_IN_WINDOW EXCLUDE CURRENT_TIME);
+ select
+ id, count(val) over w as cnt,
+ max(val) over w as mv,
+ min(val) over w as mi,
+ lag(val, 1) over w as l1
+ from t1 window w as(
+ union t2
+ partition by `g` order by `ts`
+ rows_range between 3s preceding and 0s preceding
+ EXCLUDE CURRENT_ROW INSTANCE_NOT_IN_WINDOW EXCLUDE CURRENT_TIME);
expect:
columns:
- id int
@@ -1245,10 +1253,11 @@ cases:
3, 2, 233, 200, 200
4, 3, 233, 17, 17
- # rows_range union window with exclude current_row, single window
+ # rows_range union window with exclude current_row, single window
- id: 24
desc: |
rows_range union window with exclude_current_row
+ mode: disk-unsupport
inputs:
- name: t1
columns:
@@ -1291,15 +1300,15 @@ cases:
DATA_PROVIDER(request=t1)
DATA_PROVIDER(type=Partition, table=t1, index=idx)
sql: |
- select
- id, count(val) over w as cnt,
- max(val) over w as mv,
- min(val) over w as mi,
- from t1 window w as(
- union t2
- partition by `g` order by `ts`
- rows_range between 3s preceding and 0s preceding
- EXCLUDE CURRENT_ROW);
+ select
+ id, count(val) over w as cnt,
+ max(val) over w as mv,
+ min(val) over w as mi,
+ from t1 window w as(
+ union t2
+ partition by `g` order by `ts`
+ rows_range between 3s preceding and 0s preceding
+ EXCLUDE CURRENT_ROW);
expect:
columns:
- id int
@@ -1315,6 +1324,7 @@ cases:
- id: 25
desc: |
rows_range union window with exclude_current_row and exclude_current_time
+ mode: disk-unsupport
inputs:
- name: t1
columns:
@@ -1329,9 +1339,6 @@ cases:
2, 100, 111, 5
3, 101, 111, 0
4, 102, 111, 0
- 5, 0, 114, 7
- 6, 0, 114, 8
- 7, 100, 114, 9
- name: t2
columns:
- id int
@@ -1360,15 +1367,15 @@ cases:
DATA_PROVIDER(request=t1)
DATA_PROVIDER(type=Partition, table=t1, index=idx)
sql: |
- select
- id, count(val) over w as cnt,
- max(val) over w as mv,
- min(val) over w as mi,
- from t1 window w as(
- union t2
- partition by `g` order by `ts`
- rows_range between 3s preceding AND CURRENT ROW
- EXCLUDE CURRENT_ROW EXCLUDE CURRENT_TIME);
+ select
+ id, count(val) over w as cnt,
+ max(val) over w as mv,
+ min(val) over w as mi,
+ from t1 window w as(
+ union t2
+ partition by `g` order by `ts`
+ rows_range between 3s preceding AND CURRENT ROW
+ EXCLUDE CURRENT_ROW EXCLUDE CURRENT_TIME);
expect:
columns:
- id int
@@ -1381,9 +1388,6 @@ cases:
2, 1, 233, 233
3, 4, 233, 5
4, 6, 233, 0
- 5, 0, NULL, NULL
- 6, 0, NULL, NULL
- 7, 2, 8, 7
- id: 26
desc: |
rows_range union window with exclude_current_row and instance_not_in_window
@@ -1430,15 +1434,15 @@ cases:
DATA_PROVIDER(request=t1)
DATA_PROVIDER(table=t1)
sql: |
- select
- id, count(val) over w as cnt,
- max(val) over w as mv,
- min(val) over w as mi,
- from t1 window w as(
- union t2
- partition by `g` order by `ts`
- rows_range between 3s preceding AND CURRENT ROW
- EXCLUDE CURRENT_ROW INSTANCE_NOT_IN_WINDOW);
+ select
+ id, count(val) over w as cnt,
+ max(val) over w as mv,
+ min(val) over w as mi,
+ from t1 window w as(
+ union t2
+ partition by `g` order by `ts`
+ rows_range between 3s preceding AND CURRENT ROW
+ EXCLUDE CURRENT_ROW INSTANCE_NOT_IN_WINDOW);
expect:
columns:
- id int
@@ -1496,15 +1500,15 @@ cases:
DATA_PROVIDER(request=t1)
DATA_PROVIDER(table=t1)
sql: |
- select
- id, count(val) over w as cnt,
- max(val) over w as mv,
- min(val) over w as mi,
- from t1 window w as(
- union t2
- partition by `g` order by `ts`
- rows_range between 3s preceding AND CURRENT ROW
- EXCLUDE CURRENT_ROW INSTANCE_NOT_IN_WINDOW EXCLUDE CURRENT_TIME);
+ select
+ id, count(val) over w as cnt,
+ max(val) over w as mv,
+ min(val) over w as mi,
+ from t1 window w as(
+ union t2
+ partition by `g` order by `ts`
+ rows_range between 3s preceding AND CURRENT ROW
+ EXCLUDE CURRENT_ROW INSTANCE_NOT_IN_WINDOW EXCLUDE CURRENT_TIME);
expect:
columns:
- id int
@@ -1562,16 +1566,16 @@ cases:
DATA_PROVIDER(request=t1)
DATA_PROVIDER(table=t1)
sql: |
- select
- id, count(val) over w as cnt,
- max(val) over w as mv,
- min(val) over w as mi,
- from t1 window w as(
- union t2
- partition by `g` order by `ts`
- rows_range between 3s preceding AND CURRENT ROW
- MAXSIZE 2
- EXCLUDE CURRENT_ROW INSTANCE_NOT_IN_WINDOW EXCLUDE CURRENT_TIME);
+ select
+ id, count(val) over w as cnt,
+ max(val) over w as mv,
+ min(val) over w as mi,
+ from t1 window w as(
+ union t2
+ partition by `g` order by `ts`
+ rows_range between 3s preceding AND CURRENT ROW
+ MAXSIZE 2
+ EXCLUDE CURRENT_ROW INSTANCE_NOT_IN_WINDOW EXCLUDE CURRENT_TIME);
expect:
columns:
- id int
@@ -1629,16 +1633,16 @@ cases:
DATA_PROVIDER(request=t1)
DATA_PROVIDER(table=t1)
sql: |
- select
- id, count(val) over w as cnt,
- max(val) over w as mv,
- min(val) over w as mi,
- from t1 window w as(
- union t2
- partition by `g` order by `ts`
- rows_range between 3s preceding AND CURRENT ROW
- MAXSIZE 2
- EXCLUDE CURRENT_ROW INSTANCE_NOT_IN_WINDOW);
+ select
+ id, count(val) over w as cnt,
+ max(val) over w as mv,
+ min(val) over w as mi,
+ from t1 window w as(
+ union t2
+ partition by `g` order by `ts`
+ rows_range between 3s preceding AND CURRENT ROW
+ MAXSIZE 2
+ EXCLUDE CURRENT_ROW INSTANCE_NOT_IN_WINDOW);
expect:
columns:
- id int
@@ -1654,6 +1658,7 @@ cases:
- id: 30
desc: |
rows_range union window with exclude_current_row, exclude_current_time and maxsize
+ mode: disk-unsupport
inputs:
- name: t1
columns:
@@ -1668,10 +1673,6 @@ cases:
2, 100, 111, 5
3, 101, 111, 0
4, 102, 111, 0
- 5, 0, 114, 9
- 6, 0, 114, 17
- 7, 100, 114, 11
- 8, 101, 114, 14
- name: t2
columns:
- id int
@@ -1700,16 +1701,16 @@ cases:
DATA_PROVIDER(request=t1)
DATA_PROVIDER(type=Partition, table=t1, index=idx)
sql: |
- select
- id, count(val) over w as cnt,
- max(val) over w as mv,
- min(val) over w as mi,
- from t1 window w as(
- union t2
- partition by `g` order by `ts`
- rows_range between 3s preceding AND CURRENT ROW
- MAXSIZE 2
- EXCLUDE CURRENT_ROW EXCLUDE CURRENT_TIME);
+ select
+ id, count(val) over w as cnt,
+ max(val) over w as mv,
+ min(val) over w as mi,
+ from t1 window w as(
+ union t2
+ partition by `g` order by `ts`
+ rows_range between 3s preceding AND CURRENT ROW
+ MAXSIZE 2
+ EXCLUDE CURRENT_ROW EXCLUDE CURRENT_TIME);
expect:
columns:
- id int
@@ -1722,7 +1723,476 @@ cases:
2, 1, 233, 233
3, 2, 21, 5
4, 2, 17, 0
- 5, 0, NULL, NULL
- 6, 0, NULL, NULL
- 7, 2, 17, 9
- 8, 2, 17, 11
+
+ - id: 31
+ desc: 主表ts都大于副表的
+ tags: ["TODO","cpp ut失败"]
+ inputs:
+ - columns: ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"]
+ indexs: ["index1:c3:c7"]
+ rows:
+ - [1,"aa",20,30,1.1,2.1,1590738995000,"2020-05-01"]
+ - [4,"dd",20,33,1.4,2.4,1590738993000,"2020-05-04"]
+ - [5,"ee",21,34,1.5,2.5,1590738994000,"2020-05-05"]
+ - columns: ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"]
+ indexs: ["index1:c3:c7"]
+ rows:
+ - [2,"bb",20,31,1.2,2.2,1590738991000,"2020-05-02"]
+ - [3,"cc",20,32,1.3,2.3,1590738992000,"2020-05-03"]
+ sql: |
+ SELECT id, c1, c3, sum(c4) OVER w1 as w1_c4_sum FROM {0} WINDOW w1 AS (UNION {1} PARTITION BY {0}.c3 ORDER BY {0}.c7 ROWS BETWEEN 2 PRECEDING AND CURRENT ROW);
+ expect:
+ order: id
+ columns: ["id int","c1 string","c3 int","w1_c4_sum bigint"]
+ rows:
+ - [1,"aa",20,93]
+ - [4,"dd",20,96]
+ - [5,"ee",21,34]
+ - id: 32
+ desc: 主表ts都小于副表的
+ inputs:
+ - columns: ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"]
+ indexs: ["index1:c3:c7"]
+ rows:
+ - [1,"aa",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [4,"dd",20,33,1.4,2.4,1590738991000,"2020-05-04"]
+ - [5,"ee",21,34,1.5,2.5,1590738992000,"2020-05-05"]
+ - columns: ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"]
+ indexs: ["index1:c3:c7"]
+ rows:
+ - [2,"bb",20,31,1.2,2.2,1590738993000,"2020-05-02"]
+ - [3,"cc",20,32,1.3,2.3,1590738994000,"2020-05-03"]
+ sql: |
+ SELECT id, c1, c3, sum(c4) OVER w1 as w1_c4_sum FROM {0} WINDOW w1 AS (UNION {1} PARTITION BY {0}.c3 ORDER BY {0}.c7 ROWS BETWEEN 2 PRECEDING AND CURRENT ROW);
+ expect:
+ order: id
+ columns: ["id int","c1 string","c3 int","w1_c4_sum bigint"]
+ rows:
+ - [1,"aa",20,30]
+ - [4,"dd",20,63]
+ - [5,"ee",21,34]
+ - id: 33
+ desc: 主表副表ts有交集
+ inputs:
+ - columns: ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"]
+ indexs: ["index1:c3:c7"]
+ rows:
+ - [1,"aa",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [4,"dd",20,33,1.4,2.4,1590738994000,"2020-05-04"]
+ - [5,"ee",21,34,1.5,2.5,1590738995000,"2020-05-05"]
+ - columns: ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"]
+ indexs: ["index1:c3:c7"]
+ rows:
+ - [2,"bb",20,31,1.2,2.2,1590738991000,"2020-05-02"]
+ - [3,"cc",20,32,1.3,2.3,1590738992000,"2020-05-03"]
+ sql: |
+ SELECT id, c1, c3, sum(c4) OVER w1 as w1_c4_sum FROM {0} WINDOW w1 AS (UNION {1} PARTITION BY {0}.c3 ORDER BY {0}.c7 ROWS BETWEEN 2 PRECEDING AND CURRENT ROW);
+ expect:
+ order: id
+ columns: ["id int","c1 string","c3 int","w1_c4_sum bigint"]
+ rows:
+ - [1,"aa",20,30]
+ - [4,"dd",20,96]
+ - [5,"ee",21,34]
+ - id: 34
+ desc: 主表和副表分片在同一节点上
+ inputs:
+ - columns: ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"]
+ indexs: ["index1:c3:c7"]
+ replicaNum: 3
+ partitionNum: 1
+ distribution:
+ - leader: "{tb_endpoint_1}"
+ followers: [ "{tb_endpoint_0}","{tb_endpoint_2}" ]
+ rows:
+ - [1,"aa",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [4,"dd",20,33,1.4,2.4,1590738994000,"2020-05-04"]
+ - [5,"ee",21,34,1.5,2.5,1590738995000,"2020-05-05"]
+ - columns: ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"]
+ indexs: ["index1:c3:c7"]
+ replicaNum: 3
+ partitionNum: 1
+ distribution:
+ - leader: "{tb_endpoint_1}"
+ followers: [ "{tb_endpoint_0}","{tb_endpoint_2}" ]
+ rows:
+ - [2,"bb",20,31,1.2,2.2,1590738991000,"2020-05-02"]
+ - [3,"cc",20,32,1.3,2.3,1590738992000,"2020-05-03"]
+ sql: |
+ SELECT id, c1, c3, sum(c4) OVER w1 as w1_c4_sum FROM {0} WINDOW w1 AS (UNION {1} PARTITION BY {0}.c3 ORDER BY {0}.c7 ROWS BETWEEN 2 PRECEDING AND CURRENT ROW);
+ expect:
+ order: id
+ columns: ["id int","c1 string","c3 int","w1_c4_sum bigint"]
+ rows:
+ - [1,"aa",20,30]
+ - [4,"dd",20,96]
+ - [5,"ee",21,34]
+ - id: 35
+ desc: 主表和副表分片在不同的节点上
+ inputs:
+ - columns: ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"]
+ indexs: ["index1:c3:c7"]
+ distribution:
+ - leader: "{tb_endpoint_1}"
+ rows:
+ - [1,"aa",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [4,"dd",20,33,1.4,2.4,1590738994000,"2020-05-04"]
+ - [5,"ee",21,34,1.5,2.5,1590738995000,"2020-05-05"]
+ - columns: ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"]
+ indexs: ["index1:c3:c7"]
+ distribution:
+ - leader: "{tb_endpoint_0}"
+ rows:
+ - [2,"bb",20,31,1.2,2.2,1590738991000,"2020-05-02"]
+ - [3,"cc",20,32,1.3,2.3,1590738992000,"2020-05-03"]
+ sql: |
+ SELECT id, c1, c3, sum(c4) OVER w1 as w1_c4_sum FROM {0} WINDOW w1 AS (UNION {1} PARTITION BY {0}.c3 ORDER BY {0}.c7 ROWS BETWEEN 2 PRECEDING AND CURRENT ROW);
+ expect:
+ order: id
+ columns: ["id int","c1 string","c3 int","w1_c4_sum bigint"]
+ rows:
+ - [1,"aa",20,30]
+ - [4,"dd",20,96]
+ - [5,"ee",21,34]
+ - id: 36
+ desc: 两张副表,一张和主表在同一节点,另一张不在
+ db: db_wzx
+ sql: |
+ select
+ c1,
+ min(c1) over table_1_s2_t1 as table_1_c1_9,
+ min(c2) over table_1_s2_t1 as table_1_c2_10,
+ case when !isnull(lag(d1, 1) over table_1_s2_t1) then distinct_count(d1) over table_1_s2_t1 else null end as table_1_d1_11,
+ case when !isnull(lag(d2, 1) over table_1_s2_t1) then distinct_count(d2) over table_1_s2_t1 else null end as table_1_d2_12,
+ case when !isnull(lag(s1, 1) over table_1_s2_t1) then distinct_count(s1) over table_1_s2_t1 else null end as table_1_s1_13
+ from
+ {0} as main
+ window table_1_s2_t1 as (partition by s2 order by t1 rows_range between 1d preceding and 0s preceding INSTANCE_NOT_IN_WINDOW);
+ inputs:
+ - columns: ["label int", "s1 string","s2 string", "t1 timestamp","t2 date","d1 string", "d2 string", "c1 int", "c2 bigint",
+ "ai string", "kn string", "ks string"]
+ indexs: ["index1:s2:t1", "index2:s1:t1", "index3:d1:t1", "index4:d2:t1"]
+ distribution:
+ - leader: "{tb_endpoint_1}"
+ rows:
+ - [1, "1", "2", 1600946381104, "2019-07-18", "xx", "xx", 1, 2 , "3x","4x","kx"]
+ - columns: ["s1 string","s2 string", "t1 timestamp","t2 date","d1 string", "d2 string", "c1 int", "c2 bigint",
+ "ai string", "kn string", "ks string"]
+ indexs: ["index1:s2:t1"]
+ distribution:
+ - leader: "{tb_endpoint_1}"
+ rows:
+ - ["1", "2", 1600946381104, "2019-07-18", "xx", "xx", 1, 2 , "3x","4x","kx"]
+ - columns: ["s1 string","s2 string", "t1 timestamp","t2 date","d1 string", "d2 string", "c1 int", "c2 bigint",
+ "ai string", "kn string", "ks string"]
+ indexs: ["index1:s1:t1"]
+ distribution:
+ - leader: "{tb_endpoint_0}"
+ rows:
+ - ["1", "2", 1600946381104, "2019-07-18", "xx", "xx", 1, 2 , "3x","4x","kx"]
+ - columns: ["s1 string","s2 string", "t1 timestamp","t2 date","d1 string", "d2 string", "c1 int", "c2 bigint",
+ "ai string", "kn string", "ks string"]
+ indexs: ["index1:s1:t1"]
+ distribution:
+ - leader: "{tb_endpoint_0}"
+ rows:
+ - ["1", "2", 1600946381104, "2019-07-18", "xx", "xx", 1, 2 , "3x","4x","kx"]
+ expect:
+ order: c1
+ columns: ["c1 int", "table_1_c1_9 int", "table_1_c2_10 bigint", "table_1_d1_11 bigint", "table_1_d2_12 bigint", "table_1_s1_13 bigint"]
+ rows:
+ - [1, 1, 2, NULL, NULL, NULL]
+
+
+ # =================================================================== #
+ # case id: [37 - 40]
+ # correctness verification for multiple window unions in batch mode
+ # refer to issue https://github.com/4paradigm/OpenMLDB/issues/1807
+ # =================================================================== #
+ - id: 37
+ mode: cluster-unsupport
+ desc: |
+ multiple window support with one window union
+ inputs:
+ - name: t1
+ columns:
+ - id int
+ - ts timestamp
+ - g int
+ - val int
+ indexs:
+ - idx:g:ts
+ data: |
+ 1, 100000, 111, 21
+ 2, 100000, 111, 10000
+ - name: t2
+ columns:
+ - id int
+ - ts timestamp
+ - g int
+ - val int
+ indexs:
+ - idx:g:ts
+ data: |
+ 1, 87000, 111, 300
+ 1, 95000, 111, 999
+ 1, 99000, 111, 233
+ 1, 100000, 111, 200
+ 1, 101000, 111, 17
+ sql: |
+ select
+ id, count(val) over w as cnt,
+ max(val) over w as mv,
+ min(val) over w as mi,
+ sum(val) over w2 as m2
+ from t1 window
+ w as(union t2 partition by `g` order by `ts` ROWS_RANGE BETWEEN 5s preceding and 1s preceding),
+ w2 as (partition by `g` order by `ts` ROWS BETWEEN 1 PRECEDING and CURRENT ROW);
+ batch_plan: |
+ PROJECT(type=WindowAggregation)
+ +-WINDOW(partition_keys=(g), orders=(ts ASC), range=(ts, 5000 PRECEDING, 1000 PRECEDING))
+ +-UNION(partition_keys=(), orders=(ASC), range=(ts, 5000 PRECEDING, 1000 PRECEDING))
+ RENAME(name=)
+ DATA_PROVIDER(type=Partition, table=t2, index=idx)
+ PROJECT(type=WindowAggregation, NEED_APPEND_INPUT)
+ +-WINDOW(partition_keys=(), orders=(ASC), rows=(ts, 1 PRECEDING, 0 CURRENT))
+ DATA_PROVIDER(type=Partition, table=t1, index=idx)
+ expect:
+ columns:
+ - id int
+ - cnt int64
+ - mv int
+ - mi int
+ - m2 int
+ order: id
+ data: |
+ 1, 2, 999, 233, 21
+ 2, 2, 999, 233, 10021
+ - id: 38
+ mode: cluster-unsupport
+ desc: |
+ mulpile window support with two window union
+ inputs:
+ - name: t1
+ columns:
+ - id int
+ - ts timestamp
+ - g int
+ - val int
+ indexs:
+ - idx:g:ts
+ data: |
+ 1, 100000, 111, 21
+ 2, 100000, 111, 10000
+ - name: t2
+ columns:
+ - id int
+ - ts timestamp
+ - g int
+ - val int
+ indexs:
+ - idx:g:ts
+ data: |
+ 1, 87000, 111, 300
+ 1, 95000, 111, 999
+ - name: t3
+ columns:
+ - id int
+ - ts timestamp
+ - g int
+ - val int
+ indexs:
+ - idx:g:ts
+ data: |
+ 1, 99000, 111, 233
+ 1, 100000, 111, 200
+ 1, 101000, 111, 17
+ sql: |
+ select
+ id, count(val) over w1 as cnt,
+ max(val) over w1 as mv,
+ min(val) over w1 as mi,
+ sum(val) over w2 as m2,
+ sum_where(val, val > 200) over w3 as sw
+ from t1 window
+ w1 as(union t2 partition by `g` order by `ts` ROWS_RANGE BETWEEN 5s preceding and 0s preceding),
+ w2 as (union t3 partition by `g` order by `ts` ROWS BETWEEN 1 PRECEDING AND CURRENT ROW),
+ w3 as (partition by `g` order by `ts` ROWS BETWEEN 2 PRECEDING AND CURRENT ROW);
+ batch_plan: |
+ PROJECT(type=WindowAggregation)
+ +-WINDOW(partition_keys=(g), orders=(ts ASC), range=(ts, 5000 PRECEDING, 0 PRECEDING))
+ +-UNION(partition_keys=(), orders=(ASC), range=(ts, 5000 PRECEDING, 0 PRECEDING))
+ RENAME(name=)
+ DATA_PROVIDER(type=Partition, table=t2, index=idx)
+ PROJECT(type=WindowAggregation, NEED_APPEND_INPUT)
+ +-WINDOW(partition_keys=(g), orders=(ts ASC), rows=(ts, 1 PRECEDING, 0 CURRENT))
+ +-UNION(partition_keys=(), orders=(ASC), rows=(ts, 1 PRECEDING, 0 CURRENT))
+ RENAME(name=)
+ DATA_PROVIDER(type=Partition, table=t3, index=idx)
+ PROJECT(type=WindowAggregation, NEED_APPEND_INPUT)
+ +-WINDOW(partition_keys=(), orders=(ASC), rows=(ts, 2 PRECEDING, 0 CURRENT))
+ DATA_PROVIDER(type=Partition, table=t1, index=idx)
+ expect:
+ columns:
+ - id int
+ - cnt int64
+ - mv int
+ - mi int
+ - m2 int
+ - sw int
+ order: id
+ data: |
+ 1, 2, 999, 21, 221, NULL
+ 2, 3, 10000, 21, 10021, 10000
+ - id: 39
+ mode: cluster-unsupport
+ desc: |
+ multiple window support with three window unions
+ inputs:
+ - name: t1
+ columns:
+ - id int
+ - ts timestamp
+ - g int
+ - val int
+ indexs:
+ - idx:g:ts
+ data: |
+ 1, 100000, 111, 21
+ 2, 100000, 111, 10000
+ - name: t2
+ columns:
+ - id int
+ - ts timestamp
+ - g int
+ - val int
+ indexs:
+ - idx:g:ts
+ data: |
+ 1, 87000, 111, 300
+ 1, 95000, 111, 999
+ - name: t3
+ columns:
+ - id int
+ - ts timestamp
+ - g int
+ - val int
+ indexs:
+ - idx:g:ts
+ data: |
+ 1, 99000, 111, 233
+ 1, 100000, 111, 200
+ - name: t4
+ columns:
+ - id int
+ - ts timestamp
+ - g int
+ - val int
+ indexs:
+ - idx:g:ts
+ data: |
+ 1, 101000, 111, 17
+ sql: |
+ select
+ id, count(val) over w1 as cnt,
+ max(val) over w1 as mv,
+ min(val) over w1 as mi,
+ sum(val) over w2 as m2,
+ sum_where(val, val > 200) over w3 as sw
+ from t1 window
+ w1 as(union t2 partition by `g` order by `ts` ROWS_RANGE BETWEEN 5s preceding and 0s preceding),
+ w2 as (union t3 partition by `g` order by `ts` ROWS BETWEEN 1 PRECEDING AND CURRENT ROW),
+ w3 as (union t4 partition by `g` order by `ts` ROWS BETWEEN 2 PRECEDING AND CURRENT ROW);
+ batch_plan: |
+ PROJECT(type=WindowAggregation)
+ +-WINDOW(partition_keys=(g), orders=(ts ASC), range=(ts, 5000 PRECEDING, 0 PRECEDING))
+ +-UNION(partition_keys=(), orders=(ASC), range=(ts, 5000 PRECEDING, 0 PRECEDING))
+ RENAME(name=)
+ DATA_PROVIDER(type=Partition, table=t2, index=idx)
+ PROJECT(type=WindowAggregation, NEED_APPEND_INPUT)
+ +-WINDOW(partition_keys=(g), orders=(ts ASC), rows=(ts, 1 PRECEDING, 0 CURRENT))
+ +-UNION(partition_keys=(), orders=(ASC), rows=(ts, 1 PRECEDING, 0 CURRENT))
+ RENAME(name=)
+ DATA_PROVIDER(type=Partition, table=t3, index=idx)
+ PROJECT(type=WindowAggregation, NEED_APPEND_INPUT)
+ +-WINDOW(partition_keys=(), orders=(ASC), rows=(ts, 2 PRECEDING, 0 CURRENT))
+ +-UNION(partition_keys=(), orders=(ASC), rows=(ts, 2 PRECEDING, 0 CURRENT))
+ RENAME(name=t1)
+ DATA_PROVIDER(type=Partition, table=t4, index=idx)
+ DATA_PROVIDER(type=Partition, table=t1, index=idx)
+ expect:
+ columns:
+ - id int
+ - cnt int64
+ - mv int
+ - mi int
+ - m2 int
+ - sw int
+ order: id
+ data: |
+ 1, 2, 999, 21, 221, NULL
+ 2, 3, 10000, 21, 10021, 10000
+ - id: 40
+ mode: cluster-unsupport
+ desc: |
+ multiple window union with last join
+ # FIXME(ace): fail to resolve column g2
+ tags: ["TODO"]
+ inputs:
+ - name: t1
+ columns:
+ - id int
+ - ts timestamp
+ - g int
+ - val int
+ indexs:
+ - idx:g:ts
+ data: |
+ 1, 100000, 111, 21
+ 2, 100000, 111, 10000
+ - name: t2
+ columns:
+ - id int
+ - ts timestamp
+ - g int
+ - val int
+ indexs:
+ - idx:g:ts
+ data: |
+ 1, 87000, 111, 300
+ 1, 95000, 111, 999
+ 1, 99000, 111, 233
+ 1, 100000, 111, 200
+ 1, 101000, 111, 17
+ - name: t3
+ columns:
+ - id2 int
+ - ts2 timestamp
+ - g2 int
+ - val2 int
+ indexs:
+ - idx:g2:ts2
+ data: |
+ 9, 88000, 111, 90
+ sql: |
+ select
+ id, g2, count(val) over w as cnt,
+ max(val) over w as mv,
+ min(val) over w as mi,
+ sum(val) over w2 as m2
+ from t1 last join t3 ON t1.g = t3.g2 window
+ w as(union t2 partition by `g` order by `ts` ROWS_RANGE BETWEEN 5s preceding and 1s preceding),
+ w2 as (partition by `g` order by `ts` ROWS BETWEEN 1 PRECEDING and CURRENT ROW);
+ batch_plan: |
+ expect:
+ columns:
+ - id int
+ - g2 int
+ - cnt int64
+ - mv int
+ - mi int
+ - m2 int
+ order: id
+ data: |
+ 1, 111, 2, 999, 233, 21
+ 2, 111, 2, 999, 233, 10021
diff --git a/cases/function/window/test_window_union_cluster_thousand.yaml b/cases/function/window/test_window_union_cluster_thousand.yaml
new file mode 100644
index 00000000000..aa12f1b549f
--- /dev/null
+++ b/cases/function/window/test_window_union_cluster_thousand.yaml
@@ -0,0 +1,1044 @@
+# Copyright 2021 4Paradigm
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+
+db: test_zw
+debugs: []
+version: 0.5.0
+cases:
+ - id: 0
+ desc: 正常union
+ mode: disk-unsupport
+ inputs:
+ - columns: ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"]
+ indexs: ["index1:c3:c7"]
+ rows:
+ - [1,"aa",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [4,"dd",20,33,1.4,2.4,1590738993000,"2020-05-04"]
+ - [5,"ee",21,34,1.5,2.5,1590738994000,"2020-05-05"]
+ - columns: ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"]
+ indexs: ["index1:c3:c7"]
+ rows:
+ - [2,"bb",20,31,1.2,2.2,1590738991000,"2020-05-02"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [3,"cc",20,32,1.3,2.3,1590738992000,"2020-05-03"]
+ sql: |
+ SELECT id, c1, c3, sum(c4) OVER w1 as w1_c4_sum FROM {0} WINDOW w1 AS (UNION {1} PARTITION BY {0}.c3 ORDER BY {0}.c7 ROWS BETWEEN 2 PRECEDING AND CURRENT ROW);
+ expect:
+ order: id
+ columns: ["id int","c1 string","c3 int","w1_c4_sum bigint"]
+ rows:
+ - [1,"aa",20,90]
+ - [4,"dd",20,96]
+ - [5,"ee",21,34]
\ No newline at end of file
diff --git a/cases/function/window/window_attributes.yaml b/cases/function/window/window_attributes.yaml
index 3080dfeab87..7f3153d304b 100644
--- a/cases/function/window/window_attributes.yaml
+++ b/cases/function/window/window_attributes.yaml
@@ -5,6 +5,8 @@
# - MAXSIZE
debugs: []
+version: 0.6.0
+db: test_java
cases:
- id: 0
desc: ROWS_RANGE window with exclude_current_row
@@ -59,13 +61,13 @@ cases:
- mi int
- l1 int
order: id
- data: |
- 0, 0, NULL, NULL, NULL
- 1, 1, 0, 0, 0
- 2, 0, NULL, NULL, 0
- 3, 1, 21, 21, 21
- 4, 2, 22, 21, 22
- 5, 0, NULL, NULL, NULL
+ rows:
+ - [0, 0, NULL, NULL, NULL]
+ - [1, 1, 0, 0, 0]
+ - [2, 0, NULL, NULL, 0]
+ - [3, 1, 21, 21, 21]
+ - [4, 2, 22, 21, 22]
+ - [5, 0, NULL, NULL, NULL]
- id: 1
desc: |
ROWS window with exclude_current_row, '0 PRECEDING EXCLUDE CURRENT_ROW' actually is the same as '0 OPEN PRECEDING'
@@ -101,11 +103,11 @@ cases:
- mi int
- l1 int
order: id
- data: |
- 1, 0, NULL, NULL, NULL
- 2, 1, 21, 21, 21
- 3, 2, 22, 21, 22
- 4, 0, NULL, NULL, NULL
+ rows:
+ - [1, 0, NULL, NULL, NULL]
+ - [2, 1, 21, 21, 21]
+ - [3, 2, 22, 21, 22]
+ - [4, 0, NULL, NULL, NULL]
- id: 2
desc: |
ROWS_RANGE pure-history window with exclude_current_row
@@ -159,11 +161,11 @@ cases:
- mi int
- l1 int
order: id
- data: |
- 1, 0, NULL, NULL, NULL
- 2, 1, 21, 21, 21
- 3, 2, 22, 21, 22
- 4, 0, NULL, NULL, NULL
+ rows:
+ - [1, 0, NULL, NULL, NULL]
+ - [2, 1, 21, 21, 21]
+ - [3, 2, 22, 21, 22]
+ - [4, 0, NULL, NULL, NULL]
- id: 3
desc: |
ROWS pure-history window with exclude_current_row
@@ -217,11 +219,11 @@ cases:
- mi int
- l1 int
order: id
- data: |
- 1, 0, NULL, NULL, NULL
- 2, 1, 21, 21, 21
- 3, 2, 22, 21, 22
- 4, 0, NULL, NULL, NULL
+ rows:
+ - [1, 0, NULL, NULL, NULL]
+ - [2, 1, 21, 21, 21]
+ - [3, 2, 22, 21, 22]
+ - [4, 0, NULL, NULL, NULL]
- id: 4
desc: |
@@ -260,13 +262,13 @@ cases:
- mi int
- l1 int
order: id
- data: |
- 1, 0, NULL, NULL, NULL
- 2, 1, 21, 21, 21
- 3, 2, 22, 21, 22
- 4, 2, 23, 22, 23
- 5, 0, NULL, NULL, NULL
- 6, 1, 56, 56, 56
+ rows:
+ - [1, 0, NULL, NULL, NULL]
+ - [2, 1, 21, 21, 21]
+ - [3, 2, 22, 21, 22]
+ - [4, 2, 23, 22, 23]
+ - [5, 0, NULL, NULL, NULL]
+ - [6, 1, 56, 56, 56]
- id: 5
desc: |
@@ -322,13 +324,13 @@ cases:
- mi int
- l1 int
order: id
- data: |
- 1, 0, NULL, NULL, NULL
- 2, 1, 21, 21, 21
- 3, 2, 22, 21, 22
- 4, 2, 23, 22, 23
- 5, 0, NULL, NULL, NULL
- 6, 1, 56, 56, 56
+ rows:
+ - [1, 0, NULL, NULL, NULL]
+ - [2, 1, 21, 21, 21]
+ - [3, 2, 22, 21, 22]
+ - [4, 2, 23, 22, 23]
+ - [5, 0, NULL, NULL, NULL]
+ - [6, 1, 56, 56, 56]
- id: 6
desc: |
@@ -384,13 +386,13 @@ cases:
- mi int
- l1 int
order: id
- data: |
- 1, 0, NULL, NULL, NULL
- 2, 1, 21, 21, 21
- 3, 2, 22, 21, 22
- 4, 3, 23, 21, 23
- 5, 0, NULL, NULL, NULL
- 6, 1, 56, 56, 56
+ rows:
+ - [1, 0, NULL, NULL, NULL]
+ - [2, 1, 21, 21, 21]
+ - [3, 2, 22, 21, 22]
+ - [4, 3, 23, 21, 23]
+ - [5, 0, NULL, NULL, NULL]
+ - [6, 1, 56, 56, 56]
- id: 7
desc: |
@@ -429,13 +431,13 @@ cases:
- mi int
- l1 int
order: id
- data: |
- 1, 0, NULL, NULL, NULL
- 2, 1, 21, 21, 21
- 3, 2, 22, 21, 22
- 4, 2, 23, 22, 23
- 5, 0, NULL, NULL, NULL
- 6, 1, 56, 56, 56
+ rows:
+ - [1, 0, NULL, NULL, NULL]
+ - [2, 1, 21, 21, 21]
+ - [3, 2, 22, 21, 22]
+ - [4, 2, 23, 22, 23]
+ - [5, 0, NULL, NULL, NULL]
+ - [6, 1, 56, 56, 56]
- id: 8
desc: |
@@ -482,6 +484,7 @@ cases:
5, 0, NULL, NULL, NULL
6, 1, 56, 56, 56
- id: 9
+ mode: disk-unsupport
desc: |
ROWS Window with exclude current_time and exclude current_row
inputs:
@@ -531,3 +534,31 @@ cases:
7, 2, 99, 0, 99
8, 3, 99, 0, 56
9, 3, 99, 52, 52
+ - id: 10
+ desc: rows and rows_range window won't merge if both exclude_current_row
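+    # Both w1 (ROWS) and w2 (ROWS_RANGE) specify EXCLUDE CURRENT_ROW, so the planner keeps them as
+    # separate windows; every aggregate covers only preceding rows, hence the first row of each
+    # partition expects a NULL sum and a count of 0.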
+ inputs:
+ - columns: [ "c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date" ]
+ indexs: [ "index1:c1:c7" ]
+ rows:
+ - [ "aa",20,30,1.1,2.1,1590738990000,"2020-05-01" ]
+ - [ "aa",21,31,1.2,2.2,1590738991000,"2020-05-02" ]
+ - [ "aa",22,32,1.3,2.3,1590738992000,"2020-05-03" ]
+ - [ "aa",23,33,1.4,2.4,1590738993000,"2020-05-04" ]
+ - [ "bb",24,34,1.5,2.5,1590738994000,"2020-05-05" ]
+ sql: |
+ SELECT
+ c1, c3,
+ sum(c4) OVER w1 as w1_c4_sum,
+ count(c5) OVER w2 as w2_c5_count FROM {0}
+ WINDOW
+ w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS BETWEEN 2 PRECEDING AND CURRENT ROW EXCLUDE CURRENT_ROW),
+ w2 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS_RANGE BETWEEN 2s PRECEDING AND CURRENT ROW EXCLUDE CURRENT_ROW);
+ expect:
+ order: c3
+ columns: [ "c1 string","c3 int","w1_c4_sum bigint","w2_c5_count bigint" ]
+ rows:
+ - [ "aa",20,null,0 ]
+ - [ "aa",21,30,1 ]
+ - [ "aa",22,61,2 ]
+ - [ "aa",23,63,2 ]
+ - [ "bb",24,null,0 ]
diff --git a/cases/integration_test/cluster/test_cluster_batch.yaml b/cases/integration_test/cluster/test_cluster_batch.yaml
new file mode 100644
index 00000000000..329fc9d170d
--- /dev/null
+++ b/cases/integration_test/cluster/test_cluster_batch.yaml
@@ -0,0 +1,199 @@
+# Copyright 2021 4Paradigm
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+db: test_zw
+debugs: []
+version: 0.5.0
+cases:
+ -
+ id: 0
+ desc: SELECT columns
+ inputs:
+ -
+ columns: ["id int", "c1 string","c6 double","c7 timestamp"]
+ indexs: ["index1:c1:c7"]
+ rows:
+ - [1, "aa", 1.0, 1590738990000]
+ - [2, "aa", 2.0, 1590738991000]
+ - [3, "aa", 3.0, 1590738992000]
+ - [4, "aa", 4.0, 1590738993000]
+ - [5, "bb", 5.0, 1590738994000]
+ - [6, "bb", 6.0, 1590738995000]
+ - [7, "bb", 7.0, 1590738996000]
+ - [8, "bb", 8.0, 1590738997000]
+ - [9, "bb", 9.0, 1590738998000]
+ - [10, "cc", 1.0, 1590738993000]
+ - [11, "cc", 2.0, 1590738994000 ]
+ - [12, "cc", 3.0, 1590738995000 ]
+ - [13, "cc", 4.0, 1590738996000 ]
+ - [14, "cc", 5.0, 1590738997000 ]
+ - [15, "dd", 6.0, 1590738998000 ]
+ - [16, "dd", 7.0, 1590738999000 ]
+ sql: |
+ SELECT id, c1, c6, c7 FROM {0};
+ expect:
+ order: id
+ columns: ["id int", "c1 string", "c6 double", "c7 timestamp"]
+ rows:
+ - [ 1, "aa", 1.0, 1590738990000]
+ - [ 2, "aa", 2.0, 1590738991000]
+ - [ 3, "aa", 3.0, 1590738992000]
+ - [ 4, "aa", 4.0, 1590738993000]
+ - [ 5, "bb", 5.0, 1590738994000]
+ - [ 6, "bb", 6.0, 1590738995000]
+ - [ 7, "bb", 7.0, 1590738996000]
+ - [ 8, "bb", 8.0, 1590738997000]
+ - [ 9, "bb", 9.0, 1590738998000]
+ - [ 10, "cc", 1.0, 1590738993000]
+ - [ 11, "cc", 2.0, 1590738994000]
+ - [ 12, "cc", 3.0, 1590738995000]
+ - [ 13, "cc", 4.0, 1590738996000]
+ - [ 14, "cc", 5.0, 1590738997000]
+ - [ 15, "dd", 6.0, 1590738998000]
+ - [ 16, "dd", 7.0, 1590738999000]
+
+ -
+ id: 1
+      desc: SELECT columns, some tablet result sets are empty
+ inputs:
+ -
+ columns: ["id int", "c1 string","c6 double","c7 timestamp"]
+ indexs: ["index1:c1:c7"]
+ rows:
+ - [1, "aa", 1.0, 1590738990000]
+ - [2, "aa", 2.0, 1590738991000]
+ - [3, "aa", 3.0, 1590738992000]
+ - [4, "aa", 4.0, 1590738993000]
+ - [15, "dd", 6.0, 1590738998000 ]
+ - [16, "dd", 7.0, 1590738999000 ]
+ sql: |
+ SELECT id, c1, c6, c7 FROM {0};
+ expect:
+ order: id
+ columns: ["id int", "c1 string", "c6 double", "c7 timestamp"]
+ rows:
+ - [ 1, "aa", 1.0, 1590738990000]
+ - [ 2, "aa", 2.0, 1590738991000]
+ - [ 3, "aa", 3.0, 1590738992000]
+ - [ 4, "aa", 4.0, 1590738993000]
+ - [ 15, "dd", 6.0, 1590738998000]
+ - [ 16, "dd", 7.0, 1590738999000]
+ -
+ id: 2
+ desc: SELECT simple expression
+ inputs:
+ -
+ columns: ["id int", "c1 string","c6 double","c7 timestamp"]
+ indexs: ["index1:c1:c7"]
+ rows:
+ - [1, "aa", 1.0, 1590738990000]
+ - [2, "aa", 2.0, 1590738991000]
+ - [3, "aa", 3.0, 1590738992000]
+ - [4, "aa", 4.0, 1590738993000]
+ - [5, "bb", 5.0, 1590738994000]
+ - [6, "bb", 6.0, 1590738995000]
+ - [7, "bb", 7.0, 1590738996000]
+ - [8, "bb", 8.0, 1590738997000]
+ - [9, "bb", 9.0, 1590738998000]
+ - [10, "cc", 1.0, 1590738993000]
+ - [11, "cc", 2.0, 1590738994000 ]
+ - [12, "cc", 3.0, 1590738995000 ]
+ - [13, "cc", 4.0, 1590738996000 ]
+ - [14, "cc", 5.0, 1590738997000 ]
+ - [15, "dd", 6.0, 1590738998000 ]
+ - [16, "dd", 7.0, 1590738999000 ]
+ sql: |
+ SELECT id, c1, c6+1.0 as f1, c7, year(c7) as f2 FROM {0};
+ expect:
+ order: id
+ columns: ["id int", "c1 string", "f1 double", "c7 timestamp", "f2 int"]
+ rows:
+ - [ 1, "aa", 2.0, 1590738990000, 2020]
+ - [ 2, "aa", 3.0, 1590738991000, 2020]
+ - [ 3, "aa", 4.0, 1590738992000, 2020]
+ - [ 4, "aa", 5.0, 1590738993000, 2020]
+ - [ 5, "bb", 6.0, 1590738994000, 2020]
+ - [ 6, "bb", 7.0, 1590738995000, 2020]
+ - [ 7, "bb", 8.0, 1590738996000, 2020]
+ - [ 8, "bb", 9.0, 1590738997000, 2020]
+ - [ 9, "bb", 10.0, 1590738998000, 2020]
+ - [ 10, "cc", 2.0, 1590738993000, 2020]
+ - [ 11, "cc", 3.0, 1590738994000, 2020]
+ - [ 12, "cc", 4.0, 1590738995000, 2020]
+ - [ 13, "cc", 5.0, 1590738996000, 2020]
+ - [ 14, "cc", 6.0, 1590738997000, 2020]
+ - [ 15, "dd", 7.0, 1590738998000, 2020]
+ - [ 16, "dd", 8.0, 1590738999000, 2020]
+
+ -
+ id: 3
+ desc: SELECT simple expression LIMIT 10
+ mode: request-unsupport
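+      # LIMIT without an ORDER BY does not pin down which rows are returned, so the expectation
+      # below only asserts the row count.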
+ inputs:
+ -
+ columns: ["id int", "c1 string","c6 double","c7 timestamp"]
+ indexs: ["index1:c1:c7"]
+ rows:
+ - [1, "aa", 1.0, 1590738990000]
+ - [2, "aa", 2.0, 1590738991000]
+ - [3, "aa", 3.0, 1590738992000]
+ - [4, "aa", 4.0, 1590738993000]
+ - [5, "bb", 5.0, 1590738994000]
+ - [6, "bb", 6.0, 1590738995000]
+ - [7, "bb", 7.0, 1590738996000]
+ - [8, "bb", 8.0, 1590738997000]
+ - [9, "bb", 9.0, 1590738998000]
+ - [10, "cc", 1.0, 1590738993000]
+ - [11, "cc", 2.0, 1590738994000 ]
+ - [12, "cc", 3.0, 1590738995000 ]
+ - [13, "cc", 4.0, 1590738996000 ]
+ - [14, "cc", 5.0, 1590738997000 ]
+ - [15, "dd", 6.0, 1590738998000 ]
+ - [16, "dd", 7.0, 1590738999000 ]
+ sql: |
+ SELECT id, c1, c6+1.0 as f1, c7, year(c7) as f2 FROM {0} LIMIT 10;
+ expect:
+ order: id
+ columns: ["id int", "c1 string", "f1 double", "c7 timestamp", "f2 int"]
+ count: 10
+ -
+ id: 4
+ desc: SELECT simple expression LIMIT 3
+ mode: request-unsupport
+ inputs:
+ -
+ columns: ["id int", "c1 string","c6 double","c7 timestamp"]
+ indexs: ["index1:c1:c7"]
+ rows:
+ - [1, "aa", 1.0, 1590738990000]
+ - [2, "aa", 2.0, 1590738991000]
+ - [3, "aa", 3.0, 1590738992000]
+ - [4, "aa", 4.0, 1590738993000]
+ - [5, "bb", 5.0, 1590738994000]
+ - [6, "bb", 6.0, 1590738995000]
+ - [7, "bb", 7.0, 1590738996000]
+ - [8, "bb", 8.0, 1590738997000]
+ - [9, "bb", 9.0, 1590738998000]
+ - [10, "cc", 1.0, 1590738993000]
+ - [11, "cc", 2.0, 1590738994000 ]
+ - [12, "cc", 3.0, 1590738995000 ]
+ - [13, "cc", 4.0, 1590738996000 ]
+ - [14, "cc", 5.0, 1590738997000 ]
+ - [15, "dd", 6.0, 1590738998000 ]
+ - [16, "dd", 7.0, 1590738999000 ]
+ sql: |
+ SELECT id, c1, c6+1.0 as f1, c7, year(c7) as f2 FROM {0} LIMIT 3;
+ expect:
+ order: id
+ columns: ["id int", "c1 string", "f1 double", "c7 timestamp", "f2 int"]
+ count: 3
\ No newline at end of file
diff --git a/cases/integration_test/cluster/test_window_row.yaml b/cases/integration_test/cluster/test_window_row.yaml
new file mode 100644
index 00000000000..35f200af520
--- /dev/null
+++ b/cases/integration_test/cluster/test_window_row.yaml
@@ -0,0 +1,216 @@
+# Copyright 2021 4Paradigm
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+db: test_zw
+debugs: []
+version: 0.5.0
+cases:
+ -
+ id: 0
+      desc: simple ROWS window
+ inputs:
+ -
+ columns: ["id int", "c1 string","c6 double","c7 timestamp"]
+ indexs: ["index1:c1:c7"]
+ rows:
+ - [1, "aa", 1.0, 1590738990000]
+ - [2, "aa", 2.0, 1590738991000]
+ - [3, "aa", 3.0, 1590738992000]
+ - [4, "aa", 4.0, 1590738993000]
+ - [5, "bb", 5.0, 1590738994000]
+ - [6, "bb", 6.0, 1590738995000]
+ - [7, "bb", 7.0, 1590738996000]
+ - [8, "bb", 8.0, 1590738997000]
+ - [9, "bb", 9.0, 1590738998000]
+ - [10, "cc", 1.0, 1590738993000]
+ - [11, "cc", 2.0, 1590738994000 ]
+ - [12, "cc", 3.0, 1590738995000 ]
+ - [13, "cc", 4.0, 1590738996000 ]
+ - [14, "cc", 5.0, 1590738997000 ]
+ - [15, "dd", 6.0, 1590738998000 ]
+ - [16, "dd", 7.0, 1590738999000 ]
+ sql: |
+ SELECT id, c1, c6, c7, min(c6) OVER w1 as w1_c6_min, count(id) OVER w1 as w1_cnt FROM {0} WINDOW
+ w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS BETWEEN 2 PRECEDING AND CURRENT ROW);
+ expect:
+ order: id
+ columns: ["id int", "c1 string", "c6 double", "c7 timestamp", "w1_c6_min double","w1_cnt bigint"]
+ rows:
+ - [ 1, "aa", 1.0, 1590738990000, 1.0, 1]
+ - [ 2, "aa", 2.0, 1590738991000, 1.0, 2]
+ - [ 3, "aa", 3.0, 1590738992000, 1.0, 3]
+ - [ 4, "aa", 4.0, 1590738993000, 2.0, 3]
+ - [ 5, "bb", 5.0, 1590738994000, 5.0, 1]
+ - [ 6, "bb", 6.0, 1590738995000, 5.0, 2]
+ - [ 7, "bb", 7.0, 1590738996000, 5.0, 3]
+ - [ 8, "bb", 8.0, 1590738997000, 6.0, 3]
+ - [ 9, "bb", 9.0, 1590738998000, 7.0, 3]
+ - [ 10, "cc", 1.0, 1590738993000, 1.0, 1]
+ - [ 11, "cc", 2.0, 1590738994000, 1.0, 2]
+ - [ 12, "cc", 3.0, 1590738995000, 1.0, 3]
+ - [ 13, "cc", 4.0, 1590738996000, 2.0, 3]
+ - [ 14, "cc", 5.0, 1590738997000, 3.0, 3]
+ - [ 15, "dd", 6.0, 1590738998000, 6.0, 1]
+ - [ 16, "dd", 7.0, 1590738999000, 6.0, 2]
+ -
+ id: 1
+      desc: simple ROWS window, UNION secondary table
+ mode: cluster-unsupport
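+    # Rows from the UNION-ed table {1} only widen the window; the result still contains exactly one
+    # output row per row of the main table {0}.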
+ inputs:
+ - columns: [ "id int", "c1 string","c6 double","c7 timestamp" ]
+ indexs: [ "index1:c1:c7" ]
+ rows:
+ - [ 1, "aa", 1.0, 1590738990000 ]
+ - [ 2, "aa", 4.0, 1590738993000 ]
+ - [ 3, "bb", 5.0, 1590738994000 ]
+ - [ 4, "bb", 9.0, 1590738998000 ]
+ - [ 5, "cc", 1.0, 1590738993000 ]
+ - [ 6, "cc", 5.0, 1590738997000 ]
+ - [ 7, "dd", 7.0, 1590738999000 ]
+ -
+ columns: ["x1 string","x6 double","x7 timestamp"]
+ indexs: ["index1:x1:x7"]
+ rows:
+ - ["aa", 2.0, 1590738991000]
+ - ["aa", 3.0, 1590738992000]
+ - ["bb", 6.0, 1590738995000]
+ - ["bb", 7.0, 1590738996000]
+ - ["bb", 8.0, 1590738997000]
+ - ["cc", 2.0, 1590738994000 ]
+ - ["cc", 3.0, 1590738995000 ]
+ - ["cc", 4.0, 1590738996000 ]
+ - ["dd", 6.0, 1590738998000 ]
+ sql: |
+ SELECT id, c1, c6, c7, min(c6) OVER w1 as w1_c6_min, count(id) OVER w1 as w1_cnt FROM {0} WINDOW
+ w1 AS (UNION (select 0 as id, x1 as c1, x6 as c6, x7 as c7 from {1}) as t2 PARTITION BY c1 ORDER BY c7 ROWS BETWEEN 2 PRECEDING AND CURRENT ROW);
+ expect:
+ order: id
+ columns: ["id int", "c1 string", "c6 double", "c7 timestamp", "w1_c6_min double","w1_cnt bigint"]
+ rows:
+ - [ 1, "aa", 1.0, 1590738990000, 1.0, 1]
+ - [ 2, "aa", 4.0, 1590738993000, 2.0, 3]
+ - [ 3, "bb", 5.0, 1590738994000, 5.0, 1]
+ - [ 4, "bb", 9.0, 1590738998000, 7.0, 3]
+ - [ 5, "cc", 1.0, 1590738993000, 1.0, 1]
+ - [ 6, "cc", 5.0, 1590738997000, 3.0, 3]
+ - [ 7, "dd", 7.0, 1590738999000, 6.0, 2]
+ -
+ id: 2
+      desc: 2 windows with different partition keys
+ inputs:
+ -
+ columns : ["id int","c1 string","c3 int","c4 bigint","c7 timestamp"]
+ indexs: ["index1:c1:c7", "index3:c3:c7"]
+ rows:
+ - [1,"aa",20,30,1590738990000]
+ - [2,"aa",20,31,1590738991000]
+ - [3,"bb",20,32,1590738992000]
+ - [4,"bb",20,33,1590738993000]
+ - [5,"cc",21,34,1590738994000]
+ - [6,"aa",21,35,1590738995000]
+ - [7,"aa",21,36,1590738996000]
+ sql: |
+ SELECT id, c1, c3, sum(c4) OVER w1 as w1_c4_sum, count(c4) OVER w2 as w2_c4_count FROM {0} WINDOW
+ w1 AS (PARTITION BY c3 ORDER BY c7 ROWS BETWEEN 2 PRECEDING AND CURRENT ROW),
+ w2 AS (PARTITION BY c1 ORDER BY c7 ROWS BETWEEN 2 PRECEDING AND CURRENT ROW);
+ expect:
+ order: id
+ columns: ["id int","c1 string","c3 int","w1_c4_sum bigint","w2_c4_count bigint"]
+ rows:
+ - [1,"aa",20,30,1]
+ - [2,"aa",20,61,2]
+ - [3,"bb",20,93,1]
+ - [4,"bb",20,96,2]
+ - [5,"cc",21,34,1]
+ - [6,"aa",21,69,3]
+ - [7,"aa",21,105,3]
+ -
+ id: 3
+      desc: 3 windows with different partition keys
+ inputs:
+ -
+ columns : ["id int","c1 string", "c2 string", "c3 int","c4 bigint","c7 timestamp"]
+ indexs: ["index1:c1:c7", "index2:c2:c7", "index3:c3:c7"]
+ rows:
+ - [1,"aa", "1", 20,30,1590738990000]
+ - [2,"aa", "2", 20,31,1590738991000]
+ - [3,"bb", "1", 20,32,1590738992000]
+ - [4,"bb", "2", 20,33,1590738993000]
+ - [5,"cc", "1", 21,34,1590738994000]
+ - [6,"aa", "1", 21,35,1590738995000]
+ - [7,"aa", "1", 21,36,1590738996000]
+ sql: |
+ SELECT id, c1, c2, c3,
+ count(id) OVER w1 as w1_count,
+ count(id) OVER w2 as w2_count,
+ sum(c4) OVER w3 as w3_c4_sum
+ FROM {0} WINDOW
+ w1 AS (PARTITION BY c1 ORDER BY c7 ROWS BETWEEN 2 PRECEDING AND CURRENT ROW),
+ w2 AS (PARTITION BY c2 ORDER BY c7 ROWS BETWEEN 3 PRECEDING AND CURRENT ROW),
+ w3 AS (PARTITION BY c3 ORDER BY c7 ROWS BETWEEN 2 PRECEDING AND CURRENT ROW);
+ expect:
+ order: id
+ columns: ["id int","c1 string", "c2 string", "c3 int","w1_count bigint","w2_count bigint", "w3_c4_sum bigint"]
+ rows:
+ - [1,"aa", "1", 20, 1, 1, 30]
+ - [2,"aa", "2", 20, 2, 1, 61]
+ - [3,"bb", "1", 20, 1, 2, 93]
+ - [4,"bb", "2", 20, 2, 2, 96]
+ - [5,"cc", "1", 21, 1, 3, 34]
+ - [6,"aa", "1", 21, 3, 4, 69]
+ - [7,"aa", "1", 21, 3, 4, 105]
+
+ - id: 4
+      desc: simple ROWS window, UNION secondary table, main-table rows not in the window, 400k rows
+ tags: ["TODO", "@baoxinqi, batch request unsupport"]
+ inputs:
+ - columns: [ "id int", "c1 string","c6 double","c7 timestamp" ]
+ indexs: [ "index1:c1:c7" ]
+ rows:
+ - [ 2, "aa", 4.0, 1590738993000 ]
+ - columns: [ "x1 string","x6 double","x7 timestamp" ]
+ indexs: [ "index1:x1:x7" ]
+ repeat: 400
+ rows:
+ - [ "aa", 2.0, 1590738991000 ]
+
+ sql: |
+ SELECT id, c1, c6, c7, count(id) OVER w1 as w1_cnt, distinct_count(id) OVER w1 as w1_dis_cnt FROM {0} WINDOW
+ w1 AS (UNION (select 0 as id, x1 as c1, x6 as c6, x7 as c7 from {1}) as t2 PARTITION BY c1 ORDER BY c7 ROWS BETWEEN 400000 PRECEDING AND CURRENT ROW);
+ expect:
+ order: id
+ columns: [ "id int", "c1 string", "c6 double", "c7 timestamp", "w1_cnt bigint", "w1_dis_cnt bigint" ]
+ rows:
+ - [ 2, "aa", 4.0, 1590738993000, 400001, 2 ]
+ - id: 5
+      desc: simple ROWS window, UNION secondary table, main-table rows not in the window 3 4w
+ mode: batch-request-unsupport, cli-unsupport
+ inputs:
+ - columns: [ "id int", "c1 string","c6 double","c7 timestamp" ]
+ indexs: [ "index1:c1:c7" ]
+ rows:
+ - [ 2, "aa", 4.0, 1590738993000 ]
+ - columns: [ "x1 string","x6 double","x7 timestamp" ]
+ indexs: [ "index1:x1:x7" ]
+ repeat: 400
+ rows:
+ - [ "aa", 2.0, 1590738991000 ]
+
+ sql: |
+ SELECT id, c1, c6, c7, min(c6) OVER w1 as w1_min_c6, count(id) OVER w1 as w1_cnt FROM {0} WINDOW
+ w1 AS (UNION (select 0 as id, x1 as c1, x6 as c6, x7 as c7 from {1}) as t2 PARTITION BY c1 ORDER BY c7 ROWS BETWEEN 400000 PRECEDING AND CURRENT ROW);
+ expect:
+ order: id
+ columns: [ "id int", "c1 string", "c6 double", "c7 timestamp", "w1_min_c6 double", "w1_cnt bigint" ]
+ rows:
+ - [ 2, "aa", 4.0, 1590738993000, 2.0, 401 ]
diff --git a/cases/integration_test/cluster/test_window_row_range.yaml b/cases/integration_test/cluster/test_window_row_range.yaml
new file mode 100644
index 00000000000..476336fe4c0
--- /dev/null
+++ b/cases/integration_test/cluster/test_window_row_range.yaml
@@ -0,0 +1,172 @@
+# Copyright 2021 4Paradigm
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+db: test_zw
+debugs: []
+version: 0.5.0
+cases:
+ -
+ id: 0
+      desc: simple ROWS_RANGE window
+ inputs:
+ -
+ columns: ["id int", "c1 string","c6 double","c7 timestamp"]
+ indexs: ["index1:c1:c7"]
+ rows:
+ - [1, "aa", 1.0, 1590738990000]
+ - [2, "aa", 2.0, 1590738991000]
+ - [3, "aa", 3.0, 1590738992000]
+ - [4, "aa", 4.0, 1590738993000]
+ - [5, "bb", 5.0, 1590738994000]
+ - [6, "bb", 6.0, 1590738995000]
+ - [7, "bb", 7.0, 1590738996000]
+ - [8, "bb", 8.0, 1590738997000]
+ - [9, "bb", 9.0, 1590738998000]
+ - [10, "cc", 1.0, 1590738993000]
+ - [11, "cc", 2.0, 1590738994000 ]
+ - [12, "cc", 3.0, 1590738995000 ]
+ - [13, "cc", 4.0, 1590738996000 ]
+ - [14, "cc", 5.0, 1590738997000 ]
+ - [15, "dd", 6.0, 1590738998000 ]
+ - [16, "dd", 7.0, 1590738999000 ]
+ sql: |
+ SELECT id, c1, c6, c7, min(c6) OVER w1 as w1_c6_min, count(id) OVER w1 as w1_cnt FROM {0} WINDOW
+ w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS_RANGE BETWEEN 2s PRECEDING AND CURRENT ROW);
+ expect:
+ order: id
+ columns: ["id int", "c1 string", "c6 double", "c7 timestamp", "w1_c6_min double","w1_cnt bigint"]
+ rows:
+ - [ 1, "aa", 1.0, 1590738990000, 1.0, 1]
+ - [ 2, "aa", 2.0, 1590738991000, 1.0, 2]
+ - [ 3, "aa", 3.0, 1590738992000, 1.0, 3]
+ - [ 4, "aa", 4.0, 1590738993000, 2.0, 3]
+ - [ 5, "bb", 5.0, 1590738994000, 5.0, 1]
+ - [ 6, "bb", 6.0, 1590738995000, 5.0, 2]
+ - [ 7, "bb", 7.0, 1590738996000, 5.0, 3]
+ - [ 8, "bb", 8.0, 1590738997000, 6.0, 3]
+ - [ 9, "bb", 9.0, 1590738998000, 7.0, 3]
+ - [ 10, "cc", 1.0, 1590738993000, 1.0, 1]
+ - [ 11, "cc", 2.0, 1590738994000, 1.0, 2]
+ - [ 12, "cc", 3.0, 1590738995000, 1.0, 3]
+ - [ 13, "cc", 4.0, 1590738996000, 2.0, 3]
+ - [ 14, "cc", 5.0, 1590738997000, 3.0, 3]
+ - [ 15, "dd", 6.0, 1590738998000, 6.0, 1]
+ - [ 16, "dd", 7.0, 1590738999000, 6.0, 2]
+ -
+ id: 1
+      desc: simple ROWS_RANGE window, UNION secondary table, main-table rows included in the window
+ mode: cluster-unsupport
+ inputs:
+ - columns: [ "id int", "c1 string","c6 double","c7 timestamp" ]
+ indexs: [ "index1:c1:c7" ]
+ rows:
+ - [ 1, "aa", 1.0, 1590738990000 ]
+ - [ 2, "aa", 4.0, 1590738993000 ]
+ - [ 3, "bb", 5.0, 1590738994000 ]
+ - [ 4, "bb", 9.0, 1590738998000 ]
+ - [ 5, "cc", 1.0, 1590738993000 ]
+ - [ 6, "cc", 5.0, 1590738997000 ]
+ - [ 7, "dd", 7.0, 1590738999000 ]
+ -
+ columns: ["x1 string","x6 double","x7 timestamp"]
+ indexs: ["index1:x1:x7"]
+ rows:
+ - ["aa", 2.0, 1590738991000]
+ - ["aa", 3.0, 1590738992000]
+ - ["bb", 6.0, 1590738995000]
+ - ["bb", 7.0, 1590738996000]
+ - ["bb", 8.0, 1590738997000]
+ - ["cc", 2.0, 1590738994000 ]
+ - ["cc", 3.0, 1590738995000 ]
+ - ["cc", 4.0, 1590738996000 ]
+ - ["dd", 6.0, 1590738998000 ]
+ sql: |
+ SELECT id, c1, c6, c7, min(c6) OVER w1 as w1_c6_min, count(id) OVER w1 as w1_cnt FROM {0} WINDOW
+ w1 AS (UNION (select 0 as id, x1 as c1, x6 as c6, x7 as c7 from {1}) as t2 PARTITION BY c1 ORDER BY c7 ROWS_RANGE
+ BETWEEN 2s PRECEDING AND CURRENT ROW);
+ expect:
+ order: id
+ columns: ["id int", "c1 string", "c6 double", "c7 timestamp", "w1_c6_min double","w1_cnt bigint"]
+ rows:
+ - [ 1, "aa", 1.0, 1590738990000, 1.0, 1]
+ - [ 2, "aa", 4.0, 1590738993000, 2.0, 3]
+ - [ 3, "bb", 5.0, 1590738994000, 5.0, 1]
+ - [ 4, "bb", 9.0, 1590738998000, 7.0, 3]
+ - [ 5, "cc", 1.0, 1590738993000, 1.0, 1]
+ - [ 6, "cc", 5.0, 1590738997000, 3.0, 3]
+ - [ 7, "dd", 7.0, 1590738999000, 6.0, 2]
+ -
+ id: 2
+      desc: 2 windows with different partition keys
+ inputs:
+ -
+ columns : ["id int","c1 string","c3 int","c4 bigint","c7 timestamp"]
+ indexs: ["index1:c1:c7", "index3:c3:c7"]
+ rows:
+ - [1,"aa",20,30,1590738990000]
+ - [2,"aa",20,31,1590738991000]
+ - [3,"bb",20,32,1590738992000]
+ - [4,"bb",20,33,1590738993000]
+ - [5,"cc",21,34,1590738994000]
+ - [6,"aa",21,35,1590738995000]
+ - [7,"aa",21,36,1590738996000]
+ sql: |
+ SELECT id, c1, c3, sum(c4) OVER w1 as w1_c4_sum, count(c4) OVER w2 as w2_c4_count FROM {0} WINDOW
+ w1 AS (PARTITION BY c3 ORDER BY c7 ROWS_RANGE BETWEEN 2s PRECEDING AND CURRENT ROW),
+ w2 AS (PARTITION BY c1 ORDER BY c7 ROWS_RANGE BETWEEN 2s PRECEDING AND CURRENT ROW);
+ expect:
+ order: id
+ columns: ["id int","c1 string","c3 int","w1_c4_sum bigint","w2_c4_count bigint"]
+ rows:
+ - [1,"aa",20,30,1]
+ - [2,"aa",20,61,2]
+ - [3,"bb",20,93,1]
+ - [4,"bb",20,96,2]
+ - [5,"cc",21,34,1]
+ - [6,"aa",21,69,1]
+ - [7,"aa",21,105,2]
+ -
+ id: 3
+      desc: 3 windows with different partition keys
+ inputs:
+ -
+ columns : ["id int","c1 string", "c2 string", "c3 int","c4 bigint","c7 timestamp"]
+ indexs: ["index1:c1:c7", "index2:c2:c7", "index3:c3:c7"]
+ rows:
+ - [1,"aa", "1", 20,30,1590738990000]
+ - [2,"aa", "2", 20,31,1590738991000]
+ - [3,"bb", "1", 20,32,1590738992000]
+ - [4,"bb", "2", 20,33,1590738993000]
+ - [5,"cc", "1", 21,34,1590738994000]
+ - [6,"aa", "1", 21,35,1590738995000]
+ - [7,"aa", "1", 21,36,1590738996000]
+ sql: |
+ SELECT id, c1, c2, c3,
+ count(id) OVER w1 as w1_count,
+ count(id) OVER w2 as w2_count,
+ sum(c4) OVER w3 as w3_c4_sum
+ FROM {0} WINDOW
+ w1 AS (PARTITION BY c1 ORDER BY c7 ROWS_RANGE BETWEEN 2s PRECEDING AND CURRENT ROW),
+ w2 AS (PARTITION BY c2 ORDER BY c7 ROWS_RANGE BETWEEN 3s PRECEDING AND CURRENT ROW),
+ w3 AS (PARTITION BY c3 ORDER BY c7 ROWS_RANGE BETWEEN 2s PRECEDING AND CURRENT ROW);
+ expect:
+ order: id
+ columns: ["id int","c1 string", "c2 string", "c3 int","w1_count bigint","w2_count bigint", "w3_c4_sum bigint"]
+ rows:
+ - [1,"aa", "1", 20, 1, 1, 30]
+ - [2,"aa", "2", 20, 2, 1, 61]
+ - [3,"bb", "1", 20, 1, 2, 93]
+ - [4,"bb", "2", 20, 2, 2, 96]
+ - [5,"cc", "1", 21, 1, 2, 34]
+ - [6,"aa", "1", 21, 1, 3, 69]
+ - [7,"aa", "1", 21, 2, 3, 105]
diff --git a/cases/integration_test/cluster/window_and_lastjoin.yaml b/cases/integration_test/cluster/window_and_lastjoin.yaml
new file mode 100644
index 00000000000..c20e6e070ee
--- /dev/null
+++ b/cases/integration_test/cluster/window_and_lastjoin.yaml
@@ -0,0 +1,620 @@
+# Copyright 2021 4Paradigm
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+db: test_zw
+debugs: []
+version: 0.5.0
+cases:
+ -
+ id: 0
+      desc: simple two-table LAST JOIN
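+      # LAST JOIN keeps every row of {0} and attaches the last matching row of {1} ordered by
+      # crd_lst_isu_dte; when no row satisfies the condition (id 3) the right-side columns are NULL.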
+ inputs:
+ -
+ columns : ["id int", "card_no string","merchant_id int", "trx_time timestamp", "trx_amt float"]
+
+ indexs: ["index1:card_no:trx_time"]
+ rows:
+ - [1, "aaaaaaaaaa",1, 1590738989000, 1.1]
+ - [2, "aaaaaaaaaa",1, 1590738990000, 2.2]
+ - [3, "bb",2, 1590738990000, 3.3]
+ - [4, "cc",3, 1590738990000, 4.0]
+ - [5, "cc",3, 1590738991000, 5.0]
+ - [6, "cc",3, 1590738992000, 6.0]
+ - [7, "cc",2, 1590738993000, 7.0]
+ -
+ columns : ["crd_lst_isu_dte timestamp", "merchant_nbr int"]
+ indexs: ["index2:merchant_nbr:crd_lst_isu_dte"]
+ rows:
+ - [1590738988000, 1]
+ - [1590738990000, 1]
+ - [1590738991000, 2]
+ - [1590738989000, 3]
+ - [1590738992000, 3]
+ sql: |
+ select id, card_no, merchant_id, trx_time, crd_lst_isu_dte, merchant_nbr from {0}
+ last join {1} order by {1}.crd_lst_isu_dte on {0}.merchant_id = {1}.merchant_nbr and {0}.trx_time >= {1}.crd_lst_isu_dte;
+ expect:
+ columns: ["id int", "card_no string", "merchant_id int", "trx_time timestamp",
+ "crd_lst_isu_dte timestamp", "merchant_nbr int"]
+ order: id
+ rows:
+ - [1, "aaaaaaaaaa", 1, 1590738989000, 1590738988000, 1]
+ - [2, "aaaaaaaaaa", 1, 1590738990000, 1590738990000, 1]
+ - [3, "bb", 2, 1590738990000, null, null]
+ - [4, "cc", 3, 1590738990000, 1590738989000, 3]
+ - [5, "cc", 3, 1590738991000, 1590738989000, 3]
+ - [6, "cc", 3, 1590738992000, 1590738992000, 3]
+ - [7, "cc", 2, 1590738993000, 1590738991000, 2]
+ -
+ id: 1
+      desc: three-table LAST JOIN
+ inputs:
+ -
+ columns : ["id int", "card_no string","merchant_id int", "user string", "trx_time timestamp", "trx_amt float"]
+
+ indexs: ["index1:card_no:trx_time"]
+ rows:
+ - [1, "aaaaaaaaaa",1, "user1", 1590738989000, 1.1]
+ - [2, "aaaaaaaaaa",1, "user2", 1590738990000, 2.2]
+ - [3, "bb",2, "user3", 1590738990000, 3.3]
+ - [4, "cc",3, "user4", 1590738990000, 4.0]
+ - [5, "cc",3, "user5", 1590738991000, 5.0]
+ - [6, "cc",3, "user6", 1590738992000, 6.0]
+ - [7, "cc",2, "user7", 1590738993000, 7.0]
+ -
+ columns : ["crd_lst_isu_dte timestamp", "merchant_nbr int"]
+ indexs: ["index2:merchant_nbr:crd_lst_isu_dte"]
+ rows:
+ - [1590738988000, 1]
+ - [1590738990000, 1]
+ - [1590738991000, 2]
+ - [1590738989000, 3]
+ - [1590738992000, 3]
+ - columns: [ "std_ts timestamp", "username string" ]
+ indexs: [ "index2:username:std_ts" ]
+ rows:
+ - [ 1590738988000, "user1"]
+ - [ 1590738990000, "user1"]
+ - [ 1590738991000, "user2"]
+ - [ 1590738989000, "user2"]
+ - [ 1590738992000, "user3" ]
+ sql: |
+ select id, card_no, merchant_id, user, trx_time, crd_lst_isu_dte, merchant_nbr, std_ts, username from {0}
+ last join {1} order by {1}.crd_lst_isu_dte on {0}.merchant_id = {1}.merchant_nbr and {0}.trx_time >= {1}.crd_lst_isu_dte
+ last join {2} order by {2}.std_ts on {0}.user = {2}.username;
+ expect:
+ columns: ["id int", "card_no string", "merchant_id int", "user string", "trx_time timestamp",
+ "crd_lst_isu_dte timestamp", "merchant_nbr int", "std_ts timestamp", "username string"]
+ order: id
+ rows:
+ - [1, "aaaaaaaaaa", 1, "user1", 1590738989000, 1590738988000, 1, 1590738990000, "user1"]
+ - [2, "aaaaaaaaaa", 1, "user2", 1590738990000, 1590738990000, 1, 1590738991000, "user2"]
+ - [3, "bb", 2, "user3", 1590738990000, null, null, 1590738992000, "user3", ]
+ - [4, "cc", 3, "user4", 1590738990000, 1590738989000, 3, null, null]
+ - [5, "cc", 3, "user5", 1590738991000, 1590738989000, 3, null, null]
+ - [6, "cc", 3, "user6", 1590738992000, 1590738992000, 3, null, null]
+ - [7, "cc", 2, "user7", 1590738993000, 1590738991000, 2, null, null]
+ -
+ id: 2
+      desc: three-table LAST JOIN 2
+ inputs:
+ -
+ columns : ["id int", "card_no string","merchant_id int", "user string", "trx_time timestamp", "trx_amt float"]
+
+ indexs: ["index1:card_no:trx_time"]
+ rows:
+ - [1, "aaaaaaaaaa",1, "user1", 1590738989000, 1.1]
+ - [2, "aaaaaaaaaa",1, "user2", 1590738990000, 2.2]
+ - [3, "bb",2, "user3", 1590738990000, 3.3]
+ - [4, "cc",3, "user4", 1590738990000, 4.0]
+ - [5, "cc",3, "user5", 1590738991000, 5.0]
+ - [6, "cc",3, "user6", 1590738992000, 6.0]
+ - [7, "cc",2, "user7", 1590738993000, 7.0]
+ -
+ columns : ["crd_lst_isu_dte timestamp", "merchant_nbr int", "product_nbr bigint"]
+ indexs: ["index2:merchant_nbr:crd_lst_isu_dte"]
+ rows:
+ - [1590738988000, 1, 1001]
+ - [1590738990000, 1, 1002]
+ - [1590738991000, 2, 1003]
+ - [1590738989000, 3, 1004]
+ - [1590738992000, 3, 1005]
+ - columns: [ "std_ts timestamp", "product_id bigint" ]
+ indexs: [ "index2:product_id:std_ts" ]
+ rows:
+ - [ 1590738988000, 1001]
+ - [ 1590738990000, 1001]
+ - [ 1590738991000, 1001]
+ - [ 1590738989000, 1002]
+ - [ 1590738992000, 1002]
+ - [ 1590738993000, 1005]
+ sql: |
+ select id, card_no, merchant_id, user, trx_time, crd_lst_isu_dte, merchant_nbr, product_nbr, std_ts, product_id from {0}
+ last join {1} order by {1}.crd_lst_isu_dte on {0}.merchant_id = {1}.merchant_nbr and {0}.trx_time >= {1}.crd_lst_isu_dte
+ last join {2} order by {2}.std_ts on {1}.product_nbr = {2}.product_id;
+ expect:
+ columns: ["id int", "card_no string", "merchant_id int", "user string", "trx_time timestamp",
+ "crd_lst_isu_dte timestamp", "merchant_nbr int", "product_nbr bigint", "std_ts timestamp", "product_id bigint"]
+ order: id
+ rows:
+ - [1, "aaaaaaaaaa", 1, "user1", 1590738989000, 1590738988000, 1, 1001, 1590738991000, 1001]
+ - [2, "aaaaaaaaaa", 1, "user2", 1590738990000, 1590738990000, 1, 1002, 1590738992000, 1002]
+ - [3, "bb", 2, "user3", 1590738990000, null, null, null, null, null]
+ - [4, "cc", 3, "user4", 1590738990000, 1590738989000, 3, 1004, null, null]
+ - [5, "cc", 3, "user5", 1590738991000, 1590738989000, 3, 1004, null, null]
+ - [6, "cc", 3, "user6", 1590738992000, 1590738992000, 3, 1005, 1590738993000, 1005]
+ - [7, "cc", 2, "user7", 1590738993000, 1590738991000, 2, 1003, null, null]
+ -
+ id: 3
+      desc: window features joined with a secondary table
+ inputs:
+ -
+ columns : ["id int", "card_no string","merchant_id int", "trx_time timestamp", "trx_amt float"]
+
+ indexs: ["index1:card_no:trx_time"]
+ rows:
+ - [1, "aaaaaaaaaa",1, 1590738989000, 1.1]
+ - [2, "aaaaaaaaaa",1, 1590738990000, 2.2]
+ - [3, "bb",2, 1590738990000, 3.3]
+ - [4, "cc",3, 1590738990000, 4.0]
+ - [5, "cc",3, 1590738991000, 5.0]
+ - [6, "cc",3, 1590738992000, 6.0]
+ - [7, "cc",2, 1590738993000, 7.0]
+ -
+ columns : ["crd_lst_isu_dte timestamp", "merchant_nbr int"]
+ indexs: ["index2:merchant_nbr:crd_lst_isu_dte"]
+ rows:
+ - [1590738988000, 1]
+ - [1590738990000, 1]
+ - [1590738991000, 2]
+ - [1590738989000, 3]
+ - [1590738992000, 3]
+ sql: select * from
+ (select
+ id,
+ card_no,
+ merchant_id,
+ trx_time,
+ sum(trx_amt) over w30d as sum_trx_amt,
+ count(merchant_id) over w10d as count_merchant_id
+ from {0}
+ window w30d as (PARTITION BY {0}.card_no ORDER BY {0}.trx_time ROWS_RANGE BETWEEN 30d PRECEDING AND CURRENT ROW),
+ w10d as (PARTITION BY {0}.card_no ORDER BY {0}.trx_time ROWS_RANGE BETWEEN 10d PRECEDING AND CURRENT ROW)) as trx_fe
+ last join {1} order by {1}.crd_lst_isu_dte on trx_fe.merchant_id = {1}.merchant_nbr and trx_fe.trx_time >= {1}.crd_lst_isu_dte;
+ expect:
+ columns: ["id int", "card_no string", "merchant_id int", "trx_time timestamp",
+ "sum_trx_amt float", "count_merchant_id int64", "crd_lst_isu_dte timestamp",
+ "merchant_nbr int"]
+ order: id
+ rows:
+ - [1, "aaaaaaaaaa", 1, 1590738989000, 1.1, 1, 1590738988000, 1]
+ - [2, "aaaaaaaaaa", 1, 1590738990000, 3.3, 2, 1590738990000, 1]
+ - [3, "bb", 2, 1590738990000, 3.3, 1, null, null]
+ - [4, "cc", 3, 1590738990000, 4.0, 1, 1590738989000, 3]
+ - [5, "cc", 3, 1590738991000, 9.0, 2, 1590738989000, 3]
+ - [6, "cc", 3, 1590738992000, 15.0, 3, 1590738992000, 3]
+ - [7, "cc", 2, 1590738993000, 22.0, 4, 1590738991000, 2]
+ -
+ id: 4
+      desc: 3 sets of window features stitched by ID
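+      # Each sub-select computes a window feature over a different partition key (c2, c3, c4) and
+      # the results are stitched back into a single row per id via LAST JOIN on out1_id.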
+ inputs:
+ -
+ columns : ["id int", "c1 string", "c2 string", "c3 string", "c4 string", "c6 double", "c7 timestamp"]
+ indexs: ["index1:c1:c7", "index2:c2:c7", "index3:c3:c7", "index4:c4:c7"]
+ rows:
+ - [ 1, "a", "aa", "aaa", "aaaa", "1.0", 1590738990000]
+ - [ 2, "a", "aa", "aaa", "aaaa", "1.0", 1590738991000]
+ - [ 3, "a", "aa", "aaa", "bbbb", "1.0", 1590738992000]
+ - [ 4, "a", "aa", "aaa", "bbbb", "1.0", 1590738993000]
+ - [ 5, "a", "aa", "bbb", "bbbb", "1.0", 1590738994000]
+ - [ 6, "a", "aa", "bbb", "bbbb", "1.0", 1590738995000]
+ - [ 7, "a", "bb", "bbb", "bbbb", "1.0", 1590738996000 ]
+ - [ 8, "a", "bb", "bbb", "bbbb", "1.0", 1590738997000 ]
+ - [ 9, "b", "bb", "bbb", "bbbb", "1.0", 1590739998000 ]
+ - [10, "b", "bb", "bbb", "bbbb", "1.0", 1590739999000 ]
+ sql: |
+ select * from
+ (
+ select id as out1_id, c1, c6 from {0}
+ ) as out1 last join
+ (
+ select id as out2_id, c2, sum(c6) over w2 as w2_sum_c6 from {0}
+ window w2 as (PARTITION BY {0}.c2 ORDER BY {0}.c7 ROWS_RANGE BETWEEN 10d PRECEDING AND CURRENT ROW)
+ ) as out2 on out1_id=out2_id last join
+ (
+ select id as out3_id, c3, sum(c6) over w3 as w3_sum_c6 from {0}
+ window w3 as (PARTITION BY {0}.c3 ORDER BY {0}.c7 ROWS_RANGE BETWEEN 10d PRECEDING AND CURRENT ROW)
+ ) as out3 on out1_id=out3_id last join
+ (
+ select id as out4_id, c4, sum(c6) over w4 as w4_sum_c6 from {0}
+ window w4 as (PARTITION BY {0}.c4 ORDER BY {0}.c7 ROWS_RANGE BETWEEN 10d PRECEDING AND CURRENT ROW)
+ ) as out4 on out1_id=out4_id;
+ expect:
+ columns: ["out1_id int", "c1 string", "c6 double",
+ "out2_id int", "c2 string", "w2_sum_c6 double",
+ "out3_id int", "c3 string", "w3_sum_c6 double",
+ "out4_id int", "c4 string", "w4_sum_c6 double",]
+ order: out1_id
+ rows:
+ - [ 1, "a", 1.0, 1, "aa", 1.0, 1, "aaa", 1.0, 1, "aaaa", 1.0]
+ - [ 2, "a", 1.0, 2, "aa", 2.0, 2, "aaa", 2.0, 2, "aaaa", 2.0]
+ - [ 3, "a", 1.0, 3, "aa", 3.0, 3, "aaa", 3.0, 3, "bbbb", 1.0]
+ - [ 4, "a", 1.0, 4, "aa", 4.0, 4, "aaa", 4.0, 4, "bbbb", 2.0]
+ - [ 5, "a", 1.0, 5, "aa", 5.0, 5, "bbb", 1.0, 5, "bbbb", 3.0]
+ - [ 6, "a", 1.0, 6, "aa", 6.0, 6, "bbb", 2.0, 6, "bbbb", 4.0]
+ - [ 7, "a", 1.0, 7, "bb", 1.0, 7, "bbb", 3.0, 7, "bbbb", 5.0]
+ - [ 8, "a", 1.0, 8, "bb", 2.0, 8, "bbb", 4.0, 8, "bbbb", 6.0]
+ - [ 9, "b", 1.0, 9, "bb", 3.0, 9, "bbb", 5.0, 9, "bbbb", 7.0]
+ - [10, "b", 1.0, 10, "bb", 4.0, 10, "bbb", 6.0, 10, "bbbb", 8.0]
+ -
+ id: 5
+      desc: 4 sets of window features stitched by ID
+ inputs:
+ -
+ columns : ["id int", "c1 string", "c2 string", "c3 string", "c4 string", "c6 double", "c7 timestamp"]
+ indexs: ["index1:c1:c7", "index2:c2:c7", "index3:c3:c7", "index4:c4:c7"]
+ rows:
+ - [ 1, "a", "aa", "aaa", "aaaa", "1.0", 1590738990000]
+ - [ 2, "a", "aa", "aaa", "aaaa", "1.0", 1590738991000]
+ - [ 3, "a", "aa", "aaa", "bbbb", "1.0", 1590738992000]
+ - [ 4, "a", "aa", "aaa", "bbbb", "1.0", 1590738993000]
+ - [ 5, "a", "aa", "bbb", "bbbb", "1.0", 1590738994000]
+ - [ 6, "a", "aa", "bbb", "bbbb", "1.0", 1590738995000]
+ - [ 7, "a", "bb", "bbb", "bbbb", "1.0", 1590738996000 ]
+ - [ 8, "a", "bb", "bbb", "bbbb", "1.0", 1590738997000 ]
+ - [ 9, "b", "bb", "bbb", "bbbb", "1.0", 1590739998000 ]
+ - [10, "b", "bb", "bbb", "bbbb", "1.0", 1590739999000 ]
+ sql: |
+ select * from
+ (
+ select id as out1_id, c1, sum(c6) over w1 as w1_sum_c6 from {0}
+ window w1 as (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS_RANGE BETWEEN 10d PRECEDING AND CURRENT ROW)
+ ) as out1 last join
+ (
+ select id as out2_id, c2, sum(c6) over w2 as w2_sum_c6 from {0}
+ window w2 as (PARTITION BY {0}.c2 ORDER BY {0}.c7 ROWS_RANGE BETWEEN 10d PRECEDING AND CURRENT ROW)
+ ) as out2 on out1_id=out2_id last join
+ (
+ select id as out3_id, c3, sum(c6) over w3 as w3_sum_c6 from {0}
+ window w3 as (PARTITION BY {0}.c3 ORDER BY {0}.c7 ROWS_RANGE BETWEEN 10d PRECEDING AND CURRENT ROW)
+ ) as out3 on out1_id=out3_id last join
+ (
+ select id as out4_id, c4, sum(c6) over w4 as w4_sum_c6 from {0}
+ window w4 as (PARTITION BY {0}.c4 ORDER BY {0}.c7 ROWS_RANGE BETWEEN 10d PRECEDING AND CURRENT ROW)
+ ) as out4 on out1_id=out4_id;
+ request_plan: |
+ SIMPLE_PROJECT(sources=(out1_id, c1, w1_sum_c6, out2_id, c2, w2_sum_c6, out3_id, c3, w3_sum_c6, out4.out4_id, out4.c4, out4.w4_sum_c6))
+ REQUEST_JOIN(type=LastJoin, condition=, left_keys=(out1_id), right_keys=(out4_id), index_keys=)
+ REQUEST_JOIN(type=LastJoin, condition=, left_keys=(out1_id), right_keys=(out3_id), index_keys=)
+ REQUEST_JOIN(type=LastJoin, condition=, left_keys=(out1_id), right_keys=(out2_id), index_keys=)
+ RENAME(name=out1)
+ PROJECT(type=Aggregation)
+ REQUEST_UNION(partition_keys=(), orders=(ASC), range=(auto_t0.c7, 864000000 PRECEDING, 0 CURRENT), index_keys=(auto_t0.c1))
+ DATA_PROVIDER(request=auto_t0)
+ DATA_PROVIDER(type=Partition, table=auto_t0, index=index1)
+ RENAME(name=out2)
+ PROJECT(type=Aggregation)
+ REQUEST_UNION(partition_keys=(), orders=(ASC), range=(auto_t0.c7, 864000000 PRECEDING, 0 CURRENT), index_keys=(auto_t0.c2))
+ DATA_PROVIDER(request=auto_t0)
+ DATA_PROVIDER(type=Partition, table=auto_t0, index=index2)
+ RENAME(name=out3)
+ PROJECT(type=Aggregation)
+ REQUEST_UNION(partition_keys=(), orders=(ASC), range=(auto_t0.c7, 864000000 PRECEDING, 0 CURRENT), index_keys=(auto_t0.c3))
+ DATA_PROVIDER(request=auto_t0)
+ DATA_PROVIDER(type=Partition, table=auto_t0, index=index3)
+ RENAME(name=out4)
+ PROJECT(type=Aggregation)
+ REQUEST_UNION(partition_keys=(), orders=(ASC), range=(auto_t0.c7, 864000000 PRECEDING, 0 CURRENT), index_keys=(auto_t0.c4))
+ DATA_PROVIDER(request=auto_t0)
+ DATA_PROVIDER(type=Partition, table=auto_t0, index=index4)
+
+ cluster_request_plan: |
+ SIMPLE_PROJECT(sources=(out1_id, c1, w1_sum_c6, out2_id, c2, w2_sum_c6, out3_id, c3, w3_sum_c6, out4.out4_id, out4.c4, out4.w4_sum_c6))
+ REQUEST_JOIN(type=LastJoin, condition=, left_keys=(out1_id), right_keys=(out4_id), index_keys=)
+ REQUEST_JOIN(type=LastJoin, condition=, left_keys=(out1_id), right_keys=(out3_id), index_keys=)
+ REQUEST_JOIN(type=LastJoin, condition=, left_keys=(out1_id), right_keys=(out2_id), index_keys=)
+ RENAME(name=out1)
+ SIMPLE_PROJECT(sources=(out1_id, c1, w1_sum_c6))
+ REQUEST_JOIN(type=kJoinTypeConcat)
+ SIMPLE_PROJECT(sources=(id -> out1_id, c1))
+ DATA_PROVIDER(request=auto_t0)
+ PROJECT(type=Aggregation)
+ REQUEST_UNION(partition_keys=(), orders=(ASC), range=(auto_t0.c7, 864000000 PRECEDING, 0 CURRENT), index_keys=(auto_t0.c1))
+ DATA_PROVIDER(request=auto_t0)
+ DATA_PROVIDER(type=Partition, table=auto_t0, index=index1)
+ RENAME(name=out2)
+ SIMPLE_PROJECT(sources=(out2_id, c2, w2_sum_c6))
+ REQUEST_JOIN(type=kJoinTypeConcat)
+ SIMPLE_PROJECT(sources=(id -> out2_id, c2))
+ DATA_PROVIDER(request=auto_t0)
+ PROJECT(type=Aggregation)
+ REQUEST_UNION(partition_keys=(), orders=(ASC), range=(auto_t0.c7, 864000000 PRECEDING, 0 CURRENT), index_keys=(auto_t0.c2))
+ DATA_PROVIDER(request=auto_t0)
+ DATA_PROVIDER(type=Partition, table=auto_t0, index=index2)
+ RENAME(name=out3)
+ SIMPLE_PROJECT(sources=(out3_id, c3, w3_sum_c6))
+ REQUEST_JOIN(type=kJoinTypeConcat)
+ SIMPLE_PROJECT(sources=(id -> out3_id, c3))
+ DATA_PROVIDER(request=auto_t0)
+ PROJECT(type=Aggregation)
+ REQUEST_UNION(partition_keys=(), orders=(ASC), range=(auto_t0.c7, 864000000 PRECEDING, 0 CURRENT), index_keys=(auto_t0.c3))
+ DATA_PROVIDER(request=auto_t0)
+ DATA_PROVIDER(type=Partition, table=auto_t0, index=index3)
+ RENAME(name=out4)
+ SIMPLE_PROJECT(sources=(out4_id, c4, w4_sum_c6))
+ REQUEST_JOIN(type=kJoinTypeConcat)
+ SIMPLE_PROJECT(sources=(id -> out4_id, c4))
+ DATA_PROVIDER(request=auto_t0)
+ PROJECT(type=Aggregation)
+ REQUEST_UNION(partition_keys=(), orders=(ASC), range=(auto_t0.c7, 864000000 PRECEDING, 0 CURRENT), index_keys=(auto_t0.c4))
+ DATA_PROVIDER(request=auto_t0)
+ DATA_PROVIDER(type=Partition, table=auto_t0, index=index4)
+ expect:
+ columns: ["out1_id int", "c1 string", "w1_sum_c6 double",
+ "out2_id int", "c2 string", "w2_sum_c6 double",
+ "out3_id int", "c3 string", "w3_sum_c6 double",
+ "out4_id int", "c4 string", "w4_sum_c6 double",]
+ order: out1_id
+ rows:
+ - [ 1, "a", 1.0, 1, "aa", 1.0, 1, "aaa", 1.0, 1, "aaaa", 1.0]
+ - [ 2, "a", 2.0, 2, "aa", 2.0, 2, "aaa", 2.0, 2, "aaaa", 2.0]
+ - [ 3, "a", 3.0, 3, "aa", 3.0, 3, "aaa", 3.0, 3, "bbbb", 1.0]
+ - [ 4, "a", 4.0, 4, "aa", 4.0, 4, "aaa", 4.0, 4, "bbbb", 2.0]
+ - [ 5, "a", 5.0, 5, "aa", 5.0, 5, "bbb", 1.0, 5, "bbbb", 3.0]
+ - [ 6, "a", 6.0, 6, "aa", 6.0, 6, "bbb", 2.0, 6, "bbbb", 4.0]
+ - [ 7, "a", 7.0, 7, "bb", 1.0, 7, "bbb", 3.0, 7, "bbbb", 5.0]
+ - [ 8, "a", 8.0, 8, "bb", 2.0, 8, "bbb", 4.0, 8, "bbbb", 6.0]
+ - [ 9, "b", 1.0, 9, "bb", 3.0, 9, "bbb", 5.0, 9, "bbbb", 7.0]
+ - [10, "b", 2.0, 10, "bb", 4.0, 10, "bbb", 6.0, 10, "bbbb", 8.0]
+ -
+ id: 6
+      desc: window features joined with multiple secondary tables, LAST JOIN condition expression 1
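+      # Besides the equality key, every LAST JOIN here carries a time-offset predicate
+      # (c7 - N >= tX.x7), so a secondary row only matches once it is old enough; rows without a
+      # qualifying match get NULL rid columns in the expectation.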
+ inputs:
+ -
+ columns : ["id int", "c1 string", "c2 string", "c3 string", "c4 string", "c6 double", "c7 timestamp"]
+ indexs: ["index1:c1:c7"]
+ rows:
+ - [ 1, "a", "aa", "aaa", "aaaa", "1.0", 1590738990000]
+ - [ 2, "a", "aa", "aaa", "aaaa", "2.0", 1590738991000]
+ - [ 3, "a", "aa", "aaa", "bbbb", "3.0", 1590738992000]
+ - [ 4, "a", "aa", "aaa", "bbbb", "4.0", 1590738993000]
+ - [ 5, "a", "aa", "bbb", "bbbb", "5.0", 1590738994000]
+ - [ 6, "a", "aa", "bbb", "bbbb", "6.0", 1590738995000]
+ - [ 7, "a", "bb", "bbb", "bbbb", "7.0", 1590738996000 ]
+ - [ 8, "a", "bb", "bbb", "bbbb", "8.0", 1590738997000 ]
+ - [ 9, "b", "bb", "bbb", "bbbb", "9.0", 1590738998000 ]
+ - [10, "b", "bb", "bbb", "bbbb", "10.0", 1590738999000 ]
+ - columns: ["rid int", "x1 string", "x2 string", "x3 string", "x4 string", "x6 double", "x7 timestamp"]
+ indexs: ["index1:x1:x7", "index2:x2:x7", "index3:x3:x7", "index4:x4:x7", ]
+ rows:
+ - [ 1, "a", "aa", "aaa", "aaaa", "1.0", 1590738990000 ]
+ - [ 2, "a", "aa", "aaa", "aaaa", "1.0", 1590738991000 ]
+ - [ 3, "a", "aa", "aaa", "bbbb", "1.0", 1590738992000 ]
+ - [ 4, "a", "aa", "aaa", "bbbb", "1.0", 1590738993000 ]
+ - [ 5, "a", "aa", "bbb", "bbbb", "1.0", 1590738994000 ]
+ - [ 6, "a", "aa", "bbb", "bbbb", "1.0", 1590738995000 ]
+ - [ 7, "a", "bb", "bbb", "bbbb", "1.0", 1590738996000 ]
+ - [ 8, "a", "bb", "bbb", "bbbb", "1.0", 1590738997000 ]
+ - [ 9, "b", "bb", "bbb", "bbbb", "1.0", 1590738998000 ]
+ - [ 10, "b", "bb", "bbb", "bbbb", "1.0",1590738999000 ]
+ sql: |
+ select id, c1, c2, c3, c4, c6, c7, cur_hour, today
+ , w1_sum_c6, w1_max_c6, w1_min_c6, w1_avg_c6, w1_cnt_c6
+ , t1.rid as t1_rid, t2.rid as t2_rid, t3.rid as t3_rid, t4.rid as t4_rid
+ from
+ (
+ select id, c1, c2, c3, c4, c6, c7, hour(c7) as cur_hour, day(c7) as today
+ , sum(c6) over w1 as w1_sum_c6
+ , max(c6) over w1 as w1_max_c6
+ , min(c6) over w1 as w1_min_c6
+ , avg(c6) over w1 as w1_avg_c6
+ , count(c6) over w1 as w1_cnt_c6
+ from {0}
+ window w1 as (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS_RANGE BETWEEN 10d PRECEDING AND CURRENT ROW)
+ ) as w_out last join {1} as t1 order by t1.x7 on c1 = t1.x1 and c7 - 1000 >= t1.x7
+ last join {1} as t2 order by t2.x7 on c2 = t2.x2 and c7 - 2000 >= t2.x7
+ last join {1} as t3 order by t3.x7 on c3 = t3.x3 and c7 - 3000 >= t3.x7
+ last join {1} as t4 order by t4.x7 on c4 = t4.x4 and c7 - 4000 >= t4.x7;
+ request_plan: |
+ SIMPLE_PROJECT(sources=(id, c1, c2, c3, c4, c6, c7, cur_hour, today, w1_sum_c6, w1_max_c6, w1_min_c6, w1_avg_c6, w1_cnt_c6, t1.rid -> t1_rid, t2.rid -> t2_rid, t3.rid -> t3_rid, t4.rid -> t4_rid))
+ REQUEST_JOIN(type=LastJoin, right_sort=(ASC), condition=c7 - 4000 >= t4.x7, left_keys=(), right_keys=(), index_keys=(c4))
+ REQUEST_JOIN(type=LastJoin, right_sort=(ASC), condition=c7 - 3000 >= t3.x7, left_keys=(), right_keys=(), index_keys=(c3))
+ REQUEST_JOIN(type=LastJoin, right_sort=(ASC), condition=c7 - 2000 >= t2.x7, left_keys=(), right_keys=(), index_keys=(c2))
+ REQUEST_JOIN(type=LastJoin, right_sort=(ASC), condition=c7 - 1000 >= t1.x7, left_keys=(), right_keys=(), index_keys=(c1))
+ RENAME(name=w_out)
+ PROJECT(type=Aggregation)
+ REQUEST_UNION(partition_keys=(), orders=(ASC), range=(auto_t0.c7, 864000000 PRECEDING, 0 CURRENT), index_keys=(auto_t0.c1))
+ DATA_PROVIDER(request=auto_t0)
+ DATA_PROVIDER(type=Partition, table=auto_t0, index=index1)
+ RENAME(name=t1)
+ DATA_PROVIDER(type=Partition, table=auto_t1, index=index1)
+ RENAME(name=t2)
+ DATA_PROVIDER(type=Partition, table=auto_t1, index=index2)
+ RENAME(name=t3)
+ DATA_PROVIDER(type=Partition, table=auto_t1, index=index3)
+ RENAME(name=t4)
+ DATA_PROVIDER(type=Partition, table=auto_t1, index=index4)
+ cluster_request_plan: |
+ SIMPLE_PROJECT(sources=(id, c1, c2, c3, c4, c6, c7, cur_hour, today, w1_sum_c6, w1_max_c6, w1_min_c6, w1_avg_c6, w1_cnt_c6, t1.rid -> t1_rid, t2.rid -> t2_rid, t3.rid -> t3_rid, t4.rid -> t4_rid))
+ REQUEST_JOIN(type=kJoinTypeConcat)
+ REQUEST_JOIN(type=kJoinTypeConcat)
+ REQUEST_JOIN(type=kJoinTypeConcat)
+ REQUEST_JOIN(type=kJoinTypeConcat)
+ RENAME(name=w_out)
+ SIMPLE_PROJECT(sources=(id, c1, c2, c3, c4, c6, c7, cur_hour, today, w1_sum_c6, w1_max_c6, w1_min_c6, w1_avg_c6, w1_cnt_c6))
+ REQUEST_JOIN(type=kJoinTypeConcat)
+ PROJECT(type=RowProject)
+ DATA_PROVIDER(request=auto_t0)
+ PROJECT(type=Aggregation)
+ REQUEST_UNION(partition_keys=(), orders=(ASC), range=(auto_t0.c7, 864000000 PRECEDING, 0 CURRENT), index_keys=(auto_t0.c1))
+ DATA_PROVIDER(request=auto_t0)
+ DATA_PROVIDER(type=Partition, table=auto_t0, index=index1)
+ REQUEST_JOIN(OUTPUT_RIGHT_ONLY, type=LastJoin, right_sort=(ASC), condition=#14 - 1000 >= #35, left_keys=(), right_keys=(), index_keys=(#9))
+ SIMPLE_PROJECT(sources=(#9 -> c1, #14 -> c7))
+ DATA_PROVIDER(request=auto_t0)
+ RENAME(name=t1)
+ DATA_PROVIDER(type=Partition, table=auto_t1, index=index1)
+ REQUEST_JOIN(OUTPUT_RIGHT_ONLY, type=LastJoin, right_sort=(ASC), condition=#14 - 2000 >= #35, left_keys=(), right_keys=(), index_keys=(#10))
+ SIMPLE_PROJECT(sources=(#10 -> c2, #14 -> c7))
+ DATA_PROVIDER(request=auto_t0)
+ RENAME(name=t2)
+ DATA_PROVIDER(type=Partition, table=auto_t1, index=index2)
+ REQUEST_JOIN(OUTPUT_RIGHT_ONLY, type=LastJoin, right_sort=(ASC), condition=#14 - 3000 >= #35, left_keys=(), right_keys=(), index_keys=(#11))
+ SIMPLE_PROJECT(sources=(#11 -> c3, #14 -> c7))
+ DATA_PROVIDER(request=auto_t0)
+ RENAME(name=t3)
+ DATA_PROVIDER(type=Partition, table=auto_t1, index=index3)
+ REQUEST_JOIN(OUTPUT_RIGHT_ONLY, type=LastJoin, right_sort=(ASC), condition=#14 - 4000 >= #35, left_keys=(), right_keys=(), index_keys=(#12))
+ SIMPLE_PROJECT(sources=(#12 -> c4, #14 -> c7))
+ DATA_PROVIDER(request=auto_t0)
+ RENAME(name=t4)
+ DATA_PROVIDER(type=Partition, table=auto_t1, index=index4)
+ expect:
+ columns: ["id int", "c1 string", "c2 string", "c3 string", "c4 string", "c6 double", "c7 timestamp",
+ "cur_hour int32", "today int32", "w1_sum_c6 double", "w1_max_c6 double",
+ "w1_min_c6 double", "w1_avg_c6 double", "w1_cnt_c6 bigint",
+ "t1_rid int32", "t2_rid int32", "t3_rid int32", "t4_rid int32"]
+ order: id
+ rows:
+ - [ 1, "a", "aa", "aaa", "aaaa", "1.0", 1590738990000, 15, 29, 1.0, 1.0, 1.0, 1.0, 1, NULL, NULL, NULL, NULL]
+ - [ 2, "a", "aa", "aaa", "aaaa", "2.0", 1590738991000, 15, 29, 3.0, 2.0, 1.0, 1.5, 2, 1, NULL, NULL, NULL ]
+ - [ 3, "a", "aa", "aaa", "bbbb", "3.0", 1590738992000, 15, 29, 6.0, 3.0, 1.0, 2.0, 3, 2 , 1, NULL, NULL]
+ - [ 4, "a", "aa", "aaa", "bbbb", "4.0", 1590738993000, 15, 29, 10.0, 4.0, 1.0, 2.5, 4, 3 , 2, 1, NULL]
+ - [ 5, "a", "aa", "bbb", "bbbb", "5.0", 1590738994000, 15, 29, 15.0, 5.0, 1.0, 3.0, 5, 4 , 3, NULL, NULL]
+ - [ 6, "a", "aa", "bbb", "bbbb", "6.0", 1590738995000, 15, 29, 21.0, 6.0, 1.0, 3.5, 6, 5 , 4, NULL, NULL]
+ - [ 7, "a", "bb", "bbb", "bbbb", "7.0", 1590738996000, 15, 29, 28.0, 7.0, 1.0, 4.0, 7, 6 , NULL, NULL, 3]
+ - [ 8, "a", "bb", "bbb", "bbbb", "8.0", 1590738997000, 15, 29, 36.0, 8.0, 1.0, 4.5, 8, 7 , NULL, 5, 4]
+ - [ 9, "b", "bb", "bbb", "bbbb", "9.0", 1590738998000, 15, 29, 9.0, 9.0, 9.0, 9.0, 1, NULL , 7, 6, 5]
+ - [ 10, "b", "bb", "bbb", "bbbb", "10.0",1590738999000,15, 29, 19.0, 10.0, 9.0, 9.5, 2, 9, 8, 7, 6]
+
+
+ -
+ id: 7
+      desc: window features joined with multiple secondary tables, LAST JOIN condition expression 2
+ inputs:
+ -
+ columns : ["id int", "c1 string", "c2 string", "c3 string", "c4 string", "c6 double", "c7 timestamp"]
+ indexs: ["index1:c1:c7"]
+ rows:
+ - [ 1, "a", "aa", "aaa", "aaaa", "1.0", 1590738990000]
+ - [ 2, "a", "aa", "aaa", "aaaa", "2.0", 1590738991000]
+ - [ 3, "a", "aa", "aaa", "bbbb", "3.0", 1590738992000]
+ - [ 4, "a", "aa", "aaa", "bbbb", "4.0", 1590738993000]
+ - [ 5, "a", "aa", "bbb", "bbbb", "5.0", 1590738994000]
+ - [ 6, "a", "aa", "bbb", "bbbb", "6.0", 1590738995000]
+ - [ 7, "a", "bb", "bbb", "bbbb", "7.0", 1590738996000 ]
+ - [ 8, "a", "bb", "bbb", "bbbb", "8.0", 1590738997000 ]
+ - [ 9, "b", "bb", "bbb", "bbbb", "9.0", 1590738998000 ]
+ - [10, "b", "bb", "bbb", "bbbb", "10.0", 1590738999000 ]
+ - columns: ["rid int", "x1 string", "x2 string", "x3 string", "x4 string", "x6 double", "x7 timestamp"]
+ indexs: ["index1:x1:x7", "index2:x2:x7", "index3:x3:x7", "index4:x4:x7", ]
+ rows:
+ - [ 1, "a", "aa", "aaa", "aaaa", "1.0", 1590738990000 ]
+ - [ 2, "a", "aa", "aaa", "aaaa", "1.0", 1590738991000 ]
+ - [ 3, "a", "aa", "aaa", "bbbb", "1.0", 1590738992000 ]
+ - [ 4, "a", "aa", "aaa", "bbbb", "1.0", 1590738993000 ]
+ - [ 5, "a", "aa", "bbb", "bbbb", "1.0", 1590738994000 ]
+ - [ 6, "a", "aa", "bbb", "bbbb", "1.0", 1590738995000 ]
+ - [ 7, "a", "bb", "bbb", "bbbb", "1.0", 1590738996000 ]
+ - [ 8, "a", "bb", "bbb", "bbbb", "1.0", 1590738997000 ]
+ - [ 9, "b", "bb", "bbb", "bbbb", "1.0", 1590738998000 ]
+ - [ 10, "b", "bb", "bbb", "bbbb", "1.0",1590738999000 ]
+ sql: |
+ select id, c1, c2, c3, c4, c6, c7, cur_hour, today
+ , w1_sum_c6, w1_max_c6, w1_min_c6, w1_avg_c6, w1_cnt_c6
+ , t1.rid as t1_rid, t2.rid as t2_rid, t3.rid as t3_rid, t4.rid as t4_rid
+ from
+ (
+ select id, c1, c2, c3, c4, c6, c7, hour(c7) as cur_hour, day(c7) as today
+ , sum(c6) over w1 as w1_sum_c6
+ , max(c6) over w1 as w1_max_c6
+ , min(c6) over w1 as w1_min_c6
+ , avg(c6) over w1 as w1_avg_c6
+ , count(c6) over w1 as w1_cnt_c6
+ from {0}
+ window w1 as (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS_RANGE BETWEEN 10d PRECEDING AND CURRENT ROW)
+ ) as w_out last join {1} as t1 order by t1.x7 on c1 = t1.x1 and c7 - 1000 >= t1.x7
+ last join {1} as t2 order by t2.x7 on w_out.c2 = t2.x2 and c7 - 2000 >= t2.x7
+ last join {1} as t3 order by t3.x7 on w_out.c3 = t3.x3 and c7 - 3000 >= t3.x7
+ last join {1} as t4 order by t4.x7 on w_out.c4 = t4.x4 and c7 - 4000 >= t4.x7;
+ request_plan: |
+ SIMPLE_PROJECT(sources=(id, c1, c2, c3, c4, c6, c7, cur_hour, today, w1_sum_c6, w1_max_c6, w1_min_c6, w1_avg_c6, w1_cnt_c6, t1.rid -> t1_rid, t2.rid -> t2_rid, t3.rid -> t3_rid, t4.rid -> t4_rid))
+ REQUEST_JOIN(type=LastJoin, right_sort=(ASC), condition=c7 - 4000 >= t4.x7, left_keys=(), right_keys=(), index_keys=(w_out.c4))
+ REQUEST_JOIN(type=LastJoin, right_sort=(ASC), condition=c7 - 3000 >= t3.x7, left_keys=(), right_keys=(), index_keys=(w_out.c3))
+ REQUEST_JOIN(type=LastJoin, right_sort=(ASC), condition=c7 - 2000 >= t2.x7, left_keys=(), right_keys=(), index_keys=(w_out.c2))
+ REQUEST_JOIN(type=LastJoin, right_sort=(ASC), condition=c7 - 1000 >= t1.x7, left_keys=(), right_keys=(), index_keys=(c1))
+ RENAME(name=w_out)
+ PROJECT(type=Aggregation)
+ REQUEST_UNION(partition_keys=(), orders=(ASC), range=(auto_t0.c7, 864000000 PRECEDING, 0 CURRENT), index_keys=(auto_t0.c1))
+ DATA_PROVIDER(request=auto_t0)
+ DATA_PROVIDER(type=Partition, table=auto_t0, index=index1)
+ RENAME(name=t1)
+ DATA_PROVIDER(type=Partition, table=auto_t1, index=index1)
+ RENAME(name=t2)
+ DATA_PROVIDER(type=Partition, table=auto_t1, index=index2)
+ RENAME(name=t3)
+ DATA_PROVIDER(type=Partition, table=auto_t1, index=index3)
+ RENAME(name=t4)
+ DATA_PROVIDER(type=Partition, table=auto_t1, index=index4)
+ cluster_request_plan: |
+ SIMPLE_PROJECT(sources=(id, c1, c2, c3, c4, c6, c7, cur_hour, today, w1_sum_c6, w1_max_c6, w1_min_c6, w1_avg_c6, w1_cnt_c6, t1.rid -> t1_rid, t2.rid -> t2_rid, t3.rid -> t3_rid, t4.rid -> t4_rid))
+ REQUEST_JOIN(type=kJoinTypeConcat)
+ REQUEST_JOIN(type=kJoinTypeConcat)
+ REQUEST_JOIN(type=kJoinTypeConcat)
+ REQUEST_JOIN(type=kJoinTypeConcat)
+ RENAME(name=w_out)
+ SIMPLE_PROJECT(sources=(id, c1, c2, c3, c4, c6, c7, cur_hour, today, w1_sum_c6, w1_max_c6, w1_min_c6, w1_avg_c6, w1_cnt_c6))
+ REQUEST_JOIN(type=kJoinTypeConcat)
+ PROJECT(type=RowProject)
+ DATA_PROVIDER(request=auto_t0)
+ PROJECT(type=Aggregation)
+ REQUEST_UNION(partition_keys=(), orders=(ASC), range=(auto_t0.c7, 864000000 PRECEDING, 0 CURRENT), index_keys=(auto_t0.c1))
+ DATA_PROVIDER(request=auto_t0)
+ DATA_PROVIDER(type=Partition, table=auto_t0, index=index1)
+ REQUEST_JOIN(OUTPUT_RIGHT_ONLY, type=LastJoin, right_sort=(ASC), condition=#14 - 1000 >= #35, left_keys=(), right_keys=(), index_keys=(#9))
+ SIMPLE_PROJECT(sources=(#9 -> c1, #14 -> c7))
+ DATA_PROVIDER(request=auto_t0)
+ RENAME(name=t1)
+ DATA_PROVIDER(type=Partition, table=auto_t1, index=index1)
+ REQUEST_JOIN(OUTPUT_RIGHT_ONLY, type=LastJoin, right_sort=(ASC), condition=#14 - 2000 >= #35, left_keys=(), right_keys=(), index_keys=(#10))
+ SIMPLE_PROJECT(sources=(#10 -> w_out.c2, #14 -> c7))
+ DATA_PROVIDER(request=auto_t0)
+ RENAME(name=t2)
+ DATA_PROVIDER(type=Partition, table=auto_t1, index=index2)
+ REQUEST_JOIN(OUTPUT_RIGHT_ONLY, type=LastJoin, right_sort=(ASC), condition=#14 - 3000 >= #35, left_keys=(), right_keys=(), index_keys=(#11))
+ SIMPLE_PROJECT(sources=(#11 -> w_out.c3, #14 -> c7))
+ DATA_PROVIDER(request=auto_t0)
+ RENAME(name=t3)
+ DATA_PROVIDER(type=Partition, table=auto_t1, index=index3)
+ REQUEST_JOIN(OUTPUT_RIGHT_ONLY, type=LastJoin, right_sort=(ASC), condition=#14 - 4000 >= #35, left_keys=(), right_keys=(), index_keys=(#12))
+ SIMPLE_PROJECT(sources=(#12 -> w_out.c4, #14 -> c7))
+ DATA_PROVIDER(request=auto_t0)
+ RENAME(name=t4)
+ DATA_PROVIDER(type=Partition, table=auto_t1, index=index4)
+ expect:
+ columns: ["id int", "c1 string", "c2 string", "c3 string", "c4 string", "c6 double", "c7 timestamp",
+ "cur_hour int32", "today int32", "w1_sum_c6 double", "w1_max_c6 double",
+ "w1_min_c6 double", "w1_avg_c6 double", "w1_cnt_c6 bigint",
+ "t1_rid int32", "t2_rid int32", "t3_rid int32", "t4_rid int32"]
+ order: id
+ rows:
+ - [ 1, "a", "aa", "aaa", "aaaa", "1.0", 1590738990000, 15, 29, 1.0, 1.0, 1.0, 1.0, 1, NULL, NULL, NULL, NULL]
+ - [ 2, "a", "aa", "aaa", "aaaa", "2.0", 1590738991000, 15, 29, 3.0, 2.0, 1.0, 1.5, 2, 1, NULL, NULL, NULL ]
+ - [ 3, "a", "aa", "aaa", "bbbb", "3.0", 1590738992000, 15, 29, 6.0, 3.0, 1.0, 2.0, 3, 2 , 1, NULL, NULL]
+ - [ 4, "a", "aa", "aaa", "bbbb", "4.0", 1590738993000, 15, 29, 10.0, 4.0, 1.0, 2.5, 4, 3 , 2, 1, NULL]
+ - [ 5, "a", "aa", "bbb", "bbbb", "5.0", 1590738994000, 15, 29, 15.0, 5.0, 1.0, 3.0, 5, 4 , 3, NULL, NULL]
+ - [ 6, "a", "aa", "bbb", "bbbb", "6.0", 1590738995000, 15, 29, 21.0, 6.0, 1.0, 3.5, 6, 5 , 4, NULL, NULL]
+ - [ 7, "a", "bb", "bbb", "bbbb", "7.0", 1590738996000, 15, 29, 28.0, 7.0, 1.0, 4.0, 7, 6 , NULL, NULL, 3]
+ - [ 8, "a", "bb", "bbb", "bbbb", "8.0", 1590738997000, 15, 29, 36.0, 8.0, 1.0, 4.5, 8, 7 , NULL, 5, 4]
+ - [ 9, "b", "bb", "bbb", "bbbb", "9.0", 1590738998000, 15, 29, 9.0, 9.0, 9.0, 9.0, 1, NULL , 7, 6, 5]
+ - [ 10, "b", "bb", "bbb", "bbbb", "10.0",1590738999000,15, 29, 19.0, 10.0, 9.0, 9.5, 2, 9, 8, 7, 6]
diff --git a/cases/integration_test/data_expiration/test_data_expiration.yaml b/cases/integration_test/data_expiration/test_data_expiration.yaml
new file mode 100644
index 00000000000..d686692bd92
--- /dev/null
+++ b/cases/integration_test/data_expiration/test_data_expiration.yaml
@@ -0,0 +1,70 @@
+# Copyright 2021 4Paradigm
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+db: test_zw
+debugs: []
+version: 0.5.0
+cases:
+- id: 0
+ desc: ttl_type=latest,ttl=4,insert 10
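+  # ttl_type=latest with ttl=4 keeps only the 4 most recent rows per key, so 6 of the 10 inserted
+  # rows are expired and absent from the expected result.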
+ inputs:
+ -
+ columns : ["c1 string","c2 int","c3 bigint","c4 timestamp"]
+ indexs: ["index1:c1:c4:4:latest"]
+ rows:
+ - ["bb", 2, 3, 1590738989000]
+ - ["bb", 4, 5, 1590738990000]
+ - ["bb", 6, 7, 1590738991000]
+ - ["bb", 8, 9, 1590738992000]
+ - ["bb", 10, 11, 1590738993000]
+ - ["bb", 12, 13, 1590738994000]
+ - ["bb", 14, 15, 1590738995000]
+ - ["bb", 16, 17, 1590738996000]
+ - ["bb", 18, 19, 1590738997000]
+ - ["bb", 20, 21, 1590738998000]
+ sql: select c1,c2,c3 from {0};
+ expect:
+ columns: ["c1 string","c2 int","c3 bigint"]
+ rows:
+ - ["bb", 20, 21]
+ - ["bb", 18, 19]
+ - ["bb", 16, 17]
+ - ["bb", 14, 15]
+
+- id: 16
+  desc: create a disk table, ttl_type=absolute, ttl=10m, insert 10
+ mode: request-unsupport
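+  # ttl_type=absolute with ttl=10m expires rows whose timestamp is at least 10 minutes old
+  # ({currentTime}-600000 and older), so only the first three inserted rows are expected to remain.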
+ inputs:
+ -
+ columns : ["c1 string","c2 int","c3 bigint","c4 timestamp"]
+ indexs: ["index1:c1:c4:10m:absolute"]
+ storage: hdd
+ rows:
+ - ["bb", 2, 3, "{currentTime}-100"]
+ - ["bb", 4, 5, "{currentTime}-200"]
+ - ["bb", 6, 7, "{currentTime}-599000"]
+ - ["bb", 8, 9, "{currentTime}-600000"]
+ - ["bb", 10, 11, "{currentTime}-600005"]
+ - ["bb", 12, 13, "{currentTime}-600006"]
+ - ["bb", 14, 15, "{currentTime}-600007"]
+ - ["bb", 16, 17, "{currentTime}-600008"]
+ - ["bb", 18, 19, "{currentTime}-600009"]
+ - ["bb", 20, 21, "{currentTime}-600010"]
+ sql: select c1,c2,c3 from {0};
+ expect:
+ columns: ["c1 string","c2 int","c3 bigint"]
+ rows:
+ - ["bb", 2, 3]
+ - ["bb", 4, 5]
+ - ["bb", 6, 7]
\ No newline at end of file
diff --git a/cases/integration_test/ddl/test_create.yaml b/cases/integration_test/ddl/test_create.yaml
new file mode 100644
index 00000000000..c877221404e
--- /dev/null
+++ b/cases/integration_test/ddl/test_create.yaml
@@ -0,0 +1,559 @@
+# Copyright 2021 4Paradigm
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+db: test_zw
+debugs: []
+version: 0.5.0
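+# Note (assumed test-framework conventions): "{auto}" appears to be replaced with an auto-generated
+# table name and "{0}" with the name of the first table in "inputs"; cases that only assert
+# "success: true/false" merely check whether the DDL statement is accepted.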
+cases:
+ -
+ id: 0
+    desc: create a table with all column types
+ inputs:
+ -
+ columns : ["c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"]
+ indexs: ["index1:c1:c7"]
+ rows:
+ - ["aa",1,2,3,1.1,2.1,1590738989000,"2020-05-01",true]
+ sql: select * from {0};
+ expect:
+ columns : ["c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"]
+ rows:
+ - ["aa",1,2,3,1.1,2.1,1590738989000,"2020-05-01",true]
+ -
+ id: 1
+    desc: create a table with two indexes on the same time column
+ sqlDialect: ["HybridSQL"]
+ inputs:
+ -
+ create: |
+ create table {0} (
+ c1 string,
+ c2 int,
+ c3 timestamp,
+ c4 timestamp,
+ index(key=(c1),ts=c4),
+ index(key=(c2),ts=c4));
+ insert: |
+ insert into {0} values ("aa", 1, 1590738990000, 1590738989000);
+ sql: select * from {0};
+ expect:
+ columns: ["c1 string","c2 int","c3 timestamp","c4 timestamp"]
+ rows:
+ - ["aa", 1, 1590738990000, 1590738989000]
+ -
+ id: 2
+    desc: create a table with two indexes on different time columns
+ inputs:
+ -
+ columns : ["c1 string","c2 int","c3 timestamp","c4 timestamp"]
+ indexs: ["index1:c1:c4","index2:c2:c3"]
+ rows:
+ - [aa,1,1590738990000,1590738989000]
+ sql: select * from {0};
+ expect:
+ columns: ["c1 string","c2 int","c3 timestamp","c4 timestamp"]
+ rows:
+ - [aa,1,1590738990000,1590738989000]
+ -
+ id: 3
+    desc: create a table with a composite index
+ inputs:
+ -
+ columns : ["c1 string","c2 int","c3 timestamp","c4 timestamp"]
+ indexs: ["index1:c1|c2:c4"]
+ rows:
+ - [aa,1,1590738990000,1590738989000]
+ sql: select * from {0};
+ expect:
+ columns: ["c1 string","c2 int","c3 timestamp","c4 timestamp"]
+ rows:
+ - [aa,1,1590738990000,1590738989000]
+ -
+ id: 4
+    desc: a NOT NULL column as the index
+ sqlDialect: ["HybridSQL"]
+ inputs:
+ -
+ columns : ["c1 string NOT NULL","c2 int","c3 timestamp","c4 timestamp"]
+ create: |
+ create table {0} (
+ c1 string NOT NULL,
+ c2 int,
+ c3 timestamp,
+ c4 timestamp,
+ index(key=(c1),ts=c4));
+ insert: |
+ insert into {0} values ("aa", 1, 1590738990000, 1590738989000);
+ sql: select * from {0};
+ expect:
+ columns: ["c1 string","c2 int","c3 timestamp","c4 timestamp"]
+ rows:
+ - [aa,1,1590738990000,1590738989000]
+ -
+ id: 5
+    desc: table name starting with a digit
+ sqlDialect: ["HybridSQL"]
+ sql: create table 1aaa(c1 string NOT NULL,c2 int,c3 timestamp,c4 timestamp,index(key=(c1),ts=c4,ttl=0m));
+ expect:
+ success: false
+ -
+ id: 6-1
+    desc: table name is a reserved keyword
+ sqlDialect: ["HybridSQL"]
+ sql: create table order(c1 string NOT NULL,c2 int,c3 timestamp,c4 timestamp,index(key=(c1),ts=c4,ttl=0m));
+ expect:
+ success: false
+ -
+ id: 6-2
+    desc: table name is a non-reserved keyword
+ sqlDialect: ["HybridSQL"]
+ inputs:
+ - name: table
+ sql: create table table(c1 string NOT NULL,c2 int,c3 timestamp,c4 timestamp,index(key=(c1),ts=c4,ttl=0m));
+ expect:
+ success: true
+ -
+ id: 7
+    desc: column name starting with a digit
+ sqlDialect: ["HybridSQL"]
+ sql: create table {auto}(1c string NOT NULL,c2 int,c3 timestamp,c4 timestamp,index(key=(c1),ts=c4,ttl=0m));
+ expect:
+ success: false
+ -
+ id: 8
+    desc: column name is a reserved keyword
+ sqlDialect: ["HybridSQL"]
+ sql: create table {auto}(use string NOT NULL,c2 int,c3 timestamp,c4 timestamp,index(key=(c1),ts=c4,ttl=0m));
+ expect:
+ success: false
+ -
+ id: 9
+    desc: statement without a trailing semicolon
+ sqlDialect: ["HybridSQL"]
+ sql: create table {auto}(c1 string NOT NULL,c2 int,c3 timestamp,c4 timestamp,index(key=(c1),ts=c4,ttl=0m));
+ expect:
+ success: true
+ -
+ id: 10
+    desc: column type does not exist
+ sqlDialect: ["HybridSQL"]
+ sql: create table {auto}(c1 varchar2 NOT NULL,c2 int,c3 timestamp,c4 timestamp,index(key=(c1),ts=c4,ttl=0m));
+ expect:
+ success: false
+ -
+ id: 11
+    desc: the col specified in the index does not exist
+ sqlDialect: ["HybridSQL"]
+ sql: create table {auto}(c1 string NOT NULL,c2 int,c3 timestamp,c4 timestamp,index(key=(c5),ts=c4,ttl=0m));
+ expect:
+ success: false
+ -
+ id: 12
+    desc: the ts specified in the index does not exist
+ sqlDialect: ["HybridSQL"]
+ sql: create table {auto}(c1 string NOT NULL,c2 int,c3 timestamp,c4 timestamp,index(key=(c1),ts=c5,ttl=0m));
+ expect:
+ success: false
+ -
+ id: 13
+    desc: create an index without specifying ts
+ sqlDialect: ["HybridSQL"]
+ sql: create table {auto}(c1 string NOT NULL,c2 int,c3 timestamp,c4 timestamp,index(key=(c1)));
+ expect:
+ success: true
+ -
+ id: 14
+    desc: create an index without specifying col
+ mode: standalone-unsupport
+ sqlDialect: ["HybridSQL"]
+ sql: create table {auto}(c1 string NOT NULL,c2 int,c3 timestamp,c4 timestamp,index(ts=c4,ttl=0m));
+ expect:
+ success: true
+ -
+ id: 15
+    desc: ts boundary - the specified ts is string
+ sqlDialect: ["HybridSQL"]
+ sql: create table {auto}(c1 string NOT NULL,c2 string,c3 timestamp,c4 timestamp,index(key=(c1),ts=c2,ttl=0m));
+ expect:
+ success: false
+ -
+ id: 16
+    desc: ts boundary - the specified ts is int
+ sqlDialect: ["HybridSQL"]
+ sql: create table {auto}(c1 string NOT NULL,c2 int,c3 timestamp,c4 timestamp,index(key=(c1),ts=c2,ttl=0m));
+ expect:
+ success: false
+ -
+ id: 17
+    desc: ts boundary - the specified ts is smallint
+ sqlDialect: ["HybridSQL"]
+ sql: create table {auto}(c1 string NOT NULL,c2 smallint,c3 timestamp,c4 timestamp,index(key=(c1),ts=c2,ttl=0m));
+ expect:
+ success: false
+ -
+ id: 18
+    desc: ts boundary - the specified ts is date
+ sqlDialect: ["HybridSQL"]
+ sql: create table {auto}(c1 string NOT NULL,c2 date,c3 timestamp,c4 timestamp,index(key=(c1),ts=c2,ttl=0m));
+ expect:
+ success: false
+ -
+ id: 19
+    desc: ts boundary - the specified ts is float
+ sqlDialect: ["HybridSQL"]
+ sql: create table {auto}(c1 string NOT NULL,c2 float,c3 timestamp,c4 timestamp,index(key=(c1),ts=c2,ttl=0m));
+ expect:
+ success: false
+ -
+ id: 20
+    desc: ts boundary - the specified ts is double
+ sqlDialect: ["HybridSQL"]
+ sql: create table {auto}(c1 string NOT NULL,c2 double,c3 timestamp,c4 timestamp,index(key=(c1),ts=c2,ttl=0m));
+ expect:
+ success: false
+ -
+ id: 21
+    desc: ts boundary - the specified ts is bool
+ sqlDialect: ["HybridSQL"]
+ sql: create table {auto}(c1 string NOT NULL,c2 bool,c3 timestamp,c4 timestamp,index(key=(c1),ts=c2,ttl=0m));
+ expect:
+ success: false
+ -
+ id: 22
+    desc: table name contains special characters
+ sqlDialect: ["HybridSQL"]
+ sql: create table auto$#kJKytImk(c1 string NOT NULL,c2 int,c3 timestamp,c4 timestamp,index(key=(c1),ts=c4,ttl=0m));
+ expect:
+ success: false
+ -
+ id: 23
+    desc: column name contains special characters
+ sqlDialect: ["HybridSQL"]
+ sql: create table {auto}(c1$# string NOT NULL,c2 int,c3 timestamp,c4 timestamp,index(key=(c1),ts=c4,ttl=0m));
+ expect:
+ success: false
+ -
+ id: 24
+    desc: the specified ts is bigint
+ inputs:
+ -
+ columns : ["c1 string","c2 bigint","c3 timestamp","c4 timestamp"]
+ indexs: ["index1:c1:c2"]
+ rows:
+ - [aa,1,1590738990000,1590738989000]
+ sql: select * from {0};
+ expect:
+ columns: ["c1 string","c2 bigint","c3 timestamp","c4 timestamp"]
+ -
+ id: 25
+    desc: the specified ts is bigint, with ttl
+ sqlDialect: ["HybridSQL"]
+ inputs:
+ -
+ create: create table {0} (c1 string NOT NULL,c2 bigint,c3 timestamp, c4 timestamp,index(key=(c1),ts=c2,ttl=0m));
+ insert: insert into {0} values ("aa", 1, 1590738990000, 1590738989000);
+ sql: select * from {0};
+ expect:
+ columns: ["c1 string","c2 bigint","c3 timestamp","c4 timestamp"]
+ -
+ id: 26
+    desc: create a table that already exists
+ inputs:
+ -
+ columns : ["c1 string","c2 bigint","c3 timestamp","c4 timestamp"]
+ indexs: ["index1:c1:c4"]
+ sql: create table {0}(c1 string NOT NULL,c2 int,c3 timestamp,c4 timestamp,index(key=(c1),ts=c4,ttl=0m));
+ expect:
+ success: false
+ -
+ id: 27
+    desc: key boundary - bigint as the index column
+ inputs:
+ -
+ columns : ["c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"]
+ indexs: ["index1:c4:c7"]
+ rows:
+ - ["aa",2,3,1.1,2.1,1590738989000,"2020-05-01"]
+ sql: select * from {0};
+ expect:
+ columns : ["c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"]
+ rows:
+ - ["aa",2,3,1.1,2.1,1590738989000,"2020-05-01"]
+ -
+ id: 28
+    desc: key boundary - int as the index column
+ inputs:
+ -
+ columns : ["c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"]
+ indexs: ["index1:c3:c7"]
+ rows:
+ - ["aa",2,3,1.1,2.1,1590738989000,"2020-05-01"]
+ sql: select * from {0};
+ expect:
+ columns : ["c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"]
+ rows:
+ - ["aa",2,3,1.1,2.1,1590738989000,"2020-05-01"]
+ -
+ id: 29
+    desc: key boundary - timestamp as the index column
+ inputs:
+ -
+ columns : ["c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"]
+ indexs: ["index1:c7:c4"]
+ rows:
+ - ["aa",2,3,1.1,2.1,1590738989000,"2020-05-01"]
+ sql: select * from {0};
+ expect:
+ columns : ["c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"]
+ rows:
+ - ["aa",2,3,1.1,2.1,1590738989000,"2020-05-01"]
+ -
+ id: 30
+    desc: key boundary - date as the index column
+ sqlDialect: ["HybridSQL"]
+ sql: |
+ create table {auto} (
+ c1 string, c3 int, c4 bigint, c5 float, c6 double, c7 timestamp, c8 date,
+ index(key=(c8),ts=c7));
+ expect:
+ success: true
+ -
+ id: 31
+    desc: key boundary - float as the index column
+ sqlDialect: ["HybridSQL"]
+ sql: |
+ create table {auto} (
+ c1 string, c3 int, c4 bigint, c5 float, c6 double, c7 timestamp, c8 date,
+ index(key=(c5),ts=c7));
+ expect:
+ success: false
+ -
+ id: 32
+    desc: key boundary - double as the index column
+ sqlDialect: ["HybridSQL"]
+ sql: |
+ create table {auto} (
+ c1 string, c3 int, c4 bigint, c5 float, c6 double, c7 timestamp, c8 date,
+ index(key=(c6),ts=c7));
+ expect:
+ success: false
+ -
+ id: 33
+    desc: key boundary - smallint as the index column
+ inputs:
+ -
+ columns : ["c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"]
+ indexs: ["index1:c2:c7"]
+ rows:
+ - ["aa",1,2,3,1.1,2.1,1590738989000,"2020-05-01",true]
+ sql: select * from {0};
+ expect:
+ columns : ["c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"]
+ rows:
+ - ["aa",1,2,3,1.1,2.1,1590738989000,"2020-05-01",true]
+ -
+ id: 34
+    desc: key boundary - bool as the index column
+ inputs:
+ -
+ columns : ["c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"]
+ indexs: ["index1:c9:c7"]
+ rows:
+ - ["aa",1,2,3,1.1,2.1,1590738989000,"2020-05-01",true]
+ sql: select * from {0};
+ expect:
+ columns : ["c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"]
+ rows:
+ - ["aa",1,2,3,1.1,2.1,1590738989000,"2020-05-01",true]
+ -
+ id: 35
+    desc: key boundary - key and ts are the same column
+ sqlDialect: ["HybridSQL"]
+ sql: |
+ create table {auto} (
+ c1 string, c3 int, c4 bigint, c5 float, c6 double, c7 timestamp, c8 date,
+ index(key=(c4),ts=c4));
+ expect:
+ success: true
+ - id: 36
+ desc: create col with __prefix
+ sqlDialect: ["HybridSQL"]
+ sql: |
+ create table {auto} (
+ __c1 string, __c3 int, __ts bigint,
+ index(key=__c1, ts=__ts));
+ expect:
+ success: true
+ -
+ id: 37
+ desc: create with replica num
+ sqlDialect: ["HybridSQL"]
+ mode: standalone-unsupport
+ sql: |
+ create table {auto} (
+ c1 string, c3 int, c4 bigint, c5 float, c6 double, c7 timestamp, c8 date,
+ index(key=(c3), ts=c4))
+ options (
+ replicanum = 2
+ );
+ expect:
+ success: true
+ -
+ id: 38
+ desc: create with replica num and distribution
+ mode: standalone-unsupport
+ sqlDialect: ["HybridSQL"]
+ sql: |
+ create table {auto} (
+ c1 string, c3 int, c4 bigint, c5 float, c6 double, c7 timestamp, c8 date,
+ index(key=(c3),ts=c4))
+ options (
+ replicanum = 2,
+ distribution = [ ('{tb_endpoint_0}', ['{tb_endpoint_1}'])]
+ );
+ expect:
+ success: true
+ -
+ id: 39
+ desc: create with replica num and distribution
+ mode: standalone-unsupport
+ sqlDialect: ["HybridSQL"]
+ sql: |
+ create table {auto} (
+ c1 string, c3 int, c4 bigint, c5 float, c6 double, c7 timestamp, c8 date,
+ index(key=(c3),ts=c4))
+ options (
+ replicanum = 3,
+ distribution = [ ('{tb_endpoint_0}', ['{tb_endpoint_1}'])]
+ );
+ expect:
+ success: false
+ -
+ id: 40
+ desc: create with replica num and distribution
+ mode: standalone-unsupport
+ sqlDialect: ["HybridSQL"]
+ sql: |
+ create table {auto} (
+ c1 string, c3 int, c4 bigint, c5 float, c6 double, c7 timestamp, c8 date,
+ index(key=(c3),ts=c4))
+ options (
+ replicanum = 2,
+ distribution = [ ('{tb_endpoint_0}', ['{tb_endpoint_0}'])]
+ );
+ expect:
+ success: false
+ -
+ id: 41
+ desc: create with partition num
+ sqlDialect: ["HybridSQL"]
+# mode: standalone-unsupport
+ sql: |
+ create table {auto} (
+ c1 string, c3 int, c4 bigint, c5 float, c6 double, c7 timestamp, c8 date,
+ index(key=(c3), ts=c4))
+ options (
+ partitionnum = 8
+ );
+ expect:
+ success: true
+ -
+ id: 42
+ desc: create with partition num
+ sqlDialect: ["HybridSQL"]
+ mode: standalone-unsupport
+ sql: |
+ create table {auto} (
+ c1 string, c3 int, c4 bigint, c5 float, c6 double, c7 timestamp, c8 date,
+ index(key=(c3), ts=c4))
+ options (
+ replicanum = 2,
+ partitionnum = 8
+ );
+ expect:
+ success: true
+ -
+ id: 43
+ desc: no index
+ sqlDialect: ["HybridSQL"]
+ sql: |
+ create table {auto} (
+ c1 string, c3 int, c4 bigint, c5 float, c6 double, c7 timestamp, c8 date);
+ expect:
+ success: true
+ -
+ id: 44
+ desc: bool-insert-1
+ inputs:
+ -
+ columns : ["c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"]
+ indexs: ["index1:c1:c7"]
+ rows:
+ - ["aa",1,2,3,1.1,2.1,1590738989000,"2020-05-01",1]
+ sql: select * from {0};
+ expect:
+ columns : ["c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"]
+ rows:
+ - ["aa",1,2,3,1.1,2.1,1590738989000,"2020-05-01",true]
+ -
+ id: 45
+ desc: create with two no ts index
+ sqlDialect: ["HybridSQL"]
+ sql: |
+ create table {auto} (
+ c1 string, c3 int, c4 bigint, c5 float, c6 double, c7 timestamp, c8 date,
+ index(key=(c3), ttl=(10m,10), ttl_type=absorlat),
+ index(key=(c4), ttl=(10m,10), ttl_type=absorlat));
+ expect:
+ success: true
+ -
+ id: 46
+ desc: one has ts and another has not
+ sqlDialect: ["HybridSQL"]
+ sql: |
+ create table {auto} (
+ c1 string, c3 int, c4 bigint, c5 float, c6 double, c7 timestamp, c8 date,
+ index(key=(c3), ttl=(10m,10), ttl_type=absorlat),
+ index(key=(c4), ts=c4, ttl=(10m,10), ttl_type=absorlat));
+ expect:
+ success: true
+ -
+ id: 47
+ desc: create with only key
+ sqlDialect: ["HybridSQL"]
+ sql: |
+ create table {auto} (
+ c1 string, c3 int, c4 bigint, c5 float, c6 double, c7 timestamp, c8 date,
+ index(key=(c3)),
+ index(key=(c4)));
+ expect:
+ success: true
+ -
+ id: 48
+ desc: insert min int and max int
+ sqlDialect: ["HybridSQL"]
+ inputs:
+ -
+ create: |
+ create table {0} (
+ id int64,
+ order_0_timestamp timestamp,
+ c_2_int32 int32,
+ index(key=(id),ts=order_0_timestamp));
+ insert: |
+ insert into {0} values
+ (0,1538443518561,-2147483648);
+ sql: select * from {0};
+ expect:
+ success: true
diff --git a/cases/integration_test/ddl/test_create_index.yaml b/cases/integration_test/ddl/test_create_index.yaml
new file mode 100644
index 00000000000..6d4ce9e14cd
--- /dev/null
+++ b/cases/integration_test/ddl/test_create_index.yaml
@@ -0,0 +1,761 @@
+# Copyright 2021 4Paradigm
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+db: test_zw
+debugs: []
+version: 0.5.0
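+# Note (assumed semantics): the "idxs" expectation appears to mirror the output of "desc {0}";
+# judging by the cases below, absolute ttl values are normalized to minutes (1d -> 1440min,
+# 1h -> 60min, sub-minute values such as 1s round up to 1min), while a latest ttl stays a row count.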
+cases:
+ -
+ id: 0
+    desc: smoke test
+ inputs:
+ -
+ columns : ["c1 string","c2 int","c3 timestamp","c4 timestamp"]
+ indexs: ["index1:c1:c4"]
+ rows:
+ - [aa,1,1590738990000,1590738989000]
+ sqls:
+ - CREATE INDEX index1 ON {0} (c2) OPTIONS (ts=c4, ttl=100m, ttl_type=absolute);
+ - desc {0};
+ expect:
+ success: true
+ idxs:
+ -
+ keys: ["c1"]
+ ts: "c4"
+ ttl: 0min
+ ttlType: kAbsoluteTime
+ -
+ keys: ["c2"]
+ ts: "c4"
+ ttl: 100min
+ ttlType: kAbsoluteTime
+ -
+ id: 1
+    desc: specify multiple columns
+ inputs:
+ -
+ columns : ["c1 string","c2 int","c3 timestamp","c4 timestamp"]
+ indexs: ["index1:c1:c4"]
+ rows:
+ - [aa,1,1590738990000,1590738989000]
+ sqls:
+ - CREATE INDEX index1 ON {0} (c1,c2) OPTIONS (ts=c4, ttl=100m, ttl_type=absolute);
+ - desc {0};
+ expect:
+ success: true
+ idxs:
+ -
+ keys: ["c1"]
+ ts: "c4"
+ ttl: 0min
+ ttlType: kAbsoluteTime
+ -
+ keys: ["c1","c2"]
+ ts: "c4"
+ ttl: 100min
+ ttlType: kAbsoluteTime
+ -
+ id: 2
+    desc: ts not specified
+ inputs:
+ -
+ columns : ["c1 string","c2 int","c3 timestamp","c4 timestamp"]
+ indexs: ["index1:c1:c4"]
+ rows:
+ - [aa,1,1590738990000,1590738989000]
+ sql: CREATE INDEX index1 ON {0} (c1,c2) OPTIONS (ttl=100, ttl_type=absolute);
+ expect:
+ success: false
+ -
+ id: 3
+    desc: ttl not specified
+ inputs:
+ -
+ columns : ["c1 string","c2 int","c3 timestamp","c4 timestamp"]
+ indexs: ["index1:c1:c4"]
+ rows:
+ - [aa,1,1590738990000,1590738989000]
+ sqls:
+ - CREATE INDEX index1 ON {0} (c2) OPTIONS (ts=c4, ttl_type=absolute);
+ - desc {0};
+ expect:
+ success: true
+ idxs:
+ -
+ keys: ["c1"]
+ ts: "c4"
+ ttl: 0min
+ ttlType: kAbsoluteTime
+ -
+ keys: ["c2"]
+ ts: "c4"
+ ttl: 0min
+ ttlType: kAbsoluteTime
+ -
+ id: 4
+    desc: ttl_type not specified
+ inputs:
+ -
+ columns : ["c1 string","c2 int","c3 timestamp","c4 timestamp"]
+ indexs: ["index1:c1:c4"]
+ rows:
+ - [aa,1,1590738990000,1590738989000]
+ sqls:
+ - CREATE INDEX index1 ON {0} (c2) OPTIONS (ts=c4, ttl=100m);
+ - desc {0};
+ expect:
+ success: true
+ idxs:
+ -
+ keys: ["c1"]
+ ts: "c4"
+ ttl: 0min
+ ttlType: kAbsoluteTime
+ -
+ keys: ["c2"]
+ ts: "c4"
+ ttl: 100min
+ ttlType: kAbsoluteTime
+ -
+ id: 5
+ desc: ttl_type=latest,ttl=1d
+ inputs:
+ -
+ columns : ["c1 string","c2 int","c3 timestamp","c4 timestamp"]
+ indexs: ["index1:c1:c4"]
+ rows:
+ - [aa,1,1590738990000,1590738989000]
+ sql: CREATE INDEX index1 ON {0} (c2) OPTIONS (ts=c4, ttl=1d, ttl_type=latest);
+ expect:
+ success: false
+ -
+ id: 6
+ desc: ttl_type=absolute,ttl=1d
+ inputs:
+ -
+ columns : ["c1 string","c2 int","c3 timestamp","c4 timestamp"]
+ indexs: ["index1:c1:c4"]
+ rows:
+ - [aa,1,1590738990000,1590738989000]
+ sqls:
+ - CREATE INDEX index1 ON {0} (c2) OPTIONS (ts=c4, ttl=1d, ttl_type=absolute);
+ - desc {0};
+ expect:
+ success: true
+ idxs:
+ -
+ keys: ["c1"]
+ ts: "c4"
+ ttl: 0min
+ ttlType: kAbsoluteTime
+ -
+ keys: ["c2"]
+ ts: "c4"
+ ttl: 1440min
+ ttlType: kAbsoluteTime
+ -
+ id: 7
+ desc: ttl_type=absolute,ttl=1h
+ inputs:
+ -
+ columns : ["c1 string","c2 int","c3 timestamp","c4 timestamp"]
+ indexs: ["index1:c1:c4"]
+ rows:
+ - [aa,1,1590738990000,1590738989000]
+ sqls:
+ - CREATE INDEX index1 ON {0} (c2) OPTIONS (ts=c4, ttl=1h, ttl_type=absolute);
+ - desc {0};
+ expect:
+ success: true
+ idxs:
+ -
+ keys: ["c1"]
+ ts: "c4"
+ ttl: 0min
+ ttlType: kAbsoluteTime
+ -
+ keys: ["c2"]
+ ts: "c4"
+ ttl: 60min
+ ttlType: kAbsoluteTime
+ -
+ id: 8
+ desc: ttl_type=absolute,ttl=1m
+ inputs:
+ -
+ columns : ["c1 string","c2 int","c3 timestamp","c4 timestamp"]
+ indexs: ["index1:c1:c4"]
+ rows:
+ - [aa,1,1590738990000,1590738989000]
+ sqls:
+ - CREATE INDEX index1 ON {0} (c2) OPTIONS (ts=c4, ttl=1m, ttl_type=absolute);
+ - desc {0};
+ expect:
+ success: true
+ idxs:
+ -
+ keys: ["c1"]
+ ts: "c4"
+ ttl: 0min
+ ttlType: kAbsoluteTime
+ -
+ keys: ["c2"]
+ ts: "c4"
+ ttl: 1min
+ ttlType: kAbsoluteTime
+ -
+ id: 9
+ desc: ttl_type=absolute,ttl=1s
+ inputs:
+ -
+ columns : ["c1 string","c2 int","c3 timestamp","c4 timestamp"]
+ indexs: ["index1:c1:c4"]
+ rows:
+ - [aa,1,1590738990000,1590738989000]
+ sqls:
+ - CREATE INDEX index1 ON {0} (c2) OPTIONS (ts=c4, ttl=1s, ttl_type=absolute);
+ - desc {0};
+ expect:
+ success: true
+ idxs:
+ -
+ keys: ["c1"]
+ ts: "c4"
+ ttl: 0min
+ ttlType: kAbsoluteTime
+ -
+ keys: ["c2"]
+ ts: "c4"
+ ttl: 1min
+ ttlType: kAbsoluteTime
+ -
+ id: 10
+ desc: ttl_type=absolute,ttl=1
+ inputs:
+ -
+ columns : ["c1 string","c2 int","c3 timestamp","c4 timestamp"]
+ indexs: ["index1:c1:c4"]
+ rows:
+ - [aa,1,1590738990000,1590738989000]
+ sql: CREATE INDEX index1 ON {0} (c2) OPTIONS (ts=c4, ttl=1, ttl_type=absolute);
+ expect:
+ success: false
+ -
+ id: 11
+ desc: ttl_type=absolute,ttl=0
+ inputs:
+ -
+ columns : ["c1 string","c2 int","c3 timestamp","c4 timestamp"]
+ indexs: ["index1:c1:c4"]
+ rows:
+ - [aa,1,1590738990000,1590738989000]
+ sql: CREATE INDEX index1 ON {0} (c2) OPTIONS (ts=c4, ttl=0, ttl_type=absolute);
+ expect:
+ success: false
+ -
+ id: 12
+ desc: ttl_type=absolute,ttl=0m
+ inputs:
+ -
+ columns : ["c1 string","c2 int","c3 timestamp","c4 timestamp"]
+ indexs: ["index1:c1:c4"]
+ rows:
+ - [aa,1,1590738990000,1590738989000]
+ sqls:
+ - CREATE INDEX index1 ON {0} (c2) OPTIONS (ts=c4, ttl=0m, ttl_type=absolute);
+ - desc {0};
+ expect:
+ success: true
+ idxs:
+ -
+ keys: ["c1"]
+ ts: "c4"
+ ttl: 0min
+ ttlType: kAbsoluteTime
+ -
+ keys: ["c2"]
+ ts: "c4"
+ ttl: 0min
+ ttlType: kAbsoluteTime
+ -
+ id: 13
+ desc: ttl_type=latest,ttl=0
+ inputs:
+ -
+ columns : ["c1 string","c2 int","c3 timestamp","c4 timestamp"]
+ indexs: ["index1:c1:c4"]
+ rows:
+ - [aa,1,1590738990000,1590738989000]
+ sqls:
+ - CREATE INDEX index1 ON {0} (c2) OPTIONS (ts=c4, ttl=0, ttl_type=latest);
+ - desc {0};
+ expect:
+ success: true
+ idxs:
+ -
+ keys: ["c1"]
+ ts: "c4"
+ ttl: 0min
+ ttlType: kAbsoluteTime
+ -
+ keys: ["c2"]
+ ts: "c4"
+ ttl: 0
+ ttlType: kLatestTime
+ -
+ id: 14
+ desc: ttl_type=latest,ttl=100
+ inputs:
+ -
+ columns : ["c1 string","c2 int","c3 timestamp","c4 timestamp"]
+ indexs: ["index1:c1:c4"]
+ rows:
+ - [aa,1,1590738990000,1590738989000]
+ sqls:
+ - CREATE INDEX index1 ON {0} (c2) OPTIONS (ts=c4, ttl=100, ttl_type=latest);
+ - desc {0};
+ expect:
+ success: true
+ idxs:
+ -
+ keys: ["c1"]
+ ts: "c4"
+ ttl: 0min
+ ttlType: kAbsoluteTime
+ -
+ keys: ["c2"]
+ ts: "c4"
+ ttl: 100
+ ttlType: kLatestTime
+ -
+ id: 15
+ desc: ttl_type=absandlat,ttl=(10m,10)
+ inputs:
+ -
+ columns : ["c1 string","c2 int","c3 timestamp","c4 timestamp"]
+ indexs: ["index1:c1:c4"]
+ rows:
+ - [aa,1,1590738990000,1590738989000]
+ sqls:
+ - CREATE INDEX index1 ON {0} (c2) OPTIONS (ts=c4, ttl=(10m,10), ttl_type=absandlat);
+ - desc {0};
+ expect:
+ success: true
+ idxs:
+ -
+ keys: ["c1"]
+ ts: "c4"
+ ttl: 0min
+ ttlType: kAbsoluteTime
+ -
+ keys: ["c2"]
+ ts: "c4"
+ ttl: 10min&&10
+ ttlType: kAbsAndLat
+ -
+ id: 16
+ desc: ttl_type=absorlat,ttl=(10m,10)
+ inputs:
+ -
+ columns : ["c1 string","c2 int","c3 timestamp","c4 timestamp"]
+ indexs: ["index1:c1:c4"]
+ rows:
+ - [aa,1,1590738990000,1590738989000]
+ sqls:
+ - CREATE INDEX index1 ON {0} (c2) OPTIONS (ts=c4, ttl=(10m,10), ttl_type=absorlat);
+ - desc {0};
+ expect:
+ success: true
+ idxs:
+ -
+ keys: ["c1"]
+ ts: "c4"
+ ttl: 0min
+ ttlType: kAbsoluteTime
+ -
+ keys: ["c2"]
+ ts: "c4"
+ ttl: 10min||10
+ ttlType: kAbsOrLat
+ -
+ id: 17
+ desc: ttl_type=absandlat,ttl=(10,10m)
+ inputs:
+ -
+ columns : ["c1 string","c2 int","c3 timestamp","c4 timestamp"]
+ indexs: ["index1:c1:c4"]
+ rows:
+ - [aa,1,1590738990000,1590738989000]
+ sql: CREATE INDEX index1 ON {0} (c2) OPTIONS (ts=c4, ttl=(10,10m), ttl_type=absandlat);
+ expect:
+ success: false
+ -
+ id: 18
+ desc: ttl_type=absorlat,ttl=(10,10m)
+ inputs:
+ -
+ columns : ["c1 string","c2 int","c3 timestamp","c4 timestamp"]
+ indexs: ["index1:c1:c4"]
+ rows:
+ - [aa,1,1590738990000,1590738989000]
+ sql: CREATE INDEX index1 ON {0} (c2) OPTIONS (ts=c4, ttl=(10,10m), ttl_type=absorlat);
+ expect:
+ success: false
+ -
+ id: 19
+    desc: ttl_type is an unrecognized string
+ inputs:
+ -
+ columns : ["c1 string","c2 int","c3 timestamp","c4 timestamp"]
+ indexs: ["index1:c1:c4"]
+ rows:
+ - [aa,1,1590738990000,1590738989000]
+ sql: CREATE INDEX index1 ON {0} (c2) OPTIONS (ts=c4, ttl=1, ttl_type=test);
+ expect:
+ success: false
+ -
+ id: 20
+    desc: ttl is a non-numeric string
+ inputs:
+ -
+ columns : ["c1 string","c2 int","c3 timestamp","c4 timestamp"]
+ indexs: ["index1:c1:c4"]
+ rows:
+ - [aa,1,1590738990000,1590738989000]
+ sql: CREATE INDEX index1 ON {0} (c2) OPTIONS (ts=c4, ttl=aaa, ttl_type=absolute);
+ expect:
+ success: false
+ -
+ id: 21
+    desc: ttl_type=absolute, data expired
+ mode: standalone-unsupport
+    tags: ["standalone-version bug: select result is wrong after adding the index, @denglong, https://github.com/4paradigm/OpenMLDB/issues/708"]
+ inputs:
+ -
+ columns: ["id int","c1 string","c2 int","c3 timestamp", "c4 timestamp"]
+ indexs: ["index1:c1:c4"]
+ rows:
+ - [1,"aa", 1, 1590738990000,"{currentTime}-600001"]
+ - [2,"aa", 1, 1590738990000,"{currentTime}-600001"]
+ - [3,"aa", 3, 1590738990000,"{currentTime}-60"]
+ - [4,"aa", 1, 1590738990000,"{currentTime}"]
+ sqls:
+ - CREATE INDEX index1 ON {0} (c2) OPTIONS (ts=c4, ttl=10m, ttl_type=absolute);
+ - select id,c1,c2,c3 from {0} where c2 = 1;
+ expect:
+ columns: ["id int","c1 string","c2 int","c3 timestamp"]
+ order: id
+ rows:
+ - [4,"aa", 1, 1590738990000]
+ -
+ id: 22
+    desc: ttl_type=latest, some data expired
+ inputs:
+ -
+ columns: ["id int","c1 string","c2 int","c3 timestamp", "c4 timestamp"]
+ indexs: ["index1:c1:c4"]
+ rows:
+ - [1,"aa", 1, 1590738990000,1590738990000]
+ - [2,"aa", 1, 1590738990000,1590738991000]
+ - [3,"aa", 3, 1590738990000,1590738992000]
+ - [4,"aa", 1, 1590738990000,1590738993000]
+ sqls:
+ - CREATE INDEX index1 ON {0} (c2) OPTIONS (ts=c4, ttl=2, ttl_type=latest);
+ - select id,c1,c2,c3 from {0} where c2 = 1;
+ expect:
+ columns: ["id int","c1 string","c2 int","c3 timestamp"]
+ order: id
+ rows:
+ - [2,"aa", 1, 1590738990000]
+ - [4,"aa", 1, 1590738990000]
+ -
+ id: 23
+    desc: ttl_type=absandlat, some data expired
+ inputs:
+ -
+ columns: ["id int","c1 string","c2 int","c3 timestamp", "c4 timestamp"]
+ indexs: ["index1:c1:c4"]
+ rows:
+ - [1,"aa", 1, 1590738990000,1590738990000]
+ - [2,"aa", 1, 1590738990000,1590738991000]
+ - [3,"aa", 1, 1590738990000,1590738992000]
+ sqls:
+ - CREATE INDEX index1 ON {0} (c2) OPTIONS (ts=c4, ttl=(10m,2), ttl_type=absandlat);
+ - select * from {0} where c2 = 1;
+ expect:
+ columns: ["id int","c1 string","c2 int","c3 timestamp","c4 timestamp"]
+ order: id
+ rows:
+ - [2,"aa", 1, 1590738990000,1590738991000]
+ - [3,"aa", 1, 1590738990000,1590738992000]
+ -
+ id: 24
+    desc: ttl_type=absorlat, some data expired
+ inputs:
+ -
+ columns: ["c1 string","c2 int","c3 timestamp", "c4 timestamp"]
+ indexs: ["index1:c1:c4:(10m,2):absorlat"]
+ rows:
+ - ["aa", 1, 1590738990000,1590738990000]
+ - ["aa", 1, 1590738990000,1590738990000]
+ - ["aa", 1, 1590738990000,1590738990000]
+ sqls:
+ - CREATE INDEX index1 ON {0} (c2) OPTIONS (ts=c4, ttl=(10m,2), ttl_type=absorlat);
+ - select * from {0} where c2 = 1;
+ expect:
+ count: 0
+ -
+ id: 25
+    desc: ttl_type=absandlat, some data expired - boundary
+ inputs:
+ -
+ columns: ["id int","c1 string","c2 int","c3 timestamp", "c4 timestamp"]
+ indexs: ["index1:c1:c4"]
+ rows:
+ - [1,"aa", 1, 1590738990000,"{currentTime}-600001"]
+ - [2,"aa", 1, 1590738990000,"{currentTime}-600001"]
+ - [3,"aa", 1, 1590738990000,"{currentTime}-600001"]
+ - [4,"aa", 1, 1590738990000,"{currentTime}"]
+ sqls:
+ - CREATE INDEX index1 ON {0} (c2) OPTIONS (ts=c4, ttl=(10m,2), ttl_type=absandlat);
+ - select id,c1,c2,c3 from {0} where c2 = 1;
+ expect:
+ columns: ["id int","c1 string","c2 int","c3 timestamp"]
+ order: id
+ rows:
+ - [3,"aa", 1, 1590738990000]
+ - [4,"aa", 1, 1590738990000]
+ -
+ id: 26
+    desc: ttl_type=absandlat, some data expired - boundary 2
+ mode: standalone-unsupport
+    tags: ["standalone-version bug: select result is wrong after adding the index, @denglong, https://github.com/4paradigm/OpenMLDB/issues/708"]
+ inputs:
+ -
+ columns: ["id int","c1 string","c2 int","c3 timestamp", "c4 timestamp"]
+ indexs: ["index1:c1:c4"]
+ rows:
+ - [1,"aa", 1, 1590738990000,"{currentTime}-600001"]
+ - [2,"aa", 1, 1590738990000,"{currentTime}-500000"]
+ - [3,"aa", 1, 1590738990000,"{currentTime}-500000"]
+ - [4,"aa", 1, 1590738990000,"{currentTime}"]
+ sqls:
+ - CREATE INDEX index1 ON {0} (c2) OPTIONS (ts=c4, ttl=(10m,1), ttl_type=absandlat);
+ - select id,c1,c2,c3 from {0} where c2 = 1;
+ expect:
+ columns: ["id int","c1 string","c2 int","c3 timestamp"]
+ order: id
+ rows:
+ - [2,"aa", 1, 1590738990000]
+ - [3,"aa", 1, 1590738990000]
+ - [4,"aa", 1, 1590738990000]
+ -
+ id: 27
+    desc: ttl_type=absorlat, some data expired - boundary
+ mode: standalone-unsupport
+    tags: ["standalone-version bug: select result is wrong after adding the index, @denglong, https://github.com/4paradigm/OpenMLDB/issues/708"]
+ inputs:
+ -
+ columns: ["id int","c1 string","c2 int","c3 timestamp", "c4 timestamp"]
+ indexs: ["index1:c1:c4"]
+ rows:
+ - [1,"aa", 1, 1590738990000,"{currentTime}-600001"]
+ - [2,"aa", 1, 1590738990000,"{currentTime}-600001"]
+ - [3,"aa", 1, 1590738990000,"{currentTime}-500000"]
+ sqls:
+ - CREATE INDEX index1 ON {0} (c2) OPTIONS (ts=c4, ttl=(10m,2), ttl_type=absorlat);
+ - select id,c1,c2,c3 from {0} where c2 = 1;
+ expect:
+ columns: ["id int","c1 string","c2 int","c3 timestamp"]
+ order: id
+ rows:
+ - [3,"aa", 1, 1590738990000]
+ -
+ id: 28
+    desc: ttl_type=absorlat, some data expired - boundary 2
+ inputs:
+ -
+ columns: ["id int","c1 string","c2 int","c3 timestamp", "c4 timestamp"]
+ indexs: ["index1:c1:c4"]
+ rows:
+ - [1,"aa", 1, 1590738990000,"{currentTime}-600001"]
+ - [2,"aa", 1, 1590738990000,"{currentTime}-600001"]
+ - [3,"aa", 1, 1590738990000,"{currentTime}-500000"]
+ - [4,"aa", 1, 1590738990000,"{currentTime}-400000"]
+ - [5,"aa", 1, 1590738990000,"{currentTime}"]
+ sqls:
+ - CREATE INDEX index1 ON {0} (c2) OPTIONS (ts=c4, ttl=(10m,2), ttl_type=absorlat);
+ - select id,c1,c2,c3 from {0} where c2 = 1;
+ expect:
+ columns: ["id int","c1 string","c2 int","c3 timestamp"]
+ order: id
+ rows:
+ - [4,"aa", 1, 1590738990000]
+ - [5,"aa", 1, 1590738990000]
+ -
+ id: 29
+    desc: create the index first, then insert data, test expiration - absolute
+ mode: standalone-unsupport
+    tags: ["standalone-version bug: select result is wrong after adding the index, @denglong, https://github.com/4paradigm/OpenMLDB/issues/708"]
+ inputs:
+ -
+ columns: ["id int","c1 string","c2 int","c3 timestamp", "c4 timestamp"]
+ indexs: ["index1:c1:c4"]
+ rows:
+ - [1,"aa", 1, 1590738990000,"{currentTime}-600001"]
+ - [2,"aa", 1, 1590738990000,"{currentTime}-600001"]
+ - [3,"aa", 3, 1590738990000,"{currentTime}-60"]
+ - [4,"aa", 1, 1590738990000,"{currentTime}"]
+ sqls:
+ - CREATE INDEX index1 ON {0} (c2) OPTIONS (ts=c4, ttl=10m, ttl_type=absolute);
+ - insert into {0} values (5,'aa',1,1590738990000L,1590738990000L);
+ - select id,c1,c2,c3 from {0} where c2 = 1;
+ expect:
+ columns: ["id int","c1 string","c2 int","c3 timestamp"]
+ order: id
+ rows:
+ - [4,"aa", 1, 1590738990000]
+ -
+ id: 30
+    desc: create the index first, then insert data, test expiration - latest
+ inputs:
+ -
+ columns: ["id int","c1 string","c2 int","c3 timestamp", "c4 timestamp"]
+ indexs: ["index1:c1:c4"]
+ rows:
+ - [1,"aa", 1, 1590738990000,1590738990000]
+ - [2,"aa", 1, 1590738990000,1590738991000]
+ - [3,"aa", 3, 1590738990000,1590738992000]
+ - [4,"aa", 1, 1590738990000,1590738993000]
+ sqls:
+ - CREATE INDEX index1 ON {0} (c2) OPTIONS (ts=c4, ttl=2, ttl_type=latest);
+ - insert into {0} values (5,'aa',1,1590738990000L,1590738994000L);
+ - select id,c1,c2,c3 from {0} where c2 = 1;
+ expect:
+ columns: ["id int","c1 string","c2 int","c3 timestamp"]
+ order: id
+ rows:
+ - [4,"aa", 1, 1590738990000]
+ - [5,"aa", 1, 1590738990000]
+ -
+ id: 31
+    desc: create the index first, then insert data, test expiration - absandlat
+ mode: standalone-unsupport
+    tags: ["standalone-version bug: select result is wrong after adding the index, @denglong, https://github.com/4paradigm/OpenMLDB/issues/708"]
+ inputs:
+ -
+ columns: ["id int","c1 string","c2 int","c3 timestamp", "c4 timestamp"]
+ indexs: ["index1:c1:c4"]
+ rows:
+ - [1,"aa", 1, 1590738990000,"{currentTime}-600001"]
+ - [2,"aa", 1, 1590738990000,"{currentTime}-500000"]
+ - [3,"aa", 1, 1590738990000,"{currentTime}-500000"]
+ - [4,"aa", 1, 1590738990000,"{currentTime}"]
+ sqls:
+ - CREATE INDEX index1 ON {0} (c2) OPTIONS (ts=c4, ttl=(10m,1), ttl_type=absandlat);
+ - insert into {0} values (5,'aa',1,1590738990000L,1590738994000L);
+ - select id,c1,c2,c3 from {0} where c2 = 1;
+ expect:
+ columns: ["id int","c1 string","c2 int","c3 timestamp"]
+ order: id
+ rows:
+ - [2,"aa", 1, 1590738990000]
+ - [3,"aa", 1, 1590738990000]
+ - [4,"aa", 1, 1590738990000]
+ -
+ id: 32
+    desc: create the index first, then insert data, test expiration - absorlat
+ mode: standalone-unsupport
+    tags: ["standalone-version bug: select result is wrong after adding the index, @denglong, https://github.com/4paradigm/OpenMLDB/issues/708"]
+ inputs:
+ -
+ columns: ["id int","c1 string","c2 int","c3 timestamp", "c4 timestamp"]
+ indexs: ["index1:c1:c4"]
+ rows:
+ - [1,"aa", 1, 1590738990000,"{currentTime}-600001"]
+ - [2,"aa", 1, 1590738990000,"{currentTime}-600001"]
+ - [3,"aa", 1, 1590738990000,"{currentTime}-500000"]
+ sqls:
+ - CREATE INDEX index1 ON {0} (c2) OPTIONS (ts=c4, ttl=(10m,2), ttl_type=absorlat);
+ - insert into {0} values (5,'aa',1,1590738990000L,1590738994000L);
+ - select id,c1,c2,c3 from {0} where c2 = 1;
+ expect:
+ columns: ["id int","c1 string","c2 int","c3 timestamp"]
+ order: id
+ rows:
+ - [3,"aa", 1, 1590738990000]
+ -
+ id: 33
+    desc: key and ts are the same column
+ inputs:
+ -
+ columns : ["c1 string","c2 int","c3 timestamp","c4 timestamp"]
+ indexs: ["index1:c1:c4"]
+ rows:
+ - [aa,1,1590738990000,1590738989000]
+ sqls:
+ - CREATE INDEX index1 ON {0} (c1) OPTIONS (ts=c4, ttl=100m, ttl_type=absolute);
+ expect:
+ success: false
+ -
+ id: 34
+    desc: create an index whose ts is a new column
+ inputs:
+ -
+ columns : ["c1 string","c2 int","c3 timestamp","c4 timestamp"]
+ indexs: ["index1:c1:c4"]
+ rows:
+ - [aa,1,1590738990000,1590738989000]
+ sqls:
+ - CREATE INDEX index1 ON {0} (c2) OPTIONS (ts=c3, ttl=100m, ttl_type=absolute);
+ - desc {0};
+ expect:
+ success: true
+ idxs:
+ -
+ keys: ["c1"]
+ ts: "c4"
+ ttl: 0min
+ ttlType: kAbsoluteTime
+ -
+ keys: ["c2"]
+ ts: "c3"
+ ttl: 100min
+ ttlType: kAbsoluteTime
+ -
+ id: 35
+    desc: create an index without ts
+ inputs:
+ -
+ columns : ["c1 string","c2 int","c3 timestamp","c4 timestamp"]
+ indexs: ["index1:c1:c4"]
+ rows:
+ - [aa,1,1590738990000,1590738989000]
+ sqls:
+ - CREATE INDEX index1 ON {0} (c2) OPTIONS (ttl=100m, ttl_type=absolute);
+ - desc {0};
+ expect:
+ success: true
+ idxs:
+ -
+ keys: ["c1"]
+ ts: "c4"
+ ttl: 0min
+ ttlType: kAbsoluteTime
+ -
+ keys: ["c2"]
+ ts: "-"
+ ttl: 100min
+ ttlType: kAbsoluteTime
\ No newline at end of file
diff --git a/cases/integration_test/ddl/test_create_no_index.yaml b/cases/integration_test/ddl/test_create_no_index.yaml
new file mode 100644
index 00000000000..603d53498b3
--- /dev/null
+++ b/cases/integration_test/ddl/test_create_no_index.yaml
@@ -0,0 +1,283 @@
+# Copyright 2021 4Paradigm
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+db: test_zw
+debugs: []
+version: 0.5.0
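+# Note (assumed behavior under test): when no index is given, a default index appears to be expected
+# on the first column whose type can serve as a key (float/double are skipped, hence "keys: [c5]"
+# in the float-first and double-first cases), with no ts column and an absolute ttl of 0min.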
+cases:
+ -
+ id: 0
+    desc: create a table without specifying an index
+ inputs:
+ - columns: [ "id int not null","c1 int not null","c2 smallint not null","c3 float not null","c4 double not null","c5 bigint not null","c6 string not null","c7 timestamp not null","c8 date not null","c9 bool not null" ]
+ rows:
+ - [ 1, 1, 2, 3.3, 4.4, 5, "aa", 12345678, "2020-05-21", true ]
+ sql: desc {0};
+ expect:
+ idxs:
+ -
+ keys: ["id"]
+ ts: "-"
+ ttl: 0min
+ ttlType: kAbsoluteTime
+ -
+ id: 1
+    desc: first column is smallint
+ inputs:
+ -
+ create: |
+ create table {0} (
+ c2 smallint not null,
+ c3 float not null,
+ c4 double not null,
+ c5 bigint not null,
+ c6 string not null,
+ c7 timestamp not null,
+ c8 date not null,
+ c9 bool not null
+ );
+ sql: desc {0};
+ expect:
+ idxs:
+ -
+ keys: ["c2"]
+ ts: "-"
+ ttl: 0min
+ ttlType: kAbsoluteTime
+ -
+ id: 2
+    desc: first column is int
+ inputs:
+ -
+ create: |
+ create table {0} (
+ c1 int not null,
+ c2 smallint not null,
+ c3 float not null,
+ c4 double not null,
+ c5 bigint not null,
+ c6 string not null,
+ c7 timestamp not null,
+ c8 date not null,
+ c9 bool not null
+ );
+ sql: desc {0};
+ expect:
+ idxs:
+ -
+ keys: ["c1"]
+ ts: "-"
+ ttl: 0min
+ ttlType: kAbsoluteTime
+ -
+ id: 3
+    desc: first column is long
+ inputs:
+ -
+ create: |
+ create table {0} (
+ c5 bigint not null,
+ c6 string not null,
+ c7 timestamp not null,
+ c8 date not null,
+ c9 bool not null
+ );
+ sql: desc {0};
+ expect:
+ idxs:
+ -
+ keys: ["c5"]
+ ts: "-"
+ ttl: 0min
+ ttlType: kAbsoluteTime
+ -
+ id: 4
+    desc: first column is float
+ inputs:
+ -
+ create: |
+ create table {0} (
+ c3 float not null,
+ c4 double not null,
+ c5 bigint not null,
+ c6 string not null,
+ c7 timestamp not null,
+ c8 date not null,
+ c9 bool not null
+ );
+ sql: desc {0};
+ expect:
+ idxs:
+ -
+ keys: ["c5"]
+ ts: "-"
+ ttl: 0min
+ ttlType: kAbsoluteTime
+ -
+ id: 5
+    desc: first column is double
+ inputs:
+ -
+ create: |
+ create table {0} (
+ c4 double not null,
+ c5 bigint not null,
+ c6 string not null,
+ c7 timestamp not null,
+ c8 date not null,
+ c9 bool not null
+ );
+ sql: desc {0};
+ expect:
+ idxs:
+ -
+ keys: ["c5"]
+ ts: "-"
+ ttl: 0min
+ ttlType: kAbsoluteTime
+ -
+ id: 6
+    desc: first column is string
+ inputs:
+ -
+ create: |
+ create table {0} (
+ c6 string not null,
+ c7 timestamp not null,
+ c8 date not null,
+ c9 bool not null
+ );
+ sql: desc {0};
+ expect:
+ idxs:
+ -
+ keys: ["c6"]
+ ts: "-"
+ ttl: 0min
+ ttlType: kAbsoluteTime
+ -
+ id: 7
+    desc: first column is timestamp
+ inputs:
+ -
+ create: |
+ create table {0} (
+ c7 timestamp not null,
+ c8 date not null,
+ c9 bool not null
+ );
+ sql: desc {0};
+ expect:
+ idxs:
+ -
+ keys: ["c7"]
+ ts: "-"
+ ttl: 0min
+ ttlType: kAbsoluteTime
+ -
+ id: 8
+    desc: first column is date
+ inputs:
+ -
+ create: |
+ create table {0} (
+ c8 date not null,
+ c9 bool not null
+ );
+ sql: desc {0};
+ expect:
+ idxs:
+ -
+ keys: ["c8"]
+ ts: "-"
+ ttl: 0min
+ ttlType: kAbsoluteTime
+ -
+ id: 9
+    desc: first column is bool
+ inputs:
+ -
+ create: |
+ create table {0} (
+ c9 bool not null
+ );
+ sql: desc {0};
+ expect:
+ idxs:
+ -
+ keys: ["c9"]
+ ts: "-"
+ ttl: 0min
+ ttlType: kAbsoluteTime
+ -
+ id: 10
+    desc: only one column
+ inputs:
+ -
+ create: |
+ create table {0} (
+ c7 timestamp
+ );
+ sql: desc {0};
+ expect:
+ idxs:
+ -
+ keys: ["c7"]
+ ts: "-"
+ ttl: 0min
+ ttlType: kAbsoluteTime
+ -
+ id: 11
+    desc: create a table without an index, then add an index
+ inputs:
+ -
+ create: |
+ create table {0} (
+ c5 bigint not null,
+ c6 string not null,
+ c7 timestamp not null,
+ c8 date not null,
+ c9 bool not null
+ );
+ sqls:
+ - "CREATE INDEX index1 ON {0} (c6) OPTIONS (ts=c7, ttl=100m, ttl_type=absolute);"
+ - "desc {0};"
+ expect:
+ idxs:
+ -
+ keys: ["c5"]
+ ts: "-"
+ ttl: 0min
+ ttlType: kAbsoluteTime
+ -
+ keys: ["c6"]
+ ts: "c7"
+ ttl: 100min
+ ttlType: kAbsoluteTime
+ -
+ id: 16
+    desc: create a table with an index specified, no default index is added
+ inputs:
+ - columns: [ "id int not null","c1 int not null","c2 smallint not null","c3 float not null","c4 double not null","c5 bigint not null","c6 string not null","c7 timestamp not null","c8 date not null","c9 bool not null" ]
+ rows:
+ - [ 1, 1, 2, 3.3, 4.4, 5, "aa", 12345678, "2020-05-21", true ]
+ indexs: ["index1:c1:c5"]
+ sql: desc {0};
+ expect:
+ idxs:
+ -
+ keys: ["c1"]
+ ts: "c5"
+ ttl: 0min
+ ttlType: kAbsoluteTime
diff --git a/cases/integration_test/ddl/test_options.yaml b/cases/integration_test/ddl/test_options.yaml
new file mode 100644
index 00000000000..d35fb6bec31
--- /dev/null
+++ b/cases/integration_test/ddl/test_options.yaml
@@ -0,0 +1,455 @@
+# Copyright 2021 4Paradigm
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+db: test_zw
+debugs: []
+version: 0.5.0
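+# Note (assumed conventions): "{tb_endpoint_N}" placeholders appear to stand for tablet endpoints
+# substituted at runtime; judging by cases 17 and 18, the cluster-mode defaults when an option is
+# omitted appear to be replicanum=3 and partitionnum=8.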
+cases:
+ -
+ id: 0
+    desc: create a table without options
+ sql: create table {auto} (c1 string NOT NULL,c2 int,c3 timestamp,c4 timestamp,index(key=(c1),ts=c4,ttl=0m));
+ expect:
+ success: true
+ -
+ id: 1
+    desc: smoke test
+ mode: standalone-unsupport
+ inputs:
+ - name: t3
+ sql: |
+ create table t3 (c1 string NOT NULL,c2 int,c3 timestamp,c4 timestamp,index(key=(c1),ts=c4,ttl=0m))
+ options (
+ partitionnum = 1,
+ replicanum = 3,
+ distribution = [ ('{tb_endpoint_1}', [ '{tb_endpoint_0}','{tb_endpoint_2}' ])]
+ );
+ expect:
+ name: t3
+ success: true
+ options:
+ partitionNum: 1
+ replicaNum: 3
+ -
+ id: 2
+    desc: create a table without partitionnum
+ inputs:
+ - name: t3
+ sql: |
+ create table t3 (c1 string NOT NULL,c2 int,c3 timestamp,c4 timestamp,index(key=(c1),ts=c4,ttl=0m))
+ options (
+ replicanum = 1,
+ distribution = [ ('{tb_endpoint_0}',[])]
+ );
+ expect:
+ name: t3
+ success: true
+ options:
+ partitionNum: 1
+ replicaNum: 1
+ -
+ id: 3
+    desc: create a table without replicanum
+ inputs:
+ - name: t3
+ sql: |
+ create table t3 (c1 string NOT NULL,c2 int,c3 timestamp,c4 timestamp,index(key=(c1),ts=c4,ttl=0m))
+ options (
+ partitionnum = 1,
+ distribution = [ ('{tb_endpoint_0}',['{tb_endpoint_1}','{tb_endpoint_2}'])]
+ );
+ expect:
+ name: t3
+ success: true
+ options:
+ partitionNum: 1
+ replicaNum: 3
+ -
+ id: 4
+    desc: create a table without distribution
+ mode: standalone-unsupport
+ inputs:
+ - name: t3
+ sql: |
+ create table t3 (c1 string NOT NULL,c2 int,c3 timestamp,c4 timestamp,index(key=(c1),ts=c4,ttl=0m))
+ options (
+ partitionnum = 1,
+ replicanum = 3
+ );
+ expect:
+ name: t3
+ success: true
+ options:
+ partitionNum: 1
+ replicaNum: 3
+ -
+ id: 5
+    desc: multiple distribution entries
+ mode: standalone-unsupport
+ inputs:
+ - name: t3
+ sql: |
+ create table t3 (c1 string NOT NULL,c2 int,c3 timestamp,c4 timestamp,index(key=(c1),ts=c4,ttl=0m))
+ options (
+ partitionnum = 2,
+ replicanum = 3,
+ distribution = [ ('{tb_endpoint_0}', [ '{tb_endpoint_1}','{tb_endpoint_2}' ]),('{tb_endpoint_0}', [ '{tb_endpoint_1}','{tb_endpoint_2}' ])]
+ );
+ expect:
+ name: t3
+ success: true
+ options:
+ partitionNum: 2
+ replicaNum: 3
+ -
+ id: 6
+    desc: partitionnum=0 with distribution specified
+ mode: standalone-unsupport
+ inputs:
+ - name: t3
+ sql: |
+ create table t3 (c1 string NOT NULL,c2 int,c3 timestamp,c4 timestamp,index(key=(c1),ts=c4,ttl=0m))
+ options (
+ partitionnum = 0,
+ replicanum = 3,
+ distribution = [ ('{tb_endpoint_0}', [ '{tb_endpoint_1}','{tb_endpoint_2}' ])]
+ );
+ expect:
+ success: false
+ -
+ id: 7
+ desc: partitionnum=10
+ mode: standalone-unsupport
+ inputs:
+ - name: t3
+ sql: |
+ create table t3 (c1 string NOT NULL,c2 int,c3 timestamp,c4 timestamp,index(key=(c1),ts=c4,ttl=0m))
+ options (
+ partitionnum = 10,
+ replicanum = 3
+ );
+ expect:
+ name: t3
+ success: true
+ options:
+ partitionNum: 10
+ replicaNum: 3
+ -
+ id: 8
+ desc: replicanum=0
+ inputs:
+ - name: t3
+ sql: |
+ create table t3 (c1 string NOT NULL,c2 int,c3 timestamp,c4 timestamp,index(key=(c1),ts=c4,ttl=0m))
+ options (
+ partitionnum = 1,
+ replicanum = 0,
+ distribution = [ ('{tb_endpoint_0}',[])]
+ );
+ expect:
+ success: false
+ -
+ id: 9
+ desc: replicanum=1
+ inputs:
+ - name: t3
+ sql: |
+ create table t3 (c1 string NOT NULL,c2 int,c3 timestamp,c4 timestamp,index(key=(c1),ts=c4,ttl=0m))
+ options (
+ partitionnum = 1,
+ replicanum = 1,
+ distribution = [ ('{tb_endpoint_0}',[])]
+ );
+ expect:
+ name: t3
+ success: true
+ options:
+ partitionNum: 1
+ replicaNum: 1
+ -
+ id: 10
+ desc: replicanum=4
+ mode: standalone-unsupport
+ inputs:
+ - name: t3
+ sql: |
+ create table t3 (c1 string NOT NULL,c2 int,c3 timestamp,c4 timestamp,index(key=(c1),ts=c4,ttl=0m))
+ options (
+ partitionnum = 1,
+ replicanum = 4,
+ distribution = [ ('{tb_endpoint_0}',[])]
+ );
+ expect:
+ success: false
+ -
+ id: 11
+    desc: distribution has fewer endpoints than replicanum
+ inputs:
+ - name: t3
+ sql: |
+ create table t3 (c1 string NOT NULL,c2 int,c3 timestamp,c4 timestamp,index(key=(c1),ts=c4,ttl=0m))
+ options (
+ partitionnum = 1,
+ replicanum = 2,
+ distribution = [ ('{tb_endpoint_0}')]
+ );
+ expect:
+ success: false
+ -
+ id: 12
+    desc: distribution has more endpoints than replicanum
+ mode: standalone-unsupport
+ inputs:
+ - name: t3
+ sql: |
+ create table t3 (c1 string NOT NULL,c2 int,c3 timestamp,c4 timestamp,index(key=(c1),ts=c4,ttl=0m))
+ options (
+ partitionnum = 1,
+ replicanum = 1,
+ distribution = [ ('{tb_endpoint_0}',['{tb_endpoint_1}'])]
+ );
+ expect:
+ success: false
+ -
+ id: 13
+    desc: number of distribution entries does not match partitionnum
+ mode: standalone-unsupport
+ inputs:
+ - name: t3
+ sql: |
+ create table t3 (c1 string NOT NULL,c2 int,c3 timestamp,c4 timestamp,index(key=(c1),ts=c4,ttl=0m))
+ options (
+ partitionnum = 1,
+ replicanum = 1,
+ distribution = [ ('{tb_endpoint_0}',[]),('{tb_endpoint_1}',[])]
+ );
+ expect:
+ success: false
+ -
+ id: 14
+ desc: distribution=[]
+ inputs:
+ - name: t3
+ sql: |
+ create table t3 (c1 string NOT NULL,c2 int,c3 timestamp,c4 timestamp,index(key=(c1),ts=c4,ttl=0m))
+ options (
+ partitionnum = 1,
+ replicanum = 1,
+ distribution = []
+ );
+ expect:
+ success: false
+ -
+ id: 15
+    desc: partitionnum is a non-numeric string
+ inputs:
+ - name: t3
+ sql: |
+ create table t3 (c1 string NOT NULL,c2 int,c3 timestamp,c4 timestamp,index(key=(c1),ts=c4,ttl=0m))
+ options (
+ partitionnum = a,
+ replicanum = 1,
+ distribution = [ ('{tb_endpoint_0}',[])]
+ );
+ expect:
+ success: false
+ -
+ id: 16
+    desc: replicanum is a non-numeric string
+ inputs:
+ - name: t3
+ sql: |
+ create table t3 (c1 string NOT NULL,c2 int,c3 timestamp,c4 timestamp,index(key=(c1),ts=c4,ttl=0m))
+ options (
+ partitionnum = 1,
+ replicanum = a,
+ distribution = [ ('{tb_endpoint_0}',[])]
+ );
+ expect:
+ success: false
+ -
+ id: 17
+    desc: only partitionnum
+ inputs:
+ - name: t3
+ sql: |
+ create table t3 (c1 string NOT NULL,c2 int,c3 timestamp,c4 timestamp,index(key=(c1),ts=c4,ttl=0m))
+ options (
+ partitionnum = 1
+ );
+ expect:
+ name: t3
+ success: true
+ options:
+ partitionNum: 1
+ replicaNum: 3
+ -
+ id: 18
+    desc: only replicanum
+ inputs:
+ - name: t3
+ sql: |
+ create table t3 (c1 string NOT NULL,c2 int,c3 timestamp,c4 timestamp,index(key=(c1),ts=c4,ttl=0m))
+ options (
+ replicanum = 1
+ );
+ expect:
+ name: t3
+ success: true
+ options:
+ partitionNum: 8
+ replicaNum: 1
+ -
+ id: 19
+    desc: no replicanum, number of distribution entries differs from the number of tablets
+ inputs:
+ - name: t3
+ sql: |
+ create table t3 (c1 string NOT NULL,c2 int,c3 timestamp,c4 timestamp,index(key=(c1),ts=c4,ttl=0m))
+ options (
+ distribution = [ ('{tb_endpoint_0}', [])]
+ );
+ expect:
+ name: t3
+ success: true
+ options:
+ partitionNum: 1
+ replicaNum: 1
+ -
+ id: 20
+    desc: tablet specified in distribution does not exist
+ inputs:
+ - name: t3
+ sql: |
+ create table t3 (c1 string NOT NULL,c2 int,c3 timestamp,c4 timestamp,index(key=(c1),ts=c4,ttl=0m))
+ options (
+ partitionnum = 1,
+ replicanum = 1,
+ distribution = [ ('{tb_endpoint_0}1',[])]
+ );
+ expect:
+ success: false
+ -
+ id: 21
+    desc: partitionnum greater than the number of distribution entries
+ mode: standalone-unsupport
+ inputs:
+ - name: t3
+ sql: |
+ create table t3 (c1 string NOT NULL,c2 int,c3 timestamp,c4 timestamp,index(key=(c1),ts=c4,ttl=0m))
+ options (
+ partitionnum = 4,
+ replicanum = 1,
+ distribution = [ ('{tb_endpoint_0}',[])]
+ );
+ expect:
+ success: false
+ -
+ id: 22
+ desc: test-case
+ mode: standalone-unsupport
+ inputs:
+ -
+ columns : ["c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"]
+ indexs: ["index1:c1:c7"]
+ replicaNum: 3
+ partitionNum: 1
+ distribution:
+ - leader: "{tb_endpoint_1}"
+ followers: [ "{tb_endpoint_0}","{tb_endpoint_2}" ]
+ rows:
+ - ["aa",1,2,3,1.1,2.1,1590738989000,"2020-05-01",true]
+ sql: select * from {0};
+ expect:
+ name: "{0}"
+ success: true
+ columns : ["c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"]
+ rows:
+ - ["aa",1,2,3,1.1,2.1,1590738989000,"2020-05-01",true]
+ options:
+ partitionNum: 1
+ replicaNum: 3
+ -
+ id: 23
+    desc: partitionnum=0 without distribution
+ mode: standalone-unsupport
+ inputs:
+ - name: t3
+ sql: |
+ create table t3 (c1 string NOT NULL,c2 int,c3 timestamp,c4 timestamp,index(key=(c1),ts=c4,ttl=0m))
+ options (
+ partitionnum = 0,
+ replicanum = 3
+ );
+ expect:
+ success: false
+ -
+ id: 24
+    desc: no partitionnum or replicanum, distribution specified
+ mode: standalone-unsupport
+ inputs:
+ - name: t3
+ sql: |
+ create table t3 (c1 string NOT NULL,c2 int,c3 timestamp,c4 timestamp,index(key=(c1),ts=c4,ttl=0m))
+ options (
+ distribution = [ ('{tb_endpoint_0}', [ '{tb_endpoint_1}','{tb_endpoint_2}' ])]
+ );
+ expect:
+ name: t3
+ success: true
+ options:
+ partitionNum: 1
+ replicaNum: 3
+ -
+ id: 25
+    desc: more distribution entries than partitionnum
+ mode: standalone-unsupport
+ inputs:
+ - name: t3
+ sql: |
+ create table t3 (c1 string NOT NULL,c2 int,c3 timestamp,c4 timestamp,index(key=(c1),ts=c4,ttl=0m))
+ options (
+ partitionnum = 1,
+ replicanum = 3,
+ distribution = [ ('{tb_endpoint_0}', [ '{tb_endpoint_1}','{tb_endpoint_2}' ]),('{tb_endpoint_0}', [ '{tb_endpoint_1}','{tb_endpoint_2}' ])]
+ );
+ expect:
+ success: false
+ -
+ id: 26
+    desc: fewer distribution entries than partitionnum
+ mode: standalone-unsupport
+ inputs:
+ - name: t3
+ sql: |
+ create table t3 (c1 string NOT NULL,c2 int,c3 timestamp,c4 timestamp,index(key=(c1),ts=c4,ttl=0m))
+ options (
+ partitionnum = 3,
+ replicanum = 3,
+ distribution = [ ('{tb_endpoint_0}', [ '{tb_endpoint_1}','{tb_endpoint_2}' ]),('{tb_endpoint_0}', [ '{tb_endpoint_1}','{tb_endpoint_2}' ])]
+ );
+ expect:
+ success: false
diff --git a/cases/integration_test/ddl/test_ttl.yaml b/cases/integration_test/ddl/test_ttl.yaml
new file mode 100644
index 00000000000..9071a91611f
--- /dev/null
+++ b/cases/integration_test/ddl/test_ttl.yaml
@@ -0,0 +1,317 @@
+db: test_zw
+debugs: []
+version: 0.5.0
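+# Note (assumed ttl semantics, inferred from the cases below): an "absolute" ttl is a time span and
+# requires a d/h/m unit, a "latest" ttl is a plain row count with no unit, and "absandlat"/"absorlat"
+# take a (time, count) pair, presumably combining the two expiration conditions with AND / OR.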
+cases:
+ -
+ id: 0
+    desc: specify ttl - unit d
+ sql: create table {auto} (c1 string NOT NULL,c2 int,c3 timestamp, c4 timestamp,index(key=(c1),ts=c4,ttl=3650d));
+ expect:
+ success: true
+ -
+ id: 1
+    desc: specify ttl - unit h
+ sql: create table {auto} (c1 string NOT NULL,c2 int,c3 timestamp, c4 timestamp,index(key=(c1),ts=c4,ttl=3650h));
+ expect:
+ success: true
+ -
+ id: 2
+    desc: specify ttl - unit m
+ sql: create table {auto} (c1 string NOT NULL,c2 int,c3 timestamp, c4 timestamp,index(key=(c1),ts=c4,ttl=3650m));
+ expect:
+ success: true
+ -
+ id: 3
+    desc: specify ttl - no unit
+ sql: create table {auto} (c1 string NOT NULL,c2 int,c3 timestamp, c4 timestamp,index(key=(c1),ts=c4,ttl=3650));
+ expect:
+ success: false
+ -
+ id: 4
+    desc: ttl_type=absolute - no unit
+ sql: create table {auto} (c1 string NOT NULL,c2 int,c3 timestamp, c4 timestamp,index(key=(c1),ts=c4,ttl=3650,ttl_type=absolute));
+ expect:
+ success: false
+ -
+ id: 5
+    desc: ttl_type=latest - with unit
+ sql: create table {auto} (c1 string NOT NULL,c2 int,c3 timestamp, c4 timestamp,index(key=(c1),ts=c4,ttl=3650m,ttl_type=latest));
+ expect:
+ success: false
+ -
+ id: 6
+ desc: ttl_type=absolute-ttl=(3650m)
+ sql: create table {auto} (c1 string NOT NULL,c2 int,c3 timestamp, c4 timestamp,index(key=(c1),ts=c4,ttl=(3650m),ttl_type=absolute));
+ expect:
+ success: true
+ -
+ id: 7
+ desc: ttl_type=latest-ttl=(3650)
+ sql: create table {auto} (c1 string NOT NULL,c2 int,c3 timestamp, c4 timestamp,index(key=(c1),ts=c4,ttl=(3650),ttl_type=latest));
+ expect:
+ success: false
+ -
+ id: 8
+ desc: ttl=0m
+ inputs:
+ -
+ create: create table {0} (c1 string NOT NULL,c2 int,c3 timestamp, c4 timestamp,index(key=(c1),ts=c4,ttl=0m));
+ insert: insert into {0} values ("aa", 1, 1590738990000, 1590738989000);
+ sql: select * from {0};
+ expect:
+ columns: ["c1 string","c2 int","c3 timestamp","c4 timestamp"]
+ rows:
+ - ["aa",1,1590738990000,1590738989000]
+ -
+ id: 9
+    desc: ttl is a non-numeric string
+ sql: create table {auto}(c1 string NOT NULL,c2 int,c3 timestamp,c4 timestamp,index(key=(c1),ts=c4,ttl=aa));
+ expect:
+ success: false
+ -
+ id: 10
+    desc: specify ttl_type=absolute
+ inputs:
+ -
+ create: create table {0} (c1 string NOT NULL,c2 int,c3 timestamp, c4 timestamp,index(key=(c1),ts=c4,ttl=0m, ttl_type=absolute));
+ insert: insert into {0} values ("aa", 1, 1590738990000, 1590738989000);
+ sql: select * from {0};
+ expect:
+ columns: ["c1 string","c2 int","c3 timestamp","c4 timestamp"]
+ rows:
+ - ["aa",1,1590738990000,1590738989000]
+ -
+ id: 11
+    desc: specify ttl_type=latest
+ inputs:
+ -
+ create: create table {0} (c1 string NOT NULL,c2 int,c3 timestamp, c4 timestamp,index(key=(c1),ts=c4,ttl=0, ttl_type=latest));
+ insert: insert into {0} values ("aa", 1, 1590738990000, 1590738989000);
+ sql: select * from {0};
+ expect:
+ columns: ["c1 string","c2 int","c3 timestamp","c4 timestamp"]
+ rows:
+ - ["aa",1,1590738990000,1590738989000]
+ -
+ id: 12
+    desc: ttl_type is an unrecognized string
+ sql: create table {auto}(c1 string NOT NULL,c2 int,c3 timestamp,c4 timestamp,index(key=(c1),ts=c4,ttl=0,ttl_type=aaa));
+ expect:
+ success: false
+ -
+ id: 13
+ desc: ttl_type=absorlat
+ sql: |
+ create table {auto} (
+ c1 string, c3 int, c4 bigint, c5 float, c6 double, c7 timestamp, c8 date,
+ index(key=(c4),ts=c7, ttl=(10m,10), ttl_type=absorlat));
+ expect:
+ success: true
+ -
+ id: 14
+ desc: ttl_type=absorlat,ttl=(10,10m)
+ sql: |
+ create table {auto} (
+ c1 string, c3 int, c4 bigint, c5 float, c6 double, c7 timestamp, c8 date,
+ index(key=(c4),ts=c7, ttl=(10,10m), ttl_type=absandlat));
+ expect:
+ success: false
+ -
+ id: 15
+ desc: ttl_type=absandlat
+ sql: |
+ create table {auto} (
+ c1 string, c3 int, c4 bigint, c5 float, c6 double, c7 timestamp, c8 date,
+ index(key=(c4),ts=c7, ttl=(10m,10), ttl_type=absandlat));
+ expect:
+ success: true
+ -
+ id: 16
+ desc: ttl_type=absandlat,ttl=(10,10m)
+ sql: |
+ create table {auto} (
+ c1 string, c3 int, c4 bigint, c5 float, c6 double, c7 timestamp, c8 date,
+ index(key=(c4),ts=c7, ttl=(10,10m), ttl_type=absandlat));
+ expect:
+ success: false
+ -
+ id: 17
+    desc: ttl_type=latest, ttl with unit
+ sql: |
+ create table {auto} (
+ c1 string, c3 int, c4 bigint, c5 float, c6 double, c7 timestamp, c8 date,
+ index(key=(c4),ts=c7, ttl=10m, ttl_type=latest));
+ expect:
+ success: false
+ -
+ id: 18
+ desc: ttl_type=latest,ttl=(10m,10)
+ sql: |
+ create table {auto} (
+ c1 string, c3 int, c4 bigint, c5 float, c6 double, c7 timestamp, c8 date,
+ index(key=(c4),ts=c7, ttl=(10m,10), ttl_type=latest));
+ expect:
+ success: false
+ -
+ id: 19
+ desc: ttl_type=absolute,ttl=(10m,10)
+ sql: |
+ create table {auto} (
+ c1 string, c3 int, c4 bigint, c5 float, c6 double, c7 timestamp, c8 date,
+ index(key=(c4),ts=c7, ttl=(10m,10), ttl_type=absolute));
+ expect:
+ success: false
+ -
+ id: 20
+    desc: ttl_type=absolute, data expired
+ inputs:
+ -
+ create: create table {0} (c1 string NOT NULL,c2 int,c3 timestamp, c4 timestamp,index(key=(c1),ts=c4,ttl=10m, ttl_type=absolute));
+ insert: insert into {0} values ("aa", 1, 1590738990000, 1614672180000);
+ sql: select * from {0};
+ expect:
+ count: 0
+ -
+ id: 21
+    desc: ttl_type=latest, some data expired
+ inputs:
+ -
+ columns: ["c1 string","c2 int","c3 timestamp", "c4 timestamp"]
+ indexs: ["index1:c1:c4:1:latest"]
+ rows:
+ - ["aa", 1, 1590738990000,1590738990000]
+ - ["aa", 2, 1590738990000,1590738990000]
+ sql: select * from {0};
+ expect:
+ columns: ["c1 string","c2 int","c3 timestamp","c4 timestamp"]
+ rows:
+ - ["aa", 2, 1590738990000,1590738990000]
+ -
+ id: 22
+    desc: ttl_type=absandlat, some data expired
+ inputs:
+ -
+ columns: ["id int","c1 string","c2 int","c3 timestamp", "c4 timestamp"]
+ indexs: ["index1:c1:c4:(10m,2):absandlat"]
+ rows:
+ - [1,"aa", 1, 1590738990000,1590738990000]
+ - [2,"aa", 2, 1590738990000,1590738991000]
+ - [3,"aa", 3, 1590738990000,1590738992000]
+ sql: select * from {0};
+ expect:
+ columns: ["id int","c1 string","c2 int","c3 timestamp","c4 timestamp"]
+ order: id
+ rows:
+ - [2,"aa", 2, 1590738990000,1590738991000]
+ - [3,"aa", 3, 1590738990000,1590738992000]
+ -
+ id: 23
+    desc: ttl_type=absorlat, some data expired
+ inputs:
+ -
+ columns: ["c1 string","c2 int","c3 timestamp", "c4 timestamp"]
+ indexs: ["index1:c1:c4:(10m,2):absorlat"]
+ rows:
+ - ["aa", 1, 1590738990000,1590738990000]
+ - ["aa", 1, 1590738990000,1590738990000]
+ - ["aa", 1, 1590738990000,1590738990000]
+ sql: select * from {0};
+ expect:
+ count: 0
+ -
+ id: 24
+    desc: ttl_type=absolute, some data expired
+ inputs:
+ -
+ columns: ["c1 string","c2 int","c3 timestamp", "c4 timestamp"]
+ indexs: ["index1:c1:c4:10m:absolute"]
+ rows:
+ - ["aa", 1, 1590738990000, "{currentTime}-600001"]
+ - ["bb", 1, 1590738990000, "{currentTime}"]
+ sql: select c1,c2,c3 from {0};
+ expect:
+ columns: ["c1 string","c2 int","c3 timestamp"]
+ rows:
+ - ["bb", 1, 1590738990000]
+ -
+ id: 25
+    desc: ttl_type=absandlat, some data expired - boundary
+ inputs:
+ -
+ columns: ["id int","c1 string","c2 int","c3 timestamp", "c4 timestamp"]
+ indexs: ["index1:c1:c4:(10m,2):absandlat"]
+ rows:
+ - [1,"aa", 1, 1590738990000,"{currentTime}-600001"]
+ - [2,"aa", 2, 1590738990000,"{currentTime}-600001"]
+ - [3,"aa", 3, 1590738990000,"{currentTime}-600001"]
+ - [4,"aa", 4, 1590738990000,"{currentTime}"]
+ sql: select id,c1,c2,c3 from {0};
+ expect:
+ columns: ["id int","c1 string","c2 int","c3 timestamp"]
+ order: id
+ rows:
+ - [3,"aa", 3, 1590738990000]
+ - [4,"aa", 4, 1590738990000]
+ -
+ id: 26
+    desc: ttl_type=absandlat, some data expired - boundary 2
+ inputs:
+ -
+ columns: ["id int","c1 string","c2 int","c3 timestamp", "c4 timestamp"]
+ indexs: ["index1:c1:c4:(10m,2):absandlat"]
+ rows:
+ - [1,"aa", 1, 1590738990000,"{currentTime}-600001"]
+ - [2,"aa", 2, 1590738990000,"{currentTime}-550000"]
+ - [3,"aa", 3, 1590738990000,"{currentTime}-500000"]
+ - [4,"aa", 4, 1590738990000,"{currentTime}"]
+ sql: select id,c1,c2,c3 from {0};
+ expect:
+ columns: ["id int","c1 string","c2 int","c3 timestamp"]
+ order: id
+ rows:
+ - [2,"aa", 2, 1590738990000]
+ - [3,"aa", 3, 1590738990000]
+ - [4,"aa", 4, 1590738990000]
+ -
+ id: 27
+    desc: ttl_type=absorlat, some data expired - boundary
+ inputs:
+ -
+ columns: ["id int","c1 string","c2 int","c3 timestamp", "c4 timestamp"]
+ indexs: ["index1:c1:c4:(10m,2):absorlat"]
+ rows:
+ - [1,"aa", 1, 1590738990000,"{currentTime}-600001"]
+ - [2,"aa", 2, 1590738990000,"{currentTime}-600001"]
+ - [3,"aa", 3, 1590738990000,"{currentTime}-500000"]
+ sql: select id,c1,c2,c3 from {0};
+ expect:
+ columns: ["id int","c1 string","c2 int","c3 timestamp"]
+ order: id
+ rows:
+ - [3,"aa", 3, 1590738990000]
+ -
+ id: 28
+    desc: ttl_type=absorlat, some data expired - boundary 2
+ inputs:
+ -
+ columns: ["id int","c1 string","c2 int","c3 timestamp", "c4 timestamp"]
+ indexs: ["index1:c1:c4:(10m,2):absorlat"]
+ rows:
+ - [1,"aa", 1, 1590738990000,"{currentTime}-600001"]
+ - [2,"aa", 2, 1590738990000,"{currentTime}-600001"]
+ - [3,"aa", 3, 1590738990000,"{currentTime}-500000"]
+ - [4,"aa", 4, 1590738990000,"{currentTime}-400000"]
+ - [5,"aa", 5, 1590738990000,"{currentTime}"]
+ sql: select id,c1,c2,c3 from {0};
+ expect:
+ columns: ["id int","c1 string","c2 int","c3 timestamp"]
+ order: id
+ rows:
+ - [4,"aa", 4, 1590738990000]
+ - [5,"aa", 5, 1590738990000]
+ -
+ id: 29
+ desc: ttl_type=latest-ttl=(10)
+ sql: create table {auto} (c1 string NOT NULL,c2 int,c3 timestamp, c4 timestamp,index(key=(c1),ts=c4,ttl=(10),ttl_type=latest));
+ expect:
+ success: true
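
The TTL cases above exercise four expiration policies. For reference, a minimal sketch of the corresponding index clauses, written with the same syntax the cases use (table and column names here are illustrative only):

    -- absolute: expire rows whose ts column is older than 10 minutes
    CREATE TABLE t_abs (c4 bigint, c7 timestamp,
        index(key=(c4), ts=c7, ttl=10m, ttl_type=absolute));
    -- latest: keep only the 10 most recent rows per key
    CREATE TABLE t_lat (c4 bigint, c7 timestamp,
        index(key=(c4), ts=c7, ttl=10, ttl_type=latest));
    -- absandlat: expire a row only when it is BOTH older than 10 minutes AND beyond the latest 10
    CREATE TABLE t_andlat (c4 bigint, c7 timestamp,
        index(key=(c4), ts=c7, ttl=(10m,10), ttl_type=absandlat));
    -- absorlat: expire a row when it is EITHER older than 10 minutes OR beyond the latest 10
    CREATE TABLE t_orlat (c4 bigint, c7 timestamp,
        index(key=(c4), ts=c7, ttl=(10m,10), ttl_type=absorlat));

Note the ordering convention the cases enforce: a combined ttl is always written as (absolute duration, latest count), so ttl=(10,10m) is rejected for every ttl_type.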
diff --git a/cases/integration_test/deploy/test_create_deploy.yaml b/cases/integration_test/deploy/test_create_deploy.yaml
new file mode 100644
index 00000000000..bc90cdaccf2
--- /dev/null
+++ b/cases/integration_test/deploy/test_create_deploy.yaml
@@ -0,0 +1,621 @@
+# Copyright 2021 4Paradigm
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+db: test_zw
+debugs: []
+cases:
+ -
+ id: 0
+    desc: smoke test - normal deploy
+ inputs:
+ -
+ columns : ["c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"]
+ indexs: ["index1:c1:c7"]
+ rows:
+ - ["aa",1,2,3,1.1,2.1,1590738989000,"2020-05-01"]
+ sqls:
+ - deploy deploy_{0} select * from {0};
+ - show deployment deploy_{0};
+ tearDown:
+ - drop deployment deploy_{0};
+ expect:
+ deployment :
+ name: deploy_{0}
+ dbName: test_zw
+ sql: |
+ DEPLOY deploy_{0} SELECT
+ *
+ FROM
+ {0}
+ ;
+ inColumns:
+ - 1,c1,kVarchar,NO
+ - 2,c2,kInt16,NO
+ - 3,c3,kInt32,NO
+ - 4,c4,kInt64,NO
+ - 5,c5,kFloat,NO
+ - 6,c6,kDouble,NO
+ - 7,c7,kTimestamp,NO
+ - 8,c8,kDate,NO
+ outColumns:
+ - 1,c1,kVarchar,NO
+ - 2,c2,kInt16,NO
+ - 3,c3,kInt32,NO
+ - 4,c4,kInt64,NO
+ - 5,c5,kFloat,NO
+ - 6,c6,kDouble,NO
+ - 7,c7,kTimestamp,NO
+ - 8,c8,kDate,NO
+
+ - id: 1
+    desc: deploy a last join
+ inputs:
+ - columns: [ "c1 string","c2 int","c3 bigint","c4 timestamp" ]
+ indexs: [ "index1:c1:c4" ]
+ - columns: [ "c1 string","c2 int","c3 bigint","c4 timestamp" ]
+ indexs: [ "index1:c1:c4" ]
+ - columns: [ "c1 string","c2 int","c3 bigint","c4 timestamp" ]
+ indexs: [ "index1:c1:c4" ]
+ sqls:
+ - deploy deploy_{0} select {0}.c1,{0}.c2,{1}.c4,{2}.c4 from {0} last join {1} ORDER BY {1}.c4 on {0}.c1={1}.c1 last join {2} order by {2}.c4 on {0}.c1={2}.c1;
+ - show deployment deploy_{0};
+ tearDown:
+ - drop deployment deploy_{0};
+ expect:
+ deployment :
+ name: deploy_{0}
+ dbName: test_zw
+ sql: |
+ DEPLOY deploy_{0} SELECT
+ {0}.c1,
+ {0}.c2,
+ {1}.c4,
+ {2}.c4
+ FROM
+ {0}
+ LAST JOIN
+ {1}
+ ORDER BY {1}.c4
+ ON {0}.c1 = {1}.c1
+ LAST JOIN
+ {2}
+ ORDER BY {2}.c4
+ ON {0}.c1 = {2}.c1
+ ;
+ inColumns:
+ - 1,c1,kVarchar,NO
+ - 2,c2,kInt32,NO
+ - 3,c3,kInt64,NO
+ - 4,c4,kTimestamp,NO
+ outColumns:
+ - 1,c1,kVarchar,NO
+ - 2,c2,kInt32,NO
+ - 3,c4,kTimestamp,NO
+ - 4,c4,kTimestamp,NO
+ -
+ id: 2
+    desc: deploy a window - ROWS
+ inputs:
+ -
+ columns : ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"]
+ indexs: ["index1:c1:c7"]
+ rows:
+ - [1,"aa",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"aa",21,31,1.2,2.2,1590738990001,"2020-05-02"]
+ - [3,"aa",22,32,1.3,2.3,1590738990002,"2020-05-03"]
+ - [4,"aa",23,33,1.4,2.4,1590738990003,"2020-05-04"]
+ - [5,"aa",24,34,1.5,2.5,1590738990004,"2020-05-05"]
+ sqls:
+ - deploy deploy_{0} SELECT id, c1, sum(c4) OVER w1 as w1_c4_sum FROM {0} WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS BETWEEN 2 PRECEDING AND 1 PRECEDING);
+ - show deployment deploy_{0};
+ tearDown:
+ - drop deployment deploy_{0};
+ expect:
+ deployment :
+ name: deploy_{0}
+ dbName: test_zw
+ sql: |
+ DEPLOY deploy_{0} SELECT
+ id,
+ c1,
+ sum(c4) OVER (w1) AS w1_c4_sum
+ FROM
+ {0}
+ WINDOW w1 AS (PARTITION BY {0}.c1
+ ORDER BY {0}.c7 ROWS BETWEEN 2 PRECEDING AND 1 PRECEDING)
+ ;
+ inColumns:
+ - 1,id,kInt32,NO
+ - 2,c1,kVarchar,NO
+ - 3,c3,kInt32,NO
+ - 4,c4,kInt64,NO
+ - 5,c5,kFloat,NO
+ - 6,c6,kDouble,NO
+ - 7,c7,kTimestamp,NO
+ - 8,c8,kDate,NO
+ outColumns:
+ - 1,id,kInt32,NO
+ - 2,c1,kVarchar,NO
+ - 3,w1_c4_sum,kInt64,NO
+ -
+ id: 3
+    desc: deploy a window - ROWS_RANGE
+ inputs:
+ -
+ columns : ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"]
+ indexs: ["index1:c1:c7"]
+ rows:
+ - [1,"aa",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"aa",21,31,1.2,2.2,1590738990001,"2020-05-02"]
+ - [3,"aa",22,32,1.3,2.3,1590738990002,"2020-05-03"]
+ - [4,"aa",23,33,1.4,2.4,1590738990003,"2020-05-04"]
+ - [5,"aa",24,34,1.5,2.5,1590738990004,"2020-05-05"]
+ sqls:
+ - deploy deploy_{0} SELECT id, c1, sum(c4) OVER w1 as w1_c4_sum FROM {0} WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS_RANGE BETWEEN 2 PRECEDING AND 1 PRECEDING);
+ - show deployment deploy_{0};
+ tearDown:
+ - drop deployment deploy_{0};
+ expect:
+ deployment :
+ name: deploy_{0}
+ dbName: test_zw
+ sql: |
+ DEPLOY deploy_{0} SELECT
+ id,
+ c1,
+ sum(c4) OVER (w1) AS w1_c4_sum
+ FROM
+ {0}
+ WINDOW w1 AS (PARTITION BY {0}.c1
+ ORDER BY {0}.c7 ROWS_RANGE BETWEEN 2 PRECEDING AND 1 PRECEDING)
+ ;
+ inColumns:
+ - 1,id,kInt32,NO
+ - 2,c1,kVarchar,NO
+ - 3,c3,kInt32,NO
+ - 4,c4,kInt64,NO
+ - 5,c5,kFloat,NO
+ - 6,c6,kDouble,NO
+ - 7,c7,kTimestamp,NO
+ - 8,c8,kDate,NO
+ outColumns:
+ - 1,id,kInt32,NO
+ - 2,c1,kVarchar,NO
+ - 3,w1_c4_sum,kInt64,NO
+ -
+ id: 4
+    desc: deploy a subquery
+ inputs:
+ -
+ columns : ["c1 string","c2 int","c3 bigint","c4 timestamp"]
+ indexs: ["index1:c1:c4"]
+ rows:
+ - ["aa",2,3,1590738989000]
+ - ["bb",21,31,1590738990000]
+ - ["cc",41,51,1590738991000]
+ sqls:
+ - deploy deploy_{0} select v2,v3 from (select c2+1 as v2,c3+1 as v3 from {0}) as t;
+ - show deployment deploy_{0};
+ tearDown:
+ - drop deployment deploy_{0};
+ expect:
+ deployment :
+ name: deploy_{0}
+ dbName: test_zw
+ sql: |
+ DEPLOY deploy_{0} SELECT
+ v2,
+ v3
+ FROM
+ (
+ SELECT
+ c2 + 1 AS v2,
+ c3 + 1 AS v3
+ FROM
+ {0}
+ ) AS t
+ ;
+ inColumns:
+ - 1,c1,kVarchar,NO
+ - 2,c2,kInt32,NO
+ - 3,c3,kInt64,NO
+ - 4,c4,kTimestamp,NO
+ outColumns:
+ - 1,v2,kInt32,NO
+ - 2,v3,kInt64,NO
+ -
+ id: 5
+    desc: deploy a subquery, window and last join
+ inputs:
+ -
+ columns : ["id int", "card_no string","merchant_id int", "trx_time timestamp", "trx_amt float"]
+ indexs: ["index1:card_no:trx_time"]
+ rows:
+ - [1, "aaaaaaaaaa",1, 1590738989000, 1.1]
+ - [2, "aaaaaaaaaa",1, 1590738990000, 2.2]
+ - [3, "bb",10, 1590738990000, 3.3]
+ -
+ columns : ["crd_lst_isu_dte timestamp", "crd_nbr string"]
+ indexs: ["index2:crd_nbr:crd_lst_isu_dte"]
+ rows:
+ - [1590738988000, "aaaaaaaaaa"]
+ - [1590738990000, "aaaaaaaaaa"]
+ - [1590738989000, "cc"]
+ - [1590738992000, "cc"]
+ sqls:
+ - deploy deploy_{0} select * from(select
+ id,card_no,trx_time,substr(card_no, 1, 6) as card_no_prefix,sum(trx_amt) over w30d as sum_trx_amt,count(merchant_id) over w10d as count_merchant_id from {0}
+ window w30d as (PARTITION BY {0}.card_no ORDER BY {0}.trx_time ROWS_RANGE BETWEEN 30d PRECEDING AND CURRENT ROW),
+ w10d as (PARTITION BY {0}.card_no ORDER BY {0}.trx_time ROWS_RANGE BETWEEN 10d PRECEDING AND CURRENT ROW)) as trx_fe
+ last join {1} order by {1}.crd_lst_isu_dte on trx_fe.card_no = {1}.crd_nbr and trx_fe.trx_time >= {1}.crd_lst_isu_dte;
+ - show deployment deploy_{0};
+ tearDown:
+ - drop deployment deploy_{0};
+ expect:
+ deployment :
+ name: deploy_{0}
+ dbName: test_zw
+ sql: |
+ DEPLOY deploy_{0} SELECT
+ *
+ FROM
+ (
+ SELECT
+ id,
+ card_no,
+ trx_time,
+ substr(card_no, 1, 6) AS card_no_prefix,
+ sum(trx_amt) OVER (w30d) AS sum_trx_amt,
+ count(merchant_id) OVER (w10d) AS count_merchant_id
+ FROM
+ {0}
+ WINDOW w30d AS (PARTITION BY {0}.card_no
+ ORDER BY {0}.trx_time ROWS_RANGE BETWEEN 30d PRECEDING AND CURRENT ROW), w10d AS (PARTITION BY {0}.card_no
+ ORDER BY {0}.trx_time ROWS_RANGE BETWEEN 10d PRECEDING AND CURRENT ROW)
+ ) AS trx_fe
+ LAST JOIN
+ {1}
+ ORDER BY {1}.crd_lst_isu_dte
+ ON trx_fe.card_no = {1}.crd_nbr AND trx_fe.trx_time >= {1}.crd_lst_isu_dte
+ ;
+ inColumns:
+ - 1,id,kInt32,NO
+ - 2,card_no,kVarchar,NO
+ - 3,merchant_id,kInt32,NO
+ - 4,trx_time,kTimestamp,NO
+ - 5,trx_amt,kFloat,NO
+ outColumns:
+ - 1,id,kInt32,NO
+ - 2,card_no,kVarchar,NO
+ - 3,trx_time,kTimestamp,NO
+ - 4,card_no_prefix,kVarchar,NO
+ - 5,sum_trx_amt,kFloat,NO
+ - 6,count_merchant_id,kInt64,NO
+ - 7,crd_lst_isu_dte,kTimestamp,NO
+ - 8,crd_nbr,kVarchar,NO
+ -
+ id: 6
+    desc: deploy SQL that references other databases
+ db: db
+ inputs:
+ - db: db1
+ columns: [ "c1 string","c2 int","c3 bigint","c4 timestamp" ]
+ indexs: [ "index1:c1:c4" ]
+ rows:
+ - [ "aa",2,3,1590738989000 ]
+ - [ "bb",21,31,1590738990000 ]
+ - [ "cc",41,51,1590738991000 ]
+ - db: db2
+ columns: [ "c1 string","c2 int","c3 bigint","c4 timestamp" ]
+ indexs: [ "index1:c1:c3" ]
+
+ rows:
+ - [ "aa",2,13,1590738989000 ]
+ - [ "bb",21,131,1590738990000 ]
+ - [ "cc",41,151,1590738992000 ]
+ sqls:
+ - deploy deploy_{0} select db1.{0}.c1, db1.{0}.c2,db2.{1}.c3,db2.{1}.c4 from db1.{0} last join db2.{1} ORDER BY db2.{1}.c3 on db1.{0}.c1=db2.{1}.c1;
+ expect:
+ success: false
+ -
+ id: 7
+    desc: deploy an invalid SQL statement
+ inputs:
+ -
+ columns : ["c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"]
+ indexs: ["index1:c1:c7"]
+ rows:
+ - ["aa",1,2,3,1.1,2.1,1590738989000,"2020-05-01"]
+ sql: deploy deploy_{0} select * from {0}11;
+ expect:
+ success: false
+ -
+ id: 8
+    desc: deploy a service with a duplicate name
+ inputs:
+ -
+ columns : ["c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"]
+ indexs: ["index1:c1:c7"]
+ rows:
+ - ["aa",1,2,3,1.1,2.1,1590738989000,"2020-05-01"]
+ sqls:
+ - deploy deploy_{0} select * from {0};
+ - deploy deploy_{0} select * from {0};
+ tearDown:
+ - drop deployment deploy_{0};
+ expect:
+ success: false
+ -
+ id: 9
+    desc: deploy with a syntax error
+ inputs:
+ -
+ columns : ["c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"]
+ indexs: ["index1:c1:c7"]
+ rows:
+ - ["aa",1,2,3,1.1,2.1,1590738989000,"2020-05-01"]
+ sql: deploy deployment deploy_{0} select * from {0};
+ expect:
+ success: false
+ -
+ id: 10
+    desc: deploy an insert statement
+ inputs:
+ -
+ columns : ["c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"]
+ indexs: ["index1:c1:c7"]
+ rows:
+ - ["aa",1,2,3,1.1,2.1,1590738989000,"2020-05-01"]
+ sql: deploy deploy_{0} insert into {0} values('aa',1,2,3,1.1,2.1,1590738989000,'2020-05-01');
+ expect:
+ success: false
+ -
+ id: 11
+    desc: deployment name duplicates a table name
+ inputs:
+ -
+ columns : ["c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"]
+ indexs: ["index1:c1:c7"]
+ rows:
+ - ["aa",1,2,3,1.1,2.1,1590738989000,"2020-05-01"]
+ sqls:
+ - deploy {0} select * from {0};
+ - show deployment {0};
+ tearDown:
+ - drop deployment {0};
+ expect:
+ success: true
+ -
+ id: 12
+    desc: table has no index, deploy a window
+ inputs:
+ -
+ create: |
+ create table {0} (
+ id int not null,
+ c1 int not null,
+ c7 timestamp not null
+ );
+ dataProvider:
+ - ["ROWS","ROWS_RANGE"]
+ sqls:
+ - deploy deploy_{0} SELECT id, c1, sum(c1) OVER w1 as w1_c1_sum FROM {0} WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 d[0] BETWEEN 2 PRECEDING AND 1 PRECEDING);
+ - show deployment deploy_{0};
+ tearDown:
+ - drop deployment deploy_{0};
+ expect:
+ success: true
+ -
+ id: 13
+    desc: table already has an index, deploy a window that needs another index, both key column and ts differ
+ inputs:
+ -
+ columns : ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"]
+ indexs: ["index1:c3:c4"]
+ dataProvider:
+ - ["ROWS","ROWS_RANGE"]
+ sqls:
+ - deploy deploy_{0} SELECT id, c1, sum(c4) OVER w1 as w1_c4_sum FROM {0} WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 d[0] BETWEEN 2 PRECEDING AND 1 PRECEDING);
+ - show deployment deploy_{0};
+ tearDown:
+ - drop deployment deploy_{0};
+ expect:
+ success: true
+ -
+ id: 14
+    desc: table already has an index, deploy a window, index key column is the same but ts differs
+ inputs:
+ -
+ columns : ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"]
+ indexs: ["index1:c1:c4"]
+ dataProvider:
+ - ["ROWS","ROWS_RANGE"]
+ sqls:
+ - deploy deploy_{0} SELECT id, c1, sum(c4) OVER w1 as w1_c4_sum FROM {0} WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 d[0] BETWEEN 2 PRECEDING AND 1 PRECEDING);
+ - show deployment deploy_{0};
+ tearDown:
+ - drop deployment deploy_{0};
+ expect:
+ success: true
+ -
+ id: 15
+    desc: table already has an index, deploy a window, index key column differs but ts is the same, ROWS
+ inputs:
+ -
+ columns : ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"]
+ indexs: ["index1:c3:c7"]
+ sqls:
+ - deploy deploy_{0} SELECT id, c1, sum(c4) OVER w1 as w1_c4_sum FROM {0} WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS BETWEEN 2 PRECEDING AND 1 PRECEDING);
+ - desc {0};
+ - show deployment deploy_{0};
+ tearDown:
+ - drop deployment deploy_{0};
+ expect:
+ success: true
+ -
+ id: 16
+    desc: table already has an index, deploy a window, index key column differs but ts is the same, ROWS_RANGE
+ inputs:
+ -
+ columns : ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"]
+ indexs: ["index1:c3:c7"]
+ sqls:
+ - deploy deploy_{0} SELECT id, c1, sum(c4) OVER w1 as w1_c4_sum FROM {0} WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS_RANGE BETWEEN 2 PRECEDING AND 1 PRECEDING);
+ - desc {0};
+ - show deployment deploy_{0};
+ tearDown:
+ - drop deployment deploy_{0};
+ expect:
+ success: true
+ -
+ id: 17
+    desc: table already has an index, deploy a window, index key column differs but ts is the same, rows_range with a time unit
+ inputs:
+ -
+ columns : ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"]
+ indexs: ["index1:c3:c7"]
+ sqls:
+ - deploy deploy_{0} SELECT id, c1, sum(c4) OVER w1 as w1_c4_sum FROM {0} WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 rows_range BETWEEN 2h PRECEDING AND 1h PRECEDING);
+ - desc {0};
+ - show deployment deploy_{0};
+ tearDown:
+ - drop deployment deploy_{0};
+ expect:
+ success: true
+ -
+ id: 18
+    desc: deploy SQL that references other databases, one table uses the default database
+ db: db1
+ inputs:
+ - db: db1
+ columns: [ "c1 string","c2 int","c3 bigint","c4 timestamp" ]
+ indexs: [ "index1:c1:c4" ]
+ rows:
+ - [ "aa",2,3,1590738989000 ]
+ - [ "bb",21,31,1590738990000 ]
+ - [ "cc",41,51,1590738991000 ]
+ - db: db2
+ columns: [ "c1 string","c2 int","c3 bigint","c4 timestamp" ]
+ indexs: [ "index1:c1:c3" ]
+
+ rows:
+ - [ "aa",2,13,1590738989000 ]
+ - [ "bb",21,131,1590738990000 ]
+ - [ "cc",41,151,1590738992000 ]
+ sqls:
+ - deploy deploy_{0} select db1.{0}.c1, db1.{0}.c2,db2.{1}.c3,db2.{1}.c4 from db1.{0} last join db2.{1} ORDER BY db2.{1}.c3 on db1.{0}.c1=db2.{1}.c1;
+ expect:
+ success: false
+ -
+ id: 19
+    desc: multiple windows
+ inputs:
+ -
+ columns : ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"]
+ indexs: ["index1:c4:c7"]
+ sqls:
+ - deploy deploy_{0} SELECT id, c1, c3, sum(c4) OVER w1 as w1_c4_sum, count(c4) OVER w2 as w2_c4_count FROM {0} WINDOW
+ w1 AS (PARTITION BY c3 ORDER BY c7 ROWS BETWEEN 2 PRECEDING AND CURRENT ROW),
+ w2 AS (PARTITION BY c1 ORDER BY c7 ROWS BETWEEN 2 PRECEDING AND CURRENT ROW);
+ - desc {0};
+ - show deployment deploy_{0};
+ tearDown:
+ - drop deployment deploy_{0};
+ expect:
+ success: true
+ -
+ id: 20
+    desc: composite index - ROWS
+ inputs:
+ -
+ columns : ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"]
+ indexs: ["index1:c1|c3:c7"]
+ sqls:
+ - deploy deploy_{0} SELECT id, c1, sum(c4) OVER w1 as w1_c4_sum FROM {0} WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS BETWEEN 2 PRECEDING AND 1 PRECEDING);
+ - desc {0};
+ - show deployment deploy_{0};
+ tearDown:
+ - drop deployment deploy_{0};
+ expect:
+ success: true
+ -
+ id: 21
+    desc: composite index - ROWS_RANGE
+ inputs:
+ -
+ columns : ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"]
+ indexs: ["index1:c1|c3:c7"]
+ sqls:
+ - deploy deploy_{0} SELECT id, c1, sum(c4) OVER w1 as w1_c4_sum FROM {0} WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS_RANGE BETWEEN 2 PRECEDING AND 1 PRECEDING);
+ - desc {0};
+ - show deployment deploy_{0};
+ tearDown:
+ - drop deployment deploy_{0};
+ expect:
+ success: true
+ -
+ id: 22
+    desc: table already has an index, deploy a window, index key column and ts are both the same
+ inputs:
+ -
+ columns : ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"]
+ indexs: ["index1:c1:c7"]
+ sqls:
+ - deploy deploy_{0} SELECT id, c1, sum(c4) OVER w1 as w1_c4_sum FROM {0} WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS BETWEEN 2 PRECEDING AND 1 PRECEDING);
+ - desc {0};
+ - show deployment deploy_{0};
+ tearDown:
+ - drop deployment deploy_{0};
+ expect:
+ success: true
+ -
+ id: 23
+    desc: composite index - same index
+ inputs:
+ -
+ columns : ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"]
+ indexs: ["index1:c1|c3:c7"]
+ rows:
+ - [1,"aa",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"aa",21,31,1.2,2.2,1590738990001,"2020-05-02"]
+ - [3,"aa",22,32,1.3,2.3,1590738990002,"2020-05-03"]
+ - [4,"aa",23,33,1.4,2.4,1590738990003,"2020-05-04"]
+ - [5,"aa",24,34,1.5,2.5,1590738990004,"2020-05-05"]
+ sqls:
+ - deploy deploy_{0} SELECT id, c1, sum(c4) OVER w1 as w1_c4_sum FROM {0} WINDOW w1 AS (PARTITION BY {0}.c1,{0}.c3 ORDER BY {0}.c7 ROWS_RANGE BETWEEN 2 PRECEDING AND 1 PRECEDING);
+ - desc {0};
+ - show deployment deploy_{0};
+ tearDown:
+ - drop deployment deploy_{0};
+ expect:
+ success: true
+ -
+ id: 24
+    desc: table contains data, deploy needs to create a new index
+    tags: ["TODO","likely to fail in CICD,@denglong,https://github.com/4paradigm/OpenMLDB/issues/1116"]
+ mode: standalone-unsupport
+ inputs:
+ -
+ columns : ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"]
+ indexs: ["index1:c3:c7"]
+ rows:
+ - [1,"aa",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"aa",21,31,1.2,2.2,1590738990001,"2020-05-02"]
+ - [3,"aa",22,32,1.3,2.3,1590738990002,"2020-05-03"]
+ - [4,"aa",23,33,1.4,2.4,1590738990003,"2020-05-04"]
+ - [5,"aa",24,34,1.5,2.5,1590738990004,"2020-05-05"]
+ sqls:
+ - deploy deploy_{0} SELECT id, c1, sum(c4) OVER w1 as w1_c4_sum FROM {0} WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS BETWEEN 2 PRECEDING AND 1 PRECEDING);
+ expect:
+ success: false
\ No newline at end of file
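
The deployment cases above all follow the same lifecycle, which can be sketched as three statements (deployment and table names below are illustrative):

    -- register the query as an online service named demo_deploy
    DEPLOY demo_deploy SELECT id, c1, sum(c4) OVER w1 AS w1_c4_sum FROM t1
        WINDOW w1 AS (PARTITION BY c1 ORDER BY c7 ROWS BETWEEN 2 PRECEDING AND CURRENT ROW);
    -- inspect the canonicalized SQL plus the input/output schema recorded for it
    SHOW DEPLOYMENT demo_deploy;
    -- remove the service once it is no longer needed
    DROP DEPLOYMENT demo_deploy;

Only read-only queries over tables in a single database are deployable; the negative cases above assert that INSERT statements, cross-database queries, and duplicate deployment names are rejected.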
diff --git a/cases/integration_test/deploy/test_drop_deploy.yaml b/cases/integration_test/deploy/test_drop_deploy.yaml
new file mode 100644
index 00000000000..7e40d4214df
--- /dev/null
+++ b/cases/integration_test/deploy/test_drop_deploy.yaml
@@ -0,0 +1,85 @@
+# Copyright 2021 4Paradigm
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+db: test_zw
+debugs: []
+cases:
+ -
+ id: 0
+    desc: drop a service normally
+ inputs:
+ -
+ columns : ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"]
+ indexs: ["index1:c1:c7"]
+ sqls:
+ - deploy deploy_{0} SELECT id, c1, sum(c4) OVER w1 as w1_c4_sum FROM {0} WINDOW w1 AS (PARTITION BY {0}.c3 ORDER BY {0}.c7 ROWS BETWEEN 2 PRECEDING AND 1 PRECEDING);
+ - drop deployment deploy_{0};
+ - show deployments;
+ expect:
+ deploymentCount: 0
+ -
+ id: 1
+    desc: service does not exist
+ inputs:
+ -
+ columns : ["c1 string","c2 int","c3 bigint","c4 timestamp"]
+ indexs: ["index1:c1:c4"]
+ rows:
+ - ["aa",2,3,1590738989000]
+ - ["bb",21,31,1590738990000]
+ - ["cc",41,51,1590738991000]
+ sqls:
+ - deploy deploy_{0} select v2,v3 from (select c2+1 as v2,c3+1 as v3 from {0}) as t;
+ - drop deployment deploy_{0}11;
+ tearDown:
+ - drop deployment deploy_{0};
+ expect:
+ success: false
+ -
+ id: 2
+    desc: syntax error
+ inputs:
+ -
+ columns : ["c1 string","c2 int","c3 bigint","c4 timestamp"]
+ indexs: ["index1:c1:c4"]
+ rows:
+ - ["aa",2,3,1590738989000]
+ - ["bb",21,31,1590738990000]
+ - ["cc",41,51,1590738991000]
+ sqls:
+ - deploy deploy_{0} select v2,v3 from (select c2+1 as v2,c3+1 as v3 from {0}) as t;
+ - drop deployments deploy_{0};
+ tearDown:
+ - drop deployment deploy_{0};
+ expect:
+ success: false
+ -
+ id: 3
+    desc: drop a service from another database
+ db: db
+ inputs:
+ -
+ columns : ["c1 string","c2 int","c3 bigint","c4 timestamp"]
+ indexs: ["index1:c1:c4"]
+ rows:
+ - ["aa",2,3,1590738989000]
+ - ["bb",21,31,1590738990000]
+ - ["cc",41,51,1590738991000]
+ sqls:
+ - deploy deploy_{0} select v2,v3 from (select c2+1 as v2,c3+1 as v3 from {0}) as t;
+ - drop deployment db.deploy_{0};
+ tearDown:
+ - drop deployment deploy_{0};
+ expect:
+ success: false
\ No newline at end of file
diff --git a/cases/integration_test/deploy/test_show_deploy.yaml b/cases/integration_test/deploy/test_show_deploy.yaml
new file mode 100644
index 00000000000..32d3c27d89f
--- /dev/null
+++ b/cases/integration_test/deploy/test_show_deploy.yaml
@@ -0,0 +1,88 @@
+# Copyright 2021 4Paradigm
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+db: test_zw
+debugs: []
+cases:
+ -
+ id: 0
+    desc: show all deployments
+ inputs:
+ -
+ columns : ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"]
+ indexs: ["index1:c1:c7"]
+ sqls:
+ - deploy deploy_{0} SELECT id, c1, sum(c4) OVER w1 as w1_c4_sum FROM {0} WINDOW w1 AS (PARTITION BY {0}.c3 ORDER BY {0}.c7 ROWS BETWEEN 2 PRECEDING AND 1 PRECEDING);
+ - deploy {0} SELECT id, c1, sum(c4) OVER w1 as w1_c4_sum FROM {0} WINDOW w1 AS (PARTITION BY {0}.c4 ORDER BY {0}.c7 ROWS_RANGE BETWEEN 2 PRECEDING AND 1 PRECEDING);
+ - show deployments;
+ tearDown:
+ - drop deployment deploy_{0};
+ - drop deployment {0};
+ expect:
+ deploymentCount: 2
+ -
+ id: 1
+    desc: service does not exist
+ inputs:
+ -
+ columns : ["c1 string","c2 int","c3 bigint","c4 timestamp"]
+ indexs: ["index1:c1:c4"]
+ rows:
+ - ["aa",2,3,1590738989000]
+ - ["bb",21,31,1590738990000]
+ - ["cc",41,51,1590738991000]
+ sqls:
+ - deploy deploy_{0} select v2,v3 from (select c2+1 as v2,c3+1 as v3 from {0}) as t;
+ - show deployment deploy_{0}11;
+ tearDown:
+ - drop deployment deploy_{0};
+ expect:
+ success: false
+ -
+ id: 2
+    desc: syntax error
+ inputs:
+ -
+ columns : ["c1 string","c2 int","c3 bigint","c4 timestamp"]
+ indexs: ["index1:c1:c4"]
+ rows:
+ - ["aa",2,3,1590738989000]
+ - ["bb",21,31,1590738990000]
+ - ["cc",41,51,1590738991000]
+ sqls:
+ - deploy deploy_{0} select v2,v3 from (select c2+1 as v2,c3+1 as v3 from {0}) as t;
+ - show deployments deploy_{0};
+ tearDown:
+ - drop deployment deploy_{0};
+ expect:
+ success: false
+ -
+ id: 3
+    desc: show a service from another database
+ db: db
+ inputs:
+ -
+ columns : ["c1 string","c2 int","c3 bigint","c4 timestamp"]
+ indexs: ["index1:c1:c4"]
+ rows:
+ - ["aa",2,3,1590738989000]
+ - ["bb",21,31,1590738990000]
+ - ["cc",41,51,1590738991000]
+ sqls:
+ - deploy deploy_{0} select v2,v3 from (select c2+1 as v2,c3+1 as v3 from {0}) as t;
+ - show deployment db.deploy_{0};
+ tearDown:
+ - drop deployment deploy_{0};
+ expect:
+ success: true
\ No newline at end of file
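
As the drop and show cases make explicit, the two inspection statements differ only in scope; a minimal sketch (names are illustrative):

    -- list every deployment in the current database; the cases assert on the returned count
    SHOW DEPLOYMENTS;
    -- describe a single deployment, optionally qualified by database
    SHOW DEPLOYMENT test_zw.deploy_demo;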
diff --git a/cases/integration_test/disk_table/disk_table.yaml b/cases/integration_test/disk_table/disk_table.yaml
new file mode 100644
index 00000000000..33c0b45e0be
--- /dev/null
+++ b/cases/integration_test/disk_table/disk_table.yaml
@@ -0,0 +1,486 @@
+# Copyright 2021 4Paradigm
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+db: test_zw
+debugs: []
+version: 0.5.0
+cases:
+ -
+ id: 0
+    desc: create an SSD table, insert multiple rows, and query
+ inputs:
+ -
+ columns : ["c1 string","c2 int","c3 bigint","c4 timestamp"]
+ indexs: ["index1:c1:c4"]
+ storage: SSD
+ rows:
+ - ["aa", 2, 3, 1590738989000]
+ - ["bb", 21, 31, 1590738990000]
+ - ["cc", 41, 51, 1590738991000]
+ sql: select * from {0};
+ expect:
+ columns: ["c1 string","c2 int","c3 bigint","c4 timestamp"]
+ rows:
+ - ["aa", 2, 3, 1590738989000]
+ - ["bb", 21, 31, 1590738990000]
+ - ["cc", 41, 51, 1590738991000]
+ -
+ id: 1
+    desc: create an HDD table, insert multiple rows, and query
+ inputs:
+ -
+ columns : ["c1 string","c2 int","c3 bigint","c4 timestamp"]
+ indexs: ["index1:c1:c4"]
+ storage: HDD
+ rows:
+ - ["aa", 2, 3, 1590738989000]
+ - ["bb", 21, 31, 1590738990000]
+ - ["cc", 41, 51, 1590738991000]
+ sql: select * from {0};
+ expect:
+ columns: ["c1 string","c2 int","c3 bigint","c4 timestamp"]
+ rows:
+ - ["aa", 2, 3, 1590738989000]
+ - ["bb", 21, 31, 1590738990000]
+ - ["cc", 41, 51, 1590738991000]
+
+ -
+ id: 2
+    desc: SSD table and memory table, join
+ inputs:
+ -
+ columns : ["c1 string","c2 int","c3 bigint","c4 timestamp"]
+ indexs: ["index1:c1:c4"]
+ storage: SSD
+ rows:
+ - ["aa", 2, 3, 1590738989000]
+ - ["bb", 21, 31, 1590738990000]
+ - ["cc", 41, 51, 1590738991000]
+ -
+ columns : ["c1 string","c2 int","c3 bigint","c4 timestamp"]
+ indexs: ["index1:c1:c4"]
+ storage: memory
+ rows:
+ - ["aa", 2, 3, 1590738989000]
+ - ["bb", 21, 31, 1590738990000]
+ - ["cc", 41, 51, 1590738991000]
+ sql: select {0}.c1,{0}.c2,{1}.c3,{1}.c4 from {0} last join {1} ORDER BY {1}.c4 on {0}.c1={1}.c1;
+ expect:
+ columns: ["c1 string","c2 int","c3 bigint","c4 timestamp"]
+ rows:
+ - ["aa", 2, 3, 1590738989000]
+ - ["bb", 21, 31, 1590738990000]
+ - ["cc", 41, 51, 1590738991000]
+ -
+ id: 3
+    desc: HDD table and memory table, join
+ inputs:
+ -
+ columns : ["c1 string","c2 int","c3 bigint","c4 timestamp"]
+ indexs: ["index1:c1:c4"]
+ storage: HDD
+ rows:
+ - ["aa", 2, 3, 1590738989000]
+ - ["bb", 21, 31, 1590738990000]
+ - ["cc", 41, 51, 1590738991000]
+ -
+ columns : ["c1 string","c2 int","c3 bigint","c4 timestamp"]
+ indexs: ["index1:c1:c4"]
+ storage: memory
+ rows:
+ - ["aa", 2, 3, 1590738989000]
+ - ["bb", 21, 31, 1590738990000]
+ - ["cc", 41, 51, 1590738991000]
+ sql: select {0}.c1,{0}.c2,{1}.c3,{1}.c4 from {0} last join {1} ORDER BY {1}.c4 on {0}.c1={1}.c1;
+ expect:
+ columns: ["c1 string","c2 int","c3 bigint","c4 timestamp"]
+ rows:
+ - ["aa", 2, 3, 1590738989000]
+ - ["bb", 21, 31, 1590738990000]
+ - ["cc", 41, 51, 1590738991000]
+ -
+ id: 4
+    desc: memory table and SSD table, join
+ inputs:
+ -
+ columns : ["c1 string","c2 int","c3 bigint","c4 timestamp"]
+ indexs: ["index1:c1:c4"]
+ storage: memory
+ rows:
+ - ["aa", 2, 3, 1590738989000]
+ - ["bb", 21, 31, 1590738990000]
+ - ["cc", 41, 51, 1590738991000]
+ -
+ columns : ["c1 string","c2 int","c3 bigint","c4 timestamp"]
+ indexs: ["index1:c1:c4"]
+ storage: SSD
+ rows:
+ - ["aa", 2, 3, 1590738989000]
+ - ["bb", 21, 31, 1590738990000]
+ - ["cc", 41, 51, 1590738991000]
+ sql: select {0}.c1,{0}.c2,{1}.c3,{1}.c4 from {0} last join {1} ORDER BY {1}.c4 on {0}.c1={1}.c1;
+ expect:
+ columns: ["c1 string","c2 int","c3 bigint","c4 timestamp"]
+ rows:
+ - ["aa", 2, 3, 1590738989000]
+ - ["bb", 21, 31, 1590738990000]
+ - ["cc", 41, 51, 1590738991000]
+ -
+ id: 5
+    desc: memory table and HDD table, join
+ inputs:
+ -
+ columns : ["c1 string","c2 int","c3 bigint","c4 timestamp"]
+ indexs: ["index1:c1:c4"]
+ storage: memory
+ rows:
+ - ["aa", 2, 3, 1590738989000]
+ - ["bb", 21, 31, 1590738990000]
+ - ["cc", 41, 51, 1590738991000]
+ -
+ columns : ["c1 string","c2 int","c3 bigint","c4 timestamp"]
+ indexs: ["index1:c1:c4"]
+ storage: HDD
+ rows:
+ - ["aa", 2, 3, 1590738989000]
+ - ["bb", 21, 31, 1590738990000]
+ - ["cc", 41, 51, 1590738991000]
+ sql: select {0}.c1,{0}.c2,{1}.c3,{1}.c4 from {0} last join {1} ORDER BY {1}.c4 on {0}.c1={1}.c1;
+ expect:
+ columns: ["c1 string","c2 int","c3 bigint","c4 timestamp"]
+ rows:
+ - ["aa", 2, 3, 1590738989000]
+ - ["bb", 21, 31, 1590738990000]
+ - ["cc", 41, 51, 1590738991000]
+ -
+ id: 6
+    desc: SSD table and HDD table, join
+ inputs:
+ -
+ columns : ["c1 string","c2 int","c3 bigint","c4 timestamp"]
+ indexs: ["index1:c1:c4"]
+ storage: SSD
+ rows:
+ - ["aa", 2, 3, 1590738989000]
+ - ["bb", 21, 31, 1590738990000]
+ - ["cc", 41, 51, 1590738991000]
+ -
+ columns : ["c1 string","c2 int","c3 bigint","c4 timestamp"]
+ indexs: ["index1:c1:c4"]
+ storage: HDD
+ rows:
+ - ["aa", 2, 3, 1590738989000]
+ - ["bb", 21, 31, 1590738990000]
+ - ["cc", 41, 51, 1590738991000]
+ sql: select {0}.c1,{0}.c2,{1}.c3,{1}.c4 from {0} last join {1} ORDER BY {1}.c4 on {0}.c1={1}.c1;
+ expect:
+ columns: ["c1 string","c2 int","c3 bigint","c4 timestamp"]
+ rows:
+ - ["aa", 2, 3, 1590738989000]
+ - ["bb", 21, 31, 1590738990000]
+ - ["cc", 41, 51, 1590738991000]
+ -
+ id: 7
+    desc: memory, SSD and HDD tables, join
+ inputs:
+ -
+ columns : ["c1 string","c2 int","c3 bigint","c4 timestamp"]
+ indexs: ["index1:c1:c4"]
+ storage: memory
+ rows:
+ - ["aa", 2, 3, 1590738989000]
+ - ["bb", 21, 31, 1590738990000]
+ - ["cc", 41, 51, 1590738991000]
+ -
+ columns : ["c1 string","c2 int","c3 bigint","c4 timestamp"]
+ indexs: ["index1:c1:c4"]
+ storage: SSD
+ rows:
+ - ["aa", 2, 3, 1590738989000]
+ - ["bb", 21, 31, 1590738990000]
+ - ["cc", 41, 51, 1590738991000]
+ -
+ columns : ["c1 string","c2 int","c3 bigint","c4 timestamp"]
+ indexs: ["index1:c1:c4"]
+ storage: HDD
+ rows:
+ - ["aa", 2, 3, 1590738989000]
+ - ["bb", 21, 31, 1590738990000]
+ - ["cc", 41, 51, 1590738991000]
+ sql: select {1}.c1,{1}.c2,{2}.c3,{0}.c4 from {0} last join {1} ORDER BY {1}.c4 on {0}.c1={1}.c1 last join {2} on {0}.c1 = {2}.c1;
+ expect:
+ columns: ["c1 string","c2 int","c3 bigint","c4 timestamp"]
+ rows:
+ - ["aa", 2, 3, 1590738989000]
+ - ["bb", 21, 31, 1590738990000]
+ - ["cc", 41, 51, 1590738991000]
+
+ - id: 8
+    desc: SSD table union memory table
+ inputs:
+ - columns: ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"]
+ indexs: ["index1:c3:c7"]
+ storage: SSD
+ rows:
+ - [1,"aa",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [4,"dd",20,33,1.4,2.4,1590738993000,"2020-05-04"]
+ - [5,"ee",21,34,1.5,2.5,1590738994000,"2020-05-05"]
+ - columns: ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"]
+ indexs: ["index1:c3:c7"]
+ storage: memory
+ rows:
+ - [2,"bb",20,31,1.2,2.2,1590738991000,"2020-05-02"]
+ - [3,"cc",20,32,1.3,2.3,1590738992000,"2020-05-03"]
+ sql: |
+ SELECT id, c1, c3, sum(c4) OVER w1 as w1_c4_sum FROM {0} WINDOW w1 AS (UNION {1} PARTITION BY {0}.c3 ORDER BY {0}.c7 ROWS BETWEEN 2 PRECEDING AND CURRENT ROW);
+ expect:
+ order: id
+ columns: ["id int","c1 string","c3 int","w1_c4_sum bigint"]
+ rows:
+ - [1,"aa",20,30]
+ - [4,"dd",20,96]
+ - [5,"ee",21,34]
+ - id: 9
+    desc: HDD table union memory table
+ inputs:
+ - columns: ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"]
+ indexs: ["index1:c3:c7"]
+ storage: HDD
+ rows:
+ - [1,"aa",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [4,"dd",20,33,1.4,2.4,1590738993000,"2020-05-04"]
+ - [5,"ee",21,34,1.5,2.5,1590738994000,"2020-05-05"]
+ - columns: ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"]
+ indexs: ["index1:c3:c7"]
+ storage: memory
+ rows:
+ - [2,"bb",20,31,1.2,2.2,1590738991000,"2020-05-02"]
+ - [3,"cc",20,32,1.3,2.3,1590738992000,"2020-05-03"]
+ sql: |
+ SELECT id, c1, c3, sum(c4) OVER w1 as w1_c4_sum FROM {0} WINDOW w1 AS (UNION {1} PARTITION BY {0}.c3 ORDER BY {0}.c7 ROWS BETWEEN 2 PRECEDING AND CURRENT ROW);
+ expect:
+ order: id
+ columns: ["id int","c1 string","c3 int","w1_c4_sum bigint"]
+ rows:
+ - [1,"aa",20,30]
+ - [4,"dd",20,96]
+ - [5,"ee",21,34]
+ - id: 10
+    desc: memory table union SSD table
+ inputs:
+ - columns: ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"]
+ indexs: ["index1:c3:c7"]
+ storage: memory
+ rows:
+ - [1,"aa",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [4,"dd",20,33,1.4,2.4,1590738993000,"2020-05-04"]
+ - [5,"ee",21,34,1.5,2.5,1590738994000,"2020-05-05"]
+ - columns: ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"]
+ indexs: ["index1:c3:c7"]
+ storage: SSD
+ rows:
+ - [2,"bb",20,31,1.2,2.2,1590738991000,"2020-05-02"]
+ - [3,"cc",20,32,1.3,2.3,1590738992000,"2020-05-03"]
+ sql: |
+ SELECT id, c1, c3, sum(c4) OVER w1 as w1_c4_sum FROM {0} WINDOW w1 AS (UNION {1} PARTITION BY {0}.c3 ORDER BY {0}.c7 ROWS BETWEEN 2 PRECEDING AND CURRENT ROW);
+ expect:
+ order: id
+ columns: ["id int","c1 string","c3 int","w1_c4_sum bigint"]
+ rows:
+ - [1,"aa",20,30]
+ - [4,"dd",20,96]
+ - [5,"ee",21,34]
+ - id: 11
+    desc: memory table union HDD table
+ inputs:
+ - columns: ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"]
+ indexs: ["index1:c3:c7"]
+ storage: memory
+ rows:
+ - [1,"aa",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [4,"dd",20,33,1.4,2.4,1590738993000,"2020-05-04"]
+ - [5,"ee",21,34,1.5,2.5,1590738994000,"2020-05-05"]
+ - columns: ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"]
+ indexs: ["index1:c3:c7"]
+ storage: HDD
+ rows:
+ - [2,"bb",20,31,1.2,2.2,1590738991000,"2020-05-02"]
+ - [3,"cc",20,32,1.3,2.3,1590738992000,"2020-05-03"]
+ sql: |
+ SELECT id, c1, c3, sum(c4) OVER w1 as w1_c4_sum FROM {0} WINDOW w1 AS (UNION {1} PARTITION BY {0}.c3 ORDER BY {0}.c7 ROWS BETWEEN 2 PRECEDING AND CURRENT ROW);
+ expect:
+ order: id
+ columns: ["id int","c1 string","c3 int","w1_c4_sum bigint"]
+ rows:
+ - [1,"aa",20,30]
+ - [4,"dd",20,96]
+ - [5,"ee",21,34]
+ - id: 12
+    desc: SSD table, insert rows with identical key and ts
+ mode: request-unsupport
+ inputs:
+ -
+ columns : ["c1 string","c2 int","c3 bigint","c4 timestamp"]
+ indexs: ["index1:c1:c4"]
+ storage: SSD
+ rows:
+ - ["aa", 2, 3, 1590738989000]
+ - ["aa", 2, 3, 1590738989000]
+ - ["aa", 2, 3, 1590738989000]
+ sql: select * from {0};
+ expect:
+ columns: ["c1 string","c2 int","c3 bigint","c4 timestamp"]
+ rows:
+ - ["aa", 2, 3, 1590738989000]
+ - id: 13
+    desc: HDD table, insert rows with identical key and ts
+ mode: request-unsupport
+ inputs:
+ -
+ columns : ["c1 string","c2 int","c3 bigint","c4 timestamp"]
+ indexs: ["index1:c1:c4"]
+ storage: HDD
+ rows:
+ - ["aa", 2, 3, 1590738989000]
+ - ["aa", 2, 3, 1590738989000]
+ - ["aa", 2, 3, 1590738989000]
+ sql: select * from {0};
+ expect:
+ columns: ["c1 string","c2 int","c3 bigint","c4 timestamp"]
+ rows:
+ - ["aa", 2, 3, 1590738989000]
+ - id: 14
+    desc: storage_mode set to an invalid value
+ mode: request-unsupport
+ sql: |
+ create table auto_MDYewbTv(
+ c1 string,
+ c2 int,
+ c3 bigint,
+ c4 timestamp,
+ index(key=(c1),ts=c4))options(partitionnum=1,replicanum=1,storage_mode="hdp");
+ expect:
+ success: false
+
+ - id: 15
+    desc: create a disk table, ttl_type=latest, ttl=4, insert 10 rows
+ mode: request-unsupport
+ inputs:
+ -
+ columns : ["c1 string","c2 int","c3 bigint","c4 timestamp"]
+ indexs: ["index1:c1:c4:4:latest"]
+ storage: SSD
+ rows:
+ - ["bb", 2, 3, 1590738989000]
+ - ["bb", 4, 5, 1590738990000]
+ - ["bb", 6, 7, 1590738991000]
+ - ["bb", 8, 9, 1590738992000]
+ - ["bb", 10, 11, 1590738993000]
+ - ["bb", 12, 13, 1590738994000]
+ - ["bb", 14, 15, 1590738995000]
+ - ["bb", 16, 17, 1590738996000]
+ - ["bb", 18, 19, 1590738997000]
+ - ["bb", 20, 21, 1590738998000]
+ sql: select c1,c2,c3 from {0};
+ expect:
+ columns: ["c1 string","c2 int","c3 bigint"]
+ rows:
+ - ["bb", 20, 21]
+ - ["bb", 18, 19]
+ - ["bb", 16, 17]
+ - ["bb", 14, 15]
+
+ - id: 16
+    desc: create a disk table, ttl_type=absolute, ttl=10m, insert 10 rows
+ mode: request-unsupport
+ inputs:
+ -
+ columns : ["c1 string","c2 int","c3 bigint","c4 timestamp"]
+ indexs: ["index1:c1:c4:10m:absolute"]
+ storage: hdd
+ rows:
+ - ["bb", 2, 3, "{currentTime}-100"]
+ - ["bb", 4, 5, "{currentTime}-200"]
+ - ["bb", 6, 7, "{currentTime}-599000"]
+ - ["bb", 8, 9, "{currentTime}-600000"]
+ - ["bb", 10, 11, "{currentTime}-600005"]
+ - ["bb", 12, 13, "{currentTime}-600006"]
+ - ["bb", 14, 15, "{currentTime}-600007"]
+ - ["bb", 16, 17, "{currentTime}-600008"]
+ - ["bb", 18, 19, "{currentTime}-600009"]
+ - ["bb", 20, 21, "{currentTime}-600010"]
+ sql: select c1,c2,c3 from {0};
+ expect:
+ columns: ["c1 string","c2 int","c3 bigint"]
+ rows:
+ - ["bb", 2, 3]
+ - ["bb", 4, 5]
+ - ["bb", 6, 7]
+
+ - id: 17
+    desc: create a disk table with two indexes, one latest and one absolute, insert 10 rows
+ mode: request-unsupport
+ inputs:
+ -
+ columns : ["c1 string","c2 int","c3 bigint","c4 timestamp"]
+ indexs: ["index2:c2:c4:4:latest","index1:c1:c4:10m:absolute"]
+ storage: hdd
+ rows:
+ - ["bb", 2, 3, "{currentTime}-100"]
+ - ["bb", 2, 5, "{currentTime}-200"]
+ - ["bb", 2, 7, "{currentTime}-59"]
+ - ["bb", 2, 9, "{currentTime}-600"]
+ - ["bb", 2, 11, "{currentTime}-602"]
+ - ["bb", 2, 13, "{currentTime}-600006"]
+ - ["bb", 2, 15, "{currentTime}-600007"]
+ - ["bb", 2, 17, "{currentTime}-600008"]
+ - ["bb", 2, 19, "{currentTime}-600009"]
+ - ["bb", 2, 21, "{currentTime}-600010"]
+ sql: select c1,c2,c3 from {0};
+ expect:
+ columns: ["c1 string","c2 int","c3 bigint"]
+ rows:
+ - ["bb", 2, 7]
+ - ["bb", 2, 3]
+ - ["bb", 2, 5]
+ - ["bb", 2, 9]
+
+ - id: 18
+    desc: create a disk table with two indexes, one latest and one absolute, insert 10 rows, query with a where clause
+ mode: request-unsupport
+ inputs:
+ -
+ columns : ["c1 string","c2 int","c3 bigint","c4 timestamp"]
+ indexs: ["index2:c2:c4:4:latest","index1:c1:c4:10m:absolute"]
+ storage: hdd
+ rows:
+ - ["bb", 2, 3, "{currentTime}-100"]
+ - ["bb", 2, 5, "{currentTime}-200"]
+ - ["bb", 2, 7, "{currentTime}-59"]
+ - ["bb", 2, 9, "{currentTime}-600"]
+ - ["bb", 2, 11, "{currentTime}-602"]
+ - ["bb", 2, 13, "{currentTime}-600006"]
+ - ["bb", 2, 15, "{currentTime}-600007"]
+ - ["bb", 2, 17, "{currentTime}-600008"]
+ - ["bb", 2, 19, "{currentTime}-600009"]
+ - ["bb", 2, 21, "{currentTime}-600010"]
+ sql: select c1,c2,c3 from {0} where c1 = "bb";
+ expect:
+ columns: ["c1 string","c2 int","c3 bigint"]
+ rows:
+ - ["bb", 2, 7]
+ - ["bb", 2, 3]
+ - ["bb", 2, 5]
+ - ["bb", 2, 9]
+ - ["bb", 2, 11]
diff --git a/cases/integration_test/dml/multi_insert.yaml b/cases/integration_test/dml/multi_insert.yaml
new file mode 100644
index 00000000000..1f606089abe
--- /dev/null
+++ b/cases/integration_test/dml/multi_insert.yaml
@@ -0,0 +1,287 @@
+# Copyright 2021 4Paradigm
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+db: multi_insert_db
+debugs: []
+version: 0.5.0
+cases:
+ - id: 0
+    desc: simple INSERT
+ inputs:
+ -
+ create: |
+ create table {0} (
+ col0 string not null,
+ col1 int not null,
+ col2 smallint not null,
+ col3 float not null,
+ col4 double not null,
+ col5 bigint not null,
+ col6 string not null,
+ col7 timestamp not null,
+ col8 date not null,
+ col9 bool not null,
+ index(key=(col2), ts=col5)
+ );
+ insert: insert into {0} values("hello", 1, 2, 3.3f, 4.4, 5L, "world", 12345678L, "2020-05-21", true);
+ sql: select * from {0};
+ expect:
+ columns: ["col0 string", "col1 int32", "col2 int16", "col3 float", "col4 double", "col5 int64",
+ "col6 string", "col7 timestamp", "col8 date", "col9 bool"]
+ order: col1
+ rows:
+ - [hello, 1, 2, 3.3, 4.4, 5, world, 12345678, "2020-05-21", true]
+ - id: 1
+    desc: simple INSERT with multiple rows
+ inputs:
+ -
+ create: |
+ create table {0} (
+ col0 string not null,
+ col1 int not null,
+ col2 smallint not null,
+ col3 float not null,
+ col4 double not null,
+ col5 bigint not null,
+ col6 string not null,
+ index(key=(col2), ts=col5)
+ );
+ insert: |
+ insert into {0} values
+ ("hello", 1, 2, 3.3, 4.4, 5, "world"),
+ ("hello", 11, 22, 33.3, 44.4, 55, "fesql");
+
+ sql: select * from {0};
+ expect:
+ columns: [ "col0 string", "col1 int32", "col2 int16", "col3 float", "col4 double", "col5 int64",
+ "col6 string"]
+ order: col1
+ rows:
+ - [hello, 1, 2, 3.3, 4.4, 5, world]
+ - [hello, 11, 22, 33.3, 44.4, 55, fesql]
+
+ - id: 2
+    desc: simple INSERT with timestamp
+ inputs:
+ - create: |
+ create table {0} (
+ col1 int not null,
+ col5 bigint not null,
+ std_ts timestamp not null,
+ index(key=(col1), ts=col5)
+ );
+ insert: |
+ insert into {0} values
+ (1, 10000L, 1590738987000L),
+ (2, 20000L, 1590738988000L);
+ sql: select * from {0};
+ expect:
+ columns: ["col1 int", "col5 bigint", "std_ts timestamp"]
+ order: col1
+ rows:
+ - [1, 10000, 1590738987000]
+ - [2, 20000, 1590738988000]
+
+ - id: 3
+    desc: insert into specified columns, the remaining columns default to NULL
+ inputs:
+ -
+ create: |
+ create table {0} (
+ col1 int not null,
+ col2 smallint,
+ col3 float,
+ col4 double,
+ col5 bigint not null,
+ std_ts timestamp not null,
+ str string,
+ index(key=(col1), ts=col5)
+ );
+ insert: |
+ insert into {0} (col1, col5, std_ts) values
+ (1, 10000L, 1590738987000L),
+ (2, 20000L, 1590738988000L);
+ sql: select * from {0};
+ expect:
+ columns: ["col1 int", "col2 int16", "col3 float", "col4 double", "col5 bigint", "std_ts timestamp", "str string"]
+ order: col1
+ rows:
+ - [1, NULL, NULL, NULL, 10000, 1590738987000, NULL]
+ - [2, NULL, NULL, NULL, 20000, 1590738988000, NULL]
+ - id: 4
+ desc: Insert date
+ inputs:
+ - create: |
+ create table {0} (
+ col1 int not null,
+ col2 smallint,
+ col3 float,
+ col4 double,
+ col5 bigint not null,
+ std_date date not null,
+ str string,
+ index(key=(col1), ts=col5)
+ );
+ insert: |
+ insert into {0} (col1, col5, std_date) values
+ (1, 10000L, '2020-05-27'),
+ (2, 20000L, '2020-05-28');
+
+ sql: select * from {0};
+ expect:
+ columns: [ "col1 int", "col2 int16", "col3 float", "col4 double", "col5 bigint", "std_date date", "str string" ]
+ order: col1
+ rows:
+ - [1, NULL, NULL, NULL, 10000, "2020-05-27", NULL]
+ - [2, NULL, NULL, NULL, 20000, "2020-05-28", NULL]
+ - id: 5
+    desc: simple INSERT with NULL value
+ inputs:
+ -
+ create: |
+ create table {0} (
+ col0 string not null,
+ col1 int not null,
+ col2 smallint,
+ col3 float not null,
+ col4 double not null,
+ col5 bigint not null,
+ col6 string not null,
+ index(key=(col2), ts=col5)
+ );
+ insert: |
+ insert into {0} values ("hello", 1, NULL, 3.3f, 4.4, 5L, "world"),
+ ("hi", 2, NULL, 33.3f, 44.4, 55L, "db");
+ sql: select * from {0};
+ expect:
+ columns: [ "col0 string", "col1 int", "col2 int16", "col3 float", "col4 double", "col5 bigint", "col6 string" ]
+ order: col1
+ rows:
+ - [hello, 1, NULL, 3.3, 4.4, 5, world]
+ - [hi, 2, NULL, 33.3, 44.4, 55, db]
+ -
+ id: 6
+    desc: insert multiple rows covering all columns
+ inputs:
+ -
+ create: |
+ create table {0} (
+ id int not null,
+ c1 int not null,
+ c2 smallint not null,
+ c3 float not null,
+ c4 double not null,
+ c5 bigint not null,
+ c6 string not null,
+ c7 timestamp not null,
+ c8 date not null,
+ c9 bool not null,
+ index(key=(c1), ts=c5)
+ );
+ insert: |
+ insert into {0} values
+ (1, 1, 2, 3.3f, 4.4, 5L, "aa", 12345678L, "2020-05-21", true),
+ (2, 10, 20, 3.31f, 4.41, 50L, "bb", 12345679L, "2020-05-22", false);
+ sql: select * from {0};
+ expect:
+ columns : ["id int","c1 int","c2 smallint","c3 float","c4 double","c5 bigint","c6 string","c7 timestamp","c8 date","c9 bool"]
+ order: id
+ rows:
+ - [1,1,2,3.3,4.4,5,"aa",12345678,"2020-05-21",true]
+ - [2,10,20,3.31,4.41,50,"bb",12345679,"2020-05-22",false]
+ -
+ id: 7
+    desc: one of the rows has an incompatible data type
+ inputs:
+ -
+ create: |
+ create table {0} (
+ id int,
+ c1 int,
+ c2 smallint,
+ c3 float,
+ c5 bigint,
+ index(key=(c1), ts=c5)
+ );
+ sql: |
+ insert into {0} (id,c3,c5)values
+ (1, 3.3,12345678),
+ (2, "aa",12345679);
+ expect:
+ success: false
+ -
+ id: 8
+    desc: insert multiple rows containing empty strings
+ mode: cli-unsupport
+ inputs:
+ -
+ create: |
+ create table {0} (
+ id int,
+ c1 int,
+ c2 string,
+ c3 float,
+ c5 bigint,
+ index(key=(c1), ts=c5)
+ );
+ insert: |
+ insert into {0} (id,c2,c3,c5)values
+ (1, "",null,12345678),
+ (2, "",null,12345679);
+ sql: select * from {0};
+ expect:
+ columns : ["id int","c1 int","c2 string","c3 float","c5 bigint"]
+ order: id
+ rows:
+ - [1,null,"",null,12345678]
+ - [2,null,"",null,12345679]
+ -
+ id: 9
+    desc: number of inserted values does not match the number of columns
+ inputs:
+ -
+ create: |
+ create table {0} (
+ id int,
+ c1 int,
+ c2 smallint,
+ c3 float,
+ c5 bigint,
+ index(key=(c1), ts=c5)
+ );
+ sql: |
+ insert into {0} (id,c3,c5)values
+ (1,12345678),
+ (2,12345679);
+ expect:
+ success: false
+ -
+ id: 10
+    desc: one of the rows is missing a column
+ inputs:
+ -
+ create: |
+ create table {0} (
+ id int,
+ c1 int,
+ c2 smallint,
+ c3 float,
+ c5 bigint,
+ index(key=(c1), ts=c5)
+ );
+ sql: |
+ insert into {0} (id,c3,c5)values
+ (1, 3.3,12345678),
+ (2,12345679);
+ expect:
+ success: false
\ No newline at end of file
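
The insert cases above boil down to two patterns; a minimal sketch (table and column names are illustrative):

    -- full-row insert: every column gets a value, with f/L suffixes allowed on float/bigint literals
    insert into t1 values ("hello", 1, 2, 3.3f, 4.4, 5L, "world", 12345678L, "2020-05-21", true);
    -- partial insert: unspecified nullable columns default to NULL; multiple rows are comma-separated
    insert into t1 (col1, col5, std_ts) values
        (1, 10000L, 1590738987000L),
        (2, 20000L, 1590738988000L);

Rows whose value count or types do not match the listed columns cause the whole statement to fail, as the negative cases at the end of the file assert.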
diff --git a/cases/integration_test/dml/test_delete.yaml b/cases/integration_test/dml/test_delete.yaml
new file mode 100644
index 00000000000..d73709145d5
--- /dev/null
+++ b/cases/integration_test/dml/test_delete.yaml
@@ -0,0 +1,598 @@
+# Copyright 2021 4Paradigm
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+db: test_zw
+debugs: []
+version: 0.6.0
+cases:
+ -
+ id: 0
+    desc: delete one key
+ inputs:
+ -
+ columns: ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"]
+ indexs: ["index1:c1:c7"]
+ rows:
+ - [1,"aa",1,2,3,1.1,2.1,1590738989000,"2020-05-01",true]
+ - [2,"bb",1,2,3,1.1,2.1,1590738989000,"2020-05-01",true]
+ - [3,"aa",1,2,3,1.1,2.1,1590738990000,"2020-05-01",true]
+ sqls:
+ - delete from {0} where c1='aa';
+ - select * from {0};
+ expect:
+ columns: ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"]
+ rows:
+ - [2,"bb",1,2,3,1.1,2.1,1590738989000,"2020-05-01",true]
+ -
+ id: 1
+    desc: delete on a composite index
+ inputs:
+ -
+ columns: ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"]
+ indexs: ["index1:c1|c2:c7"]
+ rows:
+ - [1,"aa",1,2,3,1.1,2.1,1590738989000,"2020-05-01",true]
+ - [2,"bb",1,2,3,1.1,2.1,1590738989000,"2020-05-01",true]
+ - [3,"aa",1,2,3,1.1,2.1,1590738990000,"2020-05-01",true]
+ - [4,"aa",2,2,3,1.1,2.1,1590738991000,"2020-05-01",true]
+ sqls:
+ - delete from {0} where c1='aa' and c2=1;
+ - select * from {0};
+ expect:
+ columns: ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"]
+ order: id
+ rows:
+ - [2,"bb",1,2,3,1.1,2.1,1590738989000,"2020-05-01",true]
+ - [4,"aa",2,2,3,1.1,2.1,1590738991000,"2020-05-01",true]
+ -
+ id: 2
+    desc: delete two keys of one index
+ inputs:
+ -
+ columns: ["c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"]
+ indexs: ["index1:c1:c7"]
+ rows:
+ - ["aa",1,2,3,1.1,2.1,1590738989000,"2020-05-01",true]
+ - ["bb",1,2,3,1.1,2.1,1590738989000,"2020-05-01",true]
+ - ["aa",1,2,3,1.1,2.1,1590738990000,"2020-05-01",true]
+ - ["cc",2,2,3,1.1,2.1,1590738991000,"2020-05-01",true]
+ sqls:
+ - delete from {0} where c1='aa' or c1='cc';
+ expect:
+ success: false
+ msg: fail
+ -
+ id: 3
+    desc: delete keys of two different indexes
+ inputs:
+ -
+ columns: ["c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"]
+ indexs: ["index1:c1:c7","index2:c2:c7"]
+ rows:
+ - ["aa",1,2,3,1.1,2.1,1590738989000,"2020-05-01",true]
+ - ["bb",1,2,3,1.1,2.1,1590738989000,"2020-05-01",true]
+ - ["aa",1,2,3,1.1,2.1,1590738990000,"2020-05-01",true]
+ - ["cc",2,2,3,1.1,2.1,1590738991000,"2020-05-01",true]
+ sqls:
+ - delete from {0} where c1='aa' or c2=1;
+ expect:
+ success: false
+ msg: fail
+ -
+ id: 4
+    desc: two indexes, delete via one of them
+ mode: cluster-unsupport
+ inputs:
+ -
+ columns: ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"]
+ indexs: ["index1:c1:c7","index2:c2:c7"]
+ rows:
+ - [1,"aa",1,2,3,1.1,2.1,1590738989000,"2020-05-01",true]
+ - [2,"bb",1,2,3,1.1,2.1,1590738989000,"2020-05-01",true]
+ - [3,"aa",2,2,3,1.1,2.1,1590738990000,"2020-05-01",true]
+ - [4,"aa",1,2,3,1.1,2.1,1590738991000,"2020-05-01",true]
+ sqls:
+ - delete from {0} where c2=2;
+ sql: SELECT id, c2, count(c4) OVER w1 as w1_c4_count FROM {0} WINDOW w1 AS (PARTITION BY {0}.c2 ORDER BY {0}.c7 ROWS BETWEEN 2 PRECEDING AND CURRENT ROW);
+ expect:
+ columns: ["id int","c2 smallint","w1_c4_count bigint"]
+ order: id
+ rows:
+ - [1,1,1]
+ - [2,1,2]
+ - [4,1,3]
+ -
+ id: 5
+    desc: delete on a non-index column
+ inputs:
+ -
+ columns: ["c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"]
+ indexs: ["index1:c1:c7"]
+ rows:
+ - ["aa",1,2,3,1.1,2.1,1590738989000,"2020-05-01",true]
+ - ["bb",1,2,3,1.1,2.1,1590738989000,"2020-05-01",true]
+ - ["aa",1,2,3,1.1,2.1,1590738990000,"2020-05-01",true]
+ - ["cc",2,2,3,1.1,2.1,1590738991000,"2020-05-01",true]
+ sqls:
+ - delete from {0} where c2=1;
+ expect:
+ success: false
+ msg: fail
+ -
+ id: 6
+    desc: delete a key that does not exist
+ inputs:
+ -
+ columns: ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"]
+ indexs: ["index1:c1:c7"]
+ rows:
+ - [1,"aa",1,2,3,1.1,2.1,1590738989000,"2020-05-01",true]
+ - [2,"bb",1,2,3,1.1,2.1,1590738989000,"2020-05-01",true]
+ - [3,"aa",1,2,3,1.1,2.1,1590738990000,"2020-05-01",true]
+ sqls:
+ - delete from {0} where c1='cc';
+ - select * from {0};
+ expect:
+ columns: ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"]
+ order: id
+ rows:
+ - [1,"aa",1,2,3,1.1,2.1,1590738989000,"2020-05-01",true]
+ - [2,"bb",1,2,3,1.1,2.1,1590738989000,"2020-05-01",true]
+ - [3,"aa",1,2,3,1.1,2.1,1590738990000,"2020-05-01",true]
+ -
+ id: 7
+ desc: delete null
+ inputs:
+ -
+ columns: ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"]
+ indexs: ["index1:c1:c7"]
+ rows:
+ - [1,null,1,2,3,1.1,2.1,1590738989000,"2020-05-01",true]
+ - [2,"bb",1,2,3,1.1,2.1,1590738989000,"2020-05-01",true]
+ - [3,null,1,2,3,1.1,2.1,1590738990000,"2020-05-01",true]
+ sqls:
+ - delete from {0} where c1=null;
+ - select * from {0};
+ expect:
+ columns: ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"]
+ rows:
+ - [2,"bb",1,2,3,1.1,2.1,1590738989000,"2020-05-01",true]
+ -
+ id: 8
+    desc: delete an empty-string key
+ inputs:
+ -
+ columns: ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"]
+ indexs: ["index1:c1:c7"]
+ rows:
+ - [1,"",1,2,3,1.1,2.1,1590738989000,"2020-05-01",true]
+ - [2,"bb",1,2,3,1.1,2.1,1590738989000,"2020-05-01",true]
+ - [3,"",1,2,3,1.1,2.1,1590738990000,"2020-05-01",true]
+ sqls:
+ - delete from {0} where c1='';
+ - select * from {0};
+ expect:
+ columns: ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"]
+ rows:
+ - [2,"bb",1,2,3,1.1,2.1,1590738989000,"2020-05-01",true]
+ -
+ id: 10
+ desc: delete int
+ inputs:
+ -
+ columns: ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"]
+ indexs: ["index1:c3:c7"]
+ rows:
+ - [1,"aa",2,2,3,1.1,2.1,1590738990000,"2020-05-01",true]
+ - [2,"bb",1,2,3,1.1,2.1,1590738989000,"2020-05-01",true]
+ - [3,"aa",1,3,3,1.1,2.1,1590738990000,"2020-05-01",true]
+ sqls:
+ - delete from {0} where c3=3;
+ - select * from {0};
+ expect:
+ columns: ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"]
+ order: id
+ rows:
+ - [1,"aa",2,2,3,1.1,2.1,1590738990000,"2020-05-01",true]
+ - [2,"bb",1,2,3,1.1,2.1,1590738989000,"2020-05-01",true]
+ -
+ id: 11
+ desc: delete smallint
+ inputs:
+ -
+ columns: ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"]
+ indexs: ["index1:c2:c7"]
+ rows:
+ - [1,"aa",1,2,3,1.1,2.1,1590738989000,"2020-05-01",true]
+ - [2,"bb",2,2,3,1.1,2.1,1590738989000,"2020-05-01",true]
+ - [3,"aa",1,2,3,1.1,2.1,1590738990000,"2020-05-01",true]
+ sqls:
+ - delete from {0} where c2=1;
+ - select * from {0};
+ expect:
+ columns: ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"]
+ rows:
+ - [2,"bb",2,2,3,1.1,2.1,1590738989000,"2020-05-01",true]
+ -
+ id: 12
+ desc: delete bigint
+ inputs:
+ -
+ columns: ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"]
+ indexs: ["index1:c4:c7"]
+ rows:
+ - [1,"aa",1,2,4,1.1,2.1,1590738989000,"2020-05-01",true]
+ - [2,"bb",1,2,4,1.1,2.1,1590738989000,"2020-05-01",true]
+ - [3,"aa",1,2,3,1.1,2.1,1590738990000,"2020-05-01",true]
+ sqls:
+ - delete from {0} where c4=4;
+ - select * from {0};
+ expect:
+ columns: ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"]
+ rows:
+ - [3,"aa",1,2,3,1.1,2.1,1590738990000,"2020-05-01",true]
+ -
+ id: 13
+ desc: delete date
+ inputs:
+ -
+ columns: ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"]
+ indexs: ["index1:c8:c7"]
+ rows:
+ - [1,"aa",1,2,3,1.1,2.1,1590738989000,"2020-05-01",true]
+ - [2,"bb",1,2,3,1.1,2.1,1590738989000,"2020-05-02",true]
+ - [3,"aa",1,2,3,1.1,2.1,1590738990000,"2020-05-01",true]
+ sqls:
+ - delete from {0} where c8='2020-05-02';
+ - select * from {0};
+ expect:
+ columns: ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"]
+ order: id
+ rows:
+ - [1,"aa",1,2,3,1.1,2.1,1590738989000,"2020-05-01",true]
+ - [3,"aa",1,2,3,1.1,2.1,1590738990000,"2020-05-01",true]
+ -
+ id: 14
+ desc: delete timestamp
+ inputs:
+ -
+ columns: ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"]
+ indexs: ["index1:c7:c7"]
+ rows:
+ - [1,"aa",1,2,3,1.1,2.1,1590738989000,"2020-05-01",true]
+ - [2,"bb",1,2,3,1.1,2.1,1590738989000,"2020-05-01",true]
+ - [3,"aa",1,2,3,1.1,2.1,1590738990000,"2020-05-01",true]
+ sqls:
+ - delete from {0} where c7=1590738989000;
+ - select * from {0};
+ expect:
+ columns: ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"]
+ rows:
+ - [3,"aa",1,2,3,1.1,2.1,1590738990000,"2020-05-01",true]
+ -
+ id: 15
+ desc: delete bool
+ inputs:
+ -
+ columns: ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"]
+ indexs: ["index1:c9:c7"]
+ rows:
+ - [1,"aa",1,2,3,1.1,2.1,1590738989000,"2020-05-01",false]
+ - [2,"bb",1,2,3,1.1,2.1,1590738989000,"2020-05-01",true]
+ - [3,"aa",1,2,3,1.1,2.1,1590738990000,"2020-05-01",true]
+ sqls:
+ - delete from {0} where c9=true;
+ - select * from {0};
+ expect:
+ columns: ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"]
+ rows:
+ - [1,"aa",1,2,3,1.1,2.1,1590738989000,"2020-05-01",false]
+ -
+ id: 16
+      desc: delete twice on the same index with different keys
+ inputs:
+ -
+ columns: ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"]
+ indexs: ["index1:c1:c7"]
+ rows:
+ - [1,"aa",1,2,3,1.1,2.1,1590738989000,"2020-05-01",true]
+ - [2,"bb",1,2,3,1.1,2.1,1590738989000,"2020-05-01",true]
+ - [3,"cc",1,2,3,1.1,2.1,1590738990000,"2020-05-01",true]
+ sqls:
+ - delete from {0} where c1='aa';
+ - delete from {0} where c1='cc';
+ - select * from {0};
+ expect:
+ columns: ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"]
+ rows:
+ - [2,"bb",1,2,3,1.1,2.1,1590738989000,"2020-05-01",true]
+ -
+ id: 17
+      desc: delete twice on different indexes
+ mode: cluster-unsupport
+ inputs:
+ -
+ columns: ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"]
+          indexs: ["index1:c1:c7","index2:c2:c7"]
+ rows:
+ - [1,"aa",1,2,3,1.1,2.1,1590738989000,"2020-05-01",true]
+ - [2,"bb",1,2,3,1.1,2.1,1590738989000,"2020-05-01",true]
+ - [3,"aa",2,2,3,1.1,2.1,1590738990000,"2020-05-01",true]
+ - [4,"cc",2,2,3,1.1,2.1,1590738991000,"2020-05-01",true]
+ sqls:
+ - delete from {0} where c1='aa';
+ - delete from {0} where c2=2;
+ sql: |
+ SELECT id, c2, count(c4) OVER w1 as w1_c4_count, count(c5) OVER w2 as w2_c5_count FROM {0}
+ WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS BETWEEN 2 PRECEDING AND CURRENT ROW),
+ w2 AS (PARTITION BY {0}.c2 ORDER BY {0}.c7 ROWS BETWEEN 2 PRECEDING AND CURRENT ROW);
+ expect:
+ columns: ["id int","c2 smallint","w1_c4_count bigint","w2_c5_count bigint"]
+ order: id
+ rows:
+ - [1,1,1,1]
+ - [2,1,1,2]
+ -
+ id: 18
+      desc: delete expired data
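+      # the index ttl "1:latest" keeps only the newest row per key, so after deleting key 'aa' only the 'bb' row is expected to remain.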
+ inputs:
+ -
+ columns: ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"]
+ indexs: ["index1:c1:c7:1:latest"]
+ rows:
+ - [1,"aa",1,2,3,1.1,2.1,1590738989000,"2020-05-01",true]
+ - [2,"bb",1,2,3,1.1,2.1,1590738989000,"2020-05-01",true]
+ - [3,"aa",1,2,3,1.1,2.1,1590738990000,"2020-05-01",true]
+ sqls:
+ - delete from {0} where c1='aa';
+ - select * from {0};
+ expect:
+ columns: ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"]
+ rows:
+ - [2,"bb",1,2,3,1.1,2.1,1590738989000,"2020-05-01",true]
+ -
+ id: 19
+      desc: delete from a table that does not exist
+ inputs:
+ -
+ columns: ["c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"]
+ indexs: ["index1:c1:c7"]
+ rows:
+ - ["aa",1,2,3,1.1,2.1,1590738989000,"2020-05-01",true]
+ - ["bb",1,2,3,1.1,2.1,1590738989000,"2020-05-01",true]
+ - ["aa",1,2,3,1.1,2.1,1590738990000,"2020-05-01",true]
+ - ["cc",2,2,3,1.1,2.1,1590738991000,"2020-05-01",true]
+ sql: delete from {0}1 where c1='aa';
+ expect:
+ success: false
+ msg: fail
+ -
+ id: 20
+      desc: delete on a column that does not exist
+ inputs:
+ -
+ columns: ["c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"]
+ indexs: ["index1:c1:c7"]
+ rows:
+ - ["aa",1,2,3,1.1,2.1,1590738989000,"2020-05-01",true]
+ - ["bb",1,2,3,1.1,2.1,1590738989000,"2020-05-01",true]
+ - ["aa",1,2,3,1.1,2.1,1590738990000,"2020-05-01",true]
+ - ["cc",2,2,3,1.1,2.1,1590738991000,"2020-05-01",true]
+ sqls:
+ - delete from {0} where c11=1;
+ expect:
+ success: false
+ msg: fail
+ -
+ id: 21
+      desc: delete data in another database
+ inputs:
+ -
+ db: d1
+ columns: ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"]
+ indexs: ["index1:c1:c7"]
+ rows:
+ - [1,"aa",1,2,3,1.1,2.1,1590738989000,"2020-05-01",true]
+ - [2,"bb",1,2,3,1.1,2.1,1590738989000,"2020-05-01",true]
+ - [3,"aa",1,2,3,1.1,2.1,1590738990000,"2020-05-01",true]
+ sqls:
+ - delete from d1.{0} where c1='aa';
+ - select * from d1.{0};
+ expect:
+ columns: ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"]
+ rows:
+ - [2,"bb",1,2,3,1.1,2.1,1590738989000,"2020-05-01",true]
+ -
+ id: 22
+      desc: two indexes share the same key column, delete one key
+ inputs:
+ -
+ columns: ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"]
+ indexs: ["index1:c1:c7","index2:c1:c4:1:latest"]
+ rows:
+ - [1,"aa",1,2,3,1.1,2.1,1590738989000,"2020-05-01",true]
+ - [2,"bb",1,2,3,1.1,2.1,1590738989000,"2020-05-01",true]
+ - [3,"aa",1,2,3,1.1,2.1,1590738990000,"2020-05-01",true]
+ - [4,"cc",2,2,3,1.1,2.1,1590738990000,"2020-05-01",true]
+ - [5,"cc",2,2,3,1.1,2.1,1590738991000,"2020-05-01",true]
+ sqls:
+ - delete from {0} where c1='aa';
+ - select * from {0};
+ expect:
+ columns: ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"]
+ order: id
+ rows:
+ - [2,"bb",1,2,3,1.1,2.1,1590738989000,"2020-05-01",true]
+ - [4,"cc",2,2,3,1.1,2.1,1590738990000,"2020-05-01",true]
+ - [5,"cc",2,2,3,1.1,2.1,1590738991000,"2020-05-01",true]
+ -
+ id: 23
+      desc: delete all data
+ inputs:
+ -
+ columns: ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"]
+ indexs: ["index1:c2:c7"]
+ rows:
+ - [1,"aa",1,2,3,1.1,2.1,1590738989000,"2020-05-01",true]
+ - [2,"bb",1,2,3,1.1,2.1,1590738989000,"2020-05-01",true]
+ - [3,"aa",1,2,3,1.1,2.1,1590738990000,"2020-05-01",true]
+ sqls:
+ - delete from {0} where c2=1;
+ - select * from {0};
+ expect:
+ count: 0
+ -
+ id: 24
+      desc: two indexes, data expired on one index, delete by the other index
+ mode: cluster-unsupport
+ inputs:
+ -
+ columns: ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"]
+ indexs: ["index1:c1:c7:1:latest","index2:c2:c7"]
+ rows:
+ - [1,"aa",1,2,3,1.1,2.1,1590738989000,"2020-05-01",true]
+ - [2,"bb",1,2,3,1.1,2.1,1590738989000,"2020-05-01",true]
+ - [3,"aa",1,2,3,1.1,2.1,1590738990000,"2020-05-01",true]
+ - [4,"cc",2,2,3,1.1,2.1,1590738990000,"2020-05-01",true]
+ - [5,"cc",2,2,3,1.1,2.1,1590738991000,"2020-05-01",true]
+ sqls:
+ - delete from {0} where c2=1;
+ sql: SELECT id, c2, count(c4) OVER w1 as w1_c4_count FROM {0} WINDOW w1 AS (PARTITION BY {0}.c2 ORDER BY {0}.c7 ROWS BETWEEN 2 PRECEDING AND CURRENT ROW);
+ expect:
+ columns: ["id int","c2 smallint","w1_c4_count bigint"]
+ order: id
+ rows:
+ - [4,2,1]
+ - [5,2,2]
+ -
+ id: 25
+      desc: data expired, delete another pk
+ inputs:
+ -
+ columns: ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"]
+ indexs: ["index1:c1:c7:1:latest"]
+ rows:
+ - [1,"aa",1,2,3,1.1,2.1,1590738989000,"2020-05-01",true]
+ - [2,"bb",1,2,3,1.1,2.1,1590738989000,"2020-05-01",true]
+ - [3,"aa",1,2,3,1.1,2.1,1590738990000,"2020-05-01",true]
+ sqls:
+ - delete from {0} where c1='bb';
+ - select * from {0};
+ expect:
+ columns: ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"]
+ rows:
+ - [3,"aa",1,2,3,1.1,2.1,1590738990000,"2020-05-01",true]
+ -
+ id: 26
+      desc: delete with an inequality condition
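+      # cases 26 and 27 assume DELETE only accepts equality predicates on index columns; != and >= are expected to fail.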
+ inputs:
+ -
+ columns: ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"]
+ indexs: ["index1:c1:c7"]
+ rows:
+ - [1,"aa",1,2,3,1.1,2.1,1590738989000,"2020-05-01",true]
+ - [2,"bb",1,2,3,1.1,2.1,1590738989000,"2020-05-01",true]
+ - [3,"cc",1,2,3,1.1,2.1,1590738990000,"2020-05-01",true]
+ sqls:
+ - delete from {0} where c1!='cc';
+ expect:
+ success: false
+ msg: fail
+ -
+ id: 27
+      desc: delete with a comparison operator
+ inputs:
+ -
+ columns: ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"]
+ indexs: ["index1:c2:c7"]
+ rows:
+ - [1,"aa",1,2,3,1.1,2.1,1590738989000,"2020-05-01",true]
+ - [2,"bb",2,2,3,1.1,2.1,1590738989000,"2020-05-01",true]
+ - [3,"aa",3,2,3,1.1,2.1,1590738990000,"2020-05-01",true]
+ sqls:
+ - delete from {0} where c2>=2;
+ expect:
+ success: false
+ msg: fail
+ -
+ id: 28
+      desc: delete from a table named job
+ inputs:
+ -
+ name: job
+ columns: ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"]
+ indexs: ["index1:c1:c7"]
+ rows:
+ - [1,"aa",1,2,3,1.1,2.1,1590738989000,"2020-05-01",true]
+ - [2,"bb",2,2,3,1.1,2.1,1590738989000,"2020-05-01",true]
+ - [3,"aa",3,2,3,1.1,2.1,1590738990000,"2020-05-01",true]
+ sqls:
+ - delete from {0} where c1='aa';
+ - select * from {0};
+ expect:
+ columns: ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"]
+ rows:
+ - [2,"bb",2,2,3,1.1,2.1,1590738989000,"2020-05-01",true]
+ -
+ id: 29
+      desc: delete from an empty table
+ inputs:
+ -
+ name: job
+ columns: ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"]
+ indexs: ["index1:c1:c7"]
+ sqls:
+ - delete from {0} where c1='aa';
+ expect:
+ success: true
+ -
+ id: 30
+      desc: composite key with one null component
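+      # "index1:c1|c2:c7" declares a composite key over (c1,c2); the delete below has to match both parts, so only the row with c1=NULL and c2=2 is expected to be removed.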
+ inputs:
+ -
+ columns: ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"]
+ indexs: ["index1:c1|c2:c7"]
+ rows:
+ - [1,null,2,2,3,1.1,2.1,1590738989000,"2020-05-01",true]
+ - [2,"bb",1,2,3,1.1,2.1,1590738989000,"2020-05-01",true]
+ - [3,null,1,2,3,1.1,2.1,1590738990000,"2020-05-01",true]
+ sqls:
+ - delete from {0} where c1=null and c2=2;
+ - select * from {0};
+ expect:
+ columns: ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"]
+ order: id
+ rows:
+ - [2,"bb",1,2,3,1.1,2.1,1590738989000,"2020-05-01",true]
+ - [3,null,1,2,3,1.1,2.1,1590738990000,"2020-05-01",true]
+ -
+ id: 31
+      desc: composite key with one empty-string component
+ inputs:
+ -
+ columns: ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"]
+ indexs: ["index1:c1|c2:c7"]
+ rows:
+ - [1,"",1,2,3,1.1,2.1,1590738989000,"2020-05-01",true]
+ - [2,"bb",1,2,3,1.1,2.1,1590738989000,"2020-05-01",true]
+ - [3,"",2,2,3,1.1,2.1,1590738990000,"2020-05-01",true]
+ sqls:
+ - delete from {0} where c1='' and c2=2;
+ - select * from {0};
+ expect:
+ columns: ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"]
+ rows:
+ - [1,"",1,2,3,1.1,2.1,1590738989000,"2020-05-01",true]
+ - [2,"bb",1,2,3,1.1,2.1,1590738989000,"2020-05-01",true]
+
diff --git a/cases/integration_test/dml/test_insert.yaml b/cases/integration_test/dml/test_insert.yaml
new file mode 100644
index 00000000000..430c4217043
--- /dev/null
+++ b/cases/integration_test/dml/test_insert.yaml
@@ -0,0 +1,232 @@
+# Copyright 2021 4Paradigm
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+db: test_zw
+debugs: []
+version: 0.5.0
+cases:
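+# note: {0} refers to the first input table and is replaced with its actual table name; a case either describes that table via columns/indexs/rows or supplies raw create/insert statements.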
+ -
+ id: 0
+    desc: insert data of all types
+ inputs:
+ -
+ columns : ["c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"]
+ indexs: ["index1:c1:c7"]
+ rows:
+ - ["aa",1,2,3,1.1,2.1,1590738989000,"2020-05-01"]
+ sql: select * from {0};
+ expect:
+ columns : ["c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"]
+ rows:
+ - ["aa",1,2,3,1.1,2.1,1590738989000,"2020-05-01"]
+ -
+ id: 1
+    desc: insert data into all columns
+ sqlDialect: ["HybridSQL"]
+ inputs:
+ -
+ create: |
+ create table {0} (c1 string, c3 int, c4 bigint, c5 float, c6 double, c7 timestamp, c8 date,
+ index(key=(c1), ts=c7));
+ insert: insert into {0} values('aa',2,3,1.1,2.1,1590738989000L,'2020-05-01');
+ sql: select * from {0};
+ expect:
+ columns : ["c1 string", "c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"]
+ rows:
+ - ["aa",2,3,1.1,2.1,1590738989000,"2020-05-01"]
+ -
+ id: 2
+    desc: insert data into some of the columns
+ sqlDialect: ["HybridSQL"]
+ inputs:
+ -
+ create: |
+ create table {0} (c1 string, c3 int, c4 bigint, c5 float, c6 double, c7 timestamp, c8 date,
+ index(key=(c1), ts=c7));
+ insert: insert into {0} (c1,c4,c7) values('aa',2,1590738989000L);
+ sql: select * from {0};
+ expect:
+ columns : ["c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"]
+ rows:
+ - ["aa",null,2,null,null,1590738989000,null]
+ -
+ id: 3
+    desc: insert null into columns without a NOT NULL constraint
+ sqlDialect: ["HybridSQL"]
+ inputs:
+ -
+ create: |
+ create table {0} (c1 string, c3 int, c4 bigint, c5 float, c6 double, c7 timestamp, c8 date,
+ index(key=(c1), ts=c7));
+ insert: insert into {0} (c1,c3,c4,c5,c6,c7,c8) values('aa',2,NULL,NULL,NULL,1590738989000L,NULL);
+ sql: select * from {0};
+ expect:
+ columns : ["c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"]
+ rows:
+ - ["aa",2,null,null,null,1590738989000,null]
+ -
+ id: 4
+    desc: insert null into a NOT NULL column
+ sqlDialect: ["HybridSQL"]
+ inputs:
+ -
+ create: |
+ create table {0} ( c1 string NOT NULL, c2 timestamp,
+ index(key=(c1), ts=c2));
+    sql: insert into {0} (c1,c2) values(NULL,1590738989000L);
+ expect:
+ success: false
+ -
+ id: 5
+    desc: insert an empty string into a string column
+ mode: cli-unsupport
+ sqlDialect: ["HybridSQL"]
+ inputs:
+ -
+ create: |
+ create table {0} ( c1 string NOT NULL, c2 timestamp,
+ index(key=(c1), ts=c2));
+ insert: insert into {0} (c1,c2) values('',1590738989000L);
+ sql: select * from {0};
+ expect:
+ columns : ["c1 string","c2 timestamp"]
+ rows:
+ - ["",1590738989000]
+ -
+ id: 6
+    desc: table name does not exist
+ sqlDialect: ["HybridSQL"]
+ inputs:
+ -
+ create: |
+ create table {0} ( c1 string NOT NULL, c2 timestamp,
+ index(key=(c1), ts=c2));
+ sql: insert into {0}1 (c1,c2) values('aa',1590738989000L);
+ expect:
+ success: false
+ -
+ id: 7
+    desc: column name does not exist
+ sqlDialect: ["HybridSQL"]
+ inputs:
+ -
+ create: |
+ create table {0} ( c1 string NOT NULL, c2 timestamp,
+ index(key=(c1), ts=c2));
+ sql: insert into {0} (c1,c3) values('aa',1590738989000L);
+ expect:
+ success: false
+ -
+ id: 8
+    desc: NOT NULL column not provided
+ sqlDialect: ["HybridSQL"]
+ inputs:
+ -
+ create: |
+ create table {0} ( c1 string, c2 timestamp, c3 string NOT NULL,
+ index(key=(c1), ts=c2));
+ sql: insert into {0} (c1,c2) values('aa',1590738989000L);
+ expect:
+ success: false
+ -
+ id: 9
+    desc: inserted string value without quotes
+ sqlDialect: ["HybridSQL"]
+ inputs:
+ -
+ create: |
+ create table {0} ( c1 string, c2 timestamp,
+ index(key=(c1), ts=c2));
+ sql: insert into {0} (c1,c2) values(aa,1590738989000L);
+ expect:
+ success: false
+ -
+ id: 10
+    desc: rows with the same timestamp
+ mode: disk-unsupport
+ inputs:
+ -
+ columns : ["c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"]
+ indexs: ["index1:c1:c7"]
+ rows:
+ - ["aa",1,2,3,1.1,2.1,1590738989000,"2020-05-01"]
+ - ["aa",2,2,3,1.1,2.1,1590738989000,"2020-05-01"]
+ - ["aa",3,2,3,1.1,2.1,1590738989000,"2020-05-01"]
+ - ["aa",4,2,3,1.1,2.1,1590738989000,"2020-05-01"]
+ - ["aa",5,2,3,1.1,2.1,1590738989000,"2020-05-01"]
+ - ["aa",6,2,3,1.1,2.1,1590738989000,"2020-05-01"]
+ - ["aa",7,2,3,1.1,2.1,1590738989000,"2020-05-01"]
+ - ["aa",8,2,3,1.1,2.1,1590738989000,"2020-05-01"]
+ - ["aa",9,2,3,1.1,2.1,1590738989000,"2020-05-01"]
+ - ["aa",10,2,3,1.1,2.1,1590738989000,"2020-05-01"]
+ - ["aa",11,2,3,1.1,2.1,1590738989000,"2020-05-01"]
+ - ["aa",12,2,3,1.1,2.1,1590738989000,"2020-05-01"]
+ - ["aa",13,2,3,1.1,2.1,1590738989000,"2020-05-01"]
+ - ["aa",14,2,3,1.1,2.1,1590738989000,"2020-05-01"]
+ - ["aa",15,2,3,1.1,2.1,1590738989000,"2020-05-01"]
+ - ["aa",16,2,3,1.1,2.1,1590738989000,"2020-05-01"]
+ - ["aa",17,2,3,1.1,2.1,1590738989000,"2020-05-01"]
+ - ["aa",18,2,3,1.1,2.1,1590738989000,"2020-05-01"]
+ - ["aa",19,2,3,1.1,2.1,1590738989000,"2020-05-01"]
+ - ["aa",20,2,3,1.1,2.1,1590738989000,"2020-05-01"]
+ sql: select * from {0};
+ expect:
+ columns : ["c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"]
+ order: c2
+ rows:
+ - [ "aa",1,2,3,1.1,2.1,1590738989000,"2020-05-01" ]
+ - [ "aa",2,2,3,1.1,2.1,1590738989000,"2020-05-01" ]
+ - [ "aa",3,2,3,1.1,2.1,1590738989000,"2020-05-01" ]
+ - [ "aa",4,2,3,1.1,2.1,1590738989000,"2020-05-01" ]
+ - [ "aa",5,2,3,1.1,2.1,1590738989000,"2020-05-01" ]
+ - [ "aa",6,2,3,1.1,2.1,1590738989000,"2020-05-01" ]
+ - [ "aa",7,2,3,1.1,2.1,1590738989000,"2020-05-01" ]
+ - [ "aa",8,2,3,1.1,2.1,1590738989000,"2020-05-01" ]
+ - [ "aa",9,2,3,1.1,2.1,1590738989000,"2020-05-01" ]
+ - [ "aa",10,2,3,1.1,2.1,1590738989000,"2020-05-01" ]
+ - [ "aa",11,2,3,1.1,2.1,1590738989000,"2020-05-01" ]
+ - [ "aa",12,2,3,1.1,2.1,1590738989000,"2020-05-01" ]
+ - [ "aa",13,2,3,1.1,2.1,1590738989000,"2020-05-01" ]
+ - [ "aa",14,2,3,1.1,2.1,1590738989000,"2020-05-01" ]
+ - [ "aa",15,2,3,1.1,2.1,1590738989000,"2020-05-01" ]
+ - [ "aa",16,2,3,1.1,2.1,1590738989000,"2020-05-01" ]
+ - [ "aa",17,2,3,1.1,2.1,1590738989000,"2020-05-01" ]
+ - [ "aa",18,2,3,1.1,2.1,1590738989000,"2020-05-01" ]
+ - [ "aa",19,2,3,1.1,2.1,1590738989000,"2020-05-01" ]
+ - [ "aa",20,2,3,1.1,2.1,1590738989000,"2020-05-01" ]
+ -
+ id: 11
+    desc: index column is null
+ inputs:
+ -
+ columns : ["c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"]
+ indexs: ["index1:c1:c7"]
+ rows:
+ - [ null,1,2,3,1.1,2.1,1590738989000,"2020-05-01" ]
+ sql: select * from {0};
+ expect:
+ columns : ["c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"]
+ rows:
+ - [null,1,2,3,1.1,2.1,1590738989000,"2020-05-01"]
+ -
+ id: 12
+    desc: ts column is null
+ inputs:
+ -
+ columns : ["c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"]
+ indexs: ["index1:c1:c7"]
+ sql: insert into {0} values('aa',1,2,3,1.1,2.1,null,'2020-05-01');
+ expect:
+ success: false
+
diff --git a/cases/integration_test/dml/test_insert_prepared.yaml b/cases/integration_test/dml/test_insert_prepared.yaml
new file mode 100644
index 00000000000..f43f5662094
--- /dev/null
+++ b/cases/integration_test/dml/test_insert_prepared.yaml
@@ -0,0 +1,280 @@
+# Copyright 2021 4Paradigm
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+db: test_zw
+debugs: []
+version: 0.5.0
+cases:
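+# note: the '?' placeholders in each insert statement are presumably bound from the accompanying rows list through a prepared statement.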
+ -
+ id: 0
+    desc: insert data of all types
+ inputs:
+ -
+ columns : ["c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"]
+ indexs: ["index1:c1:c7"]
+ rows:
+ - ["aa",1,2,3,1.1,2.1,1590738989000,"2020-05-01"]
+ sql: select * from {0};
+ expect:
+ columns : ["c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"]
+ rows:
+ - ["aa",1,2,3,1.1,2.1,1590738989000,"2020-05-01"]
+ -
+ id: 1
+    desc: insert data into all columns
+ sqlDialect: ["HybridSQL"]
+ inputs:
+ -
+ create: |
+ create table {0} (c1 string, c3 int, c4 bigint, c5 float, c6 double, c7 timestamp, c8 date,
+ index(key=(c1), ts=c7));
+ insert: insert into {0} values(?,?,?,?,?,?,?);
+ rows:
+ - ["aa",2,3,1.1,2.1,1590738989000,"2020-05-01"]
+ sql: select * from {0};
+ expect:
+ columns : ["c1 string", "c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"]
+ rows:
+ - ["aa",2,3,1.1,2.1,1590738989000,"2020-05-01"]
+ -
+ id: 2
+    desc: insert data into some of the columns
+ sqlDialect: ["HybridSQL"]
+ inputs:
+ -
+ create: |
+ create table {0} (c1 string, c3 int, c4 bigint, c5 float, c6 double, c7 timestamp, c8 date,
+ index(key=(c1), ts=c7));
+ insert: insert into {0} (c1,c4,c7) values(?,?,?);
+ rows:
+ - ["aa",2,1590738989000]
+ sql: select * from {0};
+ expect:
+ columns : ["c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"]
+ rows:
+ - ["aa",null,2,null,null,1590738989000,null]
+ -
+ id: 3
+    desc: insert null into columns without a NOT NULL constraint
+ sqlDialect: ["HybridSQL"]
+ inputs:
+ -
+ create: |
+ create table {0} (c1 string, c3 int, c4 bigint, c5 float, c6 double, c7 timestamp, c8 date,
+ index(key=(c1), ts=c7));
+ insert: insert into {0} (c1,c3,c4,c5,c6,c7,c8) values(?,?,?,?,?,?,?);
+ rows:
+ - ["aa",2,null,null,null,1590738989000,null]
+ sql: select * from {0};
+ expect:
+ columns : ["c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"]
+ rows:
+ - ["aa",2,null,null,null,1590738989000,null]
+ -
+ id: 4
+    desc: insert an empty string into a string column
+ sqlDialect: ["HybridSQL"]
+ inputs:
+ -
+ create: |
+ create table {0} ( c1 string NOT NULL, c2 timestamp,
+ index(key=(c1), ts=c2));
+ insert: insert into {0} (c1,c2) values(?,?);
+ rows:
+ - ["",1590738989000]
+ sql: select * from {0};
+ expect:
+ columns : ["c1 string","c2 timestamp"]
+ rows:
+ - ["",1590738989000]
+ -
+ id: 5
+    desc: rows with the same timestamp
+ mode: disk-unsupport
+ inputs:
+ -
+ columns : ["c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"]
+ indexs: ["index1:c1:c7"]
+ rows:
+ - ["aa",1,2,3,1.1,2.1,1590738989000,"2020-05-01"]
+ - ["aa",2,2,3,1.1,2.1,1590738989000,"2020-05-01"]
+ - ["aa",3,2,3,1.1,2.1,1590738989000,"2020-05-01"]
+ - ["aa",4,2,3,1.1,2.1,1590738989000,"2020-05-01"]
+ - ["aa",5,2,3,1.1,2.1,1590738989000,"2020-05-01"]
+ - ["aa",6,2,3,1.1,2.1,1590738989000,"2020-05-01"]
+ - ["aa",7,2,3,1.1,2.1,1590738989000,"2020-05-01"]
+ - ["aa",8,2,3,1.1,2.1,1590738989000,"2020-05-01"]
+ - ["aa",9,2,3,1.1,2.1,1590738989000,"2020-05-01"]
+ - ["aa",10,2,3,1.1,2.1,1590738989000,"2020-05-01"]
+ - ["aa",11,2,3,1.1,2.1,1590738989000,"2020-05-01"]
+ - ["aa",12,2,3,1.1,2.1,1590738989000,"2020-05-01"]
+ - ["aa",13,2,3,1.1,2.1,1590738989000,"2020-05-01"]
+ - ["aa",14,2,3,1.1,2.1,1590738989000,"2020-05-01"]
+ - ["aa",15,2,3,1.1,2.1,1590738989000,"2020-05-01"]
+ - ["aa",16,2,3,1.1,2.1,1590738989000,"2020-05-01"]
+ - ["aa",17,2,3,1.1,2.1,1590738989000,"2020-05-01"]
+ - ["aa",18,2,3,1.1,2.1,1590738989000,"2020-05-01"]
+ - ["aa",19,2,3,1.1,2.1,1590738989000,"2020-05-01"]
+ - ["aa",20,2,3,1.1,2.1,1590738989000,"2020-05-01"]
+ sql: select * from {0};
+ expect:
+ columns : ["c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"]
+ order: c2
+ rows:
+ - [ "aa",1,2,3,1.1,2.1,1590738989000,"2020-05-01" ]
+ - [ "aa",2,2,3,1.1,2.1,1590738989000,"2020-05-01" ]
+ - [ "aa",3,2,3,1.1,2.1,1590738989000,"2020-05-01" ]
+ - [ "aa",4,2,3,1.1,2.1,1590738989000,"2020-05-01" ]
+ - [ "aa",5,2,3,1.1,2.1,1590738989000,"2020-05-01" ]
+ - [ "aa",6,2,3,1.1,2.1,1590738989000,"2020-05-01" ]
+ - [ "aa",7,2,3,1.1,2.1,1590738989000,"2020-05-01" ]
+ - [ "aa",8,2,3,1.1,2.1,1590738989000,"2020-05-01" ]
+ - [ "aa",9,2,3,1.1,2.1,1590738989000,"2020-05-01" ]
+ - [ "aa",10,2,3,1.1,2.1,1590738989000,"2020-05-01" ]
+ - [ "aa",11,2,3,1.1,2.1,1590738989000,"2020-05-01" ]
+ - [ "aa",12,2,3,1.1,2.1,1590738989000,"2020-05-01" ]
+ - [ "aa",13,2,3,1.1,2.1,1590738989000,"2020-05-01" ]
+ - [ "aa",14,2,3,1.1,2.1,1590738989000,"2020-05-01" ]
+ - [ "aa",15,2,3,1.1,2.1,1590738989000,"2020-05-01" ]
+ - [ "aa",16,2,3,1.1,2.1,1590738989000,"2020-05-01" ]
+ - [ "aa",17,2,3,1.1,2.1,1590738989000,"2020-05-01" ]
+ - [ "aa",18,2,3,1.1,2.1,1590738989000,"2020-05-01" ]
+ - [ "aa",19,2,3,1.1,2.1,1590738989000,"2020-05-01" ]
+ - [ "aa",20,2,3,1.1,2.1,1590738989000,"2020-05-01" ]
+ -
+ id: 6
+    desc: date at the start of the year
+ inputs:
+ -
+ columns : ["c1 string","c2 smallint","c3 int","c7 timestamp","c8 date"]
+ indexs: ["index1:c1:c7"]
+ rows:
+ - ["aa",1,2,1590738989000,"2020-01-01"]
+ sql: select * from {0};
+ expect:
+ columns : ["c1 string","c2 smallint","c3 int","c7 timestamp","c8 date"]
+ rows:
+ - ["aa",1,2,1590738989000,"2020-01-01"]
+ -
+ id: 7
+    desc: date at the end of the year
+ inputs:
+ -
+ columns : ["c1 string","c2 smallint","c3 int","c7 timestamp","c8 date"]
+ indexs: ["index1:c1:c7"]
+ rows:
+ - ["aa",1,2,1590738989000,"2020-12-31"]
+ sql: select * from {0};
+ expect:
+ columns : ["c1 string","c2 smallint","c3 int","c7 timestamp","c8 date"]
+ rows:
+ - ["aa",1,2,1590738989000,"2020-12-31"]
+ -
+ id: 8
+    desc: date at the start of the month
+ inputs:
+ -
+ columns : ["c1 string","c2 smallint","c3 int","c7 timestamp","c8 date"]
+ indexs: ["index1:c1:c7"]
+ rows:
+ - ["aa",1,2,1590738989000,"2020-12-01"]
+ sql: select * from {0};
+ expect:
+ columns : ["c1 string","c2 smallint","c3 int","c7 timestamp","c8 date"]
+ rows:
+ - ["aa",1,2,1590738989000,"2020-12-01"]
+ -
+ id: 9
+    desc: date at the end of the month
+ inputs:
+ -
+ columns : ["c1 string","c2 smallint","c3 int","c7 timestamp","c8 date"]
+ indexs: ["index1:c1:c7"]
+ rows:
+ - ["aa",1,2,1590738989000,"2020-11-30"]
+ sql: select * from {0};
+ expect:
+ columns : ["c1 string","c2 smallint","c3 int","c7 timestamp","c8 date"]
+ rows:
+ - ["aa",1,2,1590738989000,"2020-11-30"]
+ -
+ id: 10
+    desc: date at the end of February
+ inputs:
+ -
+ columns : ["c1 string","c2 smallint","c3 int","c7 timestamp","c8 date"]
+ indexs: ["index1:c1:c7"]
+ rows:
+ - ["aa",1,2,1590738989000,"2020-02-28"]
+ sql: select * from {0};
+ expect:
+ columns : ["c1 string","c2 smallint","c3 int","c7 timestamp","c8 date"]
+ rows:
+ - ["aa",1,2,1590738989000,"2020-02-28"]
+ -
+ id: 11
+    desc: date at the start of March
+ inputs:
+ -
+ columns : ["c1 string","c2 smallint","c3 int","c7 timestamp","c8 date"]
+ indexs: ["index1:c1:c7"]
+ rows:
+ - ["aa",1,2,1590738989000,"2020-03-01"]
+ sql: select * from {0};
+ expect:
+ columns : ["c1 string","c2 smallint","c3 int","c7 timestamp","c8 date"]
+ rows:
+ - ["aa",1,2,1590738989000,"2020-03-01"]
+ -
+ id: 12
+    desc: date 1970-01-01
+ inputs:
+ -
+ columns : ["c1 string","c2 smallint","c3 int","c7 timestamp","c8 date"]
+ indexs: ["index1:c1:c7"]
+ rows:
+ - ["aa",1,2,1590738989000,"1970-01-01"]
+ sql: select * from {0};
+ expect:
+ columns : ["c1 string","c2 smallint","c3 int","c7 timestamp","c8 date"]
+ rows:
+ - ["aa",1,2,1590738989000,"1970-01-01"]
+ -
+ id: 13
+    desc: date 1969-12-31
+ inputs:
+ -
+ columns : ["c1 string","c2 smallint","c3 int","c7 timestamp","c8 date"]
+ indexs: ["index1:c1:c7"]
+ rows:
+ - ["aa",1,2,1590738989000,"1969-12-31"]
+ sql: select * from {0};
+ expect:
+ columns : ["c1 string","c2 smallint","c3 int","c7 timestamp","c8 date"]
+ rows:
+ - ["aa",1,2,1590738989000,"1969-12-31"]
+ -
+ id: 14
+    desc: date 2020-03-30
+ inputs:
+ -
+ columns : ["c1 string","c2 smallint","c3 int","c7 timestamp","c8 date"]
+ indexs: ["index1:c1:c7"]
+ rows:
+ - ["aa",1,2,1590738989000,"2020-03-30"]
+ sql: select * from {0};
+ expect:
+ columns : ["c1 string","c2 smallint","c3 int","c7 timestamp","c8 date"]
+ rows:
+ - ["aa",1,2,1590738989000,"2020-03-30"]
diff --git a/cases/integration_test/ecosystem/test_kafka.yaml b/cases/integration_test/ecosystem/test_kafka.yaml
new file mode 100644
index 00000000000..a4852ae1938
--- /dev/null
+++ b/cases/integration_test/ecosystem/test_kafka.yaml
@@ -0,0 +1,25 @@
+# Copyright 2021 4Paradigm
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+db: test_zw
+debugs: []
+version: 0.5.0
+cases:
+ -
+ json: {"data":[{"c1":"aa","c2":1,"c3":2,"c4":3,"c5":1.1,"c6":2.2,"c7":1590738989000,"c8":1659512628000,"c9":true}],"type":"INSERT"}
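+    # the epoch-millisecond value 1659512628000 in c8 is expected to come back as the date "2022-08-03" in the query result.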
+ sql: select * from {table}
+ expect:
+ columns : ["c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"]
+ rows:
+        - ["aa",1,2,3,1.1,2.2,1590738989000,"2022-08-03",true]
\ No newline at end of file
diff --git a/cases/integration_test/expression/test_arithmetic.yaml b/cases/integration_test/expression/test_arithmetic.yaml
new file mode 100644
index 00000000000..340f5aa075d
--- /dev/null
+++ b/cases/integration_test/expression/test_arithmetic.yaml
@@ -0,0 +1,685 @@
+# Copyright 2021 4Paradigm
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+db: test_zw
+debugs: []
+version: 0.5.0
+sqlDialect: ["HybridSQL"]
+cases:
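+# note: d[i] placeholders in sql appear to be substituted with the entries of the i-th dataProvider list; expectProvider maps each substitution index to its expected columns/rows.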
+ - id: 0
+    desc: "smallint_[%/MOD/*]_integer_correct"
+ inputs:
+ -
+ columns : ["id bigint","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"]
+ indexs: ["index1:id:c7"]
+ rows:
+ - [1,"aa",30,30,30,30.0,30.0,1590738989000,"2020-05-01",false]
+ -
+ columns : ["id bigint","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"]
+ indexs: ["index1:id:c7"]
+ rows:
+ - [1,"bb",0,20,30,11.1,12.1,1590738989001,"2020-05-02",true]
+ dataProvider:
+ - ["%","MOD","*","-","/"]
+ sql: select {0}.c2 d[0] {1}.c2 as b2,{0}.c2 d[0] {1}.c3 as b3,{0}.c2 d[0] {1}.c4 as b4,{0}.c2 d[0] {1}.c5 as b5,{0}.c2 d[0] {1}.c6 as b6,{0}.c2 d[0] {1}.c9 as b9 from {0} last join {1} ORDER BY {1}.c7 on {0}.id={1}.id;
+ expect:
+ columns: ["b2 smallint","b3 int","b4 bigint","b5 float","b6 double","b9 smallint"]
+ expectProvider:
+ 0:
+ rows:
+ - [NULL,10,0,7.8,5.8,0]
+ 1:
+ rows:
+ - [NULL,10,0,7.8,5.8,0]
+ 2:
+ rows:
+ - [0,600,900,333,363,30]
+ 3:
+ rows:
+ - [30,10,0,18.9,17.9,29]
+ 4:
+ columns: ["b2 double","b3 double","b4 double","b5 double","b6 double","b9 double"]
+ rows:
+ - [NULL,1.5,1.0,2.7027026098198896,2.479338842975207,30.0]
+ - id: 1
+    desc: "int_arithmetic_integer_correct"
+ inputs:
+ -
+ columns : ["id bigint","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"]
+ indexs: ["index1:id:c7"]
+ rows:
+ - [1,"aa",30,30,30,30.0,30.0,1590738989000,"2020-05-01",false]
+ -
+ columns : ["id bigint","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"]
+ indexs: ["index1:id:c7"]
+ rows:
+ - [1,"bb",0,20,30,11.1,12.1,1590738989001,"2020-05-02",true]
+ dataProvider:
+ - ["%","MOD","*","-","/"]
+ sql: select {0}.c3 d[0] {1}.c2 as b2,{0}.c3 d[0] {1}.c3 as b3,{0}.c3 d[0] {1}.c4 as b4,{0}.c3 d[0] {1}.c5 as b5,{0}.c3 d[0] {1}.c6 as b6,{0}.c3 d[0] {1}.c9 as b9 from {0} last join {1} ORDER BY {1}.c7 on {0}.id={1}.id;
+ expect:
+ columns: ["b2 int","b3 int","b4 bigint","b5 float","b6 double","b9 int"]
+ expectProvider:
+ 0:
+ rows:
+ - [NULL,10,0,7.8,5.8,0]
+ 1:
+ rows:
+ - [NULL,10,0,7.8,5.8,0]
+ 2:
+ rows:
+ - [0,600,900,333,363,30]
+ 3:
+ rows:
+ - [30,10,0,18.9,17.9,29]
+ 4:
+ columns: ["b2 double","b3 double","b4 double","b5 double","b6 double","b9 double"]
+ rows:
+ - [NULL,1.5,1.0,2.7027026098198896,2.479338842975207,30.0]
+ - id: 2
+    desc: "bigint_arithmetic_integer_correct"
+ inputs:
+ -
+ columns : ["id bigint","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"]
+ indexs: ["index1:id:c7"]
+ rows:
+ - [1,"aa",30,30,30,30.0,30.0,1590738989000,"2020-05-01",false]
+ -
+ columns : ["id bigint","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"]
+ indexs: ["index1:id:c7"]
+ rows:
+ - [1,"bb",0,20,30,11.1,12.1,1590738989001,"2020-05-02",true]
+ dataProvider:
+ - ["%","MOD","*","-","/"]
+ sql: select {0}.c4 d[0] {1}.c2 as b2,{0}.c4 d[0] {1}.c3 as b3,{0}.c4 d[0] {1}.c4 as b4,{0}.c4 d[0] {1}.c5 as b5,{0}.c4 d[0] {1}.c6 as b6,{0}.c4 d[0] {1}.c9 as b9 from {0} last join {1} ORDER BY {1}.c7 on {0}.id={1}.id;
+ expect:
+ columns: ["b2 bigint","b3 bigint","b4 bigint","b5 float","b6 double","b9 bigint"]
+ expectProvider:
+ 0:
+ rows:
+ - [NULL,10,0,7.8,5.8,0]
+ 1:
+ rows:
+ - [NULL,10,0,7.8,5.8,0]
+ 2:
+ rows:
+ - [0,600,900,333,363,30]
+ 3:
+ rows:
+ - [30,10,0,18.9,17.9,29]
+ 4:
+ columns: ["b2 double","b3 double","b4 double","b5 double","b6 double","b9 double"]
+ rows:
+ - [NULL,1.5,1.0,2.7027026098198896,2.479338842975207,30.0]
+ - id: 3
+    desc: "float_arithmetic_integer_correct"
+ inputs:
+ -
+ columns : ["id bigint","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"]
+ indexs: ["index1:id:c7"]
+ rows:
+ - [1,"aa",30,30,30,30.0,30.0,1590738989000,"2020-05-01",false]
+ -
+ columns : ["id bigint","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"]
+ indexs: ["index1:id:c7"]
+ rows:
+ - [1,"bb",0,20,30,11.1,12.1,1590738989001,"2020-05-02",true]
+ dataProvider:
+ - ["%","MOD","*","-","/"]
+ sql: select {0}.c5 d[0] {1}.c2 as b2,{0}.c5 d[0] {1}.c3 as b3,{0}.c5 d[0] {1}.c4 as b4,{0}.c5 d[0] {1}.c5 as b5,{0}.c5 d[0] {1}.c6 as b6,{0}.c5 d[0] {1}.c9 as b9 from {0} last join {1} ORDER BY {1}.c7 on {0}.id={1}.id;
+ expect:
+ columns: ["b2 float","b3 float","b4 float","b5 float","b6 double","b9 float"]
+ expectProvider:
+ 0:
+ rows:
+ - [NULL,10,0,7.8,5.8,0]
+ 1:
+ rows:
+ - [NULL,10,0,7.8,5.8,0]
+ 2:
+ rows:
+ - [0,600,900,333,363,30]
+ 3:
+ rows:
+ - [30,10,0,18.9,17.9,29]
+ 4:
+ columns: ["b2 double","b3 double","b4 double","b5 double","b6 double","b9 double"]
+ rows:
+ - [NULL,1.5,1.0,2.7027026098198896,2.479338842975207,30.0]
+ - id: 4
+    desc: "double_arithmetic_integer_correct"
+ inputs:
+ -
+ columns : ["id bigint","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"]
+ indexs: ["index1:id:c7"]
+ rows:
+ - [1,"aa",30,30,30,30.0,30.0,1590738989000,"2020-05-01",false]
+ -
+ columns : ["id bigint","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"]
+ indexs: ["index1:id:c7"]
+ rows:
+ - [1,"bb",0,20,30,11.1,12.1,1590738989001,"2020-05-02",true]
+ dataProvider:
+ - ["%","MOD","*","-","/"]
+ sql: select {0}.c6 d[0] {1}.c2 as b2,{0}.c6 d[0] {1}.c3 as b3,{0}.c6 d[0] {1}.c4 as b4,{0}.c6 d[0] {1}.c5 as b5,{0}.c6 d[0] {1}.c6 as b6,{0}.c6 d[0] {1}.c9 as b9 from {0} last join {1} ORDER BY {1}.c7 on {0}.id={1}.id;
+ expect:
+ columns: ["b2 double","b3 double","b4 double","b5 double","b6 double","b9 double"]
+ expectProvider:
+ 0:
+ rows:
+ - [NULL,10,0,7.7999992370605469,5.8,0]
+ 1:
+ rows:
+ - [NULL,10,0,7.7999992370605469,5.8,0]
+ 2:
+ rows:
+ - [0,600,900,333.0000114440918,363,30]
+ 3:
+ rows:
+ - [30,10,0,18.899999618530273,17.9,29]
+ 4:
+ columns: ["b2 double","b3 double","b4 double","b5 double","b6 double","b9 double"]
+ rows:
+ - [NULL,1.5,1.0,2.7027026098198896,2.479338842975207,30.0]
+ - id: 5
+    desc: "+_correct"
+ inputs:
+ -
+ columns : ["id bigint","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"]
+ indexs: ["index1:id:c7"]
+ rows:
+ - [1,"aa",30,30,30,30.0,30.0,1590738989000,"2020-05-01",false]
+ -
+ columns : ["id bigint","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"]
+ indexs: ["index1:id:c7"]
+ rows:
+ - [1,"bb",0,20,30,11.1,12.1,1590738989001,"2020-05-02",true]
+ dataProvider:
+ - ["{0}.c2","{0}.c3","{0}.c4","{0}.c9"]
+ sql: select d[0] + {1}.c2 as b2,d[0] + {1}.c3 as b3,d[0] + {1}.c4 as b4,d[0] + {1}.c5 as b5,d[0] + {1}.c6 as b6,d[0] + {1}.c7 as b7,d[0] + {1}.c9 as b9 from {0} last join {1} ORDER BY {1}.c7 on {0}.id={1}.id;
+ expectProvider:
+ 0:
+ columns: ["b2 smallint","b3 int","b4 bigint","b5 float","b6 double","b7 timestamp","b9 smallint"]
+ rows:
+ - [30,50,60,41.1,42.1,1590738989031,31]
+ 1:
+ columns: ["b2 int","b3 int","b4 bigint","b5 float","b6 double","b7 timestamp","b9 int"]
+ rows:
+ - [30,50,60,41.1,42.1,1590738989031,31]
+ 2:
+ columns: ["b2 bigint","b3 bigint","b4 bigint","b5 float","b6 double","b7 timestamp","b9 bigint"]
+ rows:
+ - [30,50,60,41.1,42.1,1590738989031,31]
+ 3:
+ columns: ["b2 smallint","b3 int","b4 bigint","b5 float","b6 double","b7 timestamp","b9 bool"]
+ rows:
+ - [0,20,30,11.1,12.1,1590738989001,true]
+ - id: 6
+    desc: "floating-point_+_correct"
+ inputs:
+ -
+ columns : ["id bigint","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"]
+ indexs: ["index1:id:c7"]
+ rows:
+ - [1,"aa",30,30,30,30.0,30.0,1590738989000,"2020-05-01",false]
+ -
+ columns : ["id bigint","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"]
+ indexs: ["index1:id:c7"]
+ rows:
+ - [1,"bb",0,20,30,11.1,12.1,1590738989001,"2020-05-02",true]
+ dataProvider:
+ - ["{0}.c5","{0}.c6"]
+ sql: select d[0] + {1}.c2 as b2,d[0] + {1}.c3 as b3,d[0] + {1}.c4 as b4,d[0] + {1}.c5 as b5,d[0] + {1}.c6 as b6,d[0] + {1}.c9 as b9 from {0} last join {1} ORDER BY {1}.c7 on {0}.id={1}.id;
+ expectProvider:
+ 0:
+ columns: ["b2 float","b3 float","b4 float","b5 float","b6 double","b9 float"]
+ rows:
+ - [30,50,60,41.100000381469727,42.1,31]
+ 1:
+ columns: ["b2 double","b3 double","b4 double","b5 double","b6 double","b9 double"]
+ rows:
+ - [30,50,60,41.100000381469727,42.1,31]
+ - id: 7
+    desc: "timestamp_+_correct"
+ inputs:
+ -
+ columns : ["id bigint","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"]
+ indexs: ["index1:id:c7"]
+ rows:
+ - [1,"aa",30,30,30,30.0,30.0,1590738989000,"2020-05-01",false]
+ -
+ columns : ["id bigint","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"]
+ indexs: ["index1:id:c7"]
+ rows:
+ - [1,"bb",0,20,30,11.1,12.1,1590738989001,"2020-05-02",true]
+ dataProvider:
+ - ["{0}.c7"]
+ sql: select d[0] + {1}.c2 as b2,d[0] + {1}.c3 as b3,d[0] + {1}.c4 as b4,d[0] + {1}.c7 as b7,d[0] + {1}.c9 as b9 from {0} last join {1} ORDER BY {1}.c7 on {0}.id={1}.id;
+ expectProvider:
+ 0:
+ columns: ["b2 timestamp","b3 timestamp","b4 timestamp","b7 timestamp","b9 timestamp"]
+ rows:
+ - [1590738989000,1590738989020,1590738989030,3181477978001,1590738989001]
+ - id: 8
+    desc: "timestamp_-_correct"
+ inputs:
+ -
+ columns : ["id bigint","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"]
+ indexs: ["index1:id:c7"]
+ rows:
+ - [1,"aa",30,30,30,30.0,30.0,1590738989000,"2020-05-01",false]
+ -
+ columns : ["id bigint","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"]
+ indexs: ["index1:id:c7"]
+ rows:
+ - [1,"bb",0,20,30,11.1,12.1,1590738989001,"2020-05-02",true]
+ dataProvider:
+ - ["-"]
+ sql: select {0}.c7 d[0] {1}.c2 as b2,{0}.c7 d[0] {1}.c3 as b3,{0}.c7 d[0] {1}.c4 as b4,{0}.c7 d[0] {1}.c9 as b9 from {0} last join {1} ORDER BY {1}.c7 on {0}.id={1}.id;
+ expectProvider:
+ 0:
+ columns: ["b2 timestamp","b3 timestamp","b4 timestamp","b9 timestamp"]
+ rows:
+ - [1590738989000,1590738988980,1590738988970,1590738988999]
+ - id: 9
+    desc: "integer_[%MOD*-/]_various-types_error"
+ level: 5
+ inputs:
+ -
+ columns : ["id bigint","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"]
+ indexs: ["index1:id:c7"]
+ rows:
+ - [1,"aa",10,10,10,10.0,10.0,1590738989000,"2020-05-01",false]
+ -
+ columns : ["id bigint","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"]
+ indexs: ["index1:id:c7"]
+ rows:
+ - [1,"bb",10,20,30,11.1,12.1,1590738989001,"2020-05-02",true]
+ dataProvider:
+ - ["%","MOD","*","-","/"]
+ - ["{0}.c2","{0}.c3","{0}.c4","{0}.c5","{0}.c6","{0}.c9"]
+ - ["{1}.c7","{1}.c8","{1}.c1"]
+ sql: select d[1] d[0] d[2] as b1 from {0} last join {1} ORDER BY {1}.c7 on {0}.id={1}.id;
+ expect:
+ success: false
+ - id: 10
+    desc: "integer_+_various-types_error"
+ level: 5
+ inputs:
+ -
+ columns : ["id bigint","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"]
+ indexs: ["index1:id:c7"]
+ rows:
+ - [1,"aa",10,10,10,10.0,10.0,1590738989000,"2020-05-01",false]
+ -
+ columns : ["id bigint","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"]
+ indexs: ["index1:id:c7"]
+ rows:
+ - [1,"bb",10,20,30,11.1,12.1,1590738989001,"2020-05-02",true]
+ dataProvider:
+ - ["+"]
+ - ["{0}.c2","{0}.c3","{0}.c4","{0}.c5","{0}.c6","{0}.c9"]
+ - ["{1}.c8","{1}.c1"]
+ sql: select d[1] d[0] d[2] as b1 from {0} last join {1} ORDER BY {1}.c7 on {0}.id={1}.id;
+ expect:
+ success: false
+ - id: 11
+    desc: "various-types_[%MOD*/]_various-types_error"
+ level: 5
+ inputs:
+ -
+ columns : ["id bigint","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"]
+ indexs: ["index1:id:c7"]
+ rows:
+ - [1,"aa",10,10,10,10.0,10.0,1590738989000,"2020-05-01",false]
+ -
+ columns : ["id bigint","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"]
+ indexs: ["index1:id:c7"]
+ rows:
+ - [1,"bb",10,20,30,11.1,12.1,1590738989001,"2020-05-02",true]
+ dataProvider:
+ - ["%","MOD","*","/"]
+ - ["{0}.c7","{0}.c8","{0}.c1"]
+ - ["{1}.c1","{1}.c2","{1}.c3","{1}.c4","{1}.c5","{1}.c6","{1}.c7","{1}.c8","{1}.c9"]
+ sql: select d[1] d[0] d[2] as b1 from {0} last join {1} ORDER BY {1}.c7 on {0}.id={1}.id;
+ expect:
+ success: false
+ - id: 12
+    desc: "timestamp_-_various-types_error"
+ level: 5
+ inputs:
+ -
+ columns : ["id bigint","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"]
+ indexs: ["index1:id:c7"]
+ rows:
+ - [1,"aa",10,10,10,10.0,10.0,1590738989000,"2020-05-01",false]
+ -
+ columns : ["id bigint","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"]
+ indexs: ["index1:id:c7"]
+ rows:
+ - [1,"bb",10,20,30,11.1,12.1,1590738989001,"2020-05-02",true]
+ dataProvider:
+ - ["-"]
+ - ["{0}.c7"]
+ - ["{1}.c1","{1}.c7","{1}.c8"]
+ sql: select d[1] d[0] d[2] as b1 from {0} last join {1} ORDER BY {1}.c7 on {0}.id={1}.id;
+ expect:
+ success: false
+ - id: 13
+    desc: "timestamp_+_various-types_error"
+ level: 5
+ inputs:
+ -
+ columns : ["id bigint","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"]
+ indexs: ["index1:id:c7"]
+ rows:
+ - [1,"aa",10,10,10,10.0,10.0,1590738989000,"2020-05-01",false]
+ -
+ columns : ["id bigint","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"]
+ indexs: ["index1:id:c7"]
+ rows:
+ - [1,"bb",10,20,30,11.1,12.1,1590738989001,"2020-05-02",true]
+ dataProvider:
+ - ["+"]
+ - ["{0}.c7"]
+ - ["{1}.c1","{1}.c8"]
+ sql: select d[1] d[0] d[2] as b1 from {0} last join {1} ORDER BY {1}.c7 on {0}.id={1}.id;
+ expect:
+ success: false
+ - id: 14
+    desc: "date/string_[+-]_various-types_error"
+ level: 5
+ inputs:
+ -
+ columns : ["id bigint","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"]
+ indexs: ["index1:id:c7"]
+ rows:
+ - [1,"aa",10,10,10,10.0,10.0,1590738989000,"2020-05-01",false]
+ -
+ columns : ["id bigint","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"]
+ indexs: ["index1:id:c7"]
+ rows:
+ - [1,"bb",10,20,30,11.1,12.1,1590738989001,"2020-05-02",true]
+ dataProvider:
+ - ["+","-"]
+ - ["{0}.c8","{0}.c1"]
+ - ["{1}.c1","{1}.c2","{1}.c3","{1}.c4","{1}.c5","{1}.c6","{1}.c7","{1}.c8","{1}.c9"]
+ sql: select d[1] d[0] d[2] as b1 from {0} last join {1} ORDER BY {1}.c7 on {0}.id={1}.id;
+ expect:
+ success: false
+ - id: 15
+    desc: "-_integer_correct"
+ inputs:
+ -
+ columns : ["id bigint","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"]
+ indexs: ["index1:id:c7"]
+ rows:
+ - [1,"aa",30,-30,30,30.0,30.0,1590738989000,"2020-05-01",true]
+ - [2,"aa",30,-30,30,30.0,30.0,1590738989000,"2020-05-01",false]
+ sql: select id, - {0}.c2 as b2,- {0}.c3 as b3,- {0}.c4 as b4,- {0}.c5 as b5,- {0}.c6 as b6,- {0}.c9 as b9 from {0};
+ expect:
+ order: id
+ columns: ["id bigint", "b2 smallint","b3 int","b4 bigint","b5 float","b6 double","b9 bool"]
+ rows:
+ - [1,-30,30,-30,-30,-30,true]
+ - [2,-30,30,-30,-30,-30,false]
+ - id: 16
+    desc: "-_other-types_error"
+ level: 5
+ inputs:
+ -
+ columns : ["id bigint","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"]
+ indexs: ["index1:id:c7"]
+ rows:
+ - [1,"aa",30,-30,30,30.0,30.0,1590738989000,"2020-05-01",true]
+          - [2,"aa",30,-30,30,30.0,30.0,1590738989000,"2020-05-01",false]
+ sql: select - d[0] as b2 from {0};
+ dataProvider:
+ - ["{0}.c7","{0}.c8","{0}.c1"]
+ expect:
+ success: false
+ - id: 17
+    desc: "int_DIV_int_correct"
+ inputs:
+ -
+ columns : ["id bigint","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"]
+ indexs: ["index1:id:c7"]
+ rows:
+ - [1,"aa",30,30,30,30.0,30.0,1590738989000,"2020-05-01",false]
+ -
+ columns : ["id bigint","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"]
+ indexs: ["index1:id:c7"]
+ rows:
+ - [1,"bb",0,20,30,11.1,12.1,1590738989001,"2020-05-02",false]
+ dataProvider:
+ - ["{0}.c2","{0}.c3","{0}.c4","{0}.c9"]
+ sql: select d[0] DIV {1}.c2 as b2,d[0] DIV {1}.c3 as b3,d[0] DIV {1}.c4 as b4,d[0] DIV {1}.c9 as b9 from {0} last join {1} ORDER BY {1}.c7 on {0}.id={1}.id;
+ expectProvider:
+ 0:
+ columns: ["b2 smallint","b3 int","b4 bigint","b9 smallint"]
+ rows:
+ - [null,1,1,null]
+ 1:
+ columns: ["b2 int","b3 int","b4 bigint","b9 int"]
+ rows:
+ - [null,1,1,null]
+ 2:
+ columns: ["b2 bigint","b3 bigint","b4 bigint","b9 bigint"]
+ rows:
+ - [null,1,1,null]
+ 3:
+ columns: ["b2 smallint","b3 int","b4 bigint","b9 bool"]
+ rows:
+ - [null,0,0,null]
+ - id: 18
+    desc: "int_DIV_various-types_error"
+ level: 5
+ inputs:
+ -
+ columns : ["id bigint","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"]
+ indexs: ["index1:id:c7"]
+ rows:
+ - [1,"aa",10,10,10,10.0,10.0,1590738989000,"2020-05-01",false]
+ -
+ columns : ["id bigint","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"]
+ indexs: ["index1:id:c7"]
+ rows:
+ - [1,"bb",10,20,30,11.1,12.1,1590738989001,"2020-05-02",true]
+ dataProvider:
+ - ["DIV"]
+ - ["{0}.c2","{0}.c3","{0}.c4","{0}.c9"]
+ - ["{1}.c1","{1}.c5","{1}.c6","{1}.c7","{1}.c8"]
+ sql: select d[1] d[0] d[2] as b1 from {0} last join {1} ORDER BY {1}.c7 on {0}.id={1}.id;
+ expect:
+ success: false
+ - id: 19
+    desc: "various-types_DIV_various-types_error"
+ level: 5
+ inputs:
+ -
+ columns : ["id bigint","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"]
+ indexs: ["index1:id:c7"]
+ rows:
+ - [1,"aa",10,10,10,10.0,10.0,1590738989000,"2020-05-01",false]
+ -
+ columns : ["id bigint","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"]
+ indexs: ["index1:id:c7"]
+ rows:
+ - [1,"bb",10,20,30,11.1,12.1,1590738989001,"2020-05-02",true]
+ dataProvider:
+ - ["DIV"]
+ - ["{1}.c1","{1}.c5","{1}.c6","{1}.c7","{1}.c8"]
+ - ["{1}.c1","{1}.c2","{1}.c3","{1}.c4","{1}.c5","{1}.c6","{1}.c7","{1}.c8","{1}.c9"]
+ sql: select d[1] d[0] d[2] as b1 from {0} last join {1} ORDER BY {1}.c7 on {0}.id={1}.id;
+ expect:
+ success: false
+
+  - id: 20
+    desc: arithmetic expressions return null when an operand is null
+ inputs:
+ - columns: ["c1 int16","c2 int32","c3 bigint",
+ "c4 float","c5 double", "c6 timestamp", "c7 bool", "nullcol int32"]
+ indexs: ["index1:c3:c6"]
+ rows:
+ - [1, 911, 1024, 3.14, 0.99, 1590738989000, true, NULL]
+ sql: select
+ c1 + nullcol as r1, c1 - nullcol as r2, c1 * nullcol as r3, c1 / nullcol as r4, c1 % nullcol as r5, c1 DIV nullcol as r6,
+ c2 + nullcol as r7, c2 - nullcol as r8, c2 * nullcol as r9, c2 / nullcol as r10, c2 % nullcol as r11, c2 DIV nullcol as r12,
+ c3 + nullcol as r13, c3 - nullcol as r14, c3 * nullcol as r15, c3 / nullcol as r16, c3 % nullcol as r17, c3 DIV nullcol as r18,
+ c4 + nullcol as r19, c4 - nullcol as r20, c4 * nullcol as r21, c4 / nullcol as r22, c4 % nullcol as r23,
+ c5 + nullcol as r25, c5 - nullcol as r26, c5 * nullcol as r27, c5 / nullcol as r28, c5 % nullcol as r29,
+ year(c6) + nullcol as r31, year(c6) - nullcol as r32, year(c6) * nullcol as r33, year(c6) / nullcol as r34, year(c6) % nullcol as r35, year(c6) DIV nullcol as r36,
+ -nullcol as r37,
+ c7 + nullcol as r38, c7 - nullcol as r39, c7 * nullcol as r40, c7 / nullcol as r41, c7 % nullcol as r42
+ from {0};
+ expect:
+ columns: ["r1 int32", "r2 int32", "r3 int32", "r4 double", "r5 int32", "r6 int32",
+ "r7 int32", "r8 int32", "r9 int32", "r10 double", "r11 int32", "r12 int32",
+ "r13 bigint", "r14 bigint", "r15 bigint", "r16 double", "r17 bigint", "r18 bigint",
+ "r19 float", "r20 float", "r21 float", "r22 double", "r23 float",
+ "r25 double", "r26 double", "r27 double", "r28 double", "r29 double",
+ "r31 int32", "r32 int32", "r33 int32", "r34 double", "r35 int32", "r36 int32", "r37 int32",
+ "r38 int32", "r39 int32", "r40 int32","r41 double","r42 int32"]
+ rows:
+ - [NULL, NULL, NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL, NULL]
+  - id: 21
+    desc: arithmetic expressions return null when an operand is a const null (left operand)
+ inputs:
+ - columns: ["c1 int16","c2 int32","c3 bigint",
+ "c4 float","c5 double", "c6 timestamp", "c7 bool", "colnull int32"]
+ indexs: ["index1:c3:c6"]
+ rows:
+ - [1, 911, 1024, 3.14, 0.99, 1590738989000, true, NULL]
+ sql: select
+ NULL + c1 as r1, NULL - c1 as r2, NULL * c1 as r3, NULL / c1 as r4, NULL % c1 as r5, NULL DIV c1 as r6,
+ NULL + c2 as r7, NULL - c2 as r8, NULL * c2 as r9, NULL / c2 as r10, NULL % c2 as r11, NULL DIV c2 as r12,
+ NULL + c3 as r13, NULL - c3 as r14, NULL * c3 as r15, NULL / c3 as r16, NULL % c3 as r17, NULL DIV c3 as r18,
+ NULL + c4 as r19, NULL - c4 as r20, NULL * c4 as r21, NULL / c4 as r22, NULL % c4 as r23,
+ NULL + c5 as r25, NULL - c5 as r26, NULL * c5 as r27, NULL / c5 as r28, NULL % c5 as r29,
+ year(c6) + NULL as r31, year(c6) - NULL as r32, year(c6) * NULL as r33, year(c6) / NULL as r34, year(c6) % NULL as r35, year(c6) DIV NULL as r36,
+ NULL as r37,
+ c7 + NULL as r38, c7 - NULL as r39, c7 * NULL as r40, c7 / NULL as r41, c7 % NULL as r42
+ from {0};
+ expect:
+ columns: ["r1 int16", "r2 int16", "r3 int16", "r4 double", "r5 int16", "r6 int16",
+ "r7 int32", "r8 int32", "r9 int32", "r10 double", "r11 int32", "r12 int32",
+ "r13 bigint", "r14 bigint", "r15 bigint", "r16 double", "r17 bigint", "r18 bigint",
+ "r19 float", "r20 float", "r21 float", "r22 double", "r23 float",
+ "r25 double", "r26 double", "r27 double", "r28 double", "r29 double",
+ "r31 int32", "r32 int32", "r33 int32", "r34 double", "r35 int32", "r36 int32", "r37 bool",
+ "r38 bool", "r39 bool", "r40 bool","r41 double","r42 bool"]
+ rows:
+ - [NULL, NULL, NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL, NULL]
+ - id: bitwise_operators
+ desc: bitwise and/or/xor
+ inputs:
+ - columns: ["c1 int16","c2 int32","c3 bigint", "c6 timestamp"]
+ indexs: ["index1:c3:c6"]
+ rows:
+ - [3, 6, 12, 1590738989000]
+ dataProvider:
+ - ['&', '|', '^']
+ sql: |
+ select c1 d[0] c1 as r11, c1 d[0] c2 as r12, c1 d[0] c3 as r13, c2 d[0] c2 as r22, c2 d[0] c3 as r23, c3 d[0] c3 as r33 from {0};
+ expect:
+ columns: [ 'r11 int16', 'r12 int32', 'r13 bigint', 'r22 int32', 'r23 bigint', 'r33 bigint' ]
+ expectProvider:
+ 0:
+ rows:
+ - [ 3, 2, 0, 6, 4, 12 ]
+ 1:
+ rows:
+ - [ 3, 7, 15, 6, 14, 12 ]
+ 2:
+ rows:
+ - [ 0, 5, 15, 0, 10, 0 ]
+ - id: bitwise_operators_fail
+ desc: bitwise and/or/xor, fail on non-integral operands
+ inputs:
+ - columns: [ "c0 int", "c1 bool", "c2 float", "c3 double", "c4 string", "c5 date", "c6 timestamp" ]
+ indexs: ["index1:c0:c6"]
+ rows:
+ - [1, true, 1.0, 2.0, "abc", "2012-8-11", 1590738989000]
+ sql: |
+ select d[1] d[0] 10 as r1 from {0};
+ dataProvider:
+ - ['&', '|', '^']
+ - [ '{0}.c1', '{0}.c2', '{0}.c3', '{0}.c4', '{0}.c5', '{0}.c6' ]
+ expect:
+ success: false
+ - id: bitwise_operators_not
+ desc: bitwise not
+ inputs:
+ - columns: ["c1 int16","c2 int32","c3 bigint", "c6 timestamp"]
+ indexs: ["index1:c3:c6"]
+ rows:
+ - [3, 6, 12, 1590738989000]
+ sql: |
+ select ~c1 as r1, ~c2 as r2, ~c3 as r3 from {0};
+ expect:
+ columns: [ 'r1 int16', 'r2 int32', 'r3 bigint']
+ rows:
+ - [ -4, -7, -13 ]
+ - id: bitwise_not_fail
+ desc: bitwise not, fail on non-integral operand
+ inputs:
+ - columns: [ "c0 int", "c1 bool", "c2 float", "c3 double", "c4 string", "c5 date", "c6 timestamp" ]
+ indexs: ["index1:c0:c6"]
+ rows:
+ - [1, true, 1.0, 2.0, "abc", "2012-8-11", 1590738989000]
+ sql: |
+ select d[0] d[1] as r1 from {0};
+ dataProvider:
+ - ['~']
+ - [ '{0}.c1', '{0}.c2', '{0}.c3', '{0}.c4', '{0}.c5', '{0}.c6' ]
+ expect:
+ success: false
+ - id: bitwise_null_operands
+    desc: bitwise operations return null if any operand is null
+ inputs:
+ - columns: ["c1 int16","c2 int32","c3 bigint", "c4 int16", "c6 timestamp"]
+ indexs: ["index1:c3:c6"]
+ rows:
+ - [3, 6, 12, NULL, 1590738989000]
+ sql: |
+ select {0}.c1 & {0}.c4 as r1, {0}.c2 | {0}.c4 as r2, {0}.c3 ^ {0}.c4 as r3, ~ {0}.c4 as r4 from {0};
+ expect:
+ columns: [ 'r1 int16', 'r2 int32', 'r3 int64', 'r4 int16' ]
+ rows:
+ - [ NULL, NULL, NULL, NULL ]
+ - id: bitwise_const_null_operands
+    desc: bitwise operations return null if any operand is a const null
+ inputs:
+ - columns: ["c1 int16","c2 int32","c3 bigint", "c4 int", "c6 timestamp"]
+ indexs: ["index1:c3:c6"]
+ rows:
+ - [3, 6, 12, NULL, 1590738989000]
+ sql: |
+ select {0}.c1 & NULL as r1, {0}.c2 | NULL as r2, {0}.c3 ^ NULL as r3, ~ NULL as r4 from {0};
+ expect:
+ columns: [ 'r1 int16', 'r2 int32', 'r3 int64', 'r4 bool' ]
+ rows:
+ - [ NULL, NULL, NULL, NULL ]
diff --git a/cases/integration_test/expression/test_condition.yaml b/cases/integration_test/expression/test_condition.yaml
new file mode 100644
index 00000000000..54d1dd4ad4d
--- /dev/null
+++ b/cases/integration_test/expression/test_condition.yaml
@@ -0,0 +1,400 @@
+# Copyright 2021 4Paradigm
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+db: test_zw
+debugs: []
+version: 0.5.0
+cases:
+ - id: 0
+ desc: SIMPLE CASE WHEN expression
+ inputs:
+ - columns: ["col1 int","col2 string", "col4 timestamp"]
+ indexs: ["index1:col1:col4"]
+ rows:
+ - [1, "aa",1590738989000]
+ - [2, "cc",1590738989000]
+ - [3, "bb",1590738989000]
+ - [4, "dd",1590738989000]
+ sql: |
+ select col1, col2, case col2
+ when 'aa' then 'apple'
+ else 'nothing'
+ end as case_f1 from {0};
+ expect:
+ columns: ["col1 int", "col2 string", "case_f1 string"]
+ order: col1
+ rows:
+ - [1, "aa", "apple"]
+ - [2, "cc", "nothing"]
+ - [3, "bb", "nothing"]
+ - [4, "dd", "nothing"]
+ - id: 1
+ desc: SIMPLE CASE WHEN expression without ELSE
+ inputs:
+ - columns: ["col1 int","col2 string", "col4 timestamp"]
+ indexs: ["index1:col1:col4"]
+ rows:
+ - [1, "aa",1590738989000]
+ - [2, "cc",1590738989000]
+ - [3, "bb",1590738989000]
+ - [4, "dd",1590738989000]
+ sql: |
+ select col1, col2, case col2
+ when 'aa' then 'apple'
+ end as case_f1 from {0};
+ expect:
+ columns: ["col1 int", "col2 string", "case_f1 string"]
+ order: col1
+ rows:
+ - [1, "aa", "apple"]
+ - [2, "cc", null]
+ - [3, "bb", null]
+ - [4, "dd", null]
+ - id: 2
+ desc: SIMPLE CASE WHEN expression with ELSE NULL
+ inputs:
+ - columns: ["col1 int","col2 string", "col4 timestamp"]
+ indexs: ["index1:col1:col4"]
+ rows:
+ - [1, "aa",1590738989000]
+ - [2, "cc",1590738989000]
+ - [3, "bb",1590738989000]
+ - [4, "dd",1590738989000]
+ sql: |
+ select col1, col2, case col2
+ when 'aa' then 'apple'
+ else NULL
+ end as case_f1 from {0};
+ expect:
+ columns: ["col1 int", "col2 string", "case_f1 string"]
+ order: col1
+ rows:
+ - [1, "aa", "apple"]
+ - [2, "cc", null]
+ - [3, "bb", null]
+ - [4, "dd", null]
+ - id: 3
+ desc: SIMPLE CASE WHEN expression with THEN NULL
+ inputs:
+ - columns: ["col1 int","col2 string", "col4 timestamp"]
+ indexs: ["index1:col1:col4"]
+ rows:
+ - [1, "aa",1590738989000]
+ - [2, "cc",1590738989000]
+ - [3, "bb",1590738989000]
+ - [4, "dd",1590738989000]
+ - [5, null ,1590738989000]
+ sql: |
+ select col1, col2, case col2
+ when 'aa' then 'apple'
+ when 'bb' then NULL
+ when 'cc' then 'cake'
+ else 'nothing'
+ end as case_f1 from {0};
+ expect:
+ columns: ["col1 int", "col2 string", "case_f1 string"]
+ order: col1
+ rows:
+ - [1, "aa", "apple"]
+ - [2, "cc", "cake"]
+ - [3, "bb", null]
+ - [4, "dd", "nothing"]
+ - [5, null, "nothing"]
+ - id: 4
+ desc: SEARCHED CASE WHEN expression
+ inputs:
+ - columns: ["col1 int","col2 string", "col4 timestamp"]
+ indexs: ["index1:col1:col4"]
+ rows:
+ - [1, "aa",1590738989000]
+ - [2, "cc",1590738989000]
+ - [3, "bb",1590738989000]
+ - [4, "dd",1590738989000]
+ sql: |
+ select col1, col2, case
+ when col2='aa' then 'apple'
+ when col2='bb' then 'banana'
+ when col2='cc' then 'cake'
+ else 'nothing'
+ end as case_f1 from {0};
+ expect:
+ columns: ["col1 int", "col2 string", "case_f1 string"]
+ order: col1
+ rows:
+ - [1, "aa", "apple"]
+ - [2, "cc", "cake"]
+ - [3, "bb", "banana"]
+ - [4, "dd", "nothing"]
+ - id: 5
+ desc: SEARCHED CASE WHEN expression without ELSE
+ inputs:
+ - columns: ["col1 int","col2 string", "col4 timestamp"]
+ indexs: ["index1:col1:col4"]
+ rows:
+ - [1, "aa",1590738989000]
+ - [2, "cc",1590738989000]
+ - [3, "bb",1590738989000]
+ - [4, "dd",1590738989000]
+ sql: |
+ select col1, col2, case
+ when col2='aa' then 'apple'
+ end as case_f1 from {0};
+ expect:
+ columns: ["col1 int", "col2 string", "case_f1 string"]
+ order: col1
+ rows:
+ - [1, "aa", "apple"]
+ - [2, "cc", null]
+ - [3, "bb", null]
+ - [4, "dd", null]
+ - id: 6
+ desc: SEARCHED CASE WHEN expression with ELSE
+ inputs:
+ - columns: ["col1 int","col2 string", "col4 timestamp"]
+ indexs: ["index1:col1:col4"]
+ rows:
+ - [1, "aa",1590738989000]
+ - [2, "cc",1590738989000]
+ - [3, "bb",1590738989000]
+ - [4, "dd",1590738989000]
+ sql: |
+ select col1, col2, case
+ when col2='aa' then 'apple'
+ when col2='bb' then 'banana'
+ when col2='cc' then 'cake'
+ else 'nothing'
+ end as case_f1 from {0};
+ expect:
+ columns: ["col1 int", "col2 string", "case_f1 string"]
+ order: col1
+ rows:
+ - [1, "aa", "apple"]
+ - [2, "cc", "cake"]
+ - [3, "bb", "banana"]
+ - [4, "dd", "nothing"]
+ - id: 7
+ desc: conditional expression null test
+ sqlDialect: ["HybridSQL"]
+ inputs:
+ - columns: ["id int64", "c1 bool", "c2 string", "c3 string"]
+ indexs: ["index1:c1:id"]
+ rows:
+ - [1, true, "xxx", "aaa"]
+ - [2, true, "xxx", NULL]
+ - [3, true, NULL, "aaa"]
+ - [4, true, NULL, NULL]
+ - [5, false, "xxx", "aaa"]
+ - [6, false, "xxx", NULL]
+ - [7, false, NULL, "aaa"]
+ - [8, false, NULL, NULL]
+ - [9, NULL, "xxx", "aaa"]
+ - [10, NULL, "xxx", NULL]
+ - [11, NULL, NULL, "aaa"]
+ - [12, NULL, NULL, NULL]
+ sql: select id, case when c1 then c2 else c3 end as result from {0};
+ expect:
+ columns: ["id int64", "result string"]
+ order: id
+ rows:
+ - [1, "xxx"]
+ - [2, "xxx"]
+ - [3, NULL]
+ - [4, NULL]
+ - [5, "aaa"]
+ - [6, NULL]
+ - [7, "aaa"]
+ - [8, NULL]
+ - [9, "aaa"]
+ - [10, NULL]
+ - [11, "aaa"]
+ - [12, NULL]
+ - id: 8
+ desc: IFNULL
+ sqlDialect: ["HybridSQL"]
+ mode: cli-unsupport
+ inputs:
+ - columns: ["col1 int","col2 string", "col4 timestamp"]
+ indexs: ["index1:col1:col4"]
+ rows:
+ - [1, "aa",1590738989000]
+ - [2, null,1590738989000]
+ - [3, "",1590738989000]
+ sql: |
+ select col1,ifnull(col2,"abc") as e1 from {0};
+ expect:
+ columns: ["col1 int", "e1 string"]
+ order: col1
+ rows:
+ - [1, "aa"]
+ - [2, "abc"]
+ - [3, ""]
+ - id: 9
+ desc: IFNULL-different types
+ sqlDialect: ["HybridSQL"]
+ inputs:
+ - columns: ["col1 int","col2 int", "col4 timestamp"]
+ indexs: ["index1:col1:col4"]
+ rows:
+ - [1, 0,1590738989000]
+ - [2, null,1590738989000]
+ - [3, 1,1590738989000]
+ sql: |
+ select col1,ifnull(col2,"abc") as e1 from {0};
+ expect:
+ success: false
+ - id: 10
+ desc: IFNULL-expression
+ sqlDialect: ["HybridSQL"]
+ inputs:
+ - columns: ["col1 int","col2 int", "col4 timestamp"]
+ indexs: ["index1:col1:col4"]
+ rows:
+ - [1, 0,1590738989000]
+ - [2, null,1590738989000]
+ - [3, 1,1590738989000]
+ sql: |
+ select col1,ifnull(col2,100) as e1,ifnull(col2+1,100) as e2 from {0};
+ expect:
+ columns: ["col1 int", "e1 int", "e2 int"]
+ order: col1
+ rows:
+ - [1, 0,1]
+ - [2, 100,100]
+ - [3, 1,2]
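+ # Note: in ifnull(col2+1,100) the sub-expression col2+1 is itself NULL when col2 is
+ # NULL, so the default 100 is returned for the row with col1=2.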
+ - id: 11-1
+ desc: IFNULL-expression-/0
+ sqlDialect: ["HybridSQL"]
+ inputs:
+ - columns: ["col1 int","col2 int", "col4 timestamp"]
+ indexs: ["index1:col1:col4"]
+ rows:
+ - [1, 0,1590738989000]
+ - [2, null,1590738989000]
+ - [3, 1,1590738989000]
+ sql: |
+ select col1,ifnull(col2 /0 ,100) as e3 from {0};
+ expect:
+ success: false
+ - id: 11-2
+ mode: cli-unsupport
+ desc: NVL is a synonym for ifnull
+ inputs:
+ - columns: ["col1 int","col2 string", "col4 timestamp"]
+ indexs: ["index1:col1:col4"]
+ rows:
+ - [1, "aa",1590738989000]
+ - [2, null,1590738989000]
+ - [3, "",1590738989000]
+ sql: |
+ select col1,nvl(col2,"abc") as e1 from {0};
+ expect:
+ columns: ["col1 int", "e1 string"]
+ order: col1
+ rows:
+ - [1, "aa"]
+ - [2, "abc"]
+ - [3, ""]
+ - id: 11-3
+ desc: NVL-expression-/0
+ inputs:
+ - columns: ["col1 int","col2 int", "col4 timestamp"]
+ indexs: ["index1:col1:col4"]
+ rows:
+ - [1, 0,1590738989000]
+ - [2, null,1590738989000]
+ - [3, 1,1590738989000]
+ sql: |
+ select col1,nvl(col2 /0 ,100) as e3 from {0};
+ expect:
+ success: false
+ - id: 12
+ desc: IFNULL-compatible types
+ sqlDialect: ["HybridSQL"]
+ inputs:
+ - columns: ["col1 int","col2 bigint", "col4 timestamp"]
+ indexs: ["index1:col1:col4"]
+ rows:
+ - [1, 0,1590738989000]
+ - [2, null,1590738989000]
+ - [3, 1,1590738989000]
+ sql: |
+ select col1,ifnull(col2,100) as e1 from {0};
+ expect:
+ success: false
+ - id: 13
+ desc: IFNULL-floating point
+ sqlDialect: ["HybridSQL"]
+ inputs:
+ - columns: ["col1 int","col2 bigint", "col4 timestamp"]
+ indexs: ["index1:col1:col4"]
+ rows:
+ - [1, 0,1590738989000]
+ - [2, null,1590738989000]
+ - [3, 1,1590738989000]
+ sql: |
+ select col1,ifnull(col2,1.1) as e2 from {0};
+ expect:
+ success: false
+
+ - id: NVL2-1
+ desc: NVL2
+ inputs:
+ - columns: ["col1 int","col2 int", "col4 timestamp"]
+ indexs: ["index1:col1:col4"]
+ rows:
+ - [1, 0,1590738989000]
+ - [2, null,1590738989000]
+ - [3, 1,1590738989000]
+ sql: |
+ select col1,nvl2(col2, "abc", "def") as e1 from {0};
+ expect:
+ columns: ["col1 int", "e1 string"]
+ order: col1
+ rows:
+ - [1, "abc"]
+ - [2, "def"]
+ - [3, "abc"]
+
+ - id: NVL2-2
+ desc: NVL2, type not match
+ inputs:
+ - columns: ["col1 int","col2 int", "col4 timestamp"]
+ indexs: ["index1:col1:col4"]
+ rows:
+ - [1, 0,1590738989000]
+ - [2, null,1590738989000]
+ - [3, 1,1590738989000]
+ sql: |
+ select col1,nvl2(col2, "abc", col1 + 1) as e1 from {0};
+ expect:
+ success: false
+
+ - id: NVL2-3
+ desc: NVL2, sub expression
+ inputs:
+ - columns: ["col1 int","col2 int", "col4 timestamp"]
+ indexs: ["index1:col1:col4"]
+ rows:
+ - [1, 0,1590738989000]
+ - [2, null,1590738989000]
+ - [3, 1,1590738989000]
+ sql: |
+ select col1, nvl2(col2, col1 * col1, col1 + 1) as e1 from {0};
+ expect:
+ columns: ["col1 int", "e1 int"]
+ order: col1
+ rows:
+ - [1, 1]
+ - [2, 3]
+ - [3, 9]
\ No newline at end of file
diff --git a/cases/integration_test/expression/test_like.yaml b/cases/integration_test/expression/test_like.yaml
new file mode 100644
index 00000000000..d47bb57b616
--- /dev/null
+++ b/cases/integration_test/expression/test_like.yaml
@@ -0,0 +1,1138 @@
+# Copyright 2021 4Paradigm
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+db: test_zw
+debugs: []
+sqlDialect: ["HybridSQL"]
+version: 0.5.0
+cases:
+ - id: 0
+ desc: "使用_"
+ inputs:
+ -
+ columns : ["id bigint","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"]
+ indexs: ["index1:c1:c7"]
+ rows:
+ - [1,"a_b",30,30,30,30.0,30.0,1590738990000,"2020-05-01",false]
+ - [2,"aab",30,30,30,30.0,30.0,1590738991000,"2020-05-01",false]
+ - [3,"a%b",30,30,30,30.0,30.0,1590738992000,"2020-05-01",false]
+ - [4,"b_c",30,30,30,30.0,30.0,1590738993000,"2020-05-01",false]
+ - [5,"abc",30,30,30,30.0,30.0,1590738994000,"2020-05-01",false]
+ - [6,"A0b",30,30,30,30.0,30.0,1590738995000,"2020-05-01",false]
+ - [7,"a#B",30,30,30,30.0,30.0,1590738996000,"2020-05-01",false]
+ - [8,"aaab",30,30,30,30.0,30.0,1590738991000,"2020-05-01",false]
+ dataProvider:
+ - ["like","not like","ilike","not ilike"]
+ sql: select id,c1,c1 d[0] 'a_b' as v1 from {0};
+ expect:
+ order: id
+ columns: ["id bigint","c1 string","v1 bool"]
+ expectProvider:
+ 0:
+ rows:
+ - [1,"a_b",true]
+ - [2,"aab",true]
+ - [3,"a%b",true]
+ - [4,"b_c",false]
+ - [5,"abc",false]
+ - [6,"A0b",false]
+ - [7,"a#B",false]
+ - [8,"aaab",false]
+ 1:
+ rows:
+ - [1,"a_b",false]
+ - [2,"aab",false]
+ - [3,"a%b",false]
+ - [4,"b_c",true]
+ - [5,"abc",true]
+ - [6,"A0b",true]
+ - [7,"a#B",true]
+ - [8,"aaab",true]
+ 2:
+ rows:
+ - [1,"a_b",true]
+ - [2,"aab",true]
+ - [3,"a%b",true]
+ - [4,"b_c",false]
+ - [5,"abc",false]
+ - [6,"A0b",true]
+ - [7,"a#B",true]
+ - [8,"aaab",false]
+ 3:
+ rows:
+ - [1,"a_b",false]
+ - [2,"aab",false]
+ - [3,"a%b",false]
+ - [4,"b_c",true]
+ - [5,"abc",true]
+ - [6,"A0b",false]
+ - [7,"a#B",false]
+ - [8,"aaab",true]
+ - id: 1
+ desc: "使用%"
+ inputs:
+ -
+ columns : ["id bigint","c1 string","c7 timestamp"]
+ indexs: ["index1:id:c7"]
+ rows:
+ - [1,"a_b",1590738990000]
+ - [2,"aabb",1590738991000]
+ - [3,"a%_b",1590738992000]
+ - [4,"b_c",1590738993000]
+ - [5,"abc",1590738994000]
+ - [6,"A0b",1590738995000]
+ - [7,"a#B",1590738996000]
+ - [8,"aaab",1590738997000]
+ - [9,"ab",1590738998000]
+ dataProvider:
+ - ["like","not like","ilike","not ilike"]
+ sql: select id,c1,c1 d[0] 'a%b' as v1 from {0};
+ expect:
+ order: id
+ columns: ["id bigint","c1 string","v1 bool"]
+ expectProvider:
+ 0:
+ rows:
+ - [1,"a_b",true]
+ - [2,"aabb",true]
+ - [3,"a%_b",true]
+ - [4,"b_c",false]
+ - [5,"abc",false]
+ - [6,"A0b",false]
+ - [7,"a#B",false]
+ - [8,"aaab",true]
+ - [9,"ab",true]
+ 1:
+ rows:
+ - [1,"a_b",false]
+ - [2,"aabb",false]
+ - [3,"a%_b",false]
+ - [4,"b_c",true]
+ - [5,"abc",true]
+ - [6,"A0b",true]
+ - [7,"a#B",true]
+ - [8,"aaab",false]
+ - [9,"ab",false]
+ 2:
+ rows:
+ - [1,"a_b",true]
+ - [2,"aabb",true]
+ - [3,"a%_b",true]
+ - [4,"b_c",false]
+ - [5,"abc",false]
+ - [6,"A0b",true]
+ - [7,"a#B",true]
+ - [8,"aaab",true]
+ - [9,"ab",true]
+ 3:
+ rows:
+ - [1,"a_b",false]
+ - [2,"aabb",false]
+ - [3,"a%_b",false]
+ - [4,"b_c",true]
+ - [5,"abc",true]
+ - [6,"A0b",false]
+ - [7,"a#B",false]
+ - [8,"aaab",false]
+ - [9,"ab",false]
+ - id: 2
+ desc: "同时使用%和_"
+ inputs:
+ -
+ columns : ["id bigint","c1 string","c7 timestamp"]
+ indexs: ["index1:id:c7"]
+ rows:
+ - [1,"%a_b",1590738990000]
+ - [2,"aabb",1590738991000]
+ - [3,"_a%_b",1590738992000]
+ - [4,"ba_c",1590738993000]
+ - [5,"abb",1590738994000]
+ - [6,"bA0b",1590738995000]
+ - [7,"aa#0B",1590738996000]
+ dataProvider:
+ - ["like","not like","ilike","not ilike"]
+ sql: select id,c1,c1 d[0] '_a%b' as v1 from {0};
+ expect:
+ order: id
+ columns: ["id bigint","c1 string","v1 bool"]
+ expectProvider:
+ 0:
+ rows:
+ - [1,"%a_b",true]
+ - [2,"aabb",true]
+ - [3,"_a%_b",true]
+ - [4,"ba_c",false]
+ - [5,"abb",false]
+ - [6,"bA0b",false]
+ - [7,"aa#0B",false]
+ 1:
+ rows:
+ - [1,"%a_b",false]
+ - [2,"aabb",false]
+ - [3,"_a%_b",false]
+ - [4,"ba_c",true]
+ - [5,"abb",true]
+ - [6,"bA0b",true]
+ - [7,"aa#0B",true]
+ 2:
+ rows:
+ - [1,"%a_b",true]
+ - [2,"aabb",true]
+ - [3,"_a%_b",true]
+ - [4,"ba_c",false]
+ - [5,"abb",false]
+ - [6,"bA0b",true]
+ - [7,"aa#0B",true]
+ 3:
+ rows:
+ - [1,"%a_b",false]
+ - [2,"aabb",false]
+ - [3,"_a%_b",false]
+ - [4,"ba_c",true]
+ - [5,"abb",true]
+ - [6,"bA0b",false]
+ - [7,"aa#0B",false]
+ - id: 3
+ desc: "使用默认的escape"
+ inputs:
+ -
+ columns : ["id bigint","c1 string","c7 timestamp"]
+ indexs: ["index1:id:c7"]
+ rows:
+ - [1,"%a_b",1590738990000]
+ - [2,"aabb",1590738991000]
+ - [3,"_a%_b",1590738992000]
+ - [4,"ba_c",1590738993000]
+ - [5,"abb",1590738994000]
+ - [6,"bA0b",1590738995000]
+ - [7,"_a#0B",1590738996000]
+ dataProvider:
+ - ["like","not like","ilike","not ilike"]
+ sql: select id,c1,c1 d[0] "\\_a%b" ESCAPE "\\" as v1 from {0};
+ expect:
+ order: id
+ columns: ["id bigint","c1 string","v1 bool"]
+ expectProvider:
+ 0:
+ rows:
+ - [1,"%a_b",false]
+ - [2,"aabb",false]
+ - [3,"_a%_b",true]
+ - [4,"ba_c",false]
+ - [5,"abb",false]
+ - [6,"bA0b",false]
+ - [7,"_a#0B",false]
+ 1:
+ rows:
+ - [1,"%a_b",true]
+ - [2,"aabb",true]
+ - [3,"_a%_b",false]
+ - [4,"ba_c",true]
+ - [5,"abb",true]
+ - [6,"bA0b",true]
+ - [7,"_a#0B",true]
+ 2:
+ rows:
+ - [1,"%a_b",false]
+ - [2,"aabb",false]
+ - [3,"_a%_b",true]
+ - [4,"ba_c",false]
+ - [5,"abb",false]
+ - [6,"bA0b",false]
+ - [7,"_a#0B",true]
+ 3:
+ rows:
+ - [1,"%a_b",true]
+ - [2,"aabb",true]
+ - [3,"_a%_b",false]
+ - [4,"ba_c",true]
+ - [5,"abb",true]
+ - [6,"bA0b",true]
+ - [7,"_a#0B",false]
+ - id: 4
+ desc: "指定escape为#"
+ inputs:
+ -
+ columns : ["id bigint","c1 string","c7 timestamp"]
+ indexs: ["index1:id:c7"]
+ rows:
+ - [1,"%a_b",1590738990000]
+ - [2,"aabb",1590738991000]
+ - [3,"_a%_b",1590738992000]
+ - [4,"ba_c",1590738993000]
+ - [5,"abb",1590738994000]
+ - [6,"bA0b",1590738995000]
+ - [7,"_a#0B",1590738996000]
+ dataProvider:
+ - ["like","not like","ilike","not ilike"]
+ sql: select id,c1,c1 d[0] '#_a%b' ESCAPE '#' as v1 from {0};
+ expect:
+ order: id
+ columns: ["id bigint","c1 string","v1 bool"]
+ expectProvider:
+ 0:
+ rows:
+ - [1,"%a_b",false]
+ - [2,"aabb",false]
+ - [3,"_a%_b",true]
+ - [4,"ba_c",false]
+ - [5,"abb",false]
+ - [6,"bA0b",false]
+ - [7,"_a#0B",false]
+ 1:
+ rows:
+ - [1,"%a_b",true]
+ - [2,"aabb",true]
+ - [3,"_a%_b",false]
+ - [4,"ba_c",true]
+ - [5,"abb",true]
+ - [6,"bA0b",true]
+ - [7,"_a#0B",true]
+ 2:
+ rows:
+ - [1,"%a_b",false]
+ - [2,"aabb",false]
+ - [3,"_a%_b",true]
+ - [4,"ba_c",false]
+ - [5,"abb",false]
+ - [6,"bA0b",false]
+ - [7,"_a#0B",true]
+ 3:
+ rows:
+ - [1,"%a_b",true]
+ - [2,"aabb",true]
+ - [3,"_a%_b",false]
+ - [4,"ba_c",true]
+ - [5,"abb",true]
+ - [6,"bA0b",true]
+ - [7,"_a#0B",false]
+ - id: 5
+ desc: "指定escape为_"
+ inputs:
+ -
+ columns : ["id bigint","c1 string","c7 timestamp"]
+ indexs: ["index1:id:c7"]
+ rows:
+ - [1,"%a_b",1590738990000]
+ - [2,"aabb",1590738991000]
+ - [3,"_a%_b",1590738992000]
+ - [4,"ba_c",1590738993000]
+ - [5,"abb",1590738994000]
+ - [6,"bA0b",1590738995000]
+ - [7,"_a#0B",1590738996000]
+ dataProvider:
+ - ["like","not like","ilike","not ilike"]
+ sql: select id,c1,c1 d[0] '__a%b' ESCAPE '_' as v1 from {0};
+ expect:
+ order: id
+ columns: ["id bigint","c1 string","v1 bool"]
+ expectProvider:
+ 0:
+ rows:
+ - [1,"%a_b",false]
+ - [2,"aabb",false]
+ - [3,"_a%_b",true]
+ - [4,"ba_c",false]
+ - [5,"abb",false]
+ - [6,"bA0b",false]
+ - [7,"_a#0B",false]
+ 1:
+ rows:
+ - [1,"%a_b",true]
+ - [2,"aabb",true]
+ - [3,"_a%_b",false]
+ - [4,"ba_c",true]
+ - [5,"abb",true]
+ - [6,"bA0b",true]
+ - [7,"_a#0B",true]
+ 2:
+ rows:
+ - [1,"%a_b",false]
+ - [2,"aabb",false]
+ - [3,"_a%_b",true]
+ - [4,"ba_c",false]
+ - [5,"abb",false]
+ - [6,"bA0b",false]
+ - [7,"_a#0B",true]
+ 3:
+ rows:
+ - [1,"%a_b",true]
+ - [2,"aabb",true]
+ - [3,"_a%_b",false]
+ - [4,"ba_c",true]
+ - [5,"abb",true]
+ - [6,"bA0b",true]
+ - [7,"_a#0B",false]
+ - id: 6
+ desc: "指定escape为%"
+ inputs:
+ -
+ columns : ["id bigint","c1 string","c7 timestamp"]
+ indexs: ["index1:id:c7"]
+ rows:
+ - [1,"%a_b",1590738990000]
+ - [2,"aabb",1590738991000]
+ - [3,"_a%b",1590738992000]
+ - [4,"ba_c",1590738993000]
+ - [5,"abb",1590738994000]
+ - [6,"bA%b",1590738995000]
+ - [7,"_a#0B",1590738996000]
+ dataProvider:
+ - ["like","not like","ilike","not ilike"]
+ sql: select id,c1,c1 d[0] '_a%%b' ESCAPE '%' as v1 from {0};
+ expect:
+ order: id
+ columns: ["id bigint","c1 string","v1 bool"]
+ expectProvider:
+ 0:
+ rows:
+ - [1,"%a_b",false]
+ - [2,"aabb",false]
+ - [3,"_a%b",true]
+ - [4,"ba_c",false]
+ - [5,"abb",false]
+ - [6,"bA%b",false]
+ - [7,"_a#0B",false]
+ 1:
+ rows:
+ - [1,"%a_b",true]
+ - [2,"aabb",true]
+ - [3,"_a%b",false]
+ - [4,"ba_c",true]
+ - [5,"abb",true]
+ - [6,"bA%b",true]
+ - [7,"_a#0B",true]
+ 2:
+ rows:
+ - [1,"%a_b",false]
+ - [2,"aabb",false]
+ - [3,"_a%b",true]
+ - [4,"ba_c",false]
+ - [5,"abb",false]
+ - [6,"bA%b",true]
+ - [7,"_a#0B",false]
+ 3:
+ rows:
+ - [1,"%a_b",true]
+ - [2,"aabb",true]
+ - [3,"_a%b",false]
+ - [4,"ba_c",true]
+ - [5,"abb",true]
+ - [6,"bA%b",false]
+ - [7,"_a#0B",true]
+ - id: 7
+ desc: "escape不指定"
+ inputs:
+ -
+ columns : ["id bigint","c1 string","c7 timestamp"]
+ indexs: ["index1:id:c7"]
+ rows:
+ - [1,"%a_b",1590738990000]
+ - [2,"aabb",1590738991000]
+ - [3,"_a%_b",1590738992000]
+ - [4,"ba_c",1590738993000]
+ - [5,"abb",1590738994000]
+ - [6,"bA0b",1590738995000]
+ - [7,"_a#0B",1590738996000]
+ dataProvider:
+ - ["like","not like","ilike","not ilike"]
+ sql: select id,c1,c1 d[0] "\\_a%b" as v1 from {0};
+ expect:
+ order: id
+ columns: ["id bigint","c1 string","v1 bool"]
+ expectProvider:
+ 0:
+ rows:
+ - [1,"%a_b",false]
+ - [2,"aabb",false]
+ - [3,"_a%_b",true]
+ - [4,"ba_c",false]
+ - [5,"abb",false]
+ - [6,"bA0b",false]
+ - [7,"_a#0B",false]
+ 1:
+ rows:
+ - [1,"%a_b",true]
+ - [2,"aabb",true]
+ - [3,"_a%_b",false]
+ - [4,"ba_c",true]
+ - [5,"abb",true]
+ - [6,"bA0b",true]
+ - [7,"_a#0B",true]
+ 2:
+ rows:
+ - [1,"%a_b",false]
+ - [2,"aabb",false]
+ - [3,"_a%_b",true]
+ - [4,"ba_c",false]
+ - [5,"abb",false]
+ - [6,"bA0b",false]
+ - [7,"_a#0B",true]
+ 3:
+ rows:
+ - [1,"%a_b",true]
+ - [2,"aabb",true]
+ - [3,"_a%_b",false]
+ - [4,"ba_c",true]
+ - [5,"abb",true]
+ - [6,"bA0b",true]
+ - [7,"_a#0B",false]
+ - id: 8
+ desc: "escape为空串,使用\\"
+ mode: cluster-unsupport
+ inputs:
+ -
+ columns : ["id bigint","c1 string","c7 timestamp"]
+ indexs: ["index1:id:c7"]
+ rows:
+ - [1,'\\\%a_b',1590738990000]
+ - [2,'\\\aabb',1590738991000]
+ - [3,"_a%_b",1590738992000]
+ - [4,"ba_c",1590738993000]
+ - [5,"abb",1590738994000]
+ - [6,'\\\bA0b',1590738995000]
+ - [7,'\\\_a#0B',1590738996000]
+ dataProvider:
+ - ["like","not like","ilike","not ilike"]
+ sql: select id,c1,c1 d[0] "\\_a%b" escape "" as v1 from {0};
+ expect:
+ order: id
+ columns: ["id bigint","c1 string","v1 bool"]
+ expectProvider:
+ 0:
+ rows:
+ - [1,'\%a_b',true]
+ - [2,'\aabb',true]
+ - [3,'_a%_b',false]
+ - [4,'ba_c',false]
+ - [5,"abb",false]
+ - [6,'\bA0b',false]
+ - [7,'\_a#0B',false]
+ 1:
+ rows:
+ - [1,'\%a_b',false]
+ - [2,'\aabb',false]
+ - [3,"_a%_b",true]
+ - [4,"ba_c",true]
+ - [5,"abb",true]
+ - [6,'\bA0b',true]
+ - [7,'\_a#0B',true]
+ 2:
+ rows:
+ - [1,'\%a_b',true]
+ - [2,'\aabb',true]
+ - [3,"_a%_b",false]
+ - [4,"ba_c",false]
+ - [5,"abb",false]
+ - [6,'\bA0b',true]
+ - [7,'\_a#0B',true]
+ 3:
+ rows:
+ - [1,'\%a_b',false]
+ - [2,'\aabb',false]
+ - [3,"_a%_b",true]
+ - [4,"ba_c",true]
+ - [5,"abb",true]
+ - [6,'\bA0b',false]
+ - [7,'\_a#0B',false]
+ - id: 9
+ desc: "使用两个%"
+ inputs:
+ -
+ columns : ["id bigint","c1 string","c7 timestamp"]
+ indexs: ["index1:id:c7"]
+ rows:
+ - [1,"a_b",1590738990000]
+ - [2,"aabb",1590738991000]
+ - [3,"a%_b%0",1590738992000]
+ - [4,"b_c",1590738993000]
+ - [5,"abc",1590738994000]
+ - [6,"A0b",1590738995000]
+ - [7,"a#Bb",1590738996000]
+ - [8,"aaabbcc",1590738991000]
+ dataProvider:
+ - ["like","not like","ilike","not ilike"]
+ sql: select id,c1,c1 d[0] 'a%b%' as v1 from {0};
+ expect:
+ order: id
+ columns: ["id bigint","c1 string","v1 bool"]
+ expectProvider:
+ 0:
+ rows:
+ - [1,"a_b",true]
+ - [2,"aabb",true]
+ - [3,"a%_b%0",true]
+ - [4,"b_c",false]
+ - [5,"abc",true]
+ - [6,"A0b",false]
+ - [7,"a#Bb",true]
+ - [8,"aaabbcc",true]
+ 1:
+ rows:
+ - [1,"a_b",false]
+ - [2,"aabb",false]
+ - [3,"a%_b%0",false]
+ - [4,"b_c",true]
+ - [5,"abc",false]
+ - [6,"A0b",true]
+ - [7,"a#Bb",false]
+ - [8,"aaabbcc",false]
+ 2:
+ rows:
+ - [1,"a_b",true]
+ - [2,"aabb",true]
+ - [3,"a%_b%0",true]
+ - [4,"b_c",false]
+ - [5,"abc",true]
+ - [6,"A0b",true]
+ - [7,"a#Bb",true]
+ - [8,"aaabbcc",true]
+ 3:
+ rows:
+ - [1,"a_b",false]
+ - [2,"aabb",false]
+ - [3,"a%_b%0",false]
+ - [4,"b_c",true]
+ - [5,"abc",false]
+ - [6,"A0b",false]
+ - [7,"a#Bb",false]
+ - [8,"aaabbcc",false]
+ - id: 10
+ desc: "使用两个_"
+ inputs:
+ -
+ columns : ["id bigint","c1 string","c7 timestamp"]
+ indexs: ["index1:id:c7"]
+ rows:
+ - [1,"%a_b",1590738990000]
+ - [2,"aabb",1590738991000]
+ - [3,"_a%_b",1590738992000]
+ - [4,"ba_c",1590738993000]
+ - [5,"abb",1590738994000]
+ - [6,"bA0b",1590738995000]
+ - [7,"aa#0B",1590738996000]
+ dataProvider:
+ - ["like","not like","ilike","not ilike"]
+ sql: select id,c1,c1 d[0] '_a_b' as v1 from {0};
+ expect:
+ order: id
+ columns: ["id bigint","c1 string","v1 bool"]
+ expectProvider:
+ 0:
+ rows:
+ - [1,"%a_b",true]
+ - [2,"aabb",true]
+ - [3,"_a%_b",false]
+ - [4,"ba_c",false]
+ - [5,"abb",false]
+ - [6,"bA0b",false]
+ - [7,"aa#0B",false]
+ 1:
+ rows:
+ - [1,"%a_b",false]
+ - [2,"aabb",false]
+ - [3,"_a%_b",true]
+ - [4,"ba_c",true]
+ - [5,"abb",true]
+ - [6,"bA0b",true]
+ - [7,"aa#0B",true]
+ 2:
+ rows:
+ - [1,"%a_b",true]
+ - [2,"aabb",true]
+ - [3,"_a%_b",false]
+ - [4,"ba_c",false]
+ - [5,"abb",false]
+ - [6,"bA0b",true]
+ - [7,"aa#0B",false]
+ 3:
+ rows:
+ - [1,"%a_b",false]
+ - [2,"aabb",false]
+ - [3,"_a%_b",true]
+ - [4,"ba_c",true]
+ - [5,"abb",true]
+ - [6,"bA0b",false]
+ - [7,"aa#0B",true]
+ - id: 11
+ desc: "使用两个%,其中一个被转义"
+ inputs:
+ -
+ columns : ["id bigint","c1 string","c7 timestamp"]
+ indexs: ["index1:id:c7"]
+ rows:
+ - [1,"a_b",1590738990000]
+ - [2,"aab%",1590738991000]
+ - [3,"a%_b%0",1590738992000]
+ - [4,"b_c",1590738993000]
+ - [5,"ab%",1590738994000]
+ - [6,"A0b",1590738995000]
+ - [7,"a#B%",1590738996000]
+ dataProvider:
+ - ["like","not like","ilike","not ilike"]
+ sql: select id,c1,c1 d[0] 'a%b#%' escape '#' as v1 from {0};
+ expect:
+ order: id
+ columns: ["id bigint","c1 string","v1 bool"]
+ expectProvider:
+ 0:
+ rows:
+ - [1,"a_b",false]
+ - [2,"aab%",true]
+ - [3,"a%_b%0",false]
+ - [4,"b_c",false]
+ - [5,"ab%",true]
+ - [6,"A0b",false]
+ - [7,"a#B%",false]
+ 1:
+ rows:
+ - [1,"a_b",true]
+ - [2,"aab%",false]
+ - [3,"a%_b%0",true]
+ - [4,"b_c",true]
+ - [5,"ab%",false]
+ - [6,"A0b",true]
+ - [7,"a#B%",true]
+ 2:
+ rows:
+ - [1,"a_b",false]
+ - [2,"aab%",true]
+ - [3,"a%_b%0",false]
+ - [4,"b_c",false]
+ - [5,"ab%",true]
+ - [6,"A0b",false]
+ - [7,"a#B%",true]
+ 3:
+ rows:
+ - [1,"a_b",true]
+ - [2,"aab%",false]
+ - [3,"a%_b%0",true]
+ - [4,"b_c",true]
+ - [5,"ab%",false]
+ - [6,"A0b",true]
+ - [7,"a#B%",false]
+ - id: 12
+ desc: "使用两个_,其中一个被转义"
+ inputs:
+ -
+ columns : ["id bigint","c1 string","c7 timestamp"]
+ indexs: ["index1:id:c7"]
+ rows:
+ - [1,"%a_b",1590738990000]
+ - [2,"aabb",1590738991000]
+ - [3,"_a%b",1590738992000]
+ - [4,"ba_c",1590738993000]
+ - [5,"abb",1590738994000]
+ - [6,"_A0b",1590738995000]
+ - [7,"aa#0B",1590738996000]
+ dataProvider:
+ - ["like","not like","ilike","not ilike"]
+ sql: select id,c1,c1 d[0] '#_a_b' escape '#' as v1 from {0};
+ expect:
+ order: id
+ columns: ["id bigint","c1 string","v1 bool"]
+ expectProvider:
+ 0:
+ rows:
+ - [1,"%a_b",false]
+ - [2,"aabb",false]
+ - [3,"_a%b",true]
+ - [4,"ba_c",false]
+ - [5,"abb",false]
+ - [6,"_A0b",false]
+ - [7,"aa#0B",false]
+ 1:
+ rows:
+ - [1,"%a_b",true]
+ - [2,"aabb",true]
+ - [3,"_a%b",false]
+ - [4,"ba_c",true]
+ - [5,"abb",true]
+ - [6,"_A0b",true]
+ - [7,"aa#0B",true]
+ 2:
+ rows:
+ - [1,"%a_b",false]
+ - [2,"aabb",false]
+ - [3,"_a%b",true]
+ - [4,"ba_c",false]
+ - [5,"abb",false]
+ - [6,"_A0b",true]
+ - [7,"aa#0B",false]
+ 3:
+ rows:
+ - [1,"%a_b",true]
+ - [2,"aabb",true]
+ - [3,"_a%b",false]
+ - [4,"ba_c",true]
+ - [5,"abb",true]
+ - [6,"_A0b",false]
+ - [7,"aa#0B",true]
+ - id: 13
+ desc: "同时使用%和_,其中_被转义"
+ inputs:
+ -
+ columns : ["id bigint","c1 string","c7 timestamp"]
+ indexs: ["index1:id:c7"]
+ rows:
+ - [1,"%a_b",1590738990000]
+ - [2,"aabb",1590738991000]
+ - [3,"_a%_b",1590738992000]
+ - [4,"ba_c",1590738993000]
+ - [5,"abb",1590738994000]
+ - [6,"_A0b",1590738995000]
+ - [7,"_a#0B",1590738996000]
+ dataProvider:
+ - ["like","not like","ilike","not ilike"]
+ sql: select id,c1,c1 d[0] '#_a%b' escape '#' as v1 from {0};
+ expect:
+ order: id
+ columns: ["id bigint","c1 string","v1 bool"]
+ expectProvider:
+ 0:
+ rows:
+ - [1,"%a_b",false]
+ - [2,"aabb",false]
+ - [3,"_a%_b",true]
+ - [4,"ba_c",false]
+ - [5,"abb",false]
+ - [6,"_A0b",false]
+ - [7,"_a#0B",false]
+ 1:
+ rows:
+ - [1,"%a_b",true]
+ - [2,"aabb",true]
+ - [3,"_a%_b",false]
+ - [4,"ba_c",true]
+ - [5,"abb",true]
+ - [6,"_A0b",true]
+ - [7,"_a#0B",true]
+ 2:
+ rows:
+ - [1,"%a_b",false]
+ - [2,"aabb",false]
+ - [3,"_a%_b",true]
+ - [4,"ba_c",false]
+ - [5,"abb",false]
+ - [6,"_A0b",true]
+ - [7,"_a#0B",true]
+ 3:
+ rows:
+ - [1,"%a_b",true]
+ - [2,"aabb",true]
+ - [3,"_a%_b",false]
+ - [4,"ba_c",true]
+ - [5,"abb",true]
+ - [6,"_A0b",false]
+ - [7,"_a#0B",false]
+ - id: 14
+ desc: "同时使用%和_,其中%被转义"
+ inputs:
+ -
+ columns : ["id bigint","c1 string","c7 timestamp"]
+ indexs: ["index1:id:c7"]
+ rows:
+ - [1,"%a%b",1590738990000]
+ - [2,"aabb",1590738991000]
+ - [3,"_a%_b",1590738992000]
+ - [4,"ba_c",1590738993000]
+ - [5,"abb",1590738994000]
+ - [6,"bA%b",1590738995000]
+ - [7,"aa#0B",1590738996000]
+ dataProvider:
+ - ["like","not like","ilike","not ilike"]
+ sql: select id,c1,c1 d[0] '_a#%b' escape '#' as v1 from {0};
+ expect:
+ order: id
+ columns: ["id bigint","c1 string","v1 bool"]
+ expectProvider:
+ 0:
+ rows:
+ - [1,"%a%b",true]
+ - [2,"aabb",false]
+ - [3,"_a%_b",false]
+ - [4,"ba_c",false]
+ - [5,"abb",false]
+ - [6,"bA%b",false]
+ - [7,"aa#0B",false]
+ 1:
+ rows:
+ - [1,"%a%b",false]
+ - [2,"aabb",true]
+ - [3,"_a%_b",true]
+ - [4,"ba_c",true]
+ - [5,"abb",true]
+ - [6,"bA%b",true]
+ - [7,"aa#0B",true]
+ 2:
+ rows:
+ - [1,"%a%b",true]
+ - [2,"aabb",false]
+ - [3,"_a%_b",false]
+ - [4,"ba_c",false]
+ - [5,"abb",false]
+ - [6,"bA%b",true]
+ - [7,"aa#0B",false]
+ 3:
+ rows:
+ - [1,"%a%b",false]
+ - [2,"aabb",true]
+ - [3,"_a%_b",true]
+ - [4,"ba_c",true]
+ - [5,"abb",true]
+ - [6,"bA%b",false]
+ - [7,"aa#0B",true]
+ - id: 15
+ desc: "列中有null和空串"
+ inputs:
+ -
+ columns : ["id bigint","c1 string","c7 timestamp"]
+ indexs: ["index1:id:c7"]
+ rows:
+ - [1,"",1590738990000]
+ - [2,null,1590738991000]
+ dataProvider:
+ - ["like","not like","ilike","not ilike"]
+ sql: select id,c1 d[0] 'a%b' as v1 from {0};
+ expect:
+ order: id
+ columns: ["id bigint","v1 bool"]
+ rows:
+ - [1,false]
+ - [2,null]
+ expectProvider:
+ 1:
+ rows:
+ - [1,true]
+ - [2,null]
+ 3:
+ rows:
+ - [1,true]
+ - [2,null]
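+ # A NULL input on the left of LIKE / NOT LIKE / ILIKE / NOT ILIKE yields NULL, while
+ # the empty string simply fails the 'a%b' pattern (and therefore passes the negations).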
+ - id: 16
+ desc: "使用空串"
+ inputs:
+ -
+ columns : ["id bigint","c1 string","c7 timestamp"]
+ indexs: ["index1:id:c7"]
+ rows:
+ - [1,"",1590738990000]
+ - [2,"aa",1590738990000]
+ dataProvider:
+ - ["like","not like","ilike","not ilike"]
+ sql: select id,c1 d[0] '' as v1 from {0};
+ expect:
+ order: id
+ columns: ["id bigint","v1 bool"]
+ rows:
+ - [1,true]
+ - [2,false]
+ expectProvider:
+ 1:
+ rows:
+ - [1,false]
+ - [2,true]
+ 3:
+ rows:
+ - [1,false]
+ - [2,true]
+ - id: 17
+ desc: "使用null"
+ inputs:
+ -
+ columns : ["id bigint","c1 string","c7 timestamp"]
+ indexs: ["index1:id:c7"]
+ rows:
+ - [1,"",1590738990000]
+ - [2,"aa",1590738991000]
+ - [3,null,1590738992000]
+ dataProvider:
+ - ["like","not like","ilike","not ilike"]
+ sql: select id,c1 d[0] null as v1 from {0};
+ expect:
+ success: false
+ - id: 18
+ desc: "escape使用null"
+ inputs:
+ -
+ columns : ["id bigint","c1 string","c7 timestamp"]
+ indexs: ["index1:id:c7"]
+ rows:
+ - [1,"",1590738990000]
+ - [2,"aa",1590738991000]
+ - [3,null,1590738992000]
+ dataProvider:
+ - ["like","not like","ilike","not ilike"]
+ sql: select id,c1 d[0] 'a%' escape null as v1 from {0};
+ expect:
+ success: false
+ - id: 19
+ desc: "int类型"
+ inputs:
+ -
+ columns : ["id bigint","c1 int","c7 timestamp"]
+ indexs: ["index1:id:c7"]
+ rows:
+ - [1,12,1590738990000]
+ dataProvider:
+ - ["like","not like","ilike","not ilike"]
+ sql: select id,c1 d[0] '1%' as v1 from {0};
+ expect:
+ success: false
+ - id: 20
+ desc: "bigint类型"
+ inputs:
+ -
+ columns : ["id bigint","c1 bigint","c7 timestamp"]
+ indexs: ["index1:id:c7"]
+ rows:
+ - [1,12,1590738990000]
+ dataProvider:
+ - ["like","not like","ilike","not ilike"]
+ sql: select id,c1 d[0] '1%' as v1 from {0};
+ expect:
+ success: false
+ - id: 21
+ desc: "smallint类型"
+ inputs:
+ -
+ columns : ["id bigint","c1 smallint","c7 timestamp"]
+ indexs: ["index1:id:c7"]
+ rows:
+ - [1,12,1590738990000]
+ dataProvider:
+ - ["like","not like","ilike","not ilike"]
+ sql: select id,c1 d[0] '1%' as v1 from {0};
+ expect:
+ success: false
+ - id: 22
+ desc: "float类型"
+ inputs:
+ -
+ columns : ["id bigint","c1 float","c7 timestamp"]
+ indexs: ["index1:id:c7"]
+ rows:
+ - [1,12.0,1590738990000]
+ dataProvider:
+ - ["like","not like","ilike","not ilike"]
+ sql: select id,c1 d[0] '1%' as v1 from {0};
+ expect:
+ success: false
+ - id: 23
+ desc: "double类型"
+ inputs:
+ -
+ columns : ["id bigint","c1 double","c7 timestamp"]
+ indexs: ["index1:id:c7"]
+ rows:
+ - [1,12.0,1590738990000]
+ dataProvider:
+ - ["like","not like","ilike","not ilike"]
+ sql: select id,c1 d[0] '1%' as v1 from {0};
+ expect:
+ success: false
+ - id: 24
+ desc: "timestamp类型"
+ inputs:
+ -
+ columns : ["id bigint","c1 timestamp","c7 timestamp"]
+ indexs: ["index1:id:c7"]
+ rows:
+ - [1,12,1590738990000]
+ dataProvider:
+ - ["like","not like","ilike","not ilike"]
+ sql: select id,c1 d[0] '1%' as v1 from {0};
+ expect:
+ success: false
+ - id: 25
+ desc: "date类型"
+ inputs:
+ -
+ columns : ["id bigint","c1 date","c7 timestamp"]
+ indexs: ["index1:id:c7"]
+ rows:
+ - [1,"2012-05-01",1590738990000]
+ dataProvider:
+ - ["like","not like","ilike","not ilike"]
+ sql: select id,c1 d[0] '1%' as v1 from {0};
+ expect:
+ success: false
+ - id: 26
+ desc: "bool类型"
+ inputs:
+ -
+ columns : ["id bigint","c1 bool","c7 timestamp"]
+ indexs: ["index1:id:c7"]
+ rows:
+ - [1,true,1590738990000]
+ dataProvider:
+ - ["like","not like","ilike","not ilike"]
+ sql: select id,c1 d[0] '1%' as v1 from {0};
+ expect:
+ success: false
+ - id: 27
+ desc: "列不存在"
+ inputs:
+ -
+ columns : ["id bigint","c1 string","c7 timestamp"]
+ indexs: ["index1:id:c7"]
+ rows:
+ - [1,"aa",1590738990000]
+ dataProvider:
+ - ["like","not like","ilike","not ilike"]
+ sql: select id,c2 d[0] '1%' as v1 from {0};
+ expect:
+ success: false
+ - id: 28
+ desc: "escape为多个字符"
+ inputs:
+ -
+ columns : ["id bigint","c1 string","c7 timestamp"]
+ indexs: ["index1:id:c7"]
+ rows:
+ - [1,"",1590738990000]
+ - [2,"aa",1590738991000]
+ - [3,null,1590738992000]
+ dataProvider:
+ - ["like","not like","ilike","not ilike"]
+ sql: select id,c1 d[0] 'a%' escape '<>' as v1 from {0};
+ expect:
+ success: false
+ - id: 29
+ desc: "pattern以escape character结尾"
+ inputs:
+ -
+ columns : ["id bigint","c1 string","c7 timestamp"]
+ indexs: ["index1:id:c7"]
+ rows:
+ - [1,"ab#",1590738990000]
+ - [2,"aa",1590738991000]
+ dataProvider:
+ - ["like","not like","ilike","not ilike"]
+ sql: select id,c1 d[0] 'a%#' escape '#' as v1 from {0};
+ expect:
+ success: true
+ columns : ["id bigint","v1 bool"]
+ rows:
+ - [1,false]
+ - [2,false]
+ expectProvider:
+ 1:
+ rows:
+ - [1,true]
+ - [2,true]
+ 3:
+ rows:
+ - [1,true]
+ - [2,true]
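+ # Per the expectations above, a pattern ending in a dangling escape character matches
+ # no input: the LIKE/ILIKE forms are always false and the negated forms always true.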
diff --git a/cases/integration_test/expression/test_logic.yaml b/cases/integration_test/expression/test_logic.yaml
new file mode 100644
index 00000000000..d1ce41b7825
--- /dev/null
+++ b/cases/integration_test/expression/test_logic.yaml
@@ -0,0 +1,135 @@
+# Copyright 2021 4Paradigm
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+db: test_zw
+debugs: []
+version: 0.5.0
+sqlDialect: ["HybridSQL"]
+cases:
+ - id: 0
+ desc: "各种类型_逻辑运算_各种类型_正确"
+ inputs:
+ -
+ columns : ["id bigint","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"]
+ indexs: ["index1:id:c7"]
+ rows:
+ - [1,"aa",10,10,10,10.0,10.0,1590738989000,"2020-05-01",true]
+ - [2,"aa",10,10,10,10.0,10.0,1590738989000,"2020-05-01",true]
+ -
+ columns : ["id bigint","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"]
+ indexs: ["index1:id:c7"]
+ rows:
+ - [1,"bb",10,20,30,11.1,12.1,1590738989001,"2020-05-02",false]
+ - [2,"",0,0,0,0.0,0.0,0,null,true]
+ dataProvider:
+ - ["AND","OR","XOR"]
+ - ["{0}.c1","{0}.c2","{0}.c3","{0}.c4","{0}.c5","{0}.c6","{0}.c7","{0}.c8","{0}.c9"]
+ sql: select d[1] d[0] {1}.c1 as b1,d[1] d[0] {1}.c2 as b2,d[1] d[0] {1}.c3 as b3,d[1] d[0] {1}.c4 as b4,d[1] d[0] {1}.c5 as b5,d[1] d[0] {1}.c6 as b6,d[1] d[0] {1}.c7 as b7,d[1] d[0] {1}.c8 as b8,d[1] d[0] {1}.c9 as b9 from {0} last join {1} ORDER BY {1}.c7 on {0}.id={1}.id;
+ expect:
+ columns: ["b1 bool","b2 bool","b3 bool","b4 bool","b5 bool","b6 bool","b7 bool","b8 bool","b9 bool"]
+ expectProvider:
+ 0:
+ rows:
+ - [true,true,true,true,true,true,true,true,false]
+ - [false,false,false,false,false,false,false,null,true]
+ 1:
+ rows:
+ - [true,true,true,true,true,true,true,true,true]
+ - [true,true,true,true,true,true,true,true,true]
+ 2:
+ rows:
+ - [false,false,false,false,false,false,false,false,true]
+ - [true,true,true,true,true,true,true,null,false]
+ - id: 1
+ desc: "各种类型_逻辑非_各种类型_正确"
+ inputs:
+ -
+ columns : ["id bigint","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"]
+ indexs: ["index1:id:c7"]
+ rows:
+ - [1,"bb",10,20,30,11.1,12.1,1590738989001,"2020-05-02",false]
+ - [2,"",0,0,0,0.0,0.0,0,null,true]
+ dataProvider:
+ - ["NOT","!"]
+ sql: select d[0] {0}.c1 as b1,d[0] {0}.c2 as b2,d[0] {0}.c3 as b3,d[0] {0}.c4 as b4,d[0] {0}.c5 as b5,d[0] {0}.c6 as b6,d[0] {0}.c7 as b7,d[0] {0}.c8 as b8,d[0] {0}.c9 as b9 from {0};
+ expect:
+ columns: ["b1 bool","b2 bool","b3 bool","b4 bool","b5 bool","b6 bool","b7 bool","b8 bool","b9 bool"]
+ rows:
+ - [false,false,false,false,false,false,false,false,true]
+ - [true,true,true,true,true,true,true,null,false]
+ - id: 2
+ desc: three-valued bool logic
+ inputs:
+ - columns: ["id int64", "tt int64", "c1 bool", "c2 bool"]
+ indexs: ["index1:id:tt"]
+ rows:
+ - [1, 1, true, true]
+ - [2, 2, true, false]
+ - [3, 3, true, NULL]
+ - [4, 4, false, true]
+ - [5, 5, false, false]
+ - [6, 6, false, NULL]
+ - [7, 7, NULL, true]
+ - [8, 8, NULL, false]
+ - [9, 9, NULL, NULL]
+ sql: select id, c1, c2, c1 and c2 as c_and, c1 or c2 as c_or, c1 xor c2 as c_xor, not c1 as c_not from {0};
+ expect:
+ order: id
+ columns: ["id int64", "c1 bool", "c2 bool", "c_and bool", "c_or bool", "c_xor bool", "c_not bool"]
+ rows:
+ - [1, true, true, true, true, false, false]
+ - [2, true, false, false, true, true, false]
+ - [3, true, NULL, NULL, true, NULL, false]
+ - [4, false, true, false, true, true, true]
+ - [5, false, false, false, false, false, true]
+ - [6, false, NULL, false, NULL, NULL, true]
+ - [7, NULL, true, NULL, true, NULL, NULL]
+ - [8, NULL, false, false, NULL, NULL, NULL]
+ - [9, NULL, NULL, NULL, NULL, NULL, NULL]
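+ # The expected rows follow Kleene three-valued logic: false AND NULL = false and
+ # true OR NULL = true, while every other combination involving NULL (including XOR
+ # and NOT) evaluates to NULL.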
+ - id: 3
+ desc: logical expression on non-boolean operands
+ inputs:
+ - columns: ["c1 string","c2 int","c3 bigint","c4 timestamp","c5 date"]
+ indexs: ["index1:c1:c4"]
+ rows:
+ - ["aa",2,3,1590738989000,"2020-05-01"]
+ sql: select c2=2 and (c2-1) as f1 from {0};
+ expect:
+ columns: ["f1 bool"]
+ rows:
+ - [true]
+ - id: 4
+ desc: logical NOT (!) on a non-boolean operand
+ inputs:
+ - columns: ["c1 string","c2 int","c3 bigint","c4 timestamp","c5 date"]
+ indexs: ["index1:c1:c4"]
+ rows:
+ - ["aa",2,3,1590738989000,"2020-05-01"]
+ sql: select !c2 as not_c2 from {0};
+ expect:
+ columns: ["not_c2 bool"]
+ rows:
+ - [false]
+ - id: 5
+ desc: logical expression on non-boolean operands - constants
+ inputs:
+ - columns: ["c1 string","c2 int","c3 bigint","c4 timestamp","c5 date"]
+ indexs: ["index1:c1:c4"]
+ rows:
+ - ["aa",2,3,1590738989000,"2020-05-01"]
+ sql: select c2==2 and false as flag1,!true as flag2 from {0};
+ expect:
+ columns: ["flag1 bool", "flag2 bool"]
+ rows:
+ - [false,false]
diff --git a/cases/integration_test/expression/test_predicate.yaml b/cases/integration_test/expression/test_predicate.yaml
new file mode 100644
index 00000000000..db183a878e7
--- /dev/null
+++ b/cases/integration_test/expression/test_predicate.yaml
@@ -0,0 +1,778 @@
+# Copyright 2021 4Paradigm
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+db: test_zw
+debugs: []
+version: 0.5.0
+sqlDialect: ["HybridSQL"]
+cases:
+ - id: 0
+ desc: "string_比较运算_各种类型"
+ inputs:
+ -
+ columns : ["id bigint","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"]
+ indexs: ["index1:id:c7"]
+ rows:
+ - [1,"10",1,2,3,1.1,2.1,1590738989000,"2020-05-01",true]
+ -
+ columns : ["id bigint","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"]
+ indexs: ["index1:id:c7"]
+ rows:
+ - [1,"bb",10,20,30,11.1,12.1,1590738989001,"2020-05-02",false]
+ dataProvider:
+ - [">",">=","<","<=","<>","!=","=","=="]
+ sql: select {0}.c1 d[0] {1}.c1 as b1,{0}.c1 d[0] {1}.c2 as b2,{0}.c1 d[0] {1}.c3 as b3,{0}.c1 d[0] {1}.c4 as b4,{0}.c1 d[0] {1}.c5 as b5,{0}.c1 d[0] {1}.c6 as b6,{0}.c1 d[0] {1}.c7 as b7,{0}.c1 d[0] {1}.c8 as b8,{0}.c1 d[0] {1}.c9 as b9 from {0} last join {1} ORDER BY {1}.c7 on {0}.id={1}.id;
+ expect:
+ columns: ["b1 bool","b2 bool","b3 bool","b4 bool","b5 bool","b6 bool", "b7 bool", "b8 bool", "b9 bool"]
+ expectProvider:
+ 0:
+ rows:
+ - [false,false,false,false,false,false,false,false,false]
+ 1:
+ rows:
+ - [false,true,false,false,false,false,false,false,false]
+ 2:
+ rows:
+ - [true,false,true,true,true,true,true,true,true]
+ 3:
+ rows:
+ - [true,true,true,true,true,true,true,true,true]
+ 4:
+ rows:
+ - [true,false,true,true,true,true,true,true,true]
+ 5:
+ rows:
+ - [true,false,true,true,true,true,true,true,true]
+ 6:
+ rows:
+ - [false,true,false,false,false,false,false,false,false]
+ 7:
+ rows:
+ - [false,true,false,false,false,false,false,false,false]
+ - id: 1
+ desc: "整型_比较运算_各种类型_正确"
+ inputs:
+ -
+ columns : ["id bigint","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"]
+ indexs: ["index1:id:c7"]
+ rows:
+ - [1,"aa",10,10,10,10.0,10.0,1590738989000,"2020-05-01",false]
+ -
+ columns : ["id bigint","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"]
+ indexs: ["index1:id:c7"]
+ rows:
+ - [1,"bb",10,20,30,11.1,12.1,1590738989001,"2020-05-02",true]
+ dataProvider:
+ - [">",">=","<","<=","<>","!=","=","=="]
+ - ["{0}.c2","{0}.c3","{0}.c4","{0}.c5","{0}.c6"]
+ sql: select d[1]d[0]{1}.c1 as b1,d[1]d[0]{1}.c2 as b2,d[1]d[0]{1}.c3 as b3,d[1]d[0]{1}.c4 as b4,d[1]d[0]{1}.c5 as b5,d[1]d[0]{1}.c6 as b6,d[1]d[0]{1}.c9 as b9 from {0} last join {1} ORDER BY {1}.c7 on {0}.id={1}.id;
+ expect:
+ columns: ["b1 bool","b2 bool","b3 bool","b4 bool","b5 bool","b6 bool","b9 bool"]
+ expectProvider:
+ 0:
+ rows:
+ - [false,false,false,false,false,false,true]
+ 1:
+ rows:
+ - [false,true,false,false,false,false,true]
+ 2:
+ rows:
+ - [true,false,true,true,true,true,false]
+ 3:
+ rows:
+ - [true,true,true,true,true,true,false]
+ 4:
+ rows:
+ - [true,false,true,true,true,true,true]
+ 5:
+ rows:
+ - [true,false,true,true,true,true,true]
+ 6:
+ rows:
+ - [false,true,false,false,false,false,false]
+ 7:
+ rows:
+ - [false,true,false,false,false,false,false]
+ - id: 2
+ desc: "整型_比较运算_各种类型_错误"
+ level: 5
+ inputs:
+ -
+ columns : ["id bigint","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"]
+ indexs: ["index1:id:c7"]
+ rows:
+ - [1,"aa",10,10,10,10.0,10.0,1590738989000,"2020-05-01",false]
+ -
+ columns : ["id bigint","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"]
+ indexs: ["index1:id:c7"]
+ rows:
+ - [1,"bb",10,20,30,11.1,12.1,1590738989001,"2020-05-02",true]
+ dataProvider:
+ - [">",">=","<","<=","<>","!=","=","=="]
+ - ["{0}.c2","{0}.c3","{0}.c4","{0}.c5","{0}.c6","{0}.c9"]
+ - ["{1}.c7","{1}.c8"]
+ sql: select d[1]d[0]d[2] as b1 from {0} last join {1} ORDER BY {1}.c7 on {0}.id={1}.id;
+ expect:
+ success: false
+ - id: 3
+ desc: "时间类型_比较运算_各种类型_正确"
+ inputs:
+ -
+ columns : ["id bigint","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"]
+ indexs: ["index1:id:c7"]
+ rows:
+ - [1,"10",1,2,3,1.1,2.1,1590738989000,"2020-05-01",true]
+ - [2,"10",1,2,3,1.1,2.1,1590738989001,"2020-05-02",true]
+ -
+ columns : ["id bigint","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"]
+ indexs: ["index1:id:c7"]
+ rows:
+ - [1,"2020-05-29 15:56:29",10,20,30,11.1,12.1,1590738989001,"2020-05-02",false]
+ - [2,"2020-05-02",10,20,30,11.1,12.1,1590738989001,"2020-05-02",false]
+ dataProvider:
+ - [">",">=","<","<=","<>","!=","=","=="]
+ sql: select {0}.c8 d[0] {1}.c1 as b1,{0}.c8 d[0] {1}.c8 as b2,{0}.c7 d[0] {1}.c1 as b3,{0}.c7 d[0] {1}.c7 as b4 from {0} last join {1} ORDER BY {1}.c7 on {0}.id={1}.id;
+ expect:
+ columns: ["b1 bool","b2 bool","b3 bool","b4 bool"]
+ expectProvider:
+ 0:
+ rows:
+ - [false,false,false,false]
+ - [false,false,true,false]
+ 1:
+ rows:
+ - [false,false,true,false]
+ - [true,true,true,true]
+ 2:
+ rows:
+ - [true,true,false,true]
+ - [false,false,false,false]
+ 3:
+ rows:
+ - [true,true,true,true]
+ - [true,true,false,true]
+ 4:
+ rows:
+ - [true,true,false,true]
+ - [false,false,true,false]
+ 5:
+ rows:
+ - [true,true,false,true]
+ - [false,false,true,false]
+ 6:
+ rows:
+ - [false,false,true,false]
+ - [true,true,false,true]
+ 7:
+ rows:
+ - [false,false,true,false]
+ - [true,true,false,true]
+ - id: 4
+ desc: "timestamp_比较运算_各种类型_错误"
+ level: 5
+ inputs:
+ -
+ columns : ["id bigint","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"]
+ indexs: ["index1:id:c7"]
+ rows:
+ - [1,"aa",10,10,10,10.0,10.0,1590738989000,"2020-05-01",false]
+ -
+ columns : ["id bigint","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"]
+ indexs: ["index1:id:c7"]
+ rows:
+ - [1,"bb",10,20,30,11.1,12.1,1590738989001,"2020-05-02",true]
+ dataProvider:
+ - [">",">=","<","<=","<>","!=","=","=="]
+ - ["{1}.c7"]
+ - ["{0}.c2","{0}.c3","{0}.c4","{0}.c5","{0}.c6","{0}.c9","{1}.c8"]
+ sql: select d[1]d[0]d[2] as b1 from {0} last join {1} ORDER BY {1}.c7 on {0}.id={1}.id;
+ expect:
+ success: false
+ - id: 5
+ desc: "date_比较运算_各种类型_错误"
+ level: 5
+ inputs:
+ -
+ columns : ["id bigint","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"]
+ indexs: ["index1:id:c7"]
+ rows:
+ - [1,"aa",10,10,10,10.0,10.0,1590738989000,"2020-05-01",false]
+ -
+ columns : ["id bigint","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"]
+ indexs: ["index1:id:c7"]
+ rows:
+ - [1,"bb",10,20,30,11.1,12.1,1590738989001,"2020-05-02",true]
+ dataProvider:
+ - [">",">=","<","<=","<>","!=","=","=="]
+ - ["{1}.c8"]
+ - ["{0}.c2","{0}.c3","{0}.c4","{0}.c5","{0}.c6","{0}.c9","{1}.c7"]
+ sql: select d[1]d[0]d[2] as b1 from {0} last join {1} ORDER BY {1}.c7 on {0}.id={1}.id;
+ expect:
+ success: false
+ - id: 6
+ desc: "bool_比较运算_各种类型"
+ inputs:
+ -
+ columns : ["id bigint","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"]
+ indexs: ["index1:id:c7"]
+ rows:
+ - [1,"10",1,2,3,1.1,2.1,1590738989000,"2020-05-01",true]
+ -
+ columns : ["id bigint","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool","c10 bool"]
+ indexs: ["index1:id:c7"]
+ rows:
+ - [1,"1",1,20,30,11.1,12.1,1590738989001,"2020-05-02",false,true]
+ dataProvider:
+ - [">",">=","<","<=","<>","!=","=","=="]
+ sql: select {0}.c9 d[0] {1}.c1 as b1,{0}.c9 d[0] {1}.c2 as b2,{0}.c9 d[0] {1}.c3 as b3,{0}.c9 d[0] {1}.c4 as b4,{0}.c9 d[0] {1}.c5 as b5,{0}.c9 d[0] {1}.c6 as b6,{0}.c9 d[0] {1}.c9 as b9,{0}.c9 d[0] {1}.c10 as b10 from {0} last join {1} ORDER BY {1}.c7 on {0}.id={1}.id;
+ expect:
+ columns: ["b1 bool","b2 bool","b3 bool","b4 bool","b5 bool","b6 bool", "b9 bool","b10 bool"]
+ expectProvider:
+ 0:
+ rows:
+ - [true,false,false,false,false,false,true,false]
+ 1:
+ rows:
+ - [true,true,false,false,false,false,true,true]
+ 2:
+ rows:
+ - [false,false,true,true,true,true,false,false]
+ 3:
+ rows:
+ - [false,true,true,true,true,true,false,true]
+ 4:
+ rows:
+ - [true,false,true,true,true,true,true,false]
+ 5:
+ rows:
+ - [true,false,true,true,true,true,true,false]
+ 6:
+ rows:
+ - [false,true,false,false,false,false,false,true]
+ 7:
+ rows:
+ - [false,true,false,false,false,false,false,true]
+ - id: 7
+ desc: "IS_NULL_各种类型"
+ tags: ["TODO","目前不支持"]
+ inputs:
+ -
+ columns : ["id bigint","ts1 bigint","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"]
+ indexs: ["index1:id:ts1"]
+ rows:
+ - [1,1,"10",1,2,3,1.1,2.1,1590738989000,"2020-05-01",true]
+ - [2,2,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL]
+ dataProvider:
+ - ["c1","c2","c3","c4","c5","c6","c7","c8","c9"]
+ sql: select * from {0} where d[0] is null;
+ expect:
+ columns: ["id bigint","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"]
+ rows:
+ - [1,"10",1,2,3,1.1,2.1,1590738989000,"2020-05-01",true]
+ - id: 8
+ desc: "ISNULL()"
+ inputs:
+ -
+ columns : ["id bigint","ts1 bigint","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"]
+ indexs: ["index1:id:ts1"]
+ rows:
+ - [1,1,"10",1,2,3,1.1,2.1,1590738989000,"2020-05-01",true]
+ - [2,2,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL]
+ sql: select isnull(c1) as b1,isnull(c2) as b2,isnull(c3) as b3,isnull(c4) as b4,isnull(c5) as b5,isnull(c6) as b6,isnull(c7) as b7,isnull(c8) as b8,isnull(c9) as b9 from {0};
+ expect:
+ order: id
+ columns: ["b1 bool","b2 bool","b3 bool","b4 bool","b5 bool","b6 bool", "b7 bool", "b8 bool", "b9 bool"]
+ rows:
+ - [false,false,false,false,false,false,false,false,false]
+ - [true,true,true,true,true,true,true,true,true]
+ - id: 9
+ desc: comparing directly with NULL returns NULL
+ inputs:
+ - columns: ["c1 int16","c2 int32","c3 bigint",
+ "c4 float","c5 double", "c6 timestamp", "c7 string",
+ "nullcol int32", "nulltime timestamp", "nullstr string"]
+ indexs: ["index1:c3:c6"]
+ rows:
+ - [1, 911, 1024, 3.14, 0.99, 1590738989000, "str", NULL, NULL, NULL]
+ sql: select
+ c1 > nullcol as r1, c1 >= nullcol as r2, c1 < nullcol as r3, c1 <= nullcol as r4, c1 = nullcol as r5, c1 != nullcol as r6,
+ c2 > nullcol as r7, c2 >= nullcol as r8, c2 < nullcol as r9, c2 <= nullcol as r10, c2 = nullcol as r11, c2 != nullcol as r12,
+ c3 > nullcol as r13, c3 >= nullcol as r14, c3 < nullcol as r15, c3 <= nullcol as r16, c3 = nullcol as r17, c3 != nullcol as r18,
+ c4 > nullcol as r19, c4 >= nullcol as r20, c4 < nullcol as r21, c4 <= nullcol as r22, c4 = nullcol as r23, c4 != nullcol as r24,
+ c5 > nullcol as r25, c5 >= nullcol as r26, c5 < nullcol as r27, c5 <= nullcol as r28, c5 = nullcol as r29, c5 != nullcol as r30,
+ c6 > nulltime as r31, c6 >= nulltime as r32, c6 < nulltime as r33, c6 <= nulltime as r34, c6 = nulltime as r35, c6 != nulltime as r36,
+ c7 > nullstr as r37, c7 >= nullstr as r38, c7 < nullstr as r39, c7 <= nullstr as r40, c7 = nullstr as r41, c7 != nullstr as r42,
+ nullstr > nullstr as r43, nullstr >= nullstr as r44, nullstr < nullstr as r45,
+ nullstr <= nullstr as r46, nullstr = nullstr as r47, nullstr != nullstr as r48
+ from {0};
+ expect:
+ columns: ["r1 bool", "r2 bool", "r3 bool", "r4 bool", "r5 bool", "r6 bool", "r7 bool", "r8 bool",
+ "r9 bool", "r10 bool", "r11 bool", "r12 bool", "r13 bool", "r14 bool", "r15 bool", "r16 bool",
+ "r17 bool", "r18 bool", "r19 bool", "r20 bool", "r21 bool", "r22 bool", "r23 bool", "r24 bool",
+ "r25 bool", "r26 bool", "r27 bool", "r28 bool", "r29 bool", "r30 bool", "r31 bool", "r32 bool",
+ "r33 bool", "r34 bool", "r35 bool", "r36 bool", "r37 bool", "r38 bool", "r39 bool", "r40 bool",
+ "r41 bool", "r42 bool", "r43 bool", "r44 bool", "r45 bool", "r46 bool", "r47 bool", "r48 bool"]
+ rows:
+ - [NULL, NULL, NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL, NULL, NULL]
+ - id: 10
+ desc: comparing directly with const NULL returns NULL
+ inputs:
+ - columns: ["c1 int16","c2 int32","c3 bigint",
+ "c4 float","c5 double", "c6 timestamp", "c7 string",
+ "nullcol int32", "nulltime timestamp", "nullstr string"]
+ indexs: ["index1:c3:c6"]
+ rows:
+ - [1, 911, 1024, 3.14, 0.99, 1590738989000, "str", NULL, NULL, NULL]
+ sql: select
+ c1 > NULL as r1, c1 >= NULL as r2, c1 < NULL as r3, c1 <= NULL as r4, c1 = NULL as r5, c1 != NULL as r6,
+ c2 > NULL as r7, c2 >= NULL as r8, c2 < NULL as r9, c2 <= NULL as r10, c2 = NULL as r11, c2 != NULL as r12,
+ c3 > NULL as r13, c3 >= NULL as r14, c3 < NULL as r15, c3 <= NULL as r16, c3 = NULL as r17, c3 != NULL as r18,
+ c4 > NULL as r19, c4 >= NULL as r20, c4 < NULL as r21, c4 <= NULL as r22, c4 = NULL as r23, c4 != NULL as r24,
+ c5 > NULL as r25, c5 >= NULL as r26, c5 < NULL as r27, c5 <= NULL as r28, c5 = NULL as r29, c5 != NULL as r30,
+ c6 > NULL as r31, c6 >= NULL as r32, c6 < NULL as r33, c6 <= NULL as r34, c6 = NULL as r35, c6 != NULL as r36,
+ c7 > NULL as r37, c7 >= NULL as r38, c7 < NULL as r39, c7 <= NULL as r40, c7 = NULL as r41, c7 != NULL as r42,
+ NULL > NULL as r43, NULL >= NULL as r44, NULL < NULL as r45,
+ NULL <= NULL as r46, NULL = NULL as r47, NULL != NULL as r48
+ from {0};
+ expect:
+ columns: ["r1 bool", "r2 bool", "r3 bool", "r4 bool", "r5 bool", "r6 bool", "r7 bool", "r8 bool",
+ "r9 bool", "r10 bool", "r11 bool", "r12 bool", "r13 bool", "r14 bool", "r15 bool", "r16 bool",
+ "r17 bool", "r18 bool", "r19 bool", "r20 bool", "r21 bool", "r22 bool", "r23 bool", "r24 bool",
+ "r25 bool", "r26 bool", "r27 bool", "r28 bool", "r29 bool", "r30 bool", "r31 bool", "r32 bool",
+ "r33 bool", "r34 bool", "r35 bool", "r36 bool", "r37 bool", "r38 bool", "r39 bool", "r40 bool",
+ "r41 bool", "r42 bool", "r43 bool", "r44 bool", "r45 bool", "r46 bool", "r47 bool", "r48 bool"]
+ rows:
+ - [NULL, NULL, NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL, NULL, NULL]
+
+ - id: between_predicate_1
+ desc: between predicate, numeric between
+ inputs:
+ - columns: [ "id bigint", "name string", "code string", "ctime bigint", "cdate date" ]
+ indexs: [ "index1:id:ctime" ]
+ rows:
+ - [1, Lucy, A, 1609459201000, 2021-1-1]
+ - [2, Zoey, B, 1609545841000, 2021-1-2]
+ - columns: [ "id bigint", "std_ts bigint", "score int" ]
+ indexs: ["index1:id:std_ts"]
+ rows:
+ - [1, 1609459201000, 10]
+ - [2, 1609459202000, 100]
+ - [3, 1609459203000, 20]
+ - [4, 1609459204000, 30]
+ - [5, 1609459205000, 50]
+ sql: |
+ SELECT {0}.id, {0}.name, {1}.score FROM {0} LAST JOIN {1} ORDER BY {1}.std_ts ON {0}.id = {1}.id AND {0}.id BETWEEN 1 AND 4;
+ expect:
+ columns: ["id bigint", "name string", "score int"]
+ rows:
+ - [1, Lucy, 10]
+ - [2, Zoey, 100]
+ - id: between_predicate_2
+ desc: between predicate, string between
+ inputs:
+ - columns: [ "id bigint", "name string", "code string", "ctime timestamp", "cdate date" ]
+ indexs: [ "index1:id:ctime" ]
+ rows:
+ - [1, Lucy, ABC, 1609459201000, 2021-1-1]
+ - [2, Zoey, BBC, 1609545841000, 2021-1-2]
+ - columns: [ "id bigint", "std_ts bigint", "score int" ]
+ indexs: ["index1:id:std_ts"]
+ rows:
+ - [1, 1609459201000, 10]
+ - [2, 1609459202000, 100]
+ - [3, 1609459203000, 20]
+ - [4, 1609459204000, 30]
+ - [5, 1609459205000, 50]
+ sql: |
+ SELECT {0}.id, {0}.name, {0}.code, {1}.score FROM {0} LAST JOIN {1} ORDER BY {1}.std_ts ON {0}.id = {1}.id AND {0}.code NOT BETWEEN 'BBB' AND 'CCC';
+ expect:
+ columns: ["id bigint", "name string", "code string", "score int"]
+ rows:
+ - [1, Lucy, ABC, 10]
+ - [2, Zoey, BBC, NULL]
+ - id: between_predicate_3
+ desc: between predicate, timestamp between
+ inputs:
+ - columns: [ "id bigint", "name string", "code string", "ctime timestamp", "cdate date" ]
+ indexs: [ "index1:id:ctime" ]
+ rows:
+ - [1, Lucy, A, 1609459201000, 2021-1-1]
+ - [2, Zoey, B, 1633265330000, 2021-10-3]
+ - columns: [ "id bigint", "std_ts bigint", "score int" ]
+ indexs: ["index1:id:std_ts"]
+ rows:
+ - [1, 1609459201000, 10]
+ - [2, 1609459202000, 100]
+ - [3, 1609459203000, 20]
+ - [4, 1609459204000, 30]
+ - [5, 1609459205000, 50]
+ sql: |
+ SELECT {0}.id, {0}.name, {1}.score FROM {0} LAST JOIN {1} ORDER BY {1}.std_ts
+ ON {0}.id = {1}.id AND {0}.ctime BETWEEN timestamp("2021-01-01") AND timestamp("2021-01-30");
+ expect:
+ columns: ["id bigint", "name string", "score int"]
+ rows:
+ - [1, Lucy, 10]
+ - [2, Zoey, NULL]
+ - id: between_predicate_4
+ desc: between predicate with aggregation function
+ sql: |
+ SELECT id, col1, std_ts,
+ sum(id) OVER w1 BETWEEN 2 AND 6 as w1_id
+ FROM {0}
+ WINDOW w1 AS (PARTITION BY col1 ORDER BY std_ts ROWS BETWEEN 1 PRECEDING AND CURRENT ROW);
+ inputs:
+ - columns: ["id bigint", "col1 int32", "std_ts timestamp"]
+ indexs: ["index1:id:std_ts", "index2:col1:std_ts"]
+ rows:
+ - [1, 1, 1590115420000]
+ - [3, 1, 1590115430000]
+ - [5, 1, 1590115440000]
+ - [7, 1, 1590115450000]
+ - [9, 1, 1590115460000]
+ expect:
+ columns: ["id bigint", "col1 int32", "std_ts timestamp", "w1_id bool"]
+ rows:
+ - [1, 1, 1590115420000, false]
+ - [3, 1, 1590115430000, true]
+ - [5, 1, 1590115440000, false]
+ - [7, 1, 1590115450000, false]
+ - [9, 1, 1590115460000, false]
+ - id: in_predicate_normal
+    desc: normal IN predicates
+ mode: hybridse-only
+ sql: |
+ SELECT {0}.id, {0}.name, {0}.code, {1}.score FROM {0} LAST JOIN {1} ORDER BY {1}.std_ts
+ ON {0}.id = {1}.id AND {0}.code d[0] ('A', 'B');
+ inputs:
+ - columns: [ "id bigint", "name string", "code string", "ctime timestamp", "cdate date" ]
+ indexs: [ "index1:id:ctime" ]
+ rows:
+ - [1, Lucy, A, 1609459201000, 2021-1-1]
+ - [2, Zoey, B, 1633265330000, 2021-10-3]
+ - columns: [ "id bigint", "std_ts bigint", "score int" ]
+ indexs: ["index1:id:std_ts"]
+ rows:
+ - [1, 1609459201000, 10]
+ - [2, 1609459202000, 100]
+ - [3, 1609459203000, 20]
+ - [4, 1609459204000, 30]
+ - [5, 1609459205000, 50]
+ dataProvider:
+ - ["in", "not in"]
+ expect:
+ columns: ["id:bigint", "name:string", "code:string", "score:int"]
+ expectProvider:
+ 0:
+ rows:
+ - [ 1, Lucy, A, 10 ]
+ - [ 2, Zoey, B, 100 ]
+ 1:
+ rows:
+ - [ 1, Lucy, A, NULL ]
+ - [ 2, Zoey, B, NULL ]
+ - id: in_predicate_type_conversion
+    desc: type conversion occurs between lhs and in_list
+ mode: hybridse-only
+ sql: |
+ SELECT {0}.id, {0}.name, {0}.code, {1}.score FROM {0} LAST JOIN {1} ORDER BY {1}.std_ts
+ ON {0}.id = {1}.id AND {0}.id d[0] ('1', 3.0);
+ inputs:
+ - columns: [ "id bigint", "name string", "code string", "ctime timestamp", "cdate date" ]
+ indexs: [ "index1:id:ctime" ]
+ rows:
+ - [1, Lucy, A, 1609459201000, 2021-1-1]
+ - [2, Zoey, B, 1633265330000, 2021-10-3]
+ - columns: [ "id bigint", "std_ts bigint", "score int" ]
+ indexs: ["index1:id:std_ts"]
+ rows:
+ - [1, 1609459201000, 10]
+ - [2, 1609459202000, 100]
+ - [3, 1609459203000, 20]
+ - [4, 1609459204000, 30]
+ - [5, 1609459205000, 50]
+ dataProvider:
+ - ["in", "not in"]
+ expect:
+ columns: ["id:bigint", "name:string", "code:string", "score:int"]
+ expectProvider:
+ 0:
+ rows:
+ - [1, Lucy, A, 10]
+ - [2, Zoey, B, NULL]
+ 1:
+ rows:
+ - [1, Lucy, A, NULL]
+ - [2, Zoey, B, 100]
+ - id: in_predicate_subexpr
+    desc: sub-expressions in the IN list
+ mode: hybridse-only
+ sql: |
+ SELECT {0}.id, {0}.name, {0}.code, {1}.score FROM {0} LAST JOIN {1} ORDER BY {1}.std_ts
+ ON {0}.id = {1}.id AND {0}.id d[0] ( {1}.score / 10, {1}.score );
+ inputs:
+ - columns: [ "id bigint", "name string", "code string", "ctime timestamp", "cdate date" ]
+ indexs: [ "index1:id:ctime" ]
+ rows:
+ - [1, Lucy, A, 1609459201000, 2021-1-1]
+ - [2, Zoey, B, 1633265330000, 2021-10-3]
+ - columns: [ "id bigint", "std_ts bigint", "score int" ]
+ indexs: ["index1:id:std_ts"]
+ rows:
+ - [1, 1609459201000, 10]
+ - [2, 1609459202000, 100]
+ - [3, 1609459203000, 20]
+ - [4, 1609459204000, 30]
+ - [5, 1609459205000, 50]
+ dataProvider:
+ - ["in", "not in"]
+ expect:
+ columns: ["id:bigint", "name:string", "code:string", "score:int"]
+ expectProvider:
+ 0:
+ rows:
+ - [1, Lucy, A, 10]
+ - [2, Zoey, B, NULL]
+ 1:
+ rows:
+ - [1, Lucy, A, NULL]
+ - [2, Zoey, B, 100]
+ - id: in_predicate_with_window
+    desc: test expression refers to a window aggregate
+ mode: hybridse-only
+ sql: |
+ SELECT id, col1, std_ts,
+ sum(id) OVER w1 d[0] ( 4, 8, 12 ) as w1_id
+ FROM {0}
+ WINDOW w1 AS (PARTITION BY col1 ORDER BY std_ts ROWS BETWEEN 1 PRECEDING AND CURRENT ROW);
+ dataProvider:
+ - ["in", "not in"]
+ inputs:
+ - columns: ["id bigint", "col1 int32", "std_ts timestamp"]
+ indexs: ["index1:id:std_ts", "index2:col1:std_ts"]
+ rows:
+ - [1, 1, 1590115420000]
+ - [3, 1, 1590115430000]
+ - [5, 1, 1590115440000]
+ - [7, 1, 1590115450000]
+ - [9, 1, 1590115460000]
+ expect:
+ columns: ["id bigint", "col1 int32", "std_ts timestamp", "w1_id bool"]
+ expectProvider:
+ 0:
+ rows:
+ - [1, 1, 1590115420000, false]
+ - [3, 1, 1590115430000, true]
+ - [5, 1, 1590115440000, true]
+ - [7, 1, 1590115450000, true]
+ - [9, 1, 1590115460000, false]
+ 1:
+ rows:
+ - [1, 1, 1590115420000, true]
+ - [3, 1, 1590115430000, false]
+ - [5, 1, 1590115440000, false]
+ - [7, 1, 1590115450000, false]
+ - [9, 1, 1590115460000, true]
+# - id: like_predicate_1
+# desc: like predicate without escape
+# inputs:
+# - columns: ["id int", "std_ts timestamp"]
+# indexs: ["index1:id:std_ts"]
+# rows:
+# - [1, 1590115420000 ]
+# - [2, 1590115430000 ]
+# - [3, 1590115440000 ]
+# - [4, 1590115450000 ]
+# - [5, 1590115460000 ]
+# - [6, 1590115470000 ]
+# - columns: ["id int", "ts timestamp", "col2 string"]
+# indexs: ["idx:id:ts"]
+# rows:
+# - [1, 1590115420000, John]
+# - [2, 1590115430000, Mary]
+# - [3, 1590115440000, mike]
+# - [4, 1590115450000, Dan]
+# - [5, 1590115460000, Evan_W]
+# - [6, 1590115470000, M]
+# dataProvider:
+# - ["LIKE", "NOT LIKE", "ILIKE", "NOT ILIKE"] # LIKE / NOT LIKE
+# - ["m%", "M_ry" ] # match pattern
+# sql: |
+# select {0}.id, col2 from {0} last join {1} ON {0}.id = {1}.id AND col2 d[0] 'd[1]';
+# expect:
+# columns: ["id int", "col2 string"]
+# order: id
+# expectProvider:
+# 0:
+# 0:
+# rows:
+# - [1, null]
+# - [2, null]
+# - [3, mike]
+# - [4, null]
+# - [5, null]
+# - [6, null]
+# 1:
+# rows:
+# - [1, null]
+# - [2, Mary]
+# - [3, null]
+# - [4, null]
+# - [5, null]
+# - [6, null]
+# 1:
+# 0:
+# rows:
+# - [1, John]
+# - [2, Mary]
+# - [3, null]
+# - [4, Dan]
+# - [5, Evan_W]
+# - [6, M]
+# 1:
+# rows:
+# - [1, John]
+# - [2, null]
+# - [3, mike]
+# - [4, Dan]
+# - [5, Evan_W]
+# - [6, M]
+# 2:
+# 0:
+# rows:
+# - [1, null]
+# - [2, Mary]
+# - [3, mike]
+# - [4, null]
+# - [5, null]
+# - [6, M]
+# 1:
+# rows:
+# - [1, null]
+# - [2, Mary]
+# - [3, null]
+# - [4, null]
+# - [5, null]
+# - [6, null]
+# 3:
+# 0:
+# rows:
+# - [1, John]
+# - [2, null]
+# - [3, null]
+# - [4, Dan]
+# - [5, Evan_W]
+# - [6, null]
+# 1:
+# rows:
+# - [1, John]
+# - [2, null]
+# - [3, mike]
+# - [4, Dan]
+# - [5, Evan_W]
+# - [6, M]
+# - id: like_predicate_2
+# desc: like predicate with escape
+# inputs:
+# - columns: ["id int", "std_ts timestamp"]
+# indexs: ["index1:id:std_ts"]
+# rows:
+# - [1, 1590115420000 ]
+# - [2, 1590115430000 ]
+# - [3, 1590115440000 ]
+# - [4, 1590115450000 ]
+# - [5, 1590115460000 ]
+# - [6, 1590115470000 ]
+# - columns: ["id int", "ts timestamp", "col2 string"]
+# indexs: ["idx:id:ts"]
+# rows:
+# - [1, 1590115420000, a*_b]
+# - [2, 1590115430000, a*mb]
+# - [3, 1590115440000, "%a_%b"]
+# - [4, 1590115450000, "Ta_sub"]
+# - [5, 1590115460000, "lamrb"]
+# - [6, 1590115470000, "%a*_%b"]
+# dataProvider:
+# - ["LIKE", "NOT ILIKE"]
+# - ["%", "*", ""] # escape with % or disable
+# sql: |
+# select {0}.id, col2 from {0} last join {1} ON {0}.id = {1}.id AND col2 d[0] '%a*_%b' ESCAPE 'd[1]';
+# expect:
+# columns: ["id int", "col2 string"]
+# order: id
+# expectProvider:
+# 0:
+# 0:
+# rows:
+# - [1, a*_b]
+# - [2, a*mb]
+# - [3, null]
+# - [4, null]
+# - [5, null]
+# - [6, null]
+# 1:
+# rows:
+# - [1, null]
+# - [2, null]
+# - [3, "%a_%b"]
+# - [4, Ta_sub]
+# - [5, null]
+# - [6, null]
+# 2:
+# rows:
+# - [1, a*_b]
+# - [2, a*mb]
+# - [3, null]
+# - [4, null]
+# - [5, null]
+# - [6, "%a*_%b"]
+# 1:
+# 0:
+# rows:
+# - [1, null]
+# - [2, null]
+# - [3, "%a_%b"]
+# - [4, "Ta_sub"]
+# - [5, "lamrb"]
+# - [6, "%a*_%b"]
+# 1:
+# rows:
+# - [1, a*_b]
+# - [2, a*mb]
+# - [3, null]
+# - [4, null]
+# - [5, "lamrb"]
+# - [6, "%a*_%b"]
+# 2:
+# rows:
+# - [1, null]
+# - [2, null]
+# - [3, "%a_%b"]
+# - [4, "Ta_sub"]
+# - [5, "lamrb"]
+# - [6, null]
diff --git a/cases/integration_test/expression/test_type.yaml b/cases/integration_test/expression/test_type.yaml
new file mode 100644
index 00000000000..0c70cfa1a65
--- /dev/null
+++ b/cases/integration_test/expression/test_type.yaml
@@ -0,0 +1,691 @@
+# Copyright 2021 4Paradigm
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+db: test_zw
+debugs: []
+version: 0.5.0
+sqlDialect: ["HybridSQL"]
+cases:
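+  # Note: as used in the cases below, each d[N] placeholder in `sql` is expanded from
+  # the N-th dataProvider list (one case variant per entry), and the numeric keys under
+  # expectProvider give the expected result for the matching entry; fields written
+  # directly under expect are shared across all variants.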
+ - id: 0
+ mode: "offline-unsupport"
+ desc: "cast_各种类型_正确"
+ inputs:
+ -
+ columns : ["id bigint","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"]
+ indexs: ["index1:id:c7"]
+ rows:
+ - [1,"aa",30,30,30,30.0,30.0,1590738989000,"2020-05-01",false]
+ dataProvider:
+ - ["{0}.c2","{0}.c3","{0}.c4","{0}.c5","{0}.c6","{0}.c9"]
+ sql: select cast(d[0] as bool) as b1,cast(d[0] as smallint) as b2,cast(d[0] as int) as b3,cast(d[0] as bigint) as b4,cast(d[0] as float) as b5,cast(d[0] as double) as b6,cast(d[0] as timestamp) as b7,cast(d[0] as string) as b9 from {0};
+ expect:
+ columns: ["b1 bool","b2 smallint","b3 int","b4 bigint","b5 float","b6 double","b7 timestamp","b9 string"]
+ expectProvider:
+ 0:
+ rows:
+ - [true,30,30,30,30.0,30.0,30,"30"]
+ 1:
+ rows:
+ - [true,30,30,30,30.0,30.0,30,"30"]
+ 2:
+ rows:
+ - [true,30,30,30,30.0,30.0,30,"30"]
+ 3:
+ rows:
+ - [true,30,30,30,30.0,30.0,30,"30"]
+ 4:
+ rows:
+ - [true,30,30,30,30.0,30.0,30,"30"]
+ 5:
+ rows:
+ - [false,0,0,0,0.0,0.0,0,"false"]
+ - id: 1
+ desc: "cast_timestamp/string_正确"
+#    tags: ["TODO","passes locally but fails in CICD, root cause to be located"]
+ inputs:
+ -
+ columns : ["id bigint","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"]
+ indexs: ["index1:id:c7"]
+ rows:
+ - [1,"aa",30,30,30,30.0,30.0,1590738989000,"2020-05-01",false]
+ dataProvider:
+ - ["{0}.c1","{0}.c7"]
+ sql: select cast(d[0] as bool) as b1,cast(d[0] as smallint) as b2,cast(d[0] as int) as b3,cast(d[0] as bigint) as b4,cast(d[0] as float) as b5,cast(d[0] as double) as b6,cast(d[0] as timestamp) as b7,cast(d[0] as date) as b8,cast(d[0] as string) as b9 from {0};
+ expect:
+ columns: ["b1 bool","b2 smallint","b3 int","b4 bigint","b5 float","b6 double","b7 timestamp","b8 date","b9 string"]
+ expectProvider:
+ 0:
+ rows:
+ - [null,null,null,null,null,null,null,null,aa]
+ 1:
+ rows:
+ - [true,-20536,1601089480,1590738989000,1590738989000,1590738989000,1590738989000,"2020-05-29","2020-05-29 15:56:29"]
+ - id: 2
+ desc: "cast_string_正确"
+#    tags: ["TODO","passes locally but fails in CICD, root cause to be located"]
+ inputs:
+ -
+ columns : ["id bigint","c1 string","c2 string","c3 string","c4 string","c5 string","c6 string","c7 string","c8 string","c9 string","ts1 timestamp"]
+ indexs: ["index1:id:ts1"]
+ rows:
+ - [1,"aa","30","30","30","30.0","30.0","1590738989000","2020-05-01","false",1590738989000]
+ sql: select cast(c9 as bool) as b1,cast(c2 as smallint) as b2,cast(c3 as int) as b3,cast(c4 as bigint) as b4,cast(c5 as float) as b5,cast(c6 as double) as b6,cast(c7 as timestamp) as b7,cast(c8 as date) as b8,cast(c1 as string) as b9 from {0};
+ expect:
+ columns: ["b1 bool","b2 smallint","b3 int","b4 bigint","b5 float","b6 double","b7 timestamp","b8 date","b9 string"]
+ expectProvider:
+ 0:
+ rows:
+ - [false,30,30,30,30.0,30.0,1590738989000,"2020-05-01",aa]
+ - id: 3
+ desc: "cast_date_正确"
+#    tags: ["TODO","passes locally but fails in CICD, root cause to be located"]
+ inputs:
+ -
+ columns : ["id bigint","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"]
+ indexs: ["index1:id:c7"]
+ rows:
+ - [1,"aa",30,30,30,30.0,30.0,1590738989000,"2020-05-01",false]
+ dataProvider:
+ - ["{0}.c8"]
+ sql: select cast(d[0] as bool) as b1,cast(d[0] as timestamp) as b7,cast(d[0] as date) as b8,cast(d[0] as string) as b9 from {0};
+ expect:
+ columns: ["b1 bool","b7 timestamp","b8 date","b9 string"]
+ expectProvider:
+ 0:
+ rows:
+ - [null,1588262400000,"2020-05-01","2020-05-01"]
+ - id: 4
+ desc: "cast_其他类型_date_错误"
+ level: 5
+ inputs:
+ -
+ columns : ["id bigint","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"]
+ indexs: ["index1:id:c7"]
+ rows:
+ - [1,"aa",30,30,30,30.0,30.0,1590738989000,"2020-05-01",false]
+ dataProvider:
+ - ["{0}.c2","{0}.c3","{0}.c4","{0}.c5","{0}.c6"]
+ sql: select cast(d[0] as date) as b1 from {0};
+ expect:
+ success: false
+ - id: 5
+ desc: "cast_date_其他类型_错误"
+ level: 5
+# tags: ["TODO", "bug"]
+ inputs:
+ -
+ columns : ["id bigint","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"]
+ indexs: ["index1:id:c7"]
+ rows:
+ - [1,"aa",30,30,30,30.0,30.0,1590738989000,"2020-05-01",false]
+ dataProvider:
+ - ["smallint","int","bigint","float","double"]
+ sql: select cast(c8 as d[0]) as b1 from {0};
+ expectProvider:
+ 0:
+ columns: ["b1 smallint"]
+ rows:
+ - [NULL]
+ 1:
+ columns: ["b1 int"]
+          rows:
+            - [NULL]
+ 2:
+ columns: ["b1 bigint"]
+ rows:
+ - [NULL]
+ 3:
+ columns: ["b1 float"]
+ rows:
+ - [NULL]
+ 4:
+ columns: ["b1 double"]
+ rows:
+ - [NULL]
+ - id: 6
+    desc: standard SQL cast syntax - Cast(constant as type)
+ inputs:
+ - columns: ["c1 int", "c2 float", "c5 bigint"]
+ indexs: ["index1:c1:c5"]
+ rows:
+ - [1, 1.0, 1590115420000]
+ sql: |
+ select cast(1 as int) as f1, cast(2 as bigint) as f2, cast(1 as float) as f3,
+ cast(1 as double) as f4, cast(1 as bool) as f5, cast(1590115420000 as timestamp) as f6,
+ cast(1 as string) as f7 , cast("2020-05-20" as date) as f8 from {0};
+ expect:
+ columns: ["f1 int", "f2 bigint", "f3 float", "f4 double", "f5 bool", "f6 timestamp",
+ "f7 string", "f8 date"]
+ rows:
+ - [1, 2, 1.0, 1.0, true, 1590115420000, "1", "2020-05-20"]
+ - id: 7
+    desc: standard SQL cast syntax - Cast(expression as type)
+ inputs:
+ - columns: ["c1 int", "c2 float", "c5 bigint"]
+ indexs: ["index1:c1:c5"]
+ rows:
+ - [1, 1.0, 1590115420000]
+ sql: |
+ select cast(c2 as int) as f1, cast(c1+c2 as bigint) as f2, cast(c1 as float) as f3,
+ cast(c1 as double) as f4, cast(c1 as bool) as f5, cast(c5 as timestamp) as f6,
+ cast(c1 as string) as f7 from {0};
+ expect:
+ columns: ["f1 int", "f2 bigint", "f3 float", "f4 double", "f5 bool", "f6 timestamp",
+ "f7 string"]
+ rows:
+ - [1, 2, 1.0, 1.0, true, 1590115420000, "1"]
+ - id: 8
+    desc: cast functions
+ inputs:
+ - columns: ["c1 int", "c2 float", "c5 bigint"]
+ indexs: ["index1:c1:c5"]
+ rows:
+ - [1, 1.0, 1590115420000]
+ sql: |
+ select int(1) as f1, bigint(2) as f2, float(1) as f3,
+ double(1) as f4, bool(1) as f5, timestamp(1590115420000) as f6,
+ string(1) as f7 from {0};
+ expect:
+ columns: ["f1 int", "f2 bigint", "f3 float", "f4 double", "f5 bool", "f6 timestamp",
+ "f7 string"]
+ rows:
+ - [1, 2, 1.0, 1.0, true, 1590115420000, "1"]
+ - id: 9
+    desc: cast functions - type(expression)
+ inputs:
+ - columns: ["c1 int", "c2 float", "c5 bigint"]
+ indexs: ["index1:c1:c5"]
+ rows:
+ - [1, 1.0, 1590115420000]
+ sql: |
+ select int(c1) as f1, bigint(c1+c2) as f2, float(c1) as f3,
+ double(c1) as f4, bool(c1) as f5, timestamp(c5) as f6,
+ string(c1) as f7 from {0};
+ expect:
+ columns: ["f1 int", "f2 bigint", "f3 float", "f4 double", "f5 bool", "f6 timestamp",
+ "f7 string"]
+ rows:
+ - [1, 2, 1.0, 1.0, true, 1590115420000, "1"]
+ - id: 10
+    desc: standard SQL cast syntax - Cast(expression as type)
+ inputs:
+ - columns: ["c0 string", "std_ts bigint", "c1 int", "c2 float", "c5 bigint"]
+ indexs: ["index1:c0:std_ts"]
+ rows:
+ - ["pk", 1, NULL, NULL, NULL]
+ sql: |
+ select cast(c2 as int) as f1, cast(c1+c2 as bigint) as f2, cast(c1 as float) as f3,
+ cast(c1 as double) as f4, cast(c1 as bool) as f5, cast(c5 as timestamp) as f6,
+ cast(c1 as string) as f7 from {0};
+ expect:
+ columns: ["f1 int", "f2 bigint", "f3 float", "f4 double", "f5 bool", "f6 timestamp",
+ "f7 string"]
+ rows:
+ - [NULL, NULL, NULL, NULL, NULL, NULL, NULL]
+ - id: 11
+    desc: standard SQL cast syntax - Cast(NULL as type)
+ inputs:
+ - columns: ["c1 int", "c2 float", "c5 bigint"]
+ indexs: ["index1:c1:c5"]
+ rows:
+ - [1, 1.0, 1590115420000]
+ sql: |
+ select cast(NULL as int) as f1, cast(NULL as bigint) as f2, cast(NULL as float) as f3,
+ cast(NULL as double) as f4, cast(NULL as bool) as f5, cast(NULL as timestamp) as f6,
+ cast(NULL as date) as f7 from {0};
+ expect:
+ columns: ["f1 int", "f2 bigint", "f3 float", "f4 double", "f5 bool", "f6 timestamp",
+ "f7 date"]
+ rows:
+ - [NULL, NULL, NULL, NULL, NULL, NULL, NULL]
+ - id: 12
+    desc: standard SQL cast syntax - Cast(NULL as string)
+ inputs:
+ - columns: ["c1 int", "c2 float", "c5 bigint"]
+ indexs: ["index1:c1:c5"]
+ rows:
+ - [1, 1.0, 1590115420000]
+ sql: |
+ select cast(NULL as string) as f1 from {0};
+ expect:
+ columns: ["f1 string"]
+ rows:
+ - [NULL]
+ - id: 13
+    desc: cast functions with nested subquery
+ mode: "offline-unsupport"
+ tags: ["离线有时差问题"]
+ inputs:
+ - columns: ["c1 int", "c2 string", "c5 bigint"]
+ indexs: ["index1:c1:c5"]
+ rows:
+ - [1, "2020-05-22 10:43:40", 1]
+ sql: |
+ select c1, bigint(c2) DIV 1000 as c2_sec from (select c1, timestamp(c2) as c2 from {0});
+ expect:
+ columns: ["c1 int", "c2_sec bigint"]
+ rows:
+ - [1, 1590115420]
+ - id: 14
+ desc: cast as int
+ mode: offline-unsupport
+ tags: ["@chendihao, @baoxinqi, support simple project node with column cast"]
+ inputs:
+ - columns: ["id int32", "c1 int16", "c2 int32", "c3 int64", "c4 float", "c5 double",
+ "c6 string", "c7 date", "c8 timestamp", "c9 bool", "c10 string"]
+ indexs: ["index1:c6:c8"]
+ rows:
+ - [1, 1, 1, 1, 1.0, 1.0, "2020-05-22 10:43:40", "2020-05-22", 1590115420000, true, "1"]
+ - [2, -1, -1, -1, -1.0, -1.0, "2020-05-22 10:43:40", "2020-05-22", 1590115420000, false, "-1"]
+ - [3, -1, -1, -1, -1.0, -1.0, NULL, "2020-05-22", 1590115420000, false, ""]
+ sql: |
+ select id,
+ int(c1) as int_c1, int(c2) as int_c2, int(c3) as int_c3, int(c4) as int_c4,
+ int(c5) as int_c5, int(c6) as int_c6, int(c8) as int_c8, int(c9) as int_c9, int(c10) as int_c10
+ from {0};
+ expect:
+ order: id
+ columns: ["id int32", "int_c1 int", "int_c2 int", "int_c3 int", "int_c4 int", "int_c5 int", "int_c6 int",
+ "int_c8 int", "int_c9 int", "int_c10 int"]
+ rows:
+ - [1, 1, 1, 1, 1, 1, NULL, 977520480, 1, 1]
+ - [2, -1, -1, -1, -1, -1, NULL, 977520480, 0, -1]
+ - [3, -1, -1, -1, -1, -1, NULL, 977520480, 0, NULL]
+ - id: 15
+ desc: cast as smallint
+ mode: offline-unsupport
+ tags: ["@chendihao, @baoxinqi, support simple project node with column cast"]
+ inputs:
+ - columns: ["id int32", "c1 int16", "c2 int32", "c3 int64", "c4 float", "c5 double",
+ "c6 string", "c7 date", "c8 timestamp", "c9 bool", "c10 string"]
+ indexs: ["index1:c6:c8"]
+ rows:
+ - [1, 1, 1, 1, 1.0, 1.0, "2020-05-22 10:43:40", "2020-05-22", 1590115420000, true, "1"]
+ - [2, -1, -1, -1, -1.0, -1.0, "2020-05-22 10:43:40", "2020-05-22", 1590115420000, false, "-1"]
+ - [3, -1, -1, -1, -1.0, -1.0, NULL, "2020-05-22", 1590115420000, false, ""]
+ sql: |
+ select id,
+ smallint(c1) as smallint_c1, smallint(c2) as smallint_c2, smallint(c3) as smallint_c3, smallint(c4) as smallint_c4,
+ smallint(c5) as smallint_c5, smallint(c6) as smallint_c6, smallint(c8) as smallint_c8, smallint(c9) as smallint_c9,
+ smallint(c10) as smallint_c10
+ from {0};
+ expect:
+ order: id
+ columns: ["id int32", "smallint_c1 smallint", "smallint_c2 smallint", "smallint_c3 smallint", "smallint_c4 smallint", "smallint_c5 smallint",
+ "smallint_c6 smallint", "smallint_c8 smallint", "smallint_c9 smallint", "smallint_c10 smallint"]
+ rows:
+ - [1, 1, 1, 1, 1, 1, NULL, -14496, 1, 1]
+ - [2, -1, -1, -1, -1, -1, NULL, -14496, 0, -1]
+ - [3, -1, -1, -1, -1, -1, NULL, -14496, 0, NULL]
+ - id: 16
+ desc: cast as bigint
+ mode: offline-unsupport
+ tags: ["@chendihao, @baoxinqi, support simple project node with column cast"]
+ inputs:
+ - columns: ["id int32", "c1 int16", "c2 int32", "c3 int64", "c4 float", "c5 double",
+ "c6 string", "c7 date", "c8 timestamp", "c9 bool", "c10 string"]
+ indexs: ["index1:c6:c8"]
+ rows:
+ - [1, 1, 1, 1, 1.0, 1.0, "2020-05-22 10:43:40", "2020-05-22", 1590115420000, true, "1"]
+ - [2, -1, -1, -1, -1.0, -1.0, "2020-05-22 10:43:40", "2020-05-22", 1590115420000, false, "-1"]
+ - [3, -1, -1, -1, -1.0, -1.0, NULL, "2020-05-22", 1590115420000, false, ""]
+ sql: |
+ select id,
+ bigint(c1) as bigint_c1, bigint(c2) as bigint_c2, bigint(c3) as bigint_c3, bigint(c4) as bigint_c4,
+ bigint(c5) as bigint_c5, bigint(c6) as bigint_c6, bigint(c8) as bigint_c8, bigint(c9) as bigint_c9,
+ bigint(c10) as bigint_c10
+ from {0};
+ expect:
+ order: id
+ columns: ["id int32", "bigint_c1 bigint", "bigint_c2 bigint", "bigint_c3 bigint", "bigint_c4 bigint", "bigint_c5 bigint",
+ "bigint_c6 bigint", "bigint_c8 bigint", "bigint_c9 bigint", "bigint_c10 bigint"]
+ rows:
+ - [1, 1, 1, 1, 1, 1, NULL, 1590115420000, 1, 1]
+ - [2, -1, -1, -1, -1, -1, NULL, 1590115420000, 0, -1]
+ - [3, -1, -1, -1, -1, -1, NULL, 1590115420000, 0, NULL]
+ - id: 17
+ desc: cast as float
+ mode: offline-unsupport, python-unsupport
+ tags: ["@chendihao, @baoxinqi, support simple project node with column cast"]
+ inputs:
+ - columns: ["id int32", "c1 int16", "c2 int32", "c3 int64", "c4 float", "c5 double",
+ "c6 string", "c7 date", "c8 timestamp", "c9 bool", "c10 string"]
+ indexs: ["index1:c6:c8"]
+ rows:
+ - [1, 1, 1, 1, 1.0, 1.0, "2020-05-22 10:43:40", "2020-05-22", 1590115420000, true, "1"]
+ - [2, -1, -1, -1, -1.0, -1.0, "2020-05-22 10:43:40", "2020-05-22", 1590115420000, false, "-1"]
+ - [3, -1, -1, -1, -1.0, -1.0, NULL, "2020-05-22", 1590115420000, false, ""]
+ sql: |
+ select id,
+ float(c1) as float_c1, float(c2) as float_c2, float(c3) as float_c3, float(c4) as float_c4, float(c5) as float_c5,
+ float(c6) as float_c6, float(c8) as float_c8, float(c9) as float_c9, float(c10) as float_c10
+ from {0};
+ expect:
+ order: id
+ columns: ["id int32", "float_c1 float", "float_c2 float", "float_c3 float", "float_c4 float", "float_c5 float",
+ "float_c6 float", "float_c8 float", "float_c9 float", "float_c10 float"]
+ rows:
+ - [1, 1.0, 1.0, 1.0, 1.0, 1.0, NULL, 1590115420000.0, 1.0, 1.0]
+ - [2, -1.0, -1.0, -1.0, -1.0, -1.0, NULL, 1590115420000.0, 0.0, -1.0]
+ - [3, -1.0, -1.0, -1.0, -1.0, -1.0, NULL, 1590115420000.0, 0.0, NULL]
+ - id: 18
+ desc: cast as double
+ mode: offline-unsupport
+ tags: ["@chendihao, @baoxinqi, support simple project node with column cast"]
+ inputs:
+ - columns: ["id int32", "c1 int16", "c2 int32", "c3 int64", "c4 float", "c5 double",
+ "c6 string", "c7 date", "c8 timestamp", "c9 bool", "c10 string"]
+ indexs: ["index1:c6:c8"]
+ rows:
+ - [1, 1, 1, 1, 1.0, 1.0, "2020-05-22 10:43:40", "2020-05-22", 1590115420000, true, "1"]
+ - [2, -1, -1, -1, -1.0, -1.0, "2020-05-22 10:43:40", "2020-05-22", 1590115420000, false, "-1"]
+ - [3, -1, -1, -1, -1.0, -1.0, NULL, "2020-05-22", 1590115420000, false, ""]
+ sql: |
+ select id,
+ double(c1) as double_c1, double(c2) as double_c2, double(c3) as double_c3, double(c4) as double_c4, double(c5) as double_c5,
+ double(c6) as double_c6, double(c8) as double_c8, double(c9) as double_c9, double(c10) as double_c10
+ from {0};
+ expect:
+ order: id
+ columns: ["id int32", "double_c1 double", "double_c2 double", "double_c3 double", "double_c4 double", "double_c5 double",
+ "double_c6 double", "double_c8 double", "double_c9 double", "double_c10 double"]
+ rows:
+ - [1, 1.0, 1.0, 1.0, 1.0, 1.0, NULL, 1590115420000.0, 1.0, 1.0]
+ - [2, -1.0, -1.0, -1.0, -1.0, -1.0, NULL, 1590115420000.0, 0.0, -1.0]
+ - [3, -1.0, -1.0, -1.0, -1.0, -1.0, NULL, 1590115420000.0, 0.0, NULL]
+ - id: 19
+ desc: cast as string
+ mode: offline-unsupport,cli-unsupport
+ tags: ["@chendihao, @baoxinqi, support simple project node with column cast"]
+ inputs:
+ - columns: ["id int32", "c1 int16", "c2 int32", "c3 int64", "c4 float", "c5 double",
+ "c6 string", "c7 date", "c8 timestamp", "c9 bool", "c10 string"]
+ indexs: ["index1:c6:c8"]
+ rows:
+ - [1, 1, 1, 1, 1.0, 1.0, "2020-05-22 10:43:40", "2020-05-22", 1590115420000, true, "1"]
+ - [2, -1, -1, -1, -1.0, -1.0, "2020-05-22 10:43:40", "2020-05-22", 1590115420000, false, "-1"]
+ - [3, -1, -1, -1, -1.0, -1.0, NULL, "2020-05-22", 1590115420000, false, ""]
+ sql: |
+ select id,
+ string(c1) as string_c1, string(c2) as string_c2, string(c3) as string_c3, string(c4) as string_c4,
+ string(c5) as string_c5, string(c6) as string_c6, string(c7) as string_c7, string(c8) as string_c8, string(c9) as string_c9,
+ string(c10) as string_c10
+ from {0};
+ expect:
+ order: id
+ columns: ["id int32", "string_c1 string", "string_c2 string", "string_c3 string", "string_c4 string", "string_c5 string",
+ "string_c6 string", "string_c7 string", "string_c8 string", "string_c9 string", "string_c10 string"]
+ rows:
+ - [1, "1", "1", "1", "1", "1", "2020-05-22 10:43:40", "2020-05-22", "2020-05-22 10:43:40", "true", "1"]
+ - [2, "-1", "-1", "-1", "-1", "-1", "2020-05-22 10:43:40", "2020-05-22", "2020-05-22 10:43:40", "false", "-1"]
+ - [3, "-1", "-1", "-1", "-1", "-1", NULL, "2020-05-22", "2020-05-22 10:43:40", "false", ""]
+ - id: 20
+ desc: cast as date
+ mode: offline-unsupport
+ tags: ["@chendihao, @baoxinqi, support simple project node with column cast"]
+ inputs:
+ - columns: ["id int32", "c1 int16", "c2 int32", "c3 int64", "c4 float", "c5 double",
+ "c6 string", "c7 date", "c8 timestamp", "c9 bool", "c10 string"]
+ indexs: ["index1:c6:c8"]
+ rows:
+ - [1, 1, 1, 1, 1.0, 1.0, "2020-05-22 10:43:40", "2020-05-22", 1590115420000, true, "1"]
+ - [2, -1, -1, -1, -1.0, -1.0, "2020-05-22 10:43:40", "2020-05-22", 1590115420000, false, "-1"]
+ - [3, -1, -1, -1, -1.0, -1.0, NULL, "2020-05-22", 1590115420000, false, ""]
+ sql: |
+ select id,
+ date(c6) as date_c6, date(c7) as date_c7, date(c8) as date_c8, date(c10) as date_c10
+ from {0};
+ expect:
+ order: id
+ columns: ["id int32", "date_c6 date", "date_c7 date", "date_c8 date", "date_c10 date"]
+ rows:
+ - [1, "2020-05-22", "2020-05-22", "2020-05-22", NULL]
+ - [2, "2020-05-22", "2020-05-22", "2020-05-22", NULL]
+ - [3, NULL, "2020-05-22", "2020-05-22", NULL]
+ - id: 21
+ desc: cast as timestamp
+ mode: offline-unsupport
+ tags: ["@chendihao, @baoxinqi, support simple project node with column cast"]
+ inputs:
+ - columns: ["id int32", "c1 int16", "c2 int32", "c3 int64", "c4 float", "c5 double",
+ "c6 string", "c7 date", "c8 timestamp", "c9 bool", "c10 string"]
+ indexs: ["index1:c6:c8"]
+ rows:
+ - [1, 1, 1, 1, 1.0, 1.0, "2020-05-22 10:43:40", "2020-05-22", 1590115420000, true, "1"]
+ - [2, -1, -1, -1, -1.0, -1.0, "2020-05-22 10:43:40", "2020-05-22", 1590115420000, false, "-1"]
+ - [3, -1, -1, -1, -1.0, -1.0, NULL, "2020-05-22", 1590115420000, false, ""]
+ sql: |
+ select id,
+ timestamp(c1) as timestamp_c1, timestamp(c2) as timestamp_c2, timestamp(c3) as timestamp_c3, timestamp(c4) as timestamp_c4, timestamp(c5) as timestamp_c5,
+ timestamp(c6) as timestamp_c6, timestamp(c7) as timestamp_c7, timestamp(c8) as timestamp_c8, timestamp(c9) as timestamp_c9, timestamp(c10) as timestamp_c10
+ from {0};
+ expect:
+ order: id
+ columns: ["id int32", "timestamp_c1 timestamp", "timestamp_c2 timestamp", "timestamp_c3 timestamp", "timestamp_c4 timestamp", "timestamp_c5 timestamp",
+ "timestamp_c6 timestamp", "timestamp_c7 timestamp", "timestamp_c8 timestamp", "timestamp_c9 timestamp", "timestamp_c10 timestamp"]
+ rows:
+ - [1, 1, 1, 1, 1, 1, 1590115420000, 1590076800000, 1590115420000, 1, NULL]
+ - [2, NULL, NULL, NULL, NULL, NULL, 1590115420000, 1590076800000, 1590115420000, 0, NULL]
+ - [3, NULL, NULL, NULL, NULL, NULL, NULL, 1590076800000, 1590115420000, 0, NULL]
+ - id: 22
+ desc: cast as bool
+ mode: offline-unsupport
+# tags: ["TODO", "@chendihao, @baoxinqi, support simple project node with column cast"]
+ inputs:
+ - columns: ["id int32", "c1 int16", "c2 int32", "c3 int64", "c4 float", "c5 double",
+ "c6 string", "c7 date", "c8 timestamp", "c9 bool", "c10 string"]
+ indexs: ["index1:c6:c8"]
+ rows:
+ - [1, 1, 1, 1, 1.0, 1.0, "2020-05-22 10:43:40", "2020-05-22", 1590115420000, true, "1"]
+ - [2, -1, -1, -1, -1.0, -1.0, "2020-05-22 10:43:40", "2020-05-22", 1590115420000, false, "-1"]
+ - [3, -1, -1, -1, -1.0, -1.0, NULL, "2020-05-22", 1590115420000, false, ""]
+ sql: |
+ select id,
+ bool(c1) as bool_c1, bool(c2) as bool_c2, bool(c3) as bool_c3, bool(c4) as bool_c4, bool(c5) as bool_c5,
+ bool(c6) as bool_c6, bool(c8) as bool_c8, bool(c9) as bool_c9, bool(c10) as bool_c10
+ from {0};
+ expect:
+ order: id
+ columns: ["id int32", "bool_c1 bool", "bool_c2 bool", "bool_c3 bool", "bool_c4 bool", "bool_c5 bool",
+ "bool_c6 bool", "bool_c8 bool", "bool_c9 bool", "bool_c10 bool"]
+ rows:
+ - [1, true, true, true, true, true, NULL, true, true, true]
+ - [2, true, true, true, true, true, NULL, true, false, NULL]
+ - [3, true, true, true, true, true, NULL, true, false, NULL]
+ - id: 23
+ desc: cast string as bool
+ inputs:
+ - columns: ["id int64", "c2 int32", "c6 string"]
+ indexs: ["index1:c2:id"]
+ rows:
+ - [1, 1, "t"]
+ - [2, 1, "true"]
+ - [3, 1, "f"]
+ - [4, 1, "false"]
+ - [5, 1, "1"]
+ - [6, 1, "0"]
+ - [7, 1, "y"]
+ - [8, 1, "n"]
+ - [9, 1, "yes"]
+ - [10, 1, "no"]
+ - [11, 1, ""]
+ - [12, 1, "abc"]
+ sql: |
+ select id, bool(c6) as bool_c6 from {0};
+ expect:
+ order: id
+ columns: ["id int64", "bool_c6 bool"]
+ rows:
+ - [1, true]
+ - [2, true]
+ - [3, false]
+ - [4, false]
+ - [5, true]
+ - [6, false]
+ - [7, true]
+ - [8, false]
+ - [9, true]
+ - [10, false]
+ - [11, NULL]
+ - [12, NULL]
+ - id: 24
+ desc: cast float as string
+ inputs:
+ - columns: ["id int64", "c2 int32", "c6 float"]
+ indexs: ["index1:c2:id"]
+ rows:
+ - [1, 1, 1.1]
+ sql: |
+ select id, string(c6) as string_c6 from {0};
+ expect:
+ order: id
+ columns: ["id int64", "string_c6 string"]
+ rows:
+ - [1, "1.1"]
+ - id: 25
+ mode: "offline-unsupport"
+ tags: ["离线有时差问题"]
+    desc: column name prefixed with _
+ inputs:
+ - columns: ["_c1 int", "_c2 string", "_c5 bigint"]
+ indexs: ["index1:_c1:_c5"]
+ rows:
+ - [1, "2020-05-22 10:43:40", 1]
+ sql: |
+ select _c1, bigint(_c2) DIV 1000 as _c2_sec from (select _c1, timestamp(_c2) as _c2 from {0});
+ expect:
+ columns: ["_c1 int", "_c2_sec bigint"]
+ rows:
+ - [1, 1590115420]
+ - id: 26
+ desc: cast int to date raise error
+ inputs:
+ - columns: ["id int64", "c2 int32", "c6 int32"]
+ indexs: ["index1:c2:id"]
+ rows:
+ - [1, 1, 1]
+ sql: |
+ select id, date(c6) as date_c6 from {0};
+ expect:
+ success: false
+ - id: 27
+ desc: cast bigint to date raise error
+ inputs:
+ - columns: ["id int64", "c2 int32", "c6 int64"]
+ indexs: ["index1:c2:id"]
+ rows:
+ - [1, 1, 1]
+ sql: |
+ select id, date(c6) as date_c6 from {0};
+ expect:
+ success: false
+ - id: 28
+ desc: cast smallint to date raise error
+ inputs:
+ - columns: ["id int64", "c2 int32", "c6 int16"]
+ indexs: ["index1:c2:id"]
+ rows:
+ - [1, 1, 1]
+ sql: |
+ select id, date(c6) as date_c6 from {0};
+ expect:
+ success: false
+ - id: 29
+ desc: cast float to date raise error
+ inputs:
+ - columns: ["id int64", "c2 int32", "c6 float"]
+ indexs: ["index1:c2:id"]
+ rows:
+ - [1, 1, 1.0]
+ sql: |
+ select id, date(c6) as date_c6 from {0};
+ expect:
+ success: false
+ - id: 30
+ desc: cast double to date raise error
+ inputs:
+ - columns: ["id int64", "c2 int32", "c6 double"]
+ indexs: ["index1:c2:id"]
+ rows:
+ - [1, 1, 1.0]
+ sql: |
+ select id, date(c6) as date_c6 from {0};
+ expect:
+ success: false
+ - id: 31
+ desc: cast double to date raise error
+ inputs:
+ - columns: ["id int64", "c2 int32", "c6 bool"]
+ indexs: ["index1:c2:id"]
+ rows:
+ - [1, 1, true]
+ sql: |
+ select id, date(c6) as date_c6 from {0};
+ expect:
+ success: false
+ - id: 32
+ desc: cast date numbers
+ inputs:
+ - columns: ["id int64", "c2 int32", "c6 date"]
+ indexs: ["index1:c2:id"]
+ rows:
+ - [1, 1, "2020-10-12"]
+ sql: |
+ select id, int16(c6) as int16_c6, int32(c6) as int32_c6, int64(c6) as int64_c6,
+ float(c6) as float_c6, double(c6) as double_c6, bool(c6) as bool_c6 from {0};
+ expect:
+ columns: [ "id int64", "int16_c6 int16", "int32_c6 int32", "int64_c6 int64",
+ "float_c6 float", "double_c6 double", "bool_c6 bool" ]
+ rows:
+ - [ 1, NULL, NULL, NULL, NULL, NULL, NULL]
+ - id: 33
+    desc: standard SQL cast syntax - VARCHAR(expr)
+ inputs:
+ - columns: ["c1 int", "c2 float", "c5 bigint"]
+ indexs: ["index1:c1:c5"]
+ rows:
+ - [1, 1.0, 1590115420000]
+ sql: |
+ select int(c1) as f1, timestamp(c5) as f2,
+ VARCHAR(c1) as f3 from {0};
+ expect:
+ columns: ["f1 int", "f2 timestamp", "f3 string"]
+ rows:
+ - [1, 1590115420000, "1"]
+ - id: 34
+    desc: standard SQL cast syntax - Cast(expr as VARCHAR)
+ inputs:
+ - columns: ["c1 int", "c2 float", "c5 bigint"]
+ indexs: ["index1:c1:c5"]
+ rows:
+ - [1, 1.0, 1590115420000]
+ sql: |
+ select CAST(c1 as int) as f1, CAST(c5 as timestamp) as f2,
+ CAST(c1 as VARCHAR) as f3, CAST(c1 as VARCHAR(60)) as f4 from {0};
+ expect:
+ columns: ["f1 int", "f2 timestamp", "f3 string", "f4 string"]
+ rows:
+ - [1, 1590115420000, "1", "1"]
+ - id: 35
+    desc: standard SQL cast syntax - Cast(NULL expression as VARCHAR)
+ inputs:
+ - columns: ["c0 string", "std_ts bigint", "c1 int", "c2 float", "c5 bigint"]
+ indexs: ["index1:c0:std_ts"]
+ rows:
+ - ["pk", 1, NULL, NULL, NULL]
+ sql: |
+ select cast(c2 as int) as f1, cast(c1 as VARCHAR) as f2, cast(c1 as VARCHAR(60)) as f3 from {0};
+ expect:
+ columns: ["f1 int", "f2 string", "f3 string"]
+ rows:
+ - [NULL, NULL, NULL]
\ No newline at end of file
diff --git a/cases/integration_test/function/test_calculate.yaml b/cases/integration_test/function/test_calculate.yaml
new file mode 100644
index 00000000000..7e4b5f5a3c9
--- /dev/null
+++ b/cases/integration_test/function/test_calculate.yaml
@@ -0,0 +1,254 @@
+# Copyright 2021 4Paradigm
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+db: test_zw
+debugs: []
+sqlDialect: ["HybridSQL"]
+version: 0.5.0
+cases:
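+  # Note: cases that declare two dataProvider lists (ids 7-9) take d[0] from the first
+  # list and d[1] from the second; `success: false` under expect marks variants that are
+  # expected to be rejected rather than to return rows.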
+ - id: 0
+ desc: abs-normal
+ inputs:
+ - columns: ["id int", "c0 smallint", "c1 int", "c2 bigint", "c3 float", "c4 double","c5 bool"]
+ indexs: ["index1:c0:c2"]
+ rows:
+ - [1, -1, 2, 2, -0.2, -0.5,true]
+ - [2, NULL, NULL, 2, NULL, NULL,false]
+ sql: select id as id,
+ abs(c0) as r0,
+ abs(c1) as r1,
+ abs(c2) as r2,
+ abs(c3) as r3,
+ abs(c4) as r4,
+ abs(c5) as r5 from {0};
+ expect:
+ order: id
+ columns: ["id int", "r0 int", "r1 int", "r2 bigint", "r3 double", "r4 double","r5 double"]
+ rows:
+ - [1, 1, 2, 2, 0.20000000298023224, 0.5,1.0]
+ - [2, NULL, NULL, 2, NULL , NULL,0.0]
+ - id: 1
+    desc: trigonometric functions
+ tags: ["暂时不支持bool类型列"]
+ inputs:
+ - columns: ["id int", "c0 smallint", "c1 int", "c2 bigint", "c3 float", "c4 double","c5 bool"]
+ indexs: ["index1:c0:c2"]
+ rows:
+ - [1, 0, 1, 1, 1.0, 0.0,true]
+ - [2, NULL, NULL, 1, NULL, NULL,false]
+ sql: select id as id,
+ cos(c0) as r0,
+ cot(c1) as r1,
+ sin(c2) as r2,
+ tan(c3) as r3,
+ tan(c4) as r4 from {0};
+ expect:
+ order: id
+ columns: ["id int", "r0 double", "r1 double", "r2 double", "r3 float","r4 double"]
+ rows:
+ - [1, 1, 0.6420926159343306, 0.8414709848078965, 1.5574077,0.0]
+ - [2, NULL, NULL, 0.8414709848078965, NULL,NULL]
+ - id: 2
+    desc: inverse trigonometric functions
+ inputs:
+ - columns: ["id int", "c0 smallint", "c1 int", "c2 bigint", "c3 float", "c4 double","c5 bool"]
+ indexs: ["index1:c0:c2"]
+ rows:
+ - [1, 1, 2, 2, 0.2, 0.5,true]
+ - [2, NULL, NULL, 2, NULL, NULL,false]
+ sql: select id as id,
+ acos(c4) as r0,
+ asin(c3) as r1,
+ atan(c1) as r2,
+ atan2(c1, c2) as r3,
+ asin(c4) as r4
+ from {0};
+ expect:
+ order: id
+ columns: ["id int", "r0 double", "r1 float", "r2 double", "r3 double","r4 double"]
+ rows:
+ - [1, 1.0471975511965979, 0.2013579207903308, 1.1071487177940904, 0.78539816339744828,0.5235987755982989]
+ - [2, NULL, NULL, NULL, NULL,NULL]
+ - id: 3
+    desc: inverse trigonometric and trigonometric functions - constant arguments
+ inputs:
+ - columns: ["id int", "c0 smallint", "c1 int", "c2 bigint", "c3 float", "c4 double","c5 bool"]
+ indexs: ["index1:c0:c2"]
+ rows:
+ - [1, 1, 2, 2, 0.2, 0.5,true]
+ sql: select id as id,
+ cos(1) as r0,
+ cot(2) as r1,
+ sin(1.1) as r2,
+ tan(1) as r3,
+ acos(0) as r4,
+ asin(2.0) as r5,
+ atan(2.2) as r6,
+ atan2(1, 2) as r7,
+ asin(2) as r8
+ from {0};
+ expect:
+ order: id
+ columns: ["id int", "r0 double", "r1 double", "r2 double", "r3 double","r4 double", "r5 double", "r6 double", "r7 double", "r8 double"]
+ rows:
+ - [1,0.5403023058681398,-0.45765755436028577,0.8912073600614354,1.5574077246549023,1.5707963267948966,NaN,1.1441688336680205,0.4636476090008061,NaN]
+ - id: 4
+    desc: logarithmic functions
+ inputs:
+ - columns: ["id int", "c0 smallint", "c1 int", "c2 bigint", "c3 float", "c4 double","c5 bool"]
+ indexs: ["index1:c0:c2"]
+ rows:
+ - [1, 1, 1, 3, 1, 1,true]
+ - [2, NULL, NULL, 3, NULL, NULL,false]
+ sql: select id as id,
+ log(c0) as r0,
+ log(c2, c1) as r1,
+ log2(c3) as r2,
+ log10(c4) as r3,
+ ln(c1) as r4,
+ log(c5) as r5 from {0};
+ expect:
+ order: id
+ columns: ["id int", "r0 double", "r1 double", "r2 float", "r3 double", "r4 double","r5 double"]
+ rows:
+ - [1, 0, 0, 0, 0, 0,0.0]
+ - [2, NULL, NULL, NULL, NULL, NULL,-Infinity]
+
+ - id: 5
+    desc: rounding functions
+ inputs:
+ - columns: ["id int", "c0 smallint", "c1 int", "c2 bigint", "c3 float", "c4 double","c5 bool"]
+ indexs: ["index1:c0:c2"]
+ rows:
+ - [1, 1, 2, 2, 0.5, 0.7,true]
+ - [2, NULL, NULL, 2, NULL, NULL,false]
+ sql: select id as id,
+ ceil(c0) as r0,
+ ceiling(c1) as r1,
+ floor(c2) as r2,
+ round(c3) as r3,
+ truncate(c4) as r4,
+ floor(c5) as r5 from {0};
+ expect:
+ order: id
+ columns: ["id int", "r0 bigint", "r1 bigint", "r2 bigint", "r3 double", "r4 double","r5 double"]
+ rows:
+ - [1, 1, 2, 2, 1.000000, 0.000000,1.0]
+ - [2, NULL, NULL, 2, NULL, NULL,0.0]
+
+ - id: 6
+    desc: power and exponential functions
+ inputs:
+ - columns: ["id int", "c0 smallint", "c1 int", "c2 bigint", "c3 float", "c4 double","c5 bool"]
+ indexs: ["index1:c0:c2"]
+ rows:
+ - [1, 0, 2, 10, 1, 100,true]
+ - [2, NULL, NULL, 10, NULL, NULL,false]
+ sql: select id as id,
+ exp(c0) as r0,
+ pow(c1, c2) as r1,
+ power(c2, c3) as r2,
+ sqrt(c4) as r3,
+ pow(c5,c1) as r4
+ from {0};
+ expect:
+ order: id
+ columns: ["id int", "r0 double", "r1 double", "r2 double", "r3 double","r4 double"]
+ rows:
+ - [1, 1, 1024.000000, 10.000000, 10.000000,1.0]
+ - [2, NULL, NULL, NULL, NULL,NULL]
+ - id: 7
+ desc: "计算函数-单参数-fail"
+ level: 5
+ inputs:
+ -
+ columns : ["id bigint","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"]
+ indexs: ["index1:id:c7"]
+ rows:
+ - [1,"aa",30,-30,30,30.0,30.0,1590738989000,"2020-05-01",true]
+ sql: select d[0](d[1]) from {0};
+ dataProvider:
+ - ["abs","cos","cot","sin","tan","acos","asin","atan","log","log2","log10","ln","ceil","ceiling","floor","round","truncate","exp","sqrt"]
+ - ["{0}.c1","{0}.c7","{0}.c8"]
+ expect:
+ success: false
+ - id: 8
+ desc: "计算函数-单参数-bool-fail"
+ level: 5
+ inputs:
+ -
+ columns : ["id bigint","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"]
+ indexs: ["index1:id:c7"]
+ rows:
+ - [1,"aa",30,-30,30,30.0,30.0,1590738989000,"2020-05-01",true]
+ sql: select d[0](d[1]) from {0};
+ dataProvider:
+ - ["cos","cot","sin","tan","acos","asin","atan","sqrt"]
+ - ["{0}.c9"]
+ expect:
+ success: false
+ - id: 9
+ desc: "计算函数-两参数-fail"
+ level: 5
+ inputs:
+ -
+ columns : ["id bigint","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"]
+ indexs: ["index1:id:c7"]
+ rows:
+ - [1,"aa",30,-30,30,30.0,30.0,1590738989000,"2020-05-01",true]
+ sql: select d[0](d[1],d[1]) from {0};
+ dataProvider:
+ - ["log","pow","power","atan2"]
+ - ["{0}.c1","{0}.c7","{0}.c8"]
+ expect:
+ success: false
+ - id: 10
+ desc: "mod()_整型_正确"
+ tags: ["TODO","暂时不支持mod()"]
+ inputs:
+ -
+ columns : ["id bigint","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"]
+ indexs: ["index1:id:c7"]
+ rows:
+ - [1,"aa",30,30,30,30.0,30.0,1590738989000,"2020-05-01",false]
+ -
+ columns : ["id bigint","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"]
+ indexs: ["index1:id:c7"]
+ rows:
+ - [1,"bb",0,20,30,11.1,12.1,1590738989001,"2020-05-02",true]
+ dataProvider:
+ - ["{0}.c2","{0}.c3","{0}.c4","{0}.c5","{0}.c6"]
+ sql: select mod(d[0],{1}.c2) as b2,mod(d[0],{1}.c3) as b3,mod(d[0],{1}.c4) as b4,mod(d[0],{1}.c5) as b5,mod(d[0],{1}.c6) as b6,mod(d[0],{1}.c9) as b9 from {0} last join {1} ORDER BY {1}.c7 on {0}.id={1}.id;
+ expect:
+ columns: ["b2 smallint","b3 int","b4 bigint","b5 float","b6 double","b9 smallint"]
+ expectProvider:
+ 0:
+ rows:
+ - [0,10,0,7.8,5.8,0]
+ 1:
+ rows:
+ - [0,10,0,7.8,5.8,0]
+ 2:
+ rows:
+ - [0,600,900,333,363,30]
+ 3:
+ rows:
+ - [30,50,60,41.1,42.1,31]
+ 4:
+ rows:
+ - [30,10,0,18.9,17.9,29]
+ 5:
+ columns: ["b2 double","b3 double","b4 double","b5 double","b6 double","b9 double"]
+ rows:
+ - [Infinity,1.5,1.0,2.7027026098198896,2.479338842975207,30.0]
diff --git a/cases/integration_test/function/test_date.yaml b/cases/integration_test/function/test_date.yaml
new file mode 100644
index 00000000000..66e1ce9cbbd
--- /dev/null
+++ b/cases/integration_test/function/test_date.yaml
@@ -0,0 +1,144 @@
+# Copyright 2021 4Paradigm
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+db: test_zw
+debugs: []
+sqlDialect: ["HybridSQL"]
+version: 0.5.0
+cases:
+ - id: 0
+ desc: date_format-normal
+ mode: cli-unsupport
+ inputs:
+ -
+ columns : ["id bigint","ts1 bigint","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"]
+ indexs: ["index1:id:ts1"]
+ rows:
+ - [1,1,"aa",30,-30,30,30.0,30.0,1590738989000,"2020-05-01",true]
+ - [2,2,"aa",30,-30,30,30.0,30.0,NULL,NULL,true]
+ sql: select id as id,date_format(c7,"%Y-%m-%d %H:%M:%S") as e1,date_format(c8,"%Y-%m-%d %H:%M:%S") as e2 from {0};
+ expect:
+ order: id
+ columns: ["id bigint", "e1 string","e2 string"]
+ rows:
+ - [1, "2020-05-29 15:56:29","2020-05-01 00:00:00"]
+ - [2, NULL,NULL]
+ - id: 1
+ desc: date_format-fail
+ level: 5
+ inputs:
+ -
+ columns : ["id bigint","ts1 bigint","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"]
+ indexs: ["index1:id:ts1"]
+ rows:
+ - [1,1,"aa",30,-30,30,30.0,30.0,1590738989000,"2020-05-01",true]
+ - [2,2,"aa",30,-30,30,30.0,30.0,NULL,NULL,true]
+ dataProvider:
+ - ["{0}.c1","{0}.c2","{0}.c3","{0}.c4","{0}.c5","{0}.c6","{0}.c9"]
+ sql: select id as id,date_format(d[0],"%Y-%m-%d %H:%M:%S") as e1 from {0};
+ expect:
+ success: false
+ - id: 2
+    desc: date functions - normal
+ inputs:
+ -
+ columns : ["id bigint","ts1 bigint","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"]
+ indexs: ["index1:id:ts1"]
+ rows:
+ - [1,1,"aa",30,-30,30,30.0,30.0,1590738989000,"2020-05-02",true]
+ - [2,2,"aa",30,-30,NULL,30.0,30.0,NULL,NULL,true]
+ dataProvider:
+ - ["{0}.c4","{0}.c7","{0}.c8"]
+ sql: |
+ select id as id,
+ day(d[0]) as e1,
+ dayofmonth(d[0]) as e2,
+ dayofweek(d[0]) as e3,
+ month(d[0]) as e4,
+ week(d[0]) as e5,
+ weekofyear(d[0]) as e6,
+ year(d[0]) as e7
+ from {0};
+ expect:
+ order: id
+ columns: ["id bigint", "e1 int","e2 int","e3 int","e4 int","e5 int","e6 int","e7 int"]
+ expectProvider:
+ 0:
+ rows:
+ - [1,1,1,5,1,1,1,1970]
+ - [2,null,null,null,null,null,null,null]
+ 1:
+ rows:
+ - [1,29,29,6,5,22,22,2020]
+ - [2,null,null,null,null,null,null,null]
+ 2:
+ rows:
+ - [1,2,2,7,5,18,18,2020]
+ - [2,null,null,null,null,null,null,null]
+ - id: 3
+    desc: some time functions - fail
+ level: 5
+ inputs:
+ -
+ columns : ["id bigint","ts1 bigint","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"]
+ indexs: ["index1:id:ts1"]
+ rows:
+ - [1,1,"aa",30,-30,30,30.0,30.0,1590738989000,"2020-05-02",true]
+ - [2,2,"aa",30,-30,NULL,30.0,30.0,NULL,NULL,true]
+ dataProvider:
+ - ["day","dayofmonth","dayofweek","week","weekofyear","year","month"]
+ - ["{0}.c1","{0}.c2","{0}.c3","{0}.c5","{0}.c6","{0}.c9"]
+ sql: select id as id,d[0](d[1]) as e1 from {0};
+ expect:
+ success: false
+ - id: 4
+ desc: hour-minute-normal
+ inputs:
+ -
+ columns : ["id bigint","ts1 bigint","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"]
+ indexs: ["index1:id:ts1"]
+ rows:
+ - [1,1,"aa",30,-30,30,30.0,30.0,1590738989000,"2020-05-02",true]
+ - [2,2,"aa",30,-30,NULL,30.0,30.0,NULL,NULL,true]
+ dataProvider:
+ - ["{0}.c4","{0}.c7"]
+ sql: select id as id,hour(d[0]) as e1,minute(d[0]) as e2 from {0};
+ expect:
+ order: id
+ columns: ["id bigint", "e1 int","e2 int"]
+ expectProvider:
+ 0:
+ rows:
+ - [1,8,0]
+ - [2,null,null]
+ 1:
+ rows:
+ - [1,15,56]
+ - [2,null,null]
+ - id: 5
+ desc: hour-minute-fail
+ level: 5
+ inputs:
+ -
+ columns : ["id bigint","ts1 bigint","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"]
+ indexs: ["index1:id:ts1"]
+ rows:
+ - [1,1,"aa",30,-30,30,30.0,30.0,1590738989000,"2020-05-02",true]
+ - [2,2,"aa",30,-30,NULL,30.0,30.0,NULL,NULL,true]
+ dataProvider:
+ - ["hour","minute"]
+ - ["{0}.c1","{0}.c2","{0}.c3","{0}.c5","{0}.c6","{0}.c8","{0}.c9"]
+ sql: select id as id,d[0](d[1]) as e1 from {0};
+ expect:
+ success: false
\ No newline at end of file
diff --git a/cases/integration_test/function/test_like_match.yaml b/cases/integration_test/function/test_like_match.yaml
new file mode 100644
index 00000000000..5300a4f85e5
--- /dev/null
+++ b/cases/integration_test/function/test_like_match.yaml
@@ -0,0 +1,840 @@
+# Copyright 2021 4Paradigm
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+db: test_zw
+debugs: []
+sqlDialect: ["HybridSQL"]
+version: 0.5.0
+cases:
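+  # Note: these cases exercise like_match(col, pattern[, escape]) and its
+  # case-insensitive variant ilike_match. As the expected rows below show, `_` matches
+  # exactly one character, `%` matches any character sequence (including the empty one),
+  # the escape character defaults to `\` and can be overridden or disabled with an empty
+  # string, and a NULL input yields a NULL result.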
+ - id: 0
+ desc: "使用_"
+ inputs:
+ -
+ columns : ["id bigint","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"]
+ indexs: ["index1:id:c7"]
+ rows:
+ - [1,"a_b",30,30,30,30.0,30.0,1590738990000,"2020-05-01",false]
+ - [2,"aab",30,30,30,30.0,30.0,1590738991000,"2020-05-01",false]
+ - [3,"a%b",30,30,30,30.0,30.0,1590738992000,"2020-05-01",false]
+ - [4,"b_c",30,30,30,30.0,30.0,1590738993000,"2020-05-01",false]
+ - [5,"abc",30,30,30,30.0,30.0,1590738994000,"2020-05-01",false]
+ - [6,"A0b",30,30,30,30.0,30.0,1590738995000,"2020-05-01",false]
+ - [7,"a#B",30,30,30,30.0,30.0,1590738996000,"2020-05-01",false]
+ - [8,"aaab",30,30,30,30.0,30.0,1590738991000,"2020-05-01",false]
+ dataProvider:
+ - ["like_match","ilike_match"]
+ sql: select id,d[0](c1,'a_b') as v1 from {0};
+ expect:
+ order: id
+ columns: ["id bigint","v1 bool"]
+ expectProvider:
+ 0:
+ rows:
+ - [1,true]
+ - [2,true]
+ - [3,true]
+ - [4,false]
+ - [5,false]
+ - [6,false]
+ - [7,false]
+ - [8,false]
+ 1:
+ rows:
+ - [1,true]
+ - [2,true]
+ - [3,true]
+ - [4,false]
+ - [5,false]
+ - [6,true]
+ - [7,true]
+ - [8,false]
+ - id: 1
+ desc: "使用%"
+ inputs:
+ -
+ columns : ["id bigint","c1 string","c7 timestamp"]
+ indexs: ["index1:id:c7"]
+ rows:
+ - [1,"a_b",1590738990000]
+ - [2,"aabb",1590738991000]
+ - [3,"a%_b",1590738992000]
+ - [4,"b_c",1590738993000]
+ - [5,"abc",1590738994000]
+ - [6,"A0b",1590738995000]
+ - [7,"a#B",1590738996000]
+ - [8,"aaab",1590738997000]
+ - [9,"ab",1590738998000]
+ dataProvider:
+ - ["like_match","ilike_match"]
+ sql: select id,c1,d[0](c1,'a%b') as v1 from {0};
+ expect:
+ order: id
+ columns: ["id bigint","c1 string","v1 bool"]
+ expectProvider:
+ 0:
+ rows:
+ - [1,"a_b",true]
+ - [2,"aabb",true]
+ - [3,"a%_b",true]
+ - [4,"b_c",false]
+ - [5,"abc",false]
+ - [6,"A0b",false]
+ - [7,"a#B",false]
+ - [8,"aaab",true]
+ - [9,"ab",true]
+ 1:
+ rows:
+ - [1,"a_b",true]
+ - [2,"aabb",true]
+ - [3,"a%_b",true]
+ - [4,"b_c",false]
+ - [5,"abc",false]
+ - [6,"A0b",true]
+ - [7,"a#B",true]
+ - [8,"aaab",true]
+ - [9,"ab",true]
+ - id: 2
+ desc: "同时使用%和_"
+ inputs:
+ -
+ columns : ["id bigint","c1 string","c7 timestamp"]
+ indexs: ["index1:id:c7"]
+ rows:
+ - [1,"%a_b",1590738990000]
+ - [2,"aabb",1590738991000]
+ - [3,"_a%_b",1590738992000]
+ - [4,"ba_c",1590738993000]
+ - [5,"abb",1590738994000]
+ - [6,"bA0b",1590738995000]
+ - [7,"aa#0B",1590738996000]
+ dataProvider:
+ - ["like_match","ilike_match"]
+ sql: select id,c1,d[0](c1,'_a%b') as v1 from {0};
+ expect:
+ order: id
+ columns: ["id bigint","c1 string","v1 bool"]
+ expectProvider:
+ 0:
+ rows:
+ - [1,"%a_b",true]
+ - [2,"aabb",true]
+ - [3,"_a%_b",true]
+ - [4,"ba_c",false]
+ - [5,"abb",false]
+ - [6,"bA0b",false]
+ - [7,"aa#0B",false]
+ 1:
+ rows:
+ - [1,"%a_b",true]
+ - [2,"aabb",true]
+ - [3,"_a%_b",true]
+ - [4,"ba_c",false]
+ - [5,"abb",false]
+ - [6,"bA0b",true]
+ - [7,"aa#0B",true]
+ - id: 3
+ desc: "使用默认的escape"
+ inputs:
+ -
+ columns : ["id bigint","c1 string","c7 timestamp"]
+ indexs: ["index1:id:c7"]
+ rows:
+ - [1,"%a_b",1590738990000]
+ - [2,"aabb",1590738991000]
+ - [3,"_a%_b",1590738992000]
+ - [4,"ba_c",1590738993000]
+ - [5,"abb",1590738994000]
+ - [6,"bA0b",1590738995000]
+ - [7,"_a#0B",1590738996000]
+ dataProvider:
+ - ["like_match","ilike_match"]
+ sql: select id,c1,d[0](c1,"\\_a%b","\\") as v1 from {0};
+ expect:
+ order: id
+ columns: ["id bigint","c1 string","v1 bool"]
+ expectProvider:
+ 0:
+ rows:
+ - [1,"%a_b",false]
+ - [2,"aabb",false]
+ - [3,"_a%_b",true]
+ - [4,"ba_c",false]
+ - [5,"abb",false]
+ - [6,"bA0b",false]
+ - [7,"_a#0B",false]
+ 1:
+ rows:
+ - [1,"%a_b",false]
+ - [2,"aabb",false]
+ - [3,"_a%_b",true]
+ - [4,"ba_c",false]
+ - [5,"abb",false]
+ - [6,"bA0b",false]
+ - [7,"_a#0B",true]
+ - id: 4
+ desc: "指定escape为#"
+ inputs:
+ -
+ columns : ["id bigint","c1 string","c7 timestamp"]
+ indexs: ["index1:id:c7"]
+ rows:
+ - [1,"%a_b",1590738990000]
+ - [2,"aabb",1590738991000]
+ - [3,"_a%_b",1590738992000]
+ - [4,"ba_c",1590738993000]
+ - [5,"abb",1590738994000]
+ - [6,"bA0b",1590738995000]
+ - [7,"_a#0B",1590738996000]
+ dataProvider:
+ - ["like_match","ilike_match"]
+ sql: select id,c1,d[0](c1,'#_a%b','#') as v1 from {0};
+ expect:
+ order: id
+ columns: ["id bigint","c1 string","v1 bool"]
+ expectProvider:
+ 0:
+ rows:
+ - [1,"%a_b",false]
+ - [2,"aabb",false]
+ - [3,"_a%_b",true]
+ - [4,"ba_c",false]
+ - [5,"abb",false]
+ - [6,"bA0b",false]
+ - [7,"_a#0B",false]
+ 1:
+ rows:
+ - [1,"%a_b",false]
+ - [2,"aabb",false]
+ - [3,"_a%_b",true]
+ - [4,"ba_c",false]
+ - [5,"abb",false]
+ - [6,"bA0b",false]
+ - [7,"_a#0B",true]
+ - id: 5
+ desc: "指定escape为_"
+ inputs:
+ -
+ columns : ["id bigint","c1 string","c7 timestamp"]
+ indexs: ["index1:id:c7"]
+ rows:
+ - [1,"%a_b",1590738990000]
+ - [2,"aabb",1590738991000]
+ - [3,"_a%_b",1590738992000]
+ - [4,"ba_c",1590738993000]
+ - [5,"abb",1590738994000]
+ - [6,"bA0b",1590738995000]
+ - [7,"_a#0B",1590738996000]
+ dataProvider:
+ - ["like_match","ilike_match"]
+ sql: select id,c1,d[0](c1,'__a%b','_') as v1 from {0};
+ expect:
+ order: id
+ columns: ["id bigint","c1 string","v1 bool"]
+ expectProvider:
+ 0:
+ rows:
+ - [1,"%a_b",false]
+ - [2,"aabb",false]
+ - [3,"_a%_b",true]
+ - [4,"ba_c",false]
+ - [5,"abb",false]
+ - [6,"bA0b",false]
+ - [7,"_a#0B",false]
+ 1:
+ rows:
+ - [1,"%a_b",false]
+ - [2,"aabb",false]
+ - [3,"_a%_b",true]
+ - [4,"ba_c",false]
+ - [5,"abb",false]
+ - [6,"bA0b",false]
+ - [7,"_a#0B",true]
+ - id: 6
+ desc: "指定escape为%"
+ inputs:
+ -
+ columns : ["id bigint","c1 string","c7 timestamp"]
+ indexs: ["index1:id:c7"]
+ rows:
+ - [1,"%a_b",1590738990000]
+ - [2,"aabb",1590738991000]
+ - [3,"_a%b",1590738992000]
+ - [4,"ba_c",1590738993000]
+ - [5,"abb",1590738994000]
+ - [6,"bA%b",1590738995000]
+ - [7,"_a#0B",1590738996000]
+ dataProvider:
+ - ["like_match","ilike_match"]
+ sql: select id,c1,d[0](c1,'_a%%b','%') as v1 from {0};
+ expect:
+ order: id
+ columns: ["id bigint","c1 string","v1 bool"]
+ expectProvider:
+ 0:
+ rows:
+ - [1,"%a_b",false]
+ - [2,"aabb",false]
+ - [3,"_a%b",true]
+ - [4,"ba_c",false]
+ - [5,"abb",false]
+ - [6,"bA%b",false]
+ - [7,"_a#0B",false]
+ 1:
+ rows:
+ - [1,"%a_b",false]
+ - [2,"aabb",false]
+ - [3,"_a%b",true]
+ - [4,"ba_c",false]
+ - [5,"abb",false]
+ - [6,"bA%b",true]
+ - [7,"_a#0B",false]
+ - id: 7
+ desc: "escape不指定"
+ inputs:
+ -
+ columns : ["id bigint","c1 string","c7 timestamp"]
+ indexs: ["index1:id:c7"]
+ rows:
+ - [1,"%a_b",1590738990000]
+ - [2,"aabb",1590738991000]
+ - [3,"_a%_b",1590738992000]
+ - [4,"ba_c",1590738993000]
+ - [5,"abb",1590738994000]
+ - [6,"bA0b",1590738995000]
+ - [7,"_a#0B",1590738996000]
+ dataProvider:
+ - ["like_match","ilike_match"]
+ sql: select id,c1,d[0](c1,"\\_a%b") as v1 from {0};
+ expect:
+ order: id
+ columns: ["id bigint","c1 string","v1 bool"]
+ expectProvider:
+ 0:
+ rows:
+ - [1,"%a_b",false]
+ - [2,"aabb",false]
+ - [3,"_a%_b",true]
+ - [4,"ba_c",false]
+ - [5,"abb",false]
+ - [6,"bA0b",false]
+ - [7,"_a#0B",false]
+ 1:
+ rows:
+ - [1,"%a_b",false]
+ - [2,"aabb",false]
+ - [3,"_a%_b",true]
+ - [4,"ba_c",false]
+ - [5,"abb",false]
+ - [6,"bA0b",false]
+ - [7,"_a#0B",true]
+ - id: 8
+ desc: "escape为空串,使用\\"
+ mode: cluster-unsupport
+ inputs:
+ -
+ columns : ["id bigint","c1 string","c7 timestamp"]
+ indexs: ["index1:id:c7"]
+ rows:
+ - [1,'\\\%a_b',1590738990000]
+ - [2,'\\\aabb',1590738991000]
+ - [3,"_a%_b",1590738992000]
+ - [4,"ba_c",1590738993000]
+ - [5,"abb",1590738994000]
+ - [6,'\\\bA0b',1590738995000]
+ - [7,'\\\_a#0B',1590738996000]
+ dataProvider:
+ - ["like_match","ilike_match"]
+ sql: select id,c1,d[0](c1,"\\_a%b","") as v1 from {0};
+ expect:
+ order: id
+ columns: ["id bigint","c1 string","v1 bool"]
+ expectProvider:
+ 0:
+ rows:
+ - [1,'\%a_b',true]
+ - [2,'\aabb',true]
+ - [3,'_a%_b',false]
+ - [4,'ba_c',false]
+ - [5,"abb",false]
+ - [6,'\bA0b',false]
+ - [7,'\_a#0B',false]
+ 1:
+ rows:
+ - [1,'\%a_b',true]
+ - [2,'\aabb',true]
+ - [3,"_a%_b",false]
+ - [4,"ba_c",false]
+ - [5,"abb",false]
+ - [6,'\bA0b',true]
+ - [7,'\_a#0B',true]
+ - id: 9
+ desc: "使用两个%"
+ inputs:
+ -
+ columns : ["id bigint","c1 string","c7 timestamp"]
+ indexs: ["index1:id:c7"]
+ rows:
+ - [1,"a_b",1590738990000]
+ - [2,"aabb",1590738991000]
+ - [3,"a%_b%0",1590738992000]
+ - [4,"b_c",1590738993000]
+ - [5,"abc",1590738994000]
+ - [6,"A0b",1590738995000]
+ - [7,"a#Bb",1590738996000]
+ - [8,"aaabbcc",1590738991000]
+ dataProvider:
+ - ["like_match","ilike_match"]
+ sql: select id,c1,d[0](c1,'a%b%') as v1 from {0};
+ expect:
+ order: id
+ columns: ["id bigint","c1 string","v1 bool"]
+ expectProvider:
+ 0:
+ rows:
+ - [1,"a_b",true]
+ - [2,"aabb",true]
+ - [3,"a%_b%0",true]
+ - [4,"b_c",false]
+ - [5,"abc",true]
+ - [6,"A0b",false]
+ - [7,"a#Bb",true]
+ - [8,"aaabbcc",true]
+ 1:
+ rows:
+ - [1,"a_b",true]
+ - [2,"aabb",true]
+ - [3,"a%_b%0",true]
+ - [4,"b_c",false]
+ - [5,"abc",true]
+ - [6,"A0b",true]
+ - [7,"a#Bb",true]
+ - [8,"aaabbcc",true]
+ - id: 10
+ desc: "使用两个_"
+ inputs:
+ -
+ columns : ["id bigint","c1 string","c7 timestamp"]
+ indexs: ["index1:id:c7"]
+ rows:
+ - [1,"%a_b",1590738990000]
+ - [2,"aabb",1590738991000]
+ - [3,"_a%_b",1590738992000]
+ - [4,"ba_c",1590738993000]
+ - [5,"abb",1590738994000]
+ - [6,"bA0b",1590738995000]
+ - [7,"aa#0B",1590738996000]
+ dataProvider:
+ - ["like_match","ilike_match"]
+ sql: select id,c1,d[0](c1,'_a_b') as v1 from {0};
+ expect:
+ order: id
+ columns: ["id bigint","c1 string","v1 bool"]
+ expectProvider:
+ 0:
+ rows:
+ - [1,"%a_b",true]
+ - [2,"aabb",true]
+ - [3,"_a%_b",false]
+ - [4,"ba_c",false]
+ - [5,"abb",false]
+ - [6,"bA0b",false]
+ - [7,"aa#0B",false]
+ 1:
+ rows:
+ - [1,"%a_b",true]
+ - [2,"aabb",true]
+ - [3,"_a%_b",false]
+ - [4,"ba_c",false]
+ - [5,"abb",false]
+ - [6,"bA0b",true]
+ - [7,"aa#0B",false]
+ - id: 11
+ desc: "使用两个%,其中一个被转义"
+ inputs:
+ -
+ columns : ["id bigint","c1 string","c7 timestamp"]
+ indexs: ["index1:id:c7"]
+ rows:
+ - [1,"a_b",1590738990000]
+ - [2,"aab%",1590738991000]
+ - [3,"a%_b%0",1590738992000]
+ - [4,"b_c",1590738993000]
+ - [5,"ab%",1590738994000]
+ - [6,"A0b",1590738995000]
+ - [7,"a#B%",1590738996000]
+ dataProvider:
+ - ["like_match","ilike_match"]
+ sql: select id,c1,d[0](c1,'a%b#%','#') as v1 from {0};
+ expect:
+ order: id
+ columns: ["id bigint","c1 string","v1 bool"]
+ expectProvider:
+ 0:
+ rows:
+ - [1,"a_b",false]
+ - [2,"aab%",true]
+ - [3,"a%_b%0",false]
+ - [4,"b_c",false]
+ - [5,"ab%",true]
+ - [6,"A0b",false]
+ - [7,"a#B%",false]
+ 1:
+ rows:
+ - [1,"a_b",false]
+ - [2,"aab%",true]
+ - [3,"a%_b%0",false]
+ - [4,"b_c",false]
+ - [5,"ab%",true]
+ - [6,"A0b",false]
+ - [7,"a#B%",true]
+ - id: 12
+ desc: "using two _, one of which is escaped"
+ inputs:
+ -
+ columns : ["id bigint","c1 string","c7 timestamp"]
+ indexs: ["index1:id:c7"]
+ rows:
+ - [1,"%a_b",1590738990000]
+ - [2,"aabb",1590738991000]
+ - [3,"_a%b",1590738992000]
+ - [4,"ba_c",1590738993000]
+ - [5,"abb",1590738994000]
+ - [6,"_A0b",1590738995000]
+ - [7,"aa#0B",1590738996000]
+ dataProvider:
+ - ["like_match","ilike_match"]
+ sql: select id,c1,d[0](c1,'#_a_b','#') as v1 from {0};
+ expect:
+ order: id
+ columns: ["id bigint","c1 string","v1 bool"]
+ expectProvider:
+ 0:
+ rows:
+ - [1,"%a_b",false]
+ - [2,"aabb",false]
+ - [3,"_a%b",true]
+ - [4,"ba_c",false]
+ - [5,"abb",false]
+ - [6,"_A0b",false]
+ - [7,"aa#0B",false]
+ 1:
+ rows:
+ - [1,"%a_b",false]
+ - [2,"aabb",false]
+ - [3,"_a%b",true]
+ - [4,"ba_c",false]
+ - [5,"abb",false]
+ - [6,"_A0b",true]
+ - [7,"aa#0B",false]
+ - id: 13
+ desc: "using both % and _, with _ escaped"
+ inputs:
+ -
+ columns : ["id bigint","c1 string","c7 timestamp"]
+ indexs: ["index1:id:c7"]
+ rows:
+ - [1,"%a_b",1590738990000]
+ - [2,"aabb",1590738991000]
+ - [3,"_a%_b",1590738992000]
+ - [4,"ba_c",1590738993000]
+ - [5,"abb",1590738994000]
+ - [6,"_A0b",1590738995000]
+ - [7,"_a#0B",1590738996000]
+ dataProvider:
+ - ["like_match","ilike_match"]
+ sql: select id,c1,d[0](c1,'#_a%b','#') as v1 from {0};
+ expect:
+ order: id
+ columns: ["id bigint","c1 string","v1 bool"]
+ expectProvider:
+ 0:
+ rows:
+ - [1,"%a_b",false]
+ - [2,"aabb",false]
+ - [3,"_a%_b",true]
+ - [4,"ba_c",false]
+ - [5,"abb",false]
+ - [6,"_A0b",false]
+ - [7,"_a#0B",false]
+ 1:
+ rows:
+ - [1,"%a_b",false]
+ - [2,"aabb",false]
+ - [3,"_a%_b",true]
+ - [4,"ba_c",false]
+ - [5,"abb",false]
+ - [6,"_A0b",true]
+ - [7,"_a#0B",true]
+ - id: 14
+ desc: "using both % and _, with % escaped"
+ inputs:
+ -
+ columns : ["id bigint","c1 string","c7 timestamp"]
+ indexs: ["index1:id:c7"]
+ rows:
+ - [1,"%a%b",1590738990000]
+ - [2,"aabb",1590738991000]
+ - [3,"_a%_b",1590738992000]
+ - [4,"ba_c",1590738993000]
+ - [5,"abb",1590738994000]
+ - [6,"bA%b",1590738995000]
+ - [7,"aa#0B",1590738996000]
+ dataProvider:
+ - ["like_match","ilike_match"]
+ sql: select id,c1,d[0](c1,'_a#%b','#') as v1 from {0};
+ expect:
+ order: id
+ columns: ["id bigint","c1 string","v1 bool"]
+ expectProvider:
+ 0:
+ rows:
+ - [1,"%a%b",true]
+ - [2,"aabb",false]
+ - [3,"_a%_b",false]
+ - [4,"ba_c",false]
+ - [5,"abb",false]
+ - [6,"bA%b",false]
+ - [7,"aa#0B",false]
+ 1:
+ rows:
+ - [1,"%a%b",true]
+ - [2,"aabb",false]
+ - [3,"_a%_b",false]
+ - [4,"ba_c",false]
+ - [5,"abb",false]
+ - [6,"bA%b",true]
+ - [7,"aa#0B",false]
+ - id: 15
+ desc: "column contains null and empty string"
+ inputs:
+ -
+ columns : ["id bigint","c1 string","c7 timestamp"]
+ indexs: ["index1:id:c7"]
+ rows:
+ - [1,"",1590738990000]
+ - [2,null,1590738991000]
+ dataProvider:
+ - ["like_match","ilike_match"]
+ sql: select id,d[0](c1,'a%b') as v1 from {0};
+ expect:
+ order: id
+ columns: ["id bigint","v1 bool"]
+ rows:
+ - [1,false]
+ - [2,null]
+ - id: 16
+ desc: "using an empty string pattern"
+ inputs:
+ -
+ columns : ["id bigint","c1 string","c7 timestamp"]
+ indexs: ["index1:id:c7"]
+ rows:
+ - [1,"",1590738990000]
+ - [2,"aa",1590738990000]
+ dataProvider:
+ - ["like_match","ilike_match"]
+ sql: select id,d[0](c1,'') as v1 from {0};
+ expect:
+ order: id
+ columns: ["id bigint","v1 bool"]
+ rows:
+ - [1,true]
+ - [2,false]
+ - id: 17
+ desc: "using null as the pattern"
+ inputs:
+ -
+ columns : ["id bigint","c1 string","c7 timestamp"]
+ indexs: ["index1:id:c7"]
+ rows:
+ - [1,"",1590738990000]
+ - [2,"aa",1590738991000]
+ - [3,null,1590738992000]
+ dataProvider:
+ - ["like_match","ilike_match"]
+ sql: select id,d[0](c1,null) as v1 from {0};
+ expect:
+ success: false
+ - id: 18
+ desc: "using null as the escape"
+ inputs:
+ -
+ columns : ["id bigint","c1 string","c7 timestamp"]
+ indexs: ["index1:id:c7"]
+ rows:
+ - [1,"",1590738990000]
+ - [2,"aa",1590738991000]
+ - [3,null,1590738992000]
+ dataProvider:
+ - ["like_match","ilike_match"]
+ sql: select id,d[0](c1,'a%',null) as v1 from {0};
+ expect:
+ success: false
+ - id: 19
+ desc: "int type"
+ inputs:
+ -
+ columns : ["id bigint","c1 int","c7 timestamp"]
+ indexs: ["index1:id:c7"]
+ rows:
+ - [1,12,1590738990000]
+ dataProvider:
+ - ["like_match","ilike_match"]
+ sql: select id,d[0](c1,'1%') as v1 from {0};
+ expect:
+ success: false
+ - id: 20
+ desc: "bigint type"
+ inputs:
+ -
+ columns : ["id bigint","c1 bigint","c7 timestamp"]
+ indexs: ["index1:id:c7"]
+ rows:
+ - [1,12,1590738990000]
+ dataProvider:
+ - ["like_match","ilike_match"]
+ sql: select id,d[0](c1,'1%') as v1 from {0};
+ expect:
+ success: false
+ - id: 21
+ desc: "smallint type"
+ inputs:
+ -
+ columns : ["id bigint","c1 smallint","c7 timestamp"]
+ indexs: ["index1:id:c7"]
+ rows:
+ - [1,12,1590738990000]
+ dataProvider:
+ - ["like_match","ilike_match"]
+ sql: select id,d[0](c1,'1%') as v1 from {0};
+ expect:
+ success: false
+ - id: 22
+ desc: "float type"
+ inputs:
+ -
+ columns : ["id bigint","c1 float","c7 timestamp"]
+ indexs: ["index1:id:c7"]
+ rows:
+ - [1,12.0,1590738990000]
+ dataProvider:
+ - ["like_match","ilike_match"]
+ sql: select id,d[0](c1,'1%') as v1 from {0};
+ expect:
+ success: false
+ - id: 23
+ desc: "double type"
+ inputs:
+ -
+ columns : ["id bigint","c1 double","c7 timestamp"]
+ indexs: ["index1:id:c7"]
+ rows:
+ - [1,12.0,1590738990000]
+ dataProvider:
+ - ["like_match","ilike_match"]
+ sql: select id,d[0](c1,'1%') as v1 from {0};
+ expect:
+ success: false
+ - id: 24
+ desc: "timestamp type"
+ inputs:
+ -
+ columns : ["id bigint","c1 timestamp","c7 timestamp"]
+ indexs: ["index1:id:c7"]
+ rows:
+ - [1,12,1590738990000]
+ dataProvider:
+ - ["like_match","ilike_match"]
+ sql: select id,d[0](c1,'1%') as v1 from {0};
+ expect:
+ success: false
+ - id: 25
+ desc: "date type"
+ inputs:
+ -
+ columns : ["id bigint","c1 date","c7 timestamp"]
+ indexs: ["index1:id:c7"]
+ rows:
+ - [1,"2012-05-01",1590738990000]
+ dataProvider:
+ - ["like_match","ilike_match"]
+ sql: select id,d[0](c1,'1%') as v1 from {0};
+ expect:
+ success: false
+ - id: 26
+ desc: "bool type"
+ inputs:
+ -
+ columns : ["id bigint","c1 bool","c7 timestamp"]
+ indexs: ["index1:id:c7"]
+ rows:
+ - [1,true,1590738990000]
+ dataProvider:
+ - ["like_match","ilike_match"]
+ sql: select id,d[0](c1,'1%') as v1 from {0};
+ expect:
+ success: false
+ - id: 27
+ desc: "column does not exist"
+ inputs:
+ -
+ columns : ["id bigint","c1 string","c7 timestamp"]
+ indexs: ["index1:id:c7"]
+ rows:
+ - [1,"aa",1590738990000]
+ dataProvider:
+ - ["like_match","ilike_match"]
+ sql: select id,d[0](c2,'1%') as v1 from {0};
+ expect:
+ success: false
+ - id: 28
+ desc: "escape is more than one character"
+ inputs:
+ -
+ columns : ["id bigint","c1 string","c7 timestamp"]
+ indexs: ["index1:id:c7"]
+ rows:
+ - [1,"",1590738990000]
+ - [2,"aa",1590738991000]
+ - [3,null,1590738992000]
+ dataProvider:
+ - ["like_match","ilike_match"]
+ sql: select id,d[0](c1,'a%','<>') as v1 from {0};
+ expect:
+ success: true
+ order: id
+ columns : ["id bigint","v1 bool"]
+ rows:
+ - [1,false]
+ - [2,false]
+ - [3,null]
+ - id: 29
+ desc: "pattern ends with the escape character"
+ inputs:
+ -
+ columns : ["id bigint","c1 string","c7 timestamp"]
+ indexs: ["index1:id:c7"]
+ rows:
+ - [1,"ab#",1590738990000]
+ - [2,"aa",1590738991000]
+ dataProvider:
+ - ["like_match","ilike_match"]
+ sql: select id,d[0](c1,'a%#','#') as v1 from {0};
+ expect:
+ success: true
+ columns : ["id bigint","v1 bool"]
+ rows:
+ - [1,false]
+ - [2,false]
+
diff --git a/cases/integration_test/function/test_string.yaml b/cases/integration_test/function/test_string.yaml
new file mode 100644
index 00000000000..4b9220122f0
--- /dev/null
+++ b/cases/integration_test/function/test_string.yaml
@@ -0,0 +1,290 @@
+# Copyright 2021 4Paradigm
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+db: test_zw
+debugs: []
+version: 0.5.0
+cases:
+ - id: 0
+ desc: "concat - combinations of all types"
+ mode: cli-unsupport
+ sqlDialect: ["HybridSQL"]
+ inputs:
+ -
+ columns : ["id bigint","ts1 bigint","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"]
+ indexs: ["index1:id:ts1"]
+ rows:
+ - [1,1,"aa",30,30,30,30.0,30.0,1590738989000,"2020-05-01",false]
+ - [2,2,null,null,null,null,null,null,null,null,null]
+ -
+ columns : ["id bigint","ts1 bigint","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"]
+ indexs: ["index1:id:ts1"]
+ rows:
+ - [1,1,"",0,20,30,11.1,12.1,1590738989001,"2020-05-02",true]
+ - [2,2,"aa",30,30,30,30.0,30.0,1590738989000,"2020-05-01",false]
+ dataProvider:
+ - ["{0}.c1","{0}.c2","{0}.c3","{0}.c4","{0}.c5","{0}.c6","{0}.c7","{0}.c8","{0}.c9"]
+ sql: |
+ select
+ concat(d[0],{1}.c1) as b1,
+ concat(d[0],{1}.c2) as b2,
+ concat(d[0],{1}.c3) as b3,
+ concat(d[0],{1}.c4) as b4,
+ concat(d[0],{1}.c5) as b5,
+ concat(d[0],{1}.c6) as b6,
+ concat(d[0],{1}.c7) as b7,
+ concat(d[0],{1}.c8) as b8,
+ concat(d[0],{1}.c9) as b9
+ from {0} last join {1} ORDER BY {1}.ts1 on {0}.id={1}.id;
+ expect:
+ columns: ["b1 string","b2 string","b3 string","b4 string","b5 string","b6 string","b7 string","b8 string","b9 string"]
+ expectProvider:
+ 0:
+ rows:
+ - ["aa","aa0","aa20","aa30","aa11.1","aa12.1","aa2020-05-29 15:56:29","aa2020-05-02","aatrue"]
+ - [null,null,null,null,null,null,null,null,null]
+ 1:
+ rows:
+ - ["30","300","3020","3030","3011.1","3012.1","302020-05-29 15:56:29","302020-05-02","30true"]
+ - [null,null,null,null,null,null,null,null,null]
+ 2:
+ rows:
+ - ["30","300","3020","3030","3011.1","3012.1","302020-05-29 15:56:29","302020-05-02","30true"]
+ - [null,null,null,null,null,null,null,null,null]
+ 3:
+ rows:
+ - ["30","300","3020","3030","3011.1","3012.1","302020-05-29 15:56:29","302020-05-02","30true"]
+ - [null,null,null,null,null,null,null,null,null]
+ 4:
+ rows:
+ - ["30","300","3020","3030","3011.1","3012.1","302020-05-29 15:56:29","302020-05-02","30true"]
+ - [null,null,null,null,null,null,null,null,null]
+ 5:
+ rows:
+ - ["30","300","3020","3030","3011.1","3012.1","302020-05-29 15:56:29","302020-05-02","30true"]
+ - [null,null,null,null,null,null,null,null,null]
+ 6:
+ rows:
+ - ["2020-05-29 15:56:29","2020-05-29 15:56:290","2020-05-29 15:56:2920","2020-05-29 15:56:2930","2020-05-29 15:56:2911.1","2020-05-29 15:56:2912.1","2020-05-29 15:56:292020-05-29 15:56:29","2020-05-29 15:56:292020-05-02","2020-05-29 15:56:29true"]
+ - [null,null,null,null,null,null,null,null,null]
+ 7:
+ rows:
+ - ["2020-05-01","2020-05-010","2020-05-0120","2020-05-0130","2020-05-0111.1","2020-05-0112.1","2020-05-012020-05-29 15:56:29","2020-05-012020-05-02","2020-05-01true"]
+ - [null,null,null,null,null,null,null,null,null]
+ 8:
+ rows:
+ - ["false","false0","false20","false30","false11.1","false12.1","false2020-05-29 15:56:29","false2020-05-02","falsetrue"]
+ - [null,null,null,null,null,null,null,null,null]
+ - id: 1
+ desc: concat three strings
+ sqlDialect: ["HybridSQL","MYSQL"]
+ inputs:
+ - columns: ["id int", "c1 string","c2 string","c7 timestamp"]
+ indexs: ["index1:c1:c7"]
+ rows:
+ - [1, "aa","bbb",1590738989000]
+ sql: select id, c1, c2, concat(c1, c2,"cc") as c12 from {0};
+ expect:
+ columns: ["id int", "c1 string","c2 string", "c12 string"]
+ rows:
+ - [1, "aa", "bbb", "aabbbcc"]
+
+ - id: 2
+ desc: concat_ws with one string and with three strings
+ sqlDialect: ["HybridSQL","MYSQL"]
+ inputs:
+ - columns: ["id int", "c1 string","c2 string","c7 timestamp"]
+ indexs: ["index1:c1:c7"]
+ rows:
+ - [1, "aa","bbb",1590738989000]
+ sql: select id, c1, concat_ws("-",c2) as c2, concat_ws("-", c1, c2,"cc") as c1_2 from {0};
+ expect:
+ columns: ["id int", "c1 string","c2 string","c1_2 string"]
+ rows:
+ - [1, "aa", "bbb", "aa-bbb-cc"]
+ - id: 3
+ mode: cli-unsupport
+ desc: "concat_ws - all types"
+ sqlDialect: ["HybridSQL"]
+ inputs:
+ -
+ columns : ["id bigint","ts1 bigint","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"]
+ indexs: ["index1:id:ts1"]
+ rows:
+ - [1,1,"aa",30,30,30,30.0,30.0,1590738989000,"2020-05-01",false]
+ - [2,2,"aa",30,30,30,30.0,30.0,1590738989000,"2020-05-01",false]
+ -
+ columns : ["id bigint","ts1 bigint","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"]
+ indexs: ["index1:id:ts1"]
+ rows:
+ - [1,1,"",0,20,30,11.1,12.1,1590738989001,"2020-05-02",true]
+ - [2,2,null,null,null,null,null,null,null,null,null]
+ dataProvider:
+ - ["{0}.c1","{0}.c2","{0}.c3","{0}.c4","{0}.c5","{0}.c6","{0}.c7","{0}.c8","{0}.c9"]
+ sql: |
+ select
+ concat_ws(d[0],{0}.c1,{1}.c1) as b1,
+ concat_ws(d[0],{0}.c1,{1}.c2) as b2,
+ concat_ws(d[0],{0}.c1,{1}.c3) as b3,
+ concat_ws(d[0],{0}.c1,{1}.c4) as b4,
+ concat_ws(d[0],{0}.c1,{1}.c5) as b5,
+ concat_ws(d[0],{0}.c1,{1}.c6) as b6,
+ concat_ws(d[0],{0}.c1,{1}.c7) as b7,
+ concat_ws(d[0],{0}.c1,{1}.c8) as b8,
+ concat_ws(d[0],{0}.c1,{1}.c9) as b9
+ from {0} last join {1} ORDER BY {1}.ts1 on {0}.id={1}.id;
+ expect:
+ columns: ["b1 string","b2 string","b3 string","b4 string","b5 string","b6 string","b7 string","b8 string","b9 string"]
+ expectProvider:
+ 0:
+ rows:
+ - ["aaaa","aaaa0","aaaa20","aaaa30","aaaa11.1","aaaa12.1","aaaa2020-05-29 15:56:29","aaaa2020-05-02","aaaatrue"]
+ - [null,null,null,null,null,null,null,null,null]
+ 1:
+ rows:
+ - ["aa30","aa300","aa3020","aa3030","aa3011.1","aa3012.1","aa302020-05-29 15:56:29","aa302020-05-02","aa30true"]
+ - [null,null,null,null,null,null,null,null,null]
+ 2:
+ rows:
+ - ["aa30","aa300","aa3020","aa3030","aa3011.1","aa3012.1","aa302020-05-29 15:56:29","aa302020-05-02","aa30true"]
+ - [null,null,null,null,null,null,null,null,null]
+ 3:
+ rows:
+ - ["aa30","aa300","aa3020","aa3030","aa3011.1","aa3012.1","aa302020-05-29 15:56:29","aa302020-05-02","aa30true"]
+ - [null,null,null,null,null,null,null,null,null]
+ 4:
+ rows:
+ - ["aa30","aa300","aa3020","aa3030","aa3011.1","aa3012.1","aa302020-05-29 15:56:29","aa302020-05-02","aa30true"]
+ - [null,null,null,null,null,null,null,null,null]
+ 5:
+ rows:
+ - ["aa30","aa300","aa3020","aa3030","aa3011.1","aa3012.1","aa302020-05-29 15:56:29","aa302020-05-02","aa30true"]
+ - [null,null,null,null,null,null,null,null,null]
+ 6:
+ rows:
+ - ["aa2020-05-29 15:56:29","aa2020-05-29 15:56:290","aa2020-05-29 15:56:2920","aa2020-05-29 15:56:2930","aa2020-05-29 15:56:2911.1","aa2020-05-29 15:56:2912.1","aa2020-05-29 15:56:292020-05-29 15:56:29","aa2020-05-29 15:56:292020-05-02","aa2020-05-29 15:56:29true"]
+ - [null,null,null,null,null,null,null,null,null]
+ 7:
+ rows:
+ - ["aa2020-05-01","aa2020-05-010","aa2020-05-0120","aa2020-05-0130","aa2020-05-0111.1","aa2020-05-0112.1","aa2020-05-012020-05-29 15:56:29","aa2020-05-012020-05-02","aa2020-05-01true"]
+ - [null,null,null,null,null,null,null,null,null]
+ 8:
+ rows:
+ - ["aafalse","aafalse0","aafalse20","aafalse30","aafalse11.1","aafalse12.1","aafalse2020-05-29 15:56:29","aafalse2020-05-02","aafalsetrue"]
+ - [null,null,null,null,null,null,null,null,null]
+ - id: 4
+ desc: strcmp two strings
+ sqlDialect: ["HybridSQL"]
+ inputs:
+ - columns: ["id int", "c1 string","c2 string","c7 timestamp"]
+ indexs: ["index1:c1:c7"]
+ rows:
+ - [1, "text","text2",1590738989000]
+ - [2, "text","text",1590738989000]
+ - [3, "text2","text",1590738989000]
+ - [4, null,"text",1590738989000]
+ - [5, "text",null,1590738989000]
+ - [6, null,null,1590738989000]
+ sql: select id, c1, c2, strcmp(c1, c2) as cmp_c1c2 from {0};
+ expect:
+ columns: ["id int", "c1 string","c2 string","cmp_c1c2 int"]
+ order: id
+ rows:
+ - [1, "text", "text2", -1]
+ - [2, "text", "text", 0]
+ - [3, "text2", "text", 1]
+ - [4, null,"text",null]
+ - [5, "text",null,null]
+ - [6, null,null,null]
+ - id: 5
+ desc: "strcmp-fail"
+ sqlDialect: ["HybridSQL"]
+ level: 5
+ inputs:
+ -
+ columns : ["id bigint","ts1 bigint","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"]
+ indexs: ["index1:id:ts1"]
+ rows:
+ - [1,1,"123456789",30,30,30,30.0,30.0,1590738989000,"2020-05-01",false]
+ dataProvider:
+ - ["{0}.c2","{0}.c3","{0}.c4","{0}.c5","{0}.c6","{0}.c7","{0}.c8","{0}.c9"]
+ - ["{0}.c1","{0}.c2","{0}.c3","{0}.c4","{0}.c5","{0}.c6","{0}.c7","{0}.c8","{0}.c9"]
+ sql: select strcmp(d[0],d[1]) from {0};
+ expect:
+ success: false
+ - id: 6
+ desc: "strcmp-string-fail"
+ sqlDialect: ["HybridSQL"]
+ level: 5
+ inputs:
+ -
+ columns : ["id bigint","ts1 bigint","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"]
+ indexs: ["index1:id:ts1"]
+ rows:
+ - [1,1,"123456789",30,30,30,30.0,30.0,1590738989000,"2020-05-01",false]
+ dataProvider:
+ - ["{0}.c1"]
+ - ["{0}.c2","{0}.c3","{0}.c4","{0}.c5","{0}.c6","{0}.c7","{0}.c8","{0}.c9"]
+ sql: select strcmp(d[0],d[1]) from {0};
+ expect:
+ success: false
+ - id: 7
+ desc: "substr-normal"
+ mode: cli-unsupport
+ inputs:
+ -
+ columns : ["id bigint","ts1 bigint","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"]
+ indexs: ["index1:id:ts1"]
+ rows:
+ - [1,1,"123456789",30,30,30,30.0,30.0,1590738989000,"2020-05-01",false]
+ - [2,2,null,null,null,null,null,null,null,null,null]
+ dataProvider:
+ - ["substr","substring"]
+ sql: |
+ select
+ d[0](c1,3) as b1,
+ d[0](c1,3,2) as b2,
+ d[0](c1,3,20) as b3,
+ d[0](c1,30,2) as b4,
+ d[0](c1,30) as b5
+ from {0};
+ expect:
+ columns: ["b1 string","b2 string","b3 string","b4 string","b5 string"]
+ expectProvider:
+ 0:
+ rows:
+ - ["3456789","34","3456789","",""]
+ - [null,null,null,null,null]
+ 1:
+ rows:
+ - ["3456789","34","3456789","",""]
+ - [null,null,null,null,null]
+
+ - id: 8
+ desc: "substr-fail"
+ sqlDialect: ["HybridSQL"]
+ level: 5
+ inputs:
+ -
+ columns : ["id bigint","ts1 bigint","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"]
+ indexs: ["index1:id:ts1"]
+ rows:
+ - [1,1,"123456789",30,30,30,30.0,30.0,1590738989000,"2020-05-01",false]
+ - [2,2,null,null,null,null,null,null,null,null,null]
+ dataProvider:
+ - ["substr","substring"]
+ - ["{0}.c2","{0}.c3","{0}.c4","{0}.c5","{0}.c6","{0}.c7","{0}.c8","{0}.c9"]
+ sql: select d[0](d[1],1) from {0};
+ expect:
+ success: false
diff --git a/cases/integration_test/function/test_udaf_function.yaml b/cases/integration_test/function/test_udaf_function.yaml
new file mode 100644
index 00000000000..0642ed737fa
--- /dev/null
+++ b/cases/integration_test/function/test_udaf_function.yaml
@@ -0,0 +1,2563 @@
+# Copyright 2021 4Paradigm
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+db: test_zw
+debugs: []
+version: 0.5.0
+cases:
+ -
+ id: 0
+ desc: max
+ sqlDialect: ["HybridSQL"]
+ inputs:
+ -
+ columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 string","c10 bool"]
+ indexs: ["index1:c1:c7"]
+ rows:
+ - [1,"aa",1,1,30,1.1,2.1,1590738990000,"2020-05-01","a",true]
+ - [2,"aa",4,4,33,1.4,2.4,1590738991000,"2020-05-03","c",false]
+ - [3,"aa",3,3,32,1.3,2.3,1590738992000,"2020-05-02","b",true]
+ - [4,"aa",NULL,NULL,NULL,NULL,NULL,1590738993000,NULL,NULL,NULL]
+ sql: |
+ SELECT {0}.id, c1, max(c2) OVER w1 as m2,max(c3) OVER w1 as m3,max(c4) OVER w1 as m4,max(c5) OVER w1 as m5,max(c6) OVER w1 as m6,max(c7) OVER w1 as m7,max(c8) OVER w1 as m8,max(c9) OVER w1 as m9 FROM {0} WINDOW
+ w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS BETWEEN 2 PRECEDING AND CURRENT ROW);
+ expect:
+ order: id
+ columns: ["id int","c1 string","m2 smallint","m3 int","m4 bigint","m5 float","m6 double","m7 timestamp","m8 date","m9 string"]
+ rows:
+ - [1,"aa",1,1,30,1.1,2.1,1590738990000,"2020-05-01","a"]
+ - [2,"aa",4,4,33,1.4,2.4,1590738991000,"2020-05-03","c"]
+ - [3,"aa",4,4,33,1.4,2.4,1590738992000,"2020-05-03","c"]
+ - [4,"aa",4,4,33,1.4,2.4,1590738993000,"2020-05-03","c"]
+ -
+ id: 1
+ desc: min
+ sqlDialect: ["HybridSQL"]
+ inputs:
+ -
+ columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 string","c10 bool"]
+ indexs: ["index1:c1:c7"]
+ rows:
+ - [1,"aa",1,1,30,1.1,2.1,1590738990000,"2020-05-01","a",true]
+ - [2,"aa",4,4,33,1.4,2.4,1590738991000,"2020-05-03","c",false]
+ - [3,"aa",3,3,32,1.3,2.3,1590738992000,"2020-05-02","b",true]
+ - [4,"aa",NULL,NULL,NULL,NULL,NULL,1590738993000,NULL,NULL,NULL]
+ sql: |
+ SELECT {0}.id, c1, min(c2) OVER w1 as m2,min(c3) OVER w1 as m3,min(c4) OVER w1 as m4,min(c5) OVER w1 as m5,min(c6) OVER w1 as m6,min(c7) OVER w1 as m7,min(c8) OVER w1 as m8,min(c9) OVER w1 as m9 FROM {0} WINDOW
+ w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS BETWEEN 2 PRECEDING AND CURRENT ROW);
+ expect:
+ order: id
+ columns: ["id int","c1 string","m2 smallint","m3 int","m4 bigint","m5 float","m6 double","m7 timestamp","m8 date","m9 string"]
+ rows:
+ - [1,"aa",1,1,30,1.1,2.1,1590738990000,"2020-05-01","a"]
+ - [2,"aa",1,1,30,1.1,2.1,1590738990000,"2020-05-01","a"]
+ - [3,"aa",1,1,30,1.1,2.1,1590738990000,"2020-05-01","a"]
+ - [4,"aa",3,3,32,1.3,2.3,1590738991000,"2020-05-02","b"]
+ -
+ id: 2
+ desc: count
+ inputs:
+ -
+ columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 string","c10 bool"]
+ indexs: ["index1:c1:c7"]
+ rows:
+ - [1,"aa",1,1,30,1.1,2.1,1590738990000,"2020-05-01","a",true]
+ - [2,"aa",4,4,33,1.4,2.4,1590738991000,"2020-05-03","c",false]
+ - [3,"aa",3,3,32,1.3,2.3,1590738992000,"2020-05-02","b",true]
+ - [4,"aa",NULL,NULL,NULL,NULL,NULL,1590738993000,NULL,NULL,NULL]
+ sql: |
+ SELECT {0}.id, c1, count(c2) OVER w1 as m2,count(c3) OVER w1 as m3,count(c4) OVER w1 as m4,count(c5) OVER w1 as m5,count(c6) OVER w1 as m6,count(c7) OVER w1 as m7,count(c8) OVER w1 as m8,count(c9) OVER w1 as m9 FROM {0} WINDOW
+ w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS BETWEEN 2 PRECEDING AND CURRENT ROW);
+ expect:
+ order: id
+ columns: ["id int","c1 string","m2 bigint","m3 bigint","m4 bigint","m5 bigint","m6 bigint","m7 bigint","m8 bigint","m9 bigint"]
+ rows:
+ - [1,"aa",1,1,1,1,1,1,1,1]
+ - [2,"aa",2,2,2,2,2,2,2,2]
+ - [3,"aa",3,3,3,3,3,3,3,3]
+ - [4,"aa",2,2,2,2,2,3,2,2]
+ -
+ id: 3
+ desc: sum
+ sqlDialect: ["HybridSQL"]
+ inputs:
+ -
+ columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 string","c10 bool"]
+ indexs: ["index1:c1:c7"]
+ rows:
+ - [1,"aa",1,1,30,1.1,2.1,1590738990000,"2020-05-01","a",true]
+ - [2,"aa",4,4,33,1.4,2.4,1590738991000,"2020-05-03","c",false]
+ - [3,"aa",3,3,32,1.3,2.3,1590738992000,"2020-05-02","b",true]
+ - [4,"aa",NULL,NULL,NULL,NULL,NULL,1590738993000,NULL,NULL,NULL]
+ sql: |
+ SELECT {0}.id, c1, sum(c2) OVER w1 as m2,sum(c3) OVER w1 as m3,sum(c4) OVER w1 as m4,sum(c5) OVER w1 as m5,sum(c6) OVER w1 as m6 FROM {0} WINDOW
+ w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS BETWEEN 2 PRECEDING AND CURRENT ROW);
+ expect:
+ order: id
+ columns: ["id int","c1 string","m2 smallint","m3 int","m4 bigint","m5 float","m6 double"]
+ rows:
+ - [1,"aa",1,1,30,1.1,2.1]
+ - [2,"aa",5,5,63,2.5,4.5]
+ - [3,"aa",8,8,95,3.7999997,6.799999999999999]
+ - [4,"aa",7,7,65,2.7,4.7]
+ -
+ id: 4
+ desc: avg
+ version: 0.6.0
+ sqlDialect: ["HybridSQL"]
+ inputs:
+ -
+ columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 string","c10 bool"]
+ indexs: ["index1:c1:c7"]
+ rows:
+ - [1,"aa",1, NULL,30, 1.1, 2.1, 1590738990000,"2020-05-01","a",true]
+ - [2,"aa",4, 4, 33, 1.4, 2.4, 1590738991000,"2020-05-03","c",false]
+ - [3,"aa",1, 1, 33, 1.1, 2.1, 1590738992000,"2020-05-02","b",true]
+ - [4,"aa",NULL,NULL,NULL,NULL,NULL,1590738993000,NULL,NULL,NULL]
+ sql: |
+ SELECT {0}.id, c1,
+ avg(c2) OVER w1 as m2,
+ avg(c3) OVER w1 as m3,
+ avg(c4) OVER w1 as m4,
+ avg(c5) OVER w1 as m5,
+ avg(c6) OVER w1 as m6,
+ avg(c3 + 1) over w1 as m7
+ FROM {0} WINDOW
+ w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS BETWEEN 2 PRECEDING AND CURRENT ROW);
+ expect:
+ order: id
+ columns: ["id int","c1 string","m2 double","m3 double","m4 double","m5 double","m6 double", "m7 double"]
+ rows:
+ - [1, aa, 1, NULL, 30, 1.100000023841858,2.1, NULL]
+ - [2, aa, 2.5, 4.0, 31.5, 1.25, 2.25, 5.0]
+ - [3, aa, 2, 2.5, 32, 1.200000007947286,2.1999999999999997,3.5]
+ - [4, aa, 2.5, 2.5, 33, 1.25, 2.25, 3.5]
+ -
+ id: 5
+ desc: distinct_count
+ sqlDialect: ["HybridSQL"]
+ inputs:
+ -
+ columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 string","c10 bool","ts timestamp"]
+ indexs: ["index1:c1:ts"]
+ rows:
+ - [1,"aa",1,1,30,1.1,2.1,1590738990000,"2020-05-01","a",true,1590738990000]
+ - [2,"aa",4,4,33,1.4,2.4,1590738991000,"2020-05-02","c",false,1590738991000]
+ - [3,"aa",4,4,33,1.4,2.4,1590738991000,"2020-05-02","c",true,1590738992000]
+ - [4,"aa",NULL,NULL,NULL,NULL,NULL,1590738993000,NULL,NULL,NULL,1590738993000]
+ sql: |
+ SELECT {0}.id, c1, distinct_count(c2) OVER w1 as m2,distinct_count(c3) OVER w1 as m3,distinct_count(c4) OVER w1 as m4,distinct_count(c5) OVER w1 as m5,distinct_count(c6) OVER w1 as m6,distinct_count(c7) OVER w1 as m7,distinct_count(c8) OVER w1 as m8,distinct_count(c9) OVER w1 as m9 FROM {0} WINDOW
+ w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.ts ROWS BETWEEN 2 PRECEDING AND CURRENT ROW);
+ expect:
+ order: id
+ columns: ["id int","c1 string","m2 bigint","m3 bigint","m4 bigint","m5 bigint","m6 bigint","m7 bigint","m8 bigint","m9 bigint"]
+ rows:
+ - [1,"aa",1,1,1,1,1,1,1,1]
+ - [2,"aa",2,2,2,2,2,2,2,2]
+ - [3,"aa",2,2,2,2,2,2,2,2]
+ - [4,"aa",2,2,2,2,2,2,2,2]
+ -
+ id: 6
+ desc: count/distinct_count-bool
+ sqlDialect: ["HybridSQL"]
+ level: 5
+ inputs:
+ -
+ columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 string","c10 bool"]
+ indexs: ["index1:c1:c7"]
+ rows:
+ - [1,"aa",1,1,30,1.1,2.1,1590738990000,"2020-05-01","a",true]
+ - [2,"aa",4,4,33,1.4,2.4,1590738991000,"2020-05-03","c",false]
+ - [3,"aa",1,1,33,1.1,2.1,1590738992000,"2020-05-02","b",true]
+ - [4,"aa",NULL,NULL,NULL,NULL,NULL,1590738993000,NULL,NULL,NULL]
+ sql: |
+ SELECT {0}.id, count(c10) OVER w1 as count_bool, distinct_count(c10) OVER w1 as distinct_count_bool
+ FROM {0} WINDOW
+ w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS BETWEEN 2 PRECEDING AND CURRENT ROW);
+ expect:
+ order: id
+ columns: ["id int", "count_bool bigint", "distinct_count_bool bigint"]
+ rows:
+ - [1,1,1]
+ - [2,2,2]
+ - [3,3,2]
+ - [4,2,2]
+ -
+ id: 7
+ desc: sum-timestamp
+ sqlDialect: ["HybridSQL"]
+ inputs:
+ -
+ columns : ["id bigint","c1 string", "c2 timestamp", "c3 timestamp"]
+ indexs: ["index1:c1:c2"]
+ rows:
+ - [1,"aa",1590738990000,1590738990000]
+ - [2,"aa",1590738991000,1590738991000]
+ - [3,"aa",1590738992000,1590738992000]
+ - [4,"aa",1590738993000,NULL]
+ sql: |
+ SELECT {0}.id, sum(c3) OVER w1 as m2 FROM {0} WINDOW
+ w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c2 ROWS BETWEEN 2 PRECEDING AND CURRENT ROW);
+ expect:
+ order: id
+ columns: ["id bigint", "m2 timestamp"]
+ rows:
+ - [1, 1590738990000]
+ - [2, 3181477981000]
+ - [3, 4772216973000]
+ - [4, 3181477983000]
+ -
+ id: 8
+ desc: avg-timestamp
+ sqlDialect: ["HybridSQL"]
+ level: 5
+ inputs:
+ -
+ columns : ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 string"]
+ indexs: ["index1:c1:c7"]
+ rows:
+ - [1,"aa",1,30,1.1,2.1,1590738990000,"2020-05-01","a"]
+ - [2,"aa",4,33,1.4,2.4,1590738991000,"2020-05-03","c"]
+ - [3,"aa",3,32,1.3,2.3,1590738992000,"2020-05-02","b"]
+ sql: |
+ SELECT {0}.id, c1,avg(c7) OVER w1 as m7 FROM {0} WINDOW
+ w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS BETWEEN 2 PRECEDING AND CURRENT ROW);
+ expect:
+ success: false
+ -
+ id: 9
+ desc: sum-date
+ sqlDialect: ["HybridSQL"]
+ level: 5
+ inputs:
+ -
+ columns : ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 string"]
+ indexs: ["index1:c1:c7"]
+ rows:
+ - [1,"aa",1,30,1.1,2.1,1590738990000,"2020-05-01","a"]
+ - [2,"aa",4,33,1.4,2.4,1590738991000,"2020-05-03","c"]
+ - [3,"aa",3,32,1.3,2.3,1590738992000,"2020-05-02","b"]
+ sql: |
+ SELECT {0}.id, c1,sum(c8) OVER w1 as m8 FROM {0} WINDOW
+ w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS BETWEEN 2 PRECEDING AND CURRENT ROW);
+ expect:
+ success: false
+ -
+ id: 10
+ desc: sum-string
+ sqlDialect: ["HybridSQL"]
+ level: 5
+ inputs:
+ -
+ columns : ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 string"]
+ indexs: ["index1:c1:c7"]
+ rows:
+ - [1,"aa",1,30,1.1,2.1,1590738990000,"2020-05-01","a"]
+ - [2,"aa",4,33,1.4,2.4,1590738991000,"2020-05-03","c"]
+ - [3,"aa",3,32,1.3,2.3,1590738992000,"2020-05-02","b"]
+ sql: |
+ SELECT {0}.id, c1,sum(c9) OVER w1 as m9 FROM {0} WINDOW
+ w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS BETWEEN 2 PRECEDING AND CURRENT ROW);
+ expect:
+ success: false
+ -
+ id: 11
+ desc: avg-date
+ sqlDialect: ["HybridSQL"]
+ level: 5
+ inputs:
+ -
+ columns : ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 string"]
+ indexs: ["index1:c1:c7"]
+ rows:
+ - [1,"aa",1,30,1.1,2.1,1590738990000,"2020-05-01","a"]
+ - [2,"aa",4,33,1.4,2.4,1590738991000,"2020-05-03","c"]
+ - [3,"aa",3,32,1.3,2.3,1590738992000,"2020-05-02","b"]
+ sql: |
+ SELECT {0}.id, c1,avg(c8) OVER w1 as m8 FROM {0} WINDOW
+ w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS BETWEEN 2 PRECEDING AND CURRENT ROW);
+ expect:
+ success: false
+ -
+ id: 12
+ desc: avg-string
+ sqlDialect: ["HybridSQL"]
+ level: 5
+ inputs:
+ -
+ columns : ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 string"]
+ indexs: ["index1:c1:c7"]
+ rows:
+ - [1,"aa",1,30,1.1,2.1,1590738990000,"2020-05-01","a"]
+ - [2,"aa",4,33,1.4,2.4,1590738991000,"2020-05-03","c"]
+ - [3,"aa",3,32,1.3,2.3,1590738992000,"2020-05-02","b"]
+ sql: |
+ SELECT {0}.id, c1,avg(c9) OVER w1 as m9 FROM {0} WINDOW
+ w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS BETWEEN 2 PRECEDING AND CURRENT ROW);
+ expect:
+ success: false
+ -
+ id: 13
+ desc: MAX_WHERE-normal
+ sqlDialect: ["HybridSQL"]
+ inputs:
+ -
+ columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 string","c10 bool"]
+ indexs: ["index1:c1:c7"]
+ rows:
+ - [0, "00", 5, 3, 10, 1.0, 4.4, 1590738990000, "2020-05-01", "a", false]
+ - [1, "aa", 1, 1, 30, 1.1, 2.1, 1590738990000, "2020-05-01", "a", true]
+ - [2, "aa", 4, 4, 33, 1.4, 2.4, 1590738991000, "2020-05-03", "c", false]
+ - [3, "aa", 3, 3, 32, 1.3, 2.3, 1590738992000, "2020-05-02", "b", true]
+ - [4, "aa", NULL,NULL,NULL,NULL,NULL,1590738993000,NULL,NULL,NULL]
+ sql: |
+ SELECT {0}.id, c1, max_where(c2,c2<4) OVER w1 as m2,max_where(c3,c3<4) OVER w1 as m3,max_where(c4,c10) OVER w1 as m4,max_where(c5,c5<=1.3) OVER w1 as m5,max_where(c6,c6<=2.3) OVER w1 as m6 FROM {0} WINDOW
+ w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS BETWEEN 2 PRECEDING AND CURRENT ROW);
+ expect:
+ order: id
+ columns: ["id int","c1 string","m2 smallint","m3 int","m4 bigint","m5 float","m6 double"]
+ rows:
+ - [0,"00",NULL,3,NULL,1.0,NULL]
+ - [1,"aa",1,1,30,1.1,2.1]
+ - [2,"aa",1,1,30,1.1,2.1]
+ - [3,"aa",3,3,32,1.3,2.3]
+ - [4,"aa",3,3,32,1.3,2.3]
+ -
+ id: 14
+ desc: MIN_WHERE-normal
+ sqlDialect: ["HybridSQL"]
+ inputs:
+ -
+ columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 string","c10 bool"]
+ indexs: ["index1:c1:c7"]
+ rows:
+ - [1,"aa",1,1,30,1.1,2.1,1590738990000,"2020-05-01","a",true]
+ - [2,"aa",4,4,33,1.4,2.4,1590738991000,"2020-05-03","c",false]
+ - [3,"aa",3,3,32,1.3,2.3,1590738992000,"2020-05-02","b",true]
+ - [4,"aa",NULL,NULL,NULL,NULL,NULL,1590738993000,NULL,NULL,NULL]
+ sql: |
+ SELECT {0}.id, c1, min_where(c2,c2>2) OVER w1 as m2,min_where(c3,c3>=3) OVER w1 as m3,min_where(c4,c4<33) OVER w1 as m4,min_where(c5,c5<=2) OVER w1 as m5,min_where(c6,c10) OVER w1 as m6 FROM {0} WINDOW
+ w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS BETWEEN 2 PRECEDING AND CURRENT ROW);
+ expect:
+ order: id
+ columns: ["id int","c1 string","m2 smallint","m3 int","m4 bigint","m5 float","m6 double"]
+ rows:
+ - [1,"aa",NULL,NULL,30,1.1,2.1]
+ - [2,"aa",4,4,30,1.1,2.1]
+ - [3,"aa",3,3,30,1.1,2.1]
+ - [4,"aa",3,3,32,1.3,2.3]
+ -
+ id: 15
+ desc: SUM_WHERE-normal
+ sqlDialect: ["HybridSQL"]
+ version: 0.6.0
+ inputs:
+ -
+ columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 string","c10 bool"]
+ indexs: ["index1:c1:c7"]
+ rows:
+ - [1,"aa",1, 1, 30, NULL,2.1, 1590738990000, "2020-05-01","a",true]
+ - [2,"aa",4, 4, NULL,1.4, 2.4, 1590738991000, "2020-05-03","c",false]
+ - [3,"aa",3, NULL,33, 1.3, 2.3, 1590738992000, "2020-05-02","b",true]
+ - [4,"aa",NULL,3, 32, 1.1, NULL,1590738993000, NULL,NULL,NULL]
+ sql: |
+ SELECT {0}.id, c1,
+ sum_where(c2,c2<4) OVER w1 as m2,
+ sum_where(c3,c3<4) OVER w1 as m3,
+ sum_where(c4,c4<33) OVER w1 as m4,
+ sum_where(c5,c5<=1.3) OVER w1 as m5,
+ sum_where(c6,c10) OVER w1 as m6,
+ sum_where(c2, c2 = null) over w1 as m7
+ FROM {0} WINDOW
+ w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS BETWEEN 2 PRECEDING AND CURRENT ROW);
+ expect:
+ order: id
+ columns: ["id int","c1 string","m2 smallint","m3 int","m4 bigint","m5 float","m6 double", "m7 smallint"]
+ rows:
+ - [1, "aa", 1, 1, 30, NULL, 2.1, NULL]
+ - [2, "aa", 1, 1, 30, NULL, 2.1, NULL]
+ - [3, "aa", 4, 1, 30, 1.3, 4.4, NULL]
+ - [4, "aa", 3, 3, 32, 2.4, 2.3, NULL]
+ -
+ id: 16
+ desc: AVG_WHERE-normal
+ sqlDialect: ["HybridSQL"]
+ version: 0.6.0
+ inputs:
+ -
+ columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 string","c10 bool"]
+ indexs: ["index1:c1:c7"]
+ rows:
+ - [1, "aa", 1, 1, 30, NULL,2.1, 1590738990000, "2020-05-01", "a", true]
+ - [2, "aa", 4, 4, NULL,1.4, 2.4, 1590738991000, "2020-05-03", "c", false]
+ - [3, "aa", 3, NULL,32, 1.3, 2.3, 1590738992000, "2020-05-02", "b", true]
+ - [4, "aa", NULL,3, 33, 1.1, NULL,1590738993000, NULL, NULL,NULL]
+ sql: |
+ SELECT {0}.id, c1,
+ avg_where(c2, c2<4) OVER w1 as m2,
+ avg_where(c3, c3<4) OVER w1 as m3,
+ avg_where(c4, c4<33) OVER w1 as m4,
+ avg_where(c5, c5<=1.3) OVER w1 as m5,
+ avg_where(c6, c10) OVER w1 as m6,
+ avg_where(c3, c3 = null) over w1 as m7
+ FROM {0} WINDOW
+ w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS BETWEEN 2 PRECEDING AND CURRENT ROW);
+ expect:
+ order: id
+ columns: ["id int","c1 string","m2 double","m3 double","m4 double","m5 double","m6 double", "m7 double"]
+ rows:
+ - [1, aa, 1, 1, 30, NULL, 2.1, NULL]
+ - [2, aa, 1, 1, 30, NULL, 2.1, NULL]
+ - [3, aa, 2, 1, 31, 1.2999999523162842, 2.2, NULL]
+ - [4, aa, 3, 3, 32, 1.199999988079071, 2.3, NULL]
+ -
+ id: 17
+ desc: COUNT_WHERE-normal
+ sqlDialect: ["HybridSQL"]
+ inputs:
+ -
+ columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 string","c10 bool"]
+ indexs: ["index1:c1:c7"]
+ rows:
+ - [1,"aa",1,1,30,1.1,2.1,1590738990000,"2020-05-01","a",true]
+ - [2,"aa",4,4,33,1.4,2.4,1590738991000,"2020-05-03","c",false]
+ - [3,"aa",3,3,32,1.3,2.3,1590738992000,"2020-05-02","b",true]
+ - [4,"aa",NULL,NULL,NULL,NULL,NULL,1590738993000,NULL,NULL,NULL]
+ sql: |
+ SELECT {0}.id, c1, count_where(c2,c2<4) OVER w1 as m2,count_where(c3,c3<4) OVER w1 as m3,count_where(c4,c4<33) OVER w1 as m4,count_where(c5,c5<=1.3) OVER w1 as m5,count_where(c6,c10) OVER w1 as m6,
+ count_where(c7,c10) OVER w1 as m7,count_where(c8,c10) OVER w1 as m8,count_where(c9,c10) OVER w1 as m9, count_where(*,c3<4) over w1 as m10 FROM {0} WINDOW
+ w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS BETWEEN 2 PRECEDING AND CURRENT ROW);
+ expect:
+ order: id
+ columns: ["id int","c1 string","m2 bigint","m3 bigint","m4 bigint","m5 bigint","m6 bigint","m7 bigint","m8 bigint","m9 bigint","m10 bigint"]
+ rows:
+ - [1,"aa",1,1,1,1,1,1,1,1,1]
+ - [2,"aa",1,1,1,1,1,1,1,1,1]
+ - [3,"aa",2,2,2,2,2,2,2,2,2]
+ - [4,"aa",1,1,1,1,1,1,1,1,1]
+ -
+ id: 18
+ desc: AVG_WHERE/MAX_WHERE/MIN_WHERE/SUM_WHERE-fail
+ sqlDialect: ["HybridSQL"]
+ level: 5
+ inputs:
+ -
+ columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 string","c10 bool"]
+ indexs: ["index1:c1:c7"]
+ rows:
+ - [1,"aa",1,1,30,1.1,2.1,1590738990000,"2020-05-01","a",true]
+ - [2,"aa",4,4,33,1.4,2.4,1590738991000,"2020-05-03","c",false]
+ - [3,"aa",3,3,32,1.3,2.3,1590738992000,"2020-05-02","b",true]
+ - [4,"aa",NULL,NULL,NULL,NULL,NULL,1590738993000,NULL,NULL,NULL]
+ dataProvider:
+ - ["avg_where","sum_where","max_where","min_where"]
+ - ["c7","c8","c9","c10"]
+ sql: |
+ SELECT {0}.id, c1, d[0](d[1],c10) OVER w1 as m2 FROM {0} WINDOW
+ w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS BETWEEN 2 PRECEDING AND CURRENT ROW);
+ expect:
+ success: false
+ -
+ id: 19
+ desc: COUNT_WHERE-fail
+ sqlDialect: ["HybridSQL"]
+ level: 5
+ inputs:
+ -
+ columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 string","c10 bool"]
+ indexs: ["index1:c1:c7"]
+ rows:
+ - [1,"aa",1,1,30,1.1,2.1,1590738990000,"2020-05-01","a",true]
+ - [2,"aa",4,4,33,1.4,2.4,1590738991000,"2020-05-03","c",false]
+ - [3,"aa",3,3,32,1.3,2.3,1590738992000,"2020-05-02","b",true]
+ - [4,"aa",NULL,NULL,NULL,NULL,NULL,1590738993000,NULL,NULL,NULL]
+ dataProvider:
+ - ["count_where"]
+ - ["c10"]
+ sql: |
+ SELECT {0}.id, c1, d[0](d[1],c10) OVER w1 as m2 FROM {0} WINDOW
+ w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS BETWEEN 2 PRECEDING AND CURRENT ROW);
+ expect:
+ success: false
+ -
+ id: 20
+ desc: max_cate-normal
+ mode: cli-unsupport
+ sqlDialect: ["HybridSQL"]
+ inputs:
+ -
+ columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 string","c10 bool"]
+ indexs: ["index1:id:c7","index2:c1:c7"]
+ rows:
+ - [1,"aa",1,1,30,1.1,2.1,1590738990000,"2020-05-01","a",true]
+ - [2,"aa",4,4,33,1.4,2.4,1590738991000,"2020-05-03","c",false]
+ - [3,"aa",3,3,32,1.3,2.3,1590738992000,"2020-05-02","b",true]
+ - [4,"aa",NULL,NULL,NULL,NULL,NULL,1590738993000,NULL,NULL,NULL]
+ -
+ columns : ["id int","timecol bigint","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 string","c10 bool"]
+ indexs: ["index1:id:timecol"]
+ rows:
+ - [1,1,1,1,30,1.1,2.1,1590738990000,"2020-05-01","a",true]
+ - [2,2,1,1,30,1.1,2.1,1590738990000,"2020-05-01","a",true]
+ - [3,3,2,2,32,1.3,2.3,1590738992000,"2020-05-02","b",false]
+ - [4,4,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL]
+ dataProvider:
+ - ["{1}.c2","{1}.c3","{1}.c4","{1}.c7","{1}.c8","{1}.c9"]
+ sql: |
+ SELECT {0}.id, {0}.c1,
+ max_cate({0}.c2,d[0]) OVER w1 as m2,
+ max_cate({0}.c3,d[0]) OVER w1 as m3,
+ max_cate({0}.c4,d[0]) OVER w1 as m4,
+ max_cate({0}.c5,d[0]) OVER w1 as m5,
+ max_cate({0}.c6,d[0]) OVER w1 as m6
+ FROM {0} last join {1} ORDER BY {1}.timecol on {0}.id={1}.id
+ WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS BETWEEN 2 PRECEDING AND CURRENT ROW);
+ expect:
+ order: id
+ columns: ["id int","c1 string","m2 string","m3 string","m4 string","m5 string","m6 string"]
+ rows:
+ - [1,"aa","1:1","1:1","1:30","1:1.100000","1:2.100000"]
+ - [2,"aa","1:4","1:4","1:33","1:1.400000","1:2.400000"]
+ - [3,"aa","1:4,2:3","1:4,2:3","1:33,2:32","1:1.400000,2:1.300000","1:2.400000,2:2.300000"]
+ - [4,"aa","1:4,2:3","1:4,2:3","1:33,2:32","1:1.400000,2:1.300000","1:2.400000,2:2.300000"]
+ expectProvider:
+ 0:
+ rows:
+ - [1,"aa","1:1","1:1","1:30","1:1.100000","1:2.100000"]
+ - [2,"aa","1:4","1:4","1:33","1:1.400000","1:2.400000"]
+ - [3,"aa","1:4,2:3","1:4,2:3","1:33,2:32","1:1.400000,2:1.300000","1:2.400000,2:2.300000"]
+ - [4,"aa","1:4,2:3","1:4,2:3","1:33,2:32","1:1.400000,2:1.300000","1:2.400000,2:2.300000"]
+ 1:
+ rows:
+ - [1,"aa","1:1","1:1","1:30","1:1.100000","1:2.100000"]
+ - [2,"aa","1:4","1:4","1:33","1:1.400000","1:2.400000"]
+ - [3,"aa","1:4,2:3","1:4,2:3","1:33,2:32","1:1.400000,2:1.300000","1:2.400000,2:2.300000"]
+ - [4,"aa","1:4,2:3","1:4,2:3","1:33,2:32","1:1.400000,2:1.300000","1:2.400000,2:2.300000"]
+ 2:
+ rows:
+ - [1,"aa","30:1","30:1","30:30","30:1.100000","30:2.100000"]
+ - [2,"aa","30:4","30:4","30:33","30:1.400000","30:2.400000"]
+ - [3,"aa","30:4,32:3","30:4,32:3","30:33,32:32","30:1.400000,32:1.300000","30:2.400000,32:2.300000"]
+ - [4,"aa","30:4,32:3","30:4,32:3","30:33,32:32","30:1.400000,32:1.300000","30:2.400000,32:2.300000"]
+ 3:
+ rows:
+ - [1,"aa","2020-05-29 15:56:30:1","2020-05-29 15:56:30:1","2020-05-29 15:56:30:30","2020-05-29 15:56:30:1.100000","2020-05-29 15:56:30:2.100000"]
+ - [2,"aa","2020-05-29 15:56:30:4","2020-05-29 15:56:30:4","2020-05-29 15:56:30:33","2020-05-29 15:56:30:1.400000","2020-05-29 15:56:30:2.400000"]
+ - [3,"aa","2020-05-29 15:56:30:4,2020-05-29 15:56:32:3","2020-05-29 15:56:30:4,2020-05-29 15:56:32:3","2020-05-29 15:56:30:33,2020-05-29 15:56:32:32","2020-05-29 15:56:30:1.400000,2020-05-29 15:56:32:1.300000","2020-05-29 15:56:30:2.400000,2020-05-29 15:56:32:2.300000"]
+ - [4,"aa","2020-05-29 15:56:30:4,2020-05-29 15:56:32:3","2020-05-29 15:56:30:4,2020-05-29 15:56:32:3","2020-05-29 15:56:30:33,2020-05-29 15:56:32:32","2020-05-29 15:56:30:1.400000,2020-05-29 15:56:32:1.300000","2020-05-29 15:56:30:2.400000,2020-05-29 15:56:32:2.300000"]
+ 4:
+ rows:
+ - [1,"aa","2020-05-01:1","2020-05-01:1","2020-05-01:30","2020-05-01:1.100000","2020-05-01:2.100000"]
+ - [2,"aa","2020-05-01:4","2020-05-01:4","2020-05-01:33","2020-05-01:1.400000","2020-05-01:2.400000"]
+ - [3,"aa","2020-05-01:4,2020-05-02:3","2020-05-01:4,2020-05-02:3","2020-05-01:33,2020-05-02:32","2020-05-01:1.400000,2020-05-02:1.300000","2020-05-01:2.400000,2020-05-02:2.300000"]
+ - [4,"aa","2020-05-01:4,2020-05-02:3","2020-05-01:4,2020-05-02:3","2020-05-01:33,2020-05-02:32","2020-05-01:1.400000,2020-05-02:1.300000","2020-05-01:2.400000,2020-05-02:2.300000"]
+ 5:
+ rows:
+ - [1,"aa","a:1","a:1","a:30","a:1.100000","a:2.100000"]
+ - [2,"aa","a:4","a:4","a:33","a:1.400000","a:2.400000"]
+ - [3,"aa","a:4,b:3","a:4,b:3","a:33,b:32","a:1.400000,b:1.300000","a:2.400000,b:2.300000"]
+ - [4,"aa","a:4,b:3","a:4,b:3","a:33,b:32","a:1.400000,b:1.300000","a:2.400000,b:2.300000"]
+ -
+ id: 21
+ desc: min_cate-normal
+ mode: cli-unsupport
+ sqlDialect: ["HybridSQL"]
+ inputs:
+ -
+ columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 string","c10 bool"]
+ indexs: ["index1:id:c7","index2:c1:c7"]
+ rows:
+ - [1,"aa",1,1,30,1.1,2.1,1590738990000,"2020-05-01","a",true]
+ - [2,"aa",4,4,33,1.4,2.4,1590738991000,"2020-05-03","c",false]
+ - [3,"aa",3,3,32,1.3,2.3,1590738992000,"2020-05-02","b",true]
+ - [4,"aa",NULL,NULL,NULL,NULL,NULL,1590738993000,NULL,NULL,NULL]
+ -
+ columns : ["id int","timecol bigint","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 string","c10 bool"]
+ indexs: ["index1:id:timecol"]
+ rows:
+ - [1,1,1,1,30,1.1,2.1,1590738990000,"2020-05-01","a",true]
+ - [2,2,1,1,30,1.1,2.1,1590738990000,"2020-05-01","a",true]
+ - [3,3,2,2,32,1.3,2.3,1590738992000,"2020-05-02","b",false]
+ - [4,4,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL]
+ dataProvider:
+ - ["{1}.c2","{1}.c3","{1}.c4","{1}.c7","{1}.c8","{1}.c9"]
+ sql: |
+ SELECT {0}.id, {0}.c1,
+ min_cate({0}.c2,d[0]) OVER w1 as m2,
+ min_cate({0}.c3,d[0]) OVER w1 as m3,
+ min_cate({0}.c4,d[0]) OVER w1 as m4,
+ min_cate({0}.c5,d[0]) OVER w1 as m5,
+ min_cate({0}.c6,d[0]) OVER w1 as m6
+ FROM {0} last join {1} ORDER BY {1}.timecol on {0}.id={1}.id
+ WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS BETWEEN 2 PRECEDING AND CURRENT ROW);
+ expect:
+ order: id
+ columns: ["id int","c1 string","m2 string","m3 string","m4 string","m5 string","m6 string"]
+ expectProvider:
+ 0:
+ rows:
+ - [1,"aa","1:1","1:1","1:30","1:1.100000","1:2.100000"]
+ - [2,"aa","1:1","1:1","1:30","1:1.100000","1:2.100000"]
+ - [3,"aa","1:1,2:3","1:1,2:3","1:30,2:32","1:1.100000,2:1.300000","1:2.100000,2:2.300000"]
+ - [4,"aa","1:4,2:3","1:4,2:3","1:33,2:32","1:1.400000,2:1.300000","1:2.400000,2:2.300000"]
+ 1:
+ rows:
+ - [1,"aa","1:1","1:1","1:30","1:1.100000","1:2.100000"]
+ - [2,"aa","1:1","1:1","1:30","1:1.100000","1:2.100000"]
+ - [3,"aa","1:1,2:3","1:1,2:3","1:30,2:32","1:1.100000,2:1.300000","1:2.100000,2:2.300000"]
+ - [4,"aa","1:4,2:3","1:4,2:3","1:33,2:32","1:1.400000,2:1.300000","1:2.400000,2:2.300000"]
+ 2:
+ rows:
+ - [1,"aa","30:1","30:1","30:30","30:1.100000","30:2.100000"]
+ - [2,"aa","30:1","30:1","30:30","30:1.100000","30:2.100000"]
+ - [3,"aa","30:1,32:3","30:1,32:3","30:30,32:32","30:1.100000,32:1.300000","30:2.100000,32:2.300000"]
+ - [4,"aa","30:4,32:3","30:4,32:3","30:33,32:32","30:1.400000,32:1.300000","30:2.400000,32:2.300000"]
+ 3:
+ rows:
+ - [1,"aa","2020-05-29 15:56:30:1","2020-05-29 15:56:30:1","2020-05-29 15:56:30:30","2020-05-29 15:56:30:1.100000","2020-05-29 15:56:30:2.100000"]
+ - [2,"aa","2020-05-29 15:56:30:1","2020-05-29 15:56:30:1","2020-05-29 15:56:30:30","2020-05-29 15:56:30:1.100000","2020-05-29 15:56:30:2.100000"]
+ - [3,"aa","2020-05-29 15:56:30:1,2020-05-29 15:56:32:3","2020-05-29 15:56:30:1,2020-05-29 15:56:32:3","2020-05-29 15:56:30:30,2020-05-29 15:56:32:32","2020-05-29 15:56:30:1.100000,2020-05-29 15:56:32:1.300000","2020-05-29 15:56:30:2.100000,2020-05-29 15:56:32:2.300000"]
+ - [4,"aa","2020-05-29 15:56:30:4,2020-05-29 15:56:32:3","2020-05-29 15:56:30:4,2020-05-29 15:56:32:3","2020-05-29 15:56:30:33,2020-05-29 15:56:32:32","2020-05-29 15:56:30:1.400000,2020-05-29 15:56:32:1.300000","2020-05-29 15:56:30:2.400000,2020-05-29 15:56:32:2.300000"]
+ 4:
+ rows:
+ - [1,"aa","2020-05-01:1","2020-05-01:1","2020-05-01:30","2020-05-01:1.100000","2020-05-01:2.100000"]
+ - [2,"aa","2020-05-01:1","2020-05-01:1","2020-05-01:30","2020-05-01:1.100000","2020-05-01:2.100000"]
+ - [3,"aa","2020-05-01:1,2020-05-02:3","2020-05-01:1,2020-05-02:3","2020-05-01:30,2020-05-02:32","2020-05-01:1.100000,2020-05-02:1.300000","2020-05-01:2.100000,2020-05-02:2.300000"]
+ - [4,"aa","2020-05-01:4,2020-05-02:3","2020-05-01:4,2020-05-02:3","2020-05-01:33,2020-05-02:32","2020-05-01:1.400000,2020-05-02:1.300000","2020-05-01:2.400000,2020-05-02:2.300000"]
+ 5:
+ rows:
+ - [1,"aa","a:1","a:1","a:30","a:1.100000","a:2.100000"]
+ - [2,"aa","a:1","a:1","a:30","a:1.100000","a:2.100000"]
+ - [3,"aa","a:1,b:3","a:1,b:3","a:30,b:32","a:1.100000,b:1.300000","a:2.100000,b:2.300000"]
+ - [4,"aa","a:4,b:3","a:4,b:3","a:33,b:32","a:1.400000,b:1.300000","a:2.400000,b:2.300000"]
+ -
+ id: 22
+ desc: count_cate-normal
+ mode: cli-unsupport
+ sqlDialect: ["HybridSQL"]
+ inputs:
+ -
+ columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 string","c10 bool"]
+ indexs: ["index1:id:c7","index2:c1:c7"]
+ rows:
+ - [1,"aa",1,1,30,1.1,2.1,1590738990000,"2020-05-01","a",true]
+ - [2,"aa",4,4,33,1.4,2.4,1590738991000,"2020-05-03","c",false]
+ - [3,"aa",3,3,32,1.3,2.3,1590738992000,"2020-05-02","b",true]
+ - [4,"aa",NULL,NULL,NULL,NULL,NULL,1590738993000,NULL,NULL,NULL]
+ -
+ columns : ["id int","timecol bigint","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 string","c10 bool"]
+ indexs: ["index1:id:timecol"]
+ rows:
+ - [1,1,1,1,30,1.1,2.1,1590738990000,"2020-05-01","a",true]
+ - [2,2,1,1,30,1.1,2.1,1590738990000,"2020-05-01","a",true]
+ - [3,3,2,2,32,1.3,2.3,1590738992000,"2020-05-02","b",false]
+ - [4,4,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL]
+ dataProvider:
+ - ["{1}.c2","{1}.c3","{1}.c4","{1}.c7","{1}.c8","{1}.c9"]
+ sql: |
+ SELECT {0}.id, {0}.c1,
+ count_cate({0}.c2,d[0]) OVER w1 as m2,
+ count_cate({0}.c3,d[0]) OVER w1 as m3,
+ count_cate({0}.c4,d[0]) OVER w1 as m4,
+ count_cate({0}.c5,d[0]) OVER w1 as m5,
+ count_cate({0}.c6,d[0]) OVER w1 as m6
+ FROM {0} last join {1} ORDER BY {1}.timecol on {0}.id={1}.id
+ WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS BETWEEN 2 PRECEDING AND CURRENT ROW);
+ expect:
+ order: id
+ columns: ["id int","c1 string","m2 string","m3 string","m4 string","m5 string","m6 string"]
+ expectProvider:
+ 0:
+ rows:
+ - [1,"aa","1:1","1:1","1:1","1:1","1:1"]
+ - [2,"aa","1:2","1:2","1:2","1:2","1:2"]
+ - [3,"aa","1:2,2:1","1:2,2:1","1:2,2:1","1:2,2:1","1:2,2:1"]
+ - [4,"aa","1:1,2:1","1:1,2:1","1:1,2:1","1:1,2:1","1:1,2:1"]
+ 1:
+ rows:
+ - [1,"aa","1:1","1:1","1:1","1:1","1:1"]
+ - [2,"aa","1:2","1:2","1:2","1:2","1:2"]
+ - [3,"aa","1:2,2:1","1:2,2:1","1:2,2:1","1:2,2:1","1:2,2:1"]
+ - [4,"aa","1:1,2:1","1:1,2:1","1:1,2:1","1:1,2:1","1:1,2:1"]
+ 2:
+ rows:
+ - [1,"aa","30:1","30:1","30:1","30:1","30:1"]
+ - [2,"aa","30:2","30:2","30:2","30:2","30:2"]
+ - [3,"aa","30:2,32:1","30:2,32:1","30:2,32:1","30:2,32:1","30:2,32:1"]
+ - [4,"aa","30:1,32:1","30:1,32:1","30:1,32:1","30:1,32:1","30:1,32:1"]
+ 3:
+ rows:
+ - [1,"aa","2020-05-29 15:56:30:1","2020-05-29 15:56:30:1","2020-05-29 15:56:30:1","2020-05-29 15:56:30:1","2020-05-29 15:56:30:1"]
+ - [2,"aa","2020-05-29 15:56:30:2","2020-05-29 15:56:30:2","2020-05-29 15:56:30:2","2020-05-29 15:56:30:2","2020-05-29 15:56:30:2"]
+ - [3,"aa","2020-05-29 15:56:30:2,2020-05-29 15:56:32:1","2020-05-29 15:56:30:2,2020-05-29 15:56:32:1","2020-05-29 15:56:30:2,2020-05-29 15:56:32:1","2020-05-29 15:56:30:2,2020-05-29 15:56:32:1","2020-05-29 15:56:30:2,2020-05-29 15:56:32:1"]
+ - [4,"aa","2020-05-29 15:56:30:1,2020-05-29 15:56:32:1","2020-05-29 15:56:30:1,2020-05-29 15:56:32:1","2020-05-29 15:56:30:1,2020-05-29 15:56:32:1","2020-05-29 15:56:30:1,2020-05-29 15:56:32:1","2020-05-29 15:56:30:1,2020-05-29 15:56:32:1"]
+ 4:
+ rows:
+ - [1,"aa","2020-05-01:1","2020-05-01:1","2020-05-01:1","2020-05-01:1","2020-05-01:1"]
+ - [2,"aa","2020-05-01:2","2020-05-01:2","2020-05-01:2","2020-05-01:2","2020-05-01:2"]
+ - [3,"aa","2020-05-01:2,2020-05-02:1","2020-05-01:2,2020-05-02:1","2020-05-01:2,2020-05-02:1","2020-05-01:2,2020-05-02:1","2020-05-01:2,2020-05-02:1"]
+ - [4,"aa","2020-05-01:1,2020-05-02:1","2020-05-01:1,2020-05-02:1","2020-05-01:1,2020-05-02:1","2020-05-01:1,2020-05-02:1","2020-05-01:1,2020-05-02:1"]
+ 5:
+ rows:
+ - [1,"aa","a:1","a:1","a:1","a:1","a:1"]
+ - [2,"aa","a:2","a:2","a:2","a:2","a:2"]
+ - [3,"aa","a:2,b:1","a:2,b:1","a:2,b:1","a:2,b:1","a:2,b:1"]
+ - [4,"aa","a:1,b:1","a:1,b:1","a:1,b:1","a:1,b:1","a:1,b:1"]
+ -
+ id: 23
+ desc: sum_cate-normal
+ mode: cli-unsupport
+ sqlDialect: ["HybridSQL"]
+ inputs:
+ -
+ columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 string","c10 bool"]
+ indexs: ["index1:id:c7","index2:c1:c7"]
+ rows:
+ - [1,"aa",1,1,30,1.1,2.1,1590738990000,"2020-05-01","a",true]
+ - [2,"aa",4,4,33,1.4,2.4,1590738991000,"2020-05-03","c",false]
+ - [3,"aa",3,3,32,1.3,2.3,1590738992000,"2020-05-02","b",true]
+ - [4,"aa",NULL,NULL,NULL,NULL,NULL,1590738993000,NULL,NULL,NULL]
+ -
+ columns : ["id int","timecol bigint","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 string","c10 bool"]
+ indexs: ["index1:id:timecol"]
+ rows:
+ - [1,1,1,1,30,1.1,2.1,1590738990000,"2020-05-01","a",true]
+ - [2,2,1,1,30,1.1,2.1,1590738990000,"2020-05-01","a",true]
+ - [3,3,2,2,32,1.3,2.3,1590738992000,"2020-05-02","b",false]
+ - [4,4,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL]
+ dataProvider:
+ - ["{1}.c2","{1}.c3","{1}.c4","{1}.c7","{1}.c8","{1}.c9"]
+ sql: |
+ SELECT {0}.id, {0}.c1,
+ sum_cate({0}.c2,d[0]) OVER w1 as m2,
+ sum_cate({0}.c3,d[0]) OVER w1 as m3,
+ sum_cate({0}.c4,d[0]) OVER w1 as m4,
+ sum_cate({0}.c5,d[0]) OVER w1 as m5,
+ sum_cate({0}.c6,d[0]) OVER w1 as m6
+ FROM {0} last join {1} ORDER BY {1}.timecol on {0}.id={1}.id
+ WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS BETWEEN 2 PRECEDING AND CURRENT ROW);
+ expect:
+ order: id
+ columns: ["id int","c1 string","m2 string","m3 string","m4 string","m5 string","m6 string"]
+ expectProvider:
+ 0:
+ rows:
+ - [1,"aa","1:1","1:1","1:30","1:1.100000","1:2.100000"]
+ - [2,"aa","1:5","1:5","1:63","1:2.500000","1:4.500000"]
+ - [3,"aa","1:5,2:3","1:5,2:3","1:63,2:32","1:2.500000,2:1.300000","1:4.500000,2:2.300000"]
+ - [4,"aa","1:4,2:3","1:4,2:3","1:33,2:32","1:1.400000,2:1.300000","1:2.400000,2:2.300000"]
+ 1:
+ rows:
+ - [1,"aa","1:1","1:1","1:30","1:1.100000","1:2.100000"]
+ - [2,"aa","1:5","1:5","1:63","1:2.500000","1:4.500000"]
+ - [3,"aa","1:5,2:3","1:5,2:3","1:63,2:32","1:2.500000,2:1.300000","1:4.500000,2:2.300000"]
+ - [4,"aa","1:4,2:3","1:4,2:3","1:33,2:32","1:1.400000,2:1.300000","1:2.400000,2:2.300000"]
+ 2:
+ rows:
+ - [1,"aa","30:1","30:1","30:30","30:1.100000","30:2.100000"]
+ - [2,"aa","30:5","30:5","30:63","30:2.500000","30:4.500000"]
+ - [3,"aa","30:5,32:3","30:5,32:3","30:63,32:32","30:2.500000,32:1.300000","30:4.500000,32:2.300000"]
+ - [4,"aa","30:4,32:3","30:4,32:3","30:33,32:32","30:1.400000,32:1.300000","30:2.400000,32:2.300000"]
+ 3:
+ rows:
+ - [1,"aa","2020-05-29 15:56:30:1","2020-05-29 15:56:30:1","2020-05-29 15:56:30:30","2020-05-29 15:56:30:1.100000","2020-05-29 15:56:30:2.100000"]
+ - [2,"aa","2020-05-29 15:56:30:5","2020-05-29 15:56:30:5","2020-05-29 15:56:30:63","2020-05-29 15:56:30:2.500000","2020-05-29 15:56:30:4.500000"]
+ - [3,"aa","2020-05-29 15:56:30:5,2020-05-29 15:56:32:3","2020-05-29 15:56:30:5,2020-05-29 15:56:32:3","2020-05-29 15:56:30:63,2020-05-29 15:56:32:32","2020-05-29 15:56:30:2.500000,2020-05-29 15:56:32:1.300000","2020-05-29 15:56:30:4.500000,2020-05-29 15:56:32:2.300000"]
+ - [4,"aa","2020-05-29 15:56:30:4,2020-05-29 15:56:32:3","2020-05-29 15:56:30:4,2020-05-29 15:56:32:3","2020-05-29 15:56:30:33,2020-05-29 15:56:32:32","2020-05-29 15:56:30:1.400000,2020-05-29 15:56:32:1.300000","2020-05-29 15:56:30:2.400000,2020-05-29 15:56:32:2.300000"]
+ 4:
+ rows:
+ - [1,"aa","2020-05-01:1","2020-05-01:1","2020-05-01:30","2020-05-01:1.100000","2020-05-01:2.100000"]
+ - [2,"aa","2020-05-01:5","2020-05-01:5","2020-05-01:63","2020-05-01:2.500000","2020-05-01:4.500000"]
+ - [3,"aa","2020-05-01:5,2020-05-02:3","2020-05-01:5,2020-05-02:3","2020-05-01:63,2020-05-02:32","2020-05-01:2.500000,2020-05-02:1.300000","2020-05-01:4.500000,2020-05-02:2.300000"]
+ - [4,"aa","2020-05-01:4,2020-05-02:3","2020-05-01:4,2020-05-02:3","2020-05-01:33,2020-05-02:32","2020-05-01:1.400000,2020-05-02:1.300000","2020-05-01:2.400000,2020-05-02:2.300000"]
+ 5:
+ rows:
+ - [1,"aa","a:1","a:1","a:30","a:1.100000","a:2.100000"]
+ - [2,"aa","a:5","a:5","a:63","a:2.500000","a:4.500000"]
+ - [3,"aa","a:5,b:3","a:5,b:3","a:63,b:32","a:2.500000,b:1.300000","a:4.500000,b:2.300000"]
+ - [4,"aa","a:4,b:3","a:4,b:3","a:33,b:32","a:1.400000,b:1.300000","a:2.400000,b:2.300000"]
+ -
+ id: 24
+ desc: avg_cate-normal
+ mode: cli-unsupport
+ sqlDialect: ["HybridSQL"]
+ inputs:
+ -
+ columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 string","c10 bool"]
+ indexs: ["index1:id:c7","index2:c1:c7"]
+ rows:
+ - [1,"aa",1,1,30,1.1,2.1,1590738990000,"2020-05-01","a",true]
+ - [2,"aa",4,4,33,1.4,2.4,1590738991000,"2020-05-03","c",false]
+ - [3,"aa",3,3,32,1.3,2.3,1590738992000,"2020-05-02","b",true]
+ - [4,"aa",NULL,NULL,NULL,NULL,NULL,1590738993000,NULL,NULL,NULL]
+ -
+ columns : ["id int","timecol bigint","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 string","c10 bool"]
+ indexs: ["index1:id:timecol"]
+ rows:
+ - [1,1,1,1,30,1.1,2.1,1590738990000,"2020-05-01","a",true]
+ - [2,2,1,1,30,1.1,2.1,1590738990000,"2020-05-01","a",true]
+ - [3,3,2,2,32,1.3,2.3,1590738992000,"2020-05-02","b",false]
+ - [4,4,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL]
+ dataProvider:
+ - ["{1}.c2","{1}.c3","{1}.c4","{1}.c7","{1}.c8","{1}.c9"]
+ sql: |
+ SELECT {0}.id, {0}.c1,
+ avg_cate({0}.c2,d[0]) OVER w1 as m2,
+ avg_cate({0}.c3,d[0]) OVER w1 as m3,
+ avg_cate({0}.c4,d[0]) OVER w1 as m4,
+ avg_cate({0}.c5,d[0]) OVER w1 as m5,
+ avg_cate({0}.c6,d[0]) OVER w1 as m6
+ FROM {0} last join {1} ORDER BY {1}.timecol on {0}.id={1}.id
+ WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS BETWEEN 2 PRECEDING AND CURRENT ROW);
+ expect:
+ order: id
+ columns: ["id int","c1 string","m2 string","m3 string","m4 string","m5 string","m6 string"]
+ expectProvider:
+ 0:
+ rows:
+ - [1,"aa","1:1.000000","1:1.000000","1:30.000000","1:1.100000","1:2.100000"]
+ - [2,"aa","1:2.500000","1:2.500000","1:31.500000","1:1.250000","1:2.250000"]
+ - [3,"aa","1:2.500000,2:3.000000","1:2.500000,2:3.000000","1:31.500000,2:32.000000","1:1.250000,2:1.300000","1:2.250000,2:2.300000"]
+ - [4,"aa","1:4.000000,2:3.000000","1:4.000000,2:3.000000","1:33.000000,2:32.000000","1:1.400000,2:1.300000","1:2.400000,2:2.300000"]
+ 1:
+ rows:
+ - [1,"aa","1:1.000000","1:1.000000","1:30.000000","1:1.100000","1:2.100000"]
+ - [2,"aa","1:2.500000","1:2.500000","1:31.500000","1:1.250000","1:2.250000"]
+ - [3,"aa","1:2.500000,2:3.000000","1:2.500000,2:3.000000","1:31.500000,2:32.000000","1:1.250000,2:1.300000","1:2.250000,2:2.300000"]
+ - [4,"aa","1:4.000000,2:3.000000","1:4.000000,2:3.000000","1:33.000000,2:32.000000","1:1.400000,2:1.300000","1:2.400000,2:2.300000"]
+ 2:
+ rows:
+ - [1,"aa","30:1.000000","30:1.000000","30:30.000000","30:1.100000","30:2.100000"]
+ - [2,"aa","30:2.500000","30:2.500000","30:31.500000","30:1.250000","30:2.250000"]
+ - [3,"aa","30:2.500000,32:3.000000","30:2.500000,32:3.000000","30:31.500000,32:32.000000","30:1.250000,32:1.300000","30:2.250000,32:2.300000"]
+ - [4,"aa","30:4.000000,32:3.000000","30:4.000000,32:3.000000","30:33.000000,32:32.000000","30:1.400000,32:1.300000","30:2.400000,32:2.300000"]
+ 3:
+ rows:
+ - [1,"aa","2020-05-29 15:56:30:1.000000","2020-05-29 15:56:30:1.000000","2020-05-29 15:56:30:30.000000","2020-05-29 15:56:30:1.100000","2020-05-29 15:56:30:2.100000"]
+ - [2,"aa","2020-05-29 15:56:30:2.500000","2020-05-29 15:56:30:2.500000","2020-05-29 15:56:30:31.500000","2020-05-29 15:56:30:1.250000","2020-05-29 15:56:30:2.250000"]
+ - [3,"aa","2020-05-29 15:56:30:2.500000,2020-05-29 15:56:32:3.000000","2020-05-29 15:56:30:2.500000,2020-05-29 15:56:32:3.000000","2020-05-29 15:56:30:31.500000,2020-05-29 15:56:32:32.000000","2020-05-29 15:56:30:1.250000,2020-05-29 15:56:32:1.300000","2020-05-29 15:56:30:2.250000,2020-05-29 15:56:32:2.300000"]
+ - [4,"aa","2020-05-29 15:56:30:4.000000,2020-05-29 15:56:32:3.000000","2020-05-29 15:56:30:4.000000,2020-05-29 15:56:32:3.000000","2020-05-29 15:56:30:33.000000,2020-05-29 15:56:32:32.000000","2020-05-29 15:56:30:1.400000,2020-05-29 15:56:32:1.300000","2020-05-29 15:56:30:2.400000,2020-05-29 15:56:32:2.300000"]
+ 4:
+ rows:
+ - [1,"aa","2020-05-01:1.000000","2020-05-01:1.000000","2020-05-01:30.000000","2020-05-01:1.100000","2020-05-01:2.100000"]
+ - [2,"aa","2020-05-01:2.500000","2020-05-01:2.500000","2020-05-01:31.500000","2020-05-01:1.250000","2020-05-01:2.250000"]
+ - [3,"aa","2020-05-01:2.500000,2020-05-02:3.000000","2020-05-01:2.500000,2020-05-02:3.000000","2020-05-01:31.500000,2020-05-02:32.000000","2020-05-01:1.250000,2020-05-02:1.300000","2020-05-01:2.250000,2020-05-02:2.300000"]
+ - [4,"aa","2020-05-01:4.000000,2020-05-02:3.000000","2020-05-01:4.000000,2020-05-02:3.000000","2020-05-01:33.000000,2020-05-02:32.000000","2020-05-01:1.400000,2020-05-02:1.300000","2020-05-01:2.400000,2020-05-02:2.300000"]
+ 5:
+ rows:
+ - [1,"aa","a:1.000000","a:1.000000","a:30.000000","a:1.100000","a:2.100000"]
+ - [2,"aa","a:2.500000","a:2.500000","a:31.500000","a:1.250000","a:2.250000"]
+ - [3,"aa","a:2.500000,b:3.000000","a:2.500000,b:3.000000","a:31.500000,b:32.000000","a:1.250000,b:1.300000","a:2.250000,b:2.300000"]
+ - [4,"aa","a:4.000000,b:3.000000","a:4.000000,b:3.000000","a:33.000000,b:32.000000","a:1.400000,b:1.300000","a:2.400000,b:2.300000"]
+ -
+ id: 25
+ desc: "*_cate-fail1"
+ sqlDialect: ["HybridSQL"]
+ level: 5
+ inputs:
+ -
+ columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 string","c10 bool"]
+ indexs: ["index1:id:c7","index2:c1:c7"]
+ rows:
+ - [1,"aa",1,1,30,1.1,2.1,1590738990000,"2020-05-01","a",true]
+ - [2,"aa",4,4,33,1.4,2.4,1590738991000,"2020-05-03","c",false]
+ - [3,"aa",3,3,32,1.3,2.3,1590738992000,"2020-05-02","b",true]
+ - [4,"aa",NULL,NULL,NULL,NULL,NULL,1590738993000,NULL,NULL,NULL]
+ -
+ columns : ["id int","timecol bigint","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 string","c10 bool"]
+ indexs: ["index1:id:timecol"]
+ rows:
+ - [1,1,1,1,30,1.1,2.1,1590738990000,"2020-05-01","a",true]
+ - [2,2,1,1,30,1.1,2.1,1590738990000,"2020-05-01","a",true]
+ - [3,3,2,2,32,1.3,2.3,1590738992000,"2020-05-02","b",false]
+ - [4,4,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL]
+ dataProvider:
+ - ["avg_cate","sum_cate","max_cate","min_cate","count_cate"]
+ - ["{0}.c2","{0}.c3","{0}.c4","{0}.c5","{0}.c6"]
+ - ["{1}.c5","{1}.c6","{1}.c10"]
+ sql: |
+ SELECT {0}.id, {0}.c1,
+ d[0](d[1],d[2]) OVER w1 as m2
+ FROM {0} last join {1} ORDER BY {1}.timecol on {0}.id={1}.id
+ WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS BETWEEN 2 PRECEDING AND CURRENT ROW);
+ expect:
+ success: false
+ -
+ id: 26
+ desc: "*_cate-fail2"
+ sqlDialect: ["HybridSQL"]
+ level: 5
+ inputs:
+ -
+ columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 string","c10 bool"]
+ indexs: ["index1:id:c7","index2:c1:c7"]
+ rows:
+ - [1,"aa",1,1,30,1.1,2.1,1590738990000,"2020-05-01","a",true]
+ - [2,"aa",4,4,33,1.4,2.4,1590738991000,"2020-05-03","c",false]
+ - [3,"aa",3,3,32,1.3,2.3,1590738992000,"2020-05-02","b",true]
+ - [4,"aa",NULL,NULL,NULL,NULL,NULL,1590738993000,NULL,NULL,NULL]
+ -
+ columns : ["id int","timecol bigint","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 string","c10 bool"]
+ indexs: ["index1:id:timecol"]
+ rows:
+ - [1,1,1,1,30,1.1,2.1,1590738990000,"2020-05-01","a",true]
+ - [2,2,1,1,30,1.1,2.1,1590738990000,"2020-05-01","a",true]
+ - [3,3,2,2,32,1.3,2.3,1590738992000,"2020-05-02","b",false]
+ - [4,4,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL]
+ dataProvider:
+ - ["avg_cate","sum_cate","max_cate","min_cate","count_cate"]
+ - ["{0}.c7","{0}.c8","{0}.c9","{0}.c10"]
+ - ["{1}.c2","{1}.c3","{1}.c4","{1}.c5","{1}.c6","{1}.c7","{1}.c8","{1}.c9","{1}.c10"]
+ sql: |
+ SELECT {0}.id, {0}.c1,
+ d[0](d[1],d[2]) OVER w1 as m2
+ FROM {0} last join {1} ORDER BY {1}.timecol on {0}.id={1}.id
+ WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS BETWEEN 2 PRECEDING AND CURRENT ROW);
+ expect:
+ success: false
+ -
+ id: 27
+ desc: max_cate_where-normal
+ mode: cli-unsupport
+ sqlDialect: ["HybridSQL"]
+ inputs:
+ -
+ columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 string","c10 bool"]
+ indexs: ["index1:id:c7","index2:c1:c7"]
+ rows:
+ - [1,"aa",1,1,30,1.1,2.1,1590738990000,"2020-05-01","a",true]
+ - [2,"aa",4,4,33,1.4,2.4,1590738991000,"2020-05-03","c",false]
+ - [3,"aa",3,3,32,1.3,2.3,1590738992000,"2020-05-02","b",true]
+ - [4,"aa",NULL,NULL,NULL,NULL,NULL,1590738993000,NULL,NULL,NULL]
+ -
+ columns : ["id int","timecol bigint","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 string","c10 bool"]
+ indexs: ["index1:id:timecol"]
+ rows:
+ - [1,1,1,1,30,1.1,2.1,1590738990000,"2020-05-01","a",true]
+ - [2,2,1,1,30,1.1,2.1,1590738990000,"2020-05-01","a",true]
+ - [3,3,2,2,32,1.3,2.3,1590738992000,"2020-05-02","b",false]
+ - [4,4,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL]
+ dataProvider:
+ - ["{1}.c2","{1}.c3","{1}.c4","{1}.c7","{1}.c8","{1}.c9"]
+ sql: |
+ SELECT {0}.id, {0}.c1,
+ max_cate_where({0}.c2,{0}.c10,d[0]) OVER w1 as m2,
+ max_cate_where({0}.c3,{0}.c10,d[0]) OVER w1 as m3,
+ max_cate_where({0}.c4,{0}.c10,d[0]) OVER w1 as m4,
+ max_cate_where({0}.c5,{0}.c10,d[0]) OVER w1 as m5,
+ max_cate_where({0}.c6,{0}.c10,d[0]) OVER w1 as m6
+ FROM {0} last join {1} ORDER BY {1}.timecol on {0}.id={1}.id
+ WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS BETWEEN 2 PRECEDING AND CURRENT ROW);
+ expect:
+ order: id
+ columns: ["id int","c1 string","m2 string","m3 string","m4 string","m5 string","m6 string"]
+ expectProvider:
+ 0:
+ rows:
+ - [1,"aa","1:1","1:1","1:30","1:1.100000","1:2.100000"]
+ - [2,"aa","1:1","1:1","1:30","1:1.100000","1:2.100000"]
+ - [3,"aa","1:1,2:3","1:1,2:3","1:30,2:32","1:1.100000,2:1.300000","1:2.100000,2:2.300000"]
+ - [4,"aa","2:3","2:3","2:32","2:1.300000","2:2.300000"]
+ 1:
+ rows:
+ - [1,"aa","1:1","1:1","1:30","1:1.100000","1:2.100000"]
+ - [2,"aa","1:1","1:1","1:30","1:1.100000","1:2.100000"]
+ - [3,"aa","1:1,2:3","1:1,2:3","1:30,2:32","1:1.100000,2:1.300000","1:2.100000,2:2.300000"]
+ - [4,"aa","2:3","2:3","2:32","2:1.300000","2:2.300000"]
+ 2:
+ rows:
+ - [1,"aa","30:1","30:1","30:30","30:1.100000","30:2.100000"]
+ - [2,"aa","30:1","30:1","30:30","30:1.100000","30:2.100000"]
+ - [3,"aa","30:1,32:3","30:1,32:3","30:30,32:32","30:1.100000,32:1.300000","30:2.100000,32:2.300000"]
+ - [4,"aa","32:3","32:3","32:32","32:1.300000","32:2.300000"]
+ 3:
+ rows:
+ - [1,"aa","2020-05-29 15:56:30:1","2020-05-29 15:56:30:1","2020-05-29 15:56:30:30","2020-05-29 15:56:30:1.100000","2020-05-29 15:56:30:2.100000"]
+ - [2,"aa","2020-05-29 15:56:30:1","2020-05-29 15:56:30:1","2020-05-29 15:56:30:30","2020-05-29 15:56:30:1.100000","2020-05-29 15:56:30:2.100000"]
+ - [3,"aa","2020-05-29 15:56:30:1,2020-05-29 15:56:32:3","2020-05-29 15:56:30:1,2020-05-29 15:56:32:3","2020-05-29 15:56:30:30,2020-05-29 15:56:32:32","2020-05-29 15:56:30:1.100000,2020-05-29 15:56:32:1.300000","2020-05-29 15:56:30:2.100000,2020-05-29 15:56:32:2.300000"]
+ - [4,"aa","2020-05-29 15:56:32:3","2020-05-29 15:56:32:3","2020-05-29 15:56:32:32","2020-05-29 15:56:32:1.300000","2020-05-29 15:56:32:2.300000"]
+ 4:
+ rows:
+ - [1,"aa","2020-05-01:1","2020-05-01:1","2020-05-01:30","2020-05-01:1.100000","2020-05-01:2.100000"]
+ - [2,"aa","2020-05-01:1","2020-05-01:1","2020-05-01:30","2020-05-01:1.100000","2020-05-01:2.100000"]
+ - [3,"aa","2020-05-01:1,2020-05-02:3","2020-05-01:1,2020-05-02:3","2020-05-01:30,2020-05-02:32","2020-05-01:1.100000,2020-05-02:1.300000","2020-05-01:2.100000,2020-05-02:2.300000"]
+ - [4,"aa","2020-05-02:3","2020-05-02:3","2020-05-02:32","2020-05-02:1.300000","2020-05-02:2.300000"]
+ 5:
+ rows:
+ - [1,"aa","a:1","a:1","a:30","a:1.100000","a:2.100000"]
+ - [2,"aa","a:1","a:1","a:30","a:1.100000","a:2.100000"]
+ - [3,"aa","a:1,b:3","a:1,b:3","a:30,b:32","a:1.100000,b:1.300000","a:2.100000,b:2.300000"]
+ - [4,"aa","b:3","b:3","b:32","b:1.300000","b:2.300000"]
+ -
+ id: 28
+ desc: min_cate_where-normal
+ mode: cli-unsupport
+ sqlDialect: ["HybridSQL"]
+ inputs:
+ -
+ columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 string","c10 bool"]
+ indexs: ["index1:id:c7","index2:c1:c7"]
+ rows:
+ - [1,"aa",1,1,30,1.1,2.1,1590738990000,"2020-05-01","a",true]
+ - [2,"aa",4,4,33,1.4,2.4,1590738991000,"2020-05-03","c",false]
+ - [3,"aa",3,3,32,1.3,2.3,1590738992000,"2020-05-02","b",true]
+ - [4,"aa",NULL,NULL,NULL,NULL,NULL,1590738993000,NULL,NULL,NULL]
+ -
+ columns : ["id int","timecol bigint","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 string","c10 bool"]
+ indexs: ["index1:id:timecol"]
+ rows:
+ - [1,1,1,1,30,1.1,2.1,1590738990000,"2020-05-01","a",true]
+ - [2,2,1,1,30,1.1,2.1,1590738990000,"2020-05-01","a",true]
+ - [3,3,2,2,32,1.3,2.3,1590738992000,"2020-05-02","b",false]
+ - [4,4,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL]
+ dataProvider:
+ - ["{1}.c2","{1}.c3","{1}.c4","{1}.c7","{1}.c8","{1}.c9"]
+ sql: |
+ SELECT {0}.id, {0}.c1,
+ min_cate_where({0}.c2,{0}.c10,d[0]) OVER w1 as m2,
+ min_cate_where({0}.c3,{0}.c10,d[0]) OVER w1 as m3,
+ min_cate_where({0}.c4,{0}.c10,d[0]) OVER w1 as m4,
+ min_cate_where({0}.c5,{0}.c10,d[0]) OVER w1 as m5,
+ min_cate_where({0}.c6,{0}.c10,d[0]) OVER w1 as m6
+ FROM {0} last join {1} ORDER BY {1}.timecol on {0}.id={1}.id
+ WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS BETWEEN 2 PRECEDING AND CURRENT ROW);
+ expect:
+ order: id
+ columns: ["id int","c1 string","m2 string","m3 string","m4 string","m5 string","m6 string"]
+ expectProvider:
+ 0:
+ rows:
+ - [1,"aa","1:1","1:1","1:30","1:1.100000","1:2.100000"]
+ - [2,"aa","1:1","1:1","1:30","1:1.100000","1:2.100000"]
+ - [3,"aa","1:1,2:3","1:1,2:3","1:30,2:32","1:1.100000,2:1.300000","1:2.100000,2:2.300000"]
+ - [4,"aa","2:3","2:3","2:32","2:1.300000","2:2.300000"]
+ 1:
+ rows:
+ - [1,"aa","1:1","1:1","1:30","1:1.100000","1:2.100000"]
+ - [2,"aa","1:1","1:1","1:30","1:1.100000","1:2.100000"]
+ - [3,"aa","1:1,2:3","1:1,2:3","1:30,2:32","1:1.100000,2:1.300000","1:2.100000,2:2.300000"]
+ - [4,"aa","2:3","2:3","2:32","2:1.300000","2:2.300000"]
+ 2:
+ rows:
+ - [1,"aa","30:1","30:1","30:30","30:1.100000","30:2.100000"]
+ - [2,"aa","30:1","30:1","30:30","30:1.100000","30:2.100000"]
+ - [3,"aa","30:1,32:3","30:1,32:3","30:30,32:32","30:1.100000,32:1.300000","30:2.100000,32:2.300000"]
+ - [4,"aa","32:3","32:3","32:32","32:1.300000","32:2.300000"]
+ 3:
+ rows:
+ - [1,"aa","2020-05-29 15:56:30:1","2020-05-29 15:56:30:1","2020-05-29 15:56:30:30","2020-05-29 15:56:30:1.100000","2020-05-29 15:56:30:2.100000"]
+ - [2,"aa","2020-05-29 15:56:30:1","2020-05-29 15:56:30:1","2020-05-29 15:56:30:30","2020-05-29 15:56:30:1.100000","2020-05-29 15:56:30:2.100000"]
+ - [3,"aa","2020-05-29 15:56:30:1,2020-05-29 15:56:32:3","2020-05-29 15:56:30:1,2020-05-29 15:56:32:3","2020-05-29 15:56:30:30,2020-05-29 15:56:32:32","2020-05-29 15:56:30:1.100000,2020-05-29 15:56:32:1.300000","2020-05-29 15:56:30:2.100000,2020-05-29 15:56:32:2.300000"]
+ - [4,"aa","2020-05-29 15:56:32:3","2020-05-29 15:56:32:3","2020-05-29 15:56:32:32","2020-05-29 15:56:32:1.300000","2020-05-29 15:56:32:2.300000"]
+ 4:
+ rows:
+ - [1,"aa","2020-05-01:1","2020-05-01:1","2020-05-01:30","2020-05-01:1.100000","2020-05-01:2.100000"]
+ - [2,"aa","2020-05-01:1","2020-05-01:1","2020-05-01:30","2020-05-01:1.100000","2020-05-01:2.100000"]
+ - [3,"aa","2020-05-01:1,2020-05-02:3","2020-05-01:1,2020-05-02:3","2020-05-01:30,2020-05-02:32","2020-05-01:1.100000,2020-05-02:1.300000","2020-05-01:2.100000,2020-05-02:2.300000"]
+ - [4,"aa","2020-05-02:3","2020-05-02:3","2020-05-02:32","2020-05-02:1.300000","2020-05-02:2.300000"]
+ 5:
+ rows:
+ - [1,"aa","a:1","a:1","a:30","a:1.100000","a:2.100000"]
+ - [2,"aa","a:1","a:1","a:30","a:1.100000","a:2.100000"]
+ - [3,"aa","a:1,b:3","a:1,b:3","a:30,b:32","a:1.100000,b:1.300000","a:2.100000,b:2.300000"]
+ - [4,"aa","b:3","b:3","b:32","b:1.300000","b:2.300000"]
+ -
+ id: 29
+ desc: count_cate_where-normal
+ mode: cli-unsupport
+ sqlDialect: ["HybridSQL"]
+ inputs:
+ -
+ columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 string","c10 bool"]
+ indexs: ["index1:id:c7","index2:c1:c7"]
+ rows:
+ - [1,"aa",1,1,30,1.1,2.1,1590738990000,"2020-05-01","a",true]
+ - [2,"aa",4,4,33,1.4,2.4,1590738991000,"2020-05-03","c",false]
+ - [3,"aa",3,3,32,1.3,2.3,1590738992000,"2020-05-02","b",true]
+ - [4,"aa",NULL,NULL,NULL,NULL,NULL,1590738993000,NULL,NULL,NULL]
+ -
+ columns : ["id int","timecol bigint","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 string","c10 bool"]
+ indexs: ["index1:id:timecol"]
+ rows:
+ - [1,1,1,1,30,1.1,2.1,1590738990000,"2020-05-01","a",true]
+ - [2,2,1,1,30,1.1,2.1,1590738990000,"2020-05-01","a",true]
+ - [3,3,2,2,32,1.3,2.3,1590738992000,"2020-05-02","b",false]
+ - [4,4,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL]
+ dataProvider:
+ - ["{1}.c2","{1}.c3","{1}.c4","{1}.c7","{1}.c8","{1}.c9"]
+ sql: |
+ SELECT {0}.id, {0}.c1,
+ count_cate_where({0}.c2,{0}.c10,d[0]) OVER w1 as m2,
+ count_cate_where({0}.c3,{0}.c10,d[0]) OVER w1 as m3,
+ count_cate_where({0}.c4,{0}.c10,d[0]) OVER w1 as m4,
+ count_cate_where({0}.c5,{0}.c10,d[0]) OVER w1 as m5,
+ count_cate_where({0}.c6,{0}.c10,d[0]) OVER w1 as m6
+ FROM {0} last join {1} ORDER BY {1}.timecol on {0}.id={1}.id
+ WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS BETWEEN 2 PRECEDING AND CURRENT ROW);
+ expect:
+ order: id
+ columns: ["id int","c1 string","m2 string","m3 string","m4 string","m5 string","m6 string"]
+ expectProvider:
+ 0:
+ rows:
+ - [1,"aa","1:1","1:1","1:1","1:1","1:1"]
+ - [2,"aa","1:1","1:1","1:1","1:1","1:1"]
+ - [3,"aa","1:1,2:1","1:1,2:1","1:1,2:1","1:1,2:1","1:1,2:1"]
+ - [4,"aa","2:1","2:1","2:1","2:1","2:1"]
+ 1:
+ rows:
+ - [1,"aa","1:1","1:1","1:1","1:1","1:1"]
+ - [2,"aa","1:1","1:1","1:1","1:1","1:1"]
+ - [3,"aa","1:1,2:1","1:1,2:1","1:1,2:1","1:1,2:1","1:1,2:1"]
+ - [4,"aa","2:1","2:1","2:1","2:1","2:1"]
+ 2:
+ rows:
+ - [1,"aa","30:1","30:1","30:1","30:1","30:1"]
+ - [2,"aa","30:1","30:1","30:1","30:1","30:1"]
+ - [3,"aa","30:1,32:1","30:1,32:1","30:1,32:1","30:1,32:1","30:1,32:1"]
+ - [4,"aa","32:1","32:1","32:1","32:1","32:1"]
+ 3:
+ rows:
+ - [1,"aa","2020-05-29 15:56:30:1","2020-05-29 15:56:30:1","2020-05-29 15:56:30:1","2020-05-29 15:56:30:1","2020-05-29 15:56:30:1"]
+ - [2,"aa","2020-05-29 15:56:30:1","2020-05-29 15:56:30:1","2020-05-29 15:56:30:1","2020-05-29 15:56:30:1","2020-05-29 15:56:30:1"]
+ - [3,"aa","2020-05-29 15:56:30:1,2020-05-29 15:56:32:1","2020-05-29 15:56:30:1,2020-05-29 15:56:32:1","2020-05-29 15:56:30:1,2020-05-29 15:56:32:1","2020-05-29 15:56:30:1,2020-05-29 15:56:32:1","2020-05-29 15:56:30:1,2020-05-29 15:56:32:1"]
+ - [4,"aa","2020-05-29 15:56:32:1","2020-05-29 15:56:32:1","2020-05-29 15:56:32:1","2020-05-29 15:56:32:1","2020-05-29 15:56:32:1"]
+ 4:
+ rows:
+ - [1,"aa","2020-05-01:1","2020-05-01:1","2020-05-01:1","2020-05-01:1","2020-05-01:1"]
+ - [2,"aa","2020-05-01:1","2020-05-01:1","2020-05-01:1","2020-05-01:1","2020-05-01:1"]
+ - [3,"aa","2020-05-01:1,2020-05-02:1","2020-05-01:1,2020-05-02:1","2020-05-01:1,2020-05-02:1","2020-05-01:1,2020-05-02:1","2020-05-01:1,2020-05-02:1"]
+ - [4,"aa","2020-05-02:1","2020-05-02:1","2020-05-02:1","2020-05-02:1","2020-05-02:1"]
+ 5:
+ rows:
+ - [1,"aa","a:1","a:1","a:1","a:1","a:1"]
+ - [2,"aa","a:1","a:1","a:1","a:1","a:1"]
+ - [3,"aa","a:1,b:1","a:1,b:1","a:1,b:1","a:1,b:1","a:1,b:1"]
+ - [4,"aa","b:1","b:1","b:1","b:1","b:1"]
+ -
+ id: 30
+ desc: sum_cate_where-normal
+ mode: cli-unsupport
+ sqlDialect: ["HybridSQL"]
+ inputs:
+ -
+ columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 string","c10 bool"]
+ indexs: ["index1:id:c7","index2:c1:c7"]
+ rows:
+ - [1,"aa",1,1,30,1.1,2.1,1590738990000,"2020-05-01","a",true]
+ - [2,"aa",4,4,33,1.4,2.4,1590738991000,"2020-05-03","c",false]
+ - [3,"aa",3,3,32,1.3,2.3,1590738992000,"2020-05-02","b",true]
+ - [4,"aa",NULL,NULL,NULL,NULL,NULL,1590738993000,NULL,NULL,NULL]
+ -
+ columns : ["id int","timecol bigint","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 string","c10 bool"]
+ indexs: ["index1:id:timecol"]
+ rows:
+ - [1,1,1,1,30,1.1,2.1,1590738990000,"2020-05-01","a",true]
+ - [2,2,1,1,30,1.1,2.1,1590738990000,"2020-05-01","a",true]
+ - [3,3,2,2,32,1.3,2.3,1590738992000,"2020-05-02","b",false]
+ - [4,4,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL]
+ dataProvider:
+ - ["{1}.c2","{1}.c3","{1}.c4","{1}.c7","{1}.c8","{1}.c9"]
+ sql: |
+ SELECT {0}.id, {0}.c1,
+ sum_cate_where({0}.c2,{0}.c10,d[0]) OVER w1 as m2,
+ sum_cate_where({0}.c3,{0}.c10,d[0]) OVER w1 as m3,
+ sum_cate_where({0}.c4,{0}.c10,d[0]) OVER w1 as m4,
+ sum_cate_where({0}.c5,{0}.c10,d[0]) OVER w1 as m5,
+ sum_cate_where({0}.c6,{0}.c10,d[0]) OVER w1 as m6
+ FROM {0} last join {1} ORDER BY {1}.timecol on {0}.id={1}.id
+ WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS BETWEEN 2 PRECEDING AND CURRENT ROW);
+ expect:
+ order: id
+ columns: ["id int","c1 string","m2 string","m3 string","m4 string","m5 string","m6 string"]
+ expectProvider:
+ 0:
+ rows:
+ - [1,"aa","1:1","1:1","1:30","1:1.100000","1:2.100000"]
+ - [2,"aa","1:1","1:1","1:30","1:1.100000","1:2.100000"]
+ - [3,"aa","1:1,2:3","1:1,2:3","1:30,2:32","1:1.100000,2:1.300000","1:2.100000,2:2.300000"]
+ - [4,"aa","2:3","2:3","2:32","2:1.300000","2:2.300000"]
+ 1:
+ rows:
+ - [1,"aa","1:1","1:1","1:30","1:1.100000","1:2.100000"]
+ - [2,"aa","1:1","1:1","1:30","1:1.100000","1:2.100000"]
+ - [3,"aa","1:1,2:3","1:1,2:3","1:30,2:32","1:1.100000,2:1.300000","1:2.100000,2:2.300000"]
+ - [4,"aa","2:3","2:3","2:32","2:1.300000","2:2.300000"]
+ 2:
+ rows:
+ - [1,"aa","30:1","30:1","30:30","30:1.100000","30:2.100000"]
+ - [2,"aa","30:1","30:1","30:30","30:1.100000","30:2.100000"]
+ - [3,"aa","30:1,32:3","30:1,32:3","30:30,32:32","30:1.100000,32:1.300000","30:2.100000,32:2.300000"]
+ - [4,"aa","32:3","32:3","32:32","32:1.300000","32:2.300000"]
+ 3:
+ rows:
+ - [1,"aa","2020-05-29 15:56:30:1","2020-05-29 15:56:30:1","2020-05-29 15:56:30:30","2020-05-29 15:56:30:1.100000","2020-05-29 15:56:30:2.100000"]
+ - [2,"aa","2020-05-29 15:56:30:1","2020-05-29 15:56:30:1","2020-05-29 15:56:30:30","2020-05-29 15:56:30:1.100000","2020-05-29 15:56:30:2.100000"]
+ - [3,"aa","2020-05-29 15:56:30:1,2020-05-29 15:56:32:3","2020-05-29 15:56:30:1,2020-05-29 15:56:32:3","2020-05-29 15:56:30:30,2020-05-29 15:56:32:32","2020-05-29 15:56:30:1.100000,2020-05-29 15:56:32:1.300000","2020-05-29 15:56:30:2.100000,2020-05-29 15:56:32:2.300000"]
+ - [4,"aa","2020-05-29 15:56:32:3","2020-05-29 15:56:32:3","2020-05-29 15:56:32:32","2020-05-29 15:56:32:1.300000","2020-05-29 15:56:32:2.300000"]
+ 4:
+ rows:
+ - [1,"aa","2020-05-01:1","2020-05-01:1","2020-05-01:30","2020-05-01:1.100000","2020-05-01:2.100000"]
+ - [2,"aa","2020-05-01:1","2020-05-01:1","2020-05-01:30","2020-05-01:1.100000","2020-05-01:2.100000"]
+ - [3,"aa","2020-05-01:1,2020-05-02:3","2020-05-01:1,2020-05-02:3","2020-05-01:30,2020-05-02:32","2020-05-01:1.100000,2020-05-02:1.300000","2020-05-01:2.100000,2020-05-02:2.300000"]
+ - [4,"aa","2020-05-02:3","2020-05-02:3","2020-05-02:32","2020-05-02:1.300000","2020-05-02:2.300000"]
+ 5:
+ rows:
+ - [1,"aa","a:1","a:1","a:30","a:1.100000","a:2.100000"]
+ - [2,"aa","a:1","a:1","a:30","a:1.100000","a:2.100000"]
+ - [3,"aa","a:1,b:3","a:1,b:3","a:30,b:32","a:1.100000,b:1.300000","a:2.100000,b:2.300000"]
+ - [4,"aa","b:3","b:3","b:32","b:1.300000","b:2.300000"]
+ -
+ id: 31
+ desc: avg_cate_where-normal
+ mode: cli-unsupport
+ sqlDialect: ["HybridSQL"]
+ inputs:
+ -
+ columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 string","c10 bool"]
+ indexs: ["index1:id:c7","index2:c1:c7"]
+ rows:
+ - [1,"aa",1,1,30,1.1,2.1,1590738990000,"2020-05-01","a",true]
+ - [2,"aa",4,4,33,1.4,2.4,1590738991000,"2020-05-03","c",false]
+ - [3,"aa",3,3,32,1.3,2.3,1590738992000,"2020-05-02","b",true]
+ - [4,"aa",NULL,NULL,NULL,NULL,NULL,1590738993000,NULL,NULL,NULL]
+ -
+ columns : ["id int","timecol bigint","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 string","c10 bool"]
+ indexs: ["index1:id:timecol"]
+ rows:
+ - [1,1,1,1,30,1.1,2.1,1590738990000,"2020-05-01","a",true]
+ - [2,2,1,1,30,1.1,2.1,1590738990000,"2020-05-01","a",true]
+ - [3,3,2,2,32,1.3,2.3,1590738992000,"2020-05-02","b",false]
+ - [4,4,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL]
+ dataProvider:
+ - ["{1}.c2","{1}.c3","{1}.c4","{1}.c7","{1}.c8","{1}.c9"]
+ sql: |
+ SELECT {0}.id, {0}.c1,
+ avg_cate_where({0}.c2,{0}.c10,d[0]) OVER w1 as m2,
+ avg_cate_where({0}.c3,{0}.c10,d[0]) OVER w1 as m3,
+ avg_cate_where({0}.c4,{0}.c10,d[0]) OVER w1 as m4,
+ avg_cate_where({0}.c5,{0}.c10,d[0]) OVER w1 as m5,
+ avg_cate_where({0}.c6,{0}.c10,d[0]) OVER w1 as m6
+ FROM {0} last join {1} ORDER BY {1}.timecol on {0}.id={1}.id
+ WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS BETWEEN 2 PRECEDING AND CURRENT ROW);
+ expect:
+ order: id
+ columns: ["id int","c1 string","m2 string","m3 string","m4 string","m5 string","m6 string"]
+ expectProvider:
+ 0:
+ rows:
+ - [1,"aa","1:1.000000","1:1.000000","1:30.000000","1:1.100000","1:2.100000"]
+ - [2,"aa","1:1.000000","1:1.000000","1:30.000000","1:1.100000","1:2.100000"]
+ - [3,"aa","1:1.000000,2:3.000000","1:1.000000,2:3.000000","1:30.000000,2:32.000000","1:1.100000,2:1.300000","1:2.100000,2:2.300000"]
+ - [4,"aa","2:3.000000","2:3.000000","2:32.000000","2:1.300000","2:2.300000"]
+ 1:
+ rows:
+ - [1,"aa","1:1.000000","1:1.000000","1:30.000000","1:1.100000","1:2.100000"]
+ - [2,"aa","1:1.000000","1:1.000000","1:30.000000","1:1.100000","1:2.100000"]
+ - [3,"aa","1:1.000000,2:3.000000","1:1.000000,2:3.000000","1:30.000000,2:32.000000","1:1.100000,2:1.300000","1:2.100000,2:2.300000"]
+ - [4,"aa","2:3.000000","2:3.000000","2:32.000000","2:1.300000","2:2.300000"]
+ 2:
+ rows:
+ - [1,"aa","30:1.000000","30:1.000000","30:30.000000","30:1.100000","30:2.100000"]
+ - [2,"aa","30:1.000000","30:1.000000","30:30.000000","30:1.100000","30:2.100000"]
+ - [3,"aa","30:1.000000,32:3.000000","30:1.000000,32:3.000000","30:30.000000,32:32.000000","30:1.100000,32:1.300000","30:2.100000,32:2.300000"]
+ - [4,"aa","32:3.000000","32:3.000000","32:32.000000","32:1.300000","32:2.300000"]
+ 3:
+ rows:
+ - [1,"aa","2020-05-29 15:56:30:1.000000","2020-05-29 15:56:30:1.000000","2020-05-29 15:56:30:30.000000","2020-05-29 15:56:30:1.100000","2020-05-29 15:56:30:2.100000"]
+ - [2,"aa","2020-05-29 15:56:30:1.000000","2020-05-29 15:56:30:1.000000","2020-05-29 15:56:30:30.000000","2020-05-29 15:56:30:1.100000","2020-05-29 15:56:30:2.100000"]
+ - [3,"aa","2020-05-29 15:56:30:1.000000,2020-05-29 15:56:32:3.000000","2020-05-29 15:56:30:1.000000,2020-05-29 15:56:32:3.000000","2020-05-29 15:56:30:30.000000,2020-05-29 15:56:32:32.000000","2020-05-29 15:56:30:1.100000,2020-05-29 15:56:32:1.300000","2020-05-29 15:56:30:2.100000,2020-05-29 15:56:32:2.300000"]
+ - [4,"aa","2020-05-29 15:56:32:3.000000","2020-05-29 15:56:32:3.000000","2020-05-29 15:56:32:32.000000","2020-05-29 15:56:32:1.300000","2020-05-29 15:56:32:2.300000"]
+ 4:
+ rows:
+ - [1,"aa","2020-05-01:1.000000","2020-05-01:1.000000","2020-05-01:30.000000","2020-05-01:1.100000","2020-05-01:2.100000"]
+ - [2,"aa","2020-05-01:1.000000","2020-05-01:1.000000","2020-05-01:30.000000","2020-05-01:1.100000","2020-05-01:2.100000"]
+ - [3,"aa","2020-05-01:1.000000,2020-05-02:3.000000","2020-05-01:1.000000,2020-05-02:3.000000","2020-05-01:30.000000,2020-05-02:32.000000","2020-05-01:1.100000,2020-05-02:1.300000","2020-05-01:2.100000,2020-05-02:2.300000"]
+ - [4,"aa","2020-05-02:3.000000","2020-05-02:3.000000","2020-05-02:32.000000","2020-05-02:1.300000","2020-05-02:2.300000"]
+ 5:
+ rows:
+ - [1,"aa","a:1.000000","a:1.000000","a:30.000000","a:1.100000","a:2.100000"]
+ - [2,"aa","a:1.000000","a:1.000000","a:30.000000","a:1.100000","a:2.100000"]
+ - [3,"aa","a:1.000000,b:3.000000","a:1.000000,b:3.000000","a:30.000000,b:32.000000","a:1.100000,b:1.300000","a:2.100000,b:2.300000"]
+ - [4,"aa","b:3.000000","b:3.000000","b:32.000000","b:1.300000","b:2.300000"]
+ -
+ id: 32
+ desc: "*_cate_where-fail1"
+ sqlDialect: ["HybridSQL"]
+ level: 5
+ inputs:
+ -
+ columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 string","c10 bool"]
+ indexs: ["index1:id:c7","index2:c1:c7"]
+ rows:
+ - [1,"aa",1,1,30,1.1,2.1,1590738990000,"2020-05-01","a",true]
+ - [2,"aa",4,4,33,1.4,2.4,1590738991000,"2020-05-03","c",false]
+ - [3,"aa",3,3,32,1.3,2.3,1590738992000,"2020-05-02","b",true]
+ - [4,"aa",NULL,NULL,NULL,NULL,NULL,1590738993000,NULL,NULL,NULL]
+ -
+ columns : ["id int","timecol bigint","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 string","c10 bool"]
+ indexs: ["index1:id:timecol"]
+ rows:
+ - [1,1,1,1,30,1.1,2.1,1590738990000,"2020-05-01","a",true]
+ - [2,2,1,1,30,1.1,2.1,1590738990000,"2020-05-01","a",true]
+ - [3,3,2,2,32,1.3,2.3,1590738992000,"2020-05-02","b",false]
+ - [4,4,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL]
+ dataProvider:
+ - ["avg_cate_where","sum_cate_where","max_cate_where","min_cate_where","count_cate_where"]
+ - ["{0}.c2","{0}.c3","{0}.c4","{0}.c5","{0}.c6"]
+ - ["{1}.c5","{1}.c6","{1}.c10"]
+ sql: |
+ SELECT {0}.id, {0}.c1,
+ d[0](d[1],{0}.c10,d[2]) OVER w1 as m2
+ FROM {0} last join {1} ORDER BY {1}.timecol on {0}.id={1}.id
+ WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS BETWEEN 2 PRECEDING AND CURRENT ROW);
+ expect:
+ success: false
+ -
+ id: 33
+ desc: "*_cate_where-fail2"
+ sqlDialect: ["HybridSQL"]
+ level: 5
+ inputs:
+ -
+ columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 string","c10 bool"]
+ indexs: ["index1:id:c7","index2:c1:c7"]
+ rows:
+ - [1,"aa",1,1,30,1.1,2.1,1590738990000,"2020-05-01","a",true]
+ - [2,"aa",4,4,33,1.4,2.4,1590738991000,"2020-05-03","c",false]
+ - [3,"aa",3,3,32,1.3,2.3,1590738992000,"2020-05-02","b",true]
+ - [4,"aa",NULL,NULL,NULL,NULL,NULL,1590738993000,NULL,NULL,NULL]
+ -
+ columns : ["id int","timecol bigint","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 string","c10 bool"]
+ indexs: ["index1:id:timecol"]
+ rows:
+ - [1,1,1,1,30,1.1,2.1,1590738990000,"2020-05-01","a",true]
+ - [2,2,1,1,30,1.1,2.1,1590738990000,"2020-05-01","a",true]
+ - [3,3,2,2,32,1.3,2.3,1590738992000,"2020-05-02","b",false]
+ - [4,4,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL]
+ dataProvider:
+ - ["avg_cate_where","sum_cate_where","max_cate_where","min_cate_where","count_cate_where"]
+ - ["{0}.c7","{0}.c8","{0}.c9","{0}.c10"]
+ - ["{1}.c2","{1}.c3","{1}.c4","{1}.c5","{1}.c6","{1}.c7","{1}.c8","{1}.c9","{1}.c10"]
+ sql: |
+ SELECT {0}.id, {0}.c1,
+ d[0](d[1],{0}.c10,d[2]) OVER w1 as m2
+ FROM {0} last join {1} ORDER BY {1}.timecol on {0}.id={1}.id
+ WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS BETWEEN 2 PRECEDING AND CURRENT ROW);
+ expect:
+ success: false
+ -
+ id: 34
+ desc: top_n_key_max_cate_where-normal
+ mode: cli-unsupport
+ sqlDialect: ["HybridSQL"]
+ inputs:
+ -
+ columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 string","c10 bool"]
+ indexs: ["index1:id:c7","index2:c1:c7"]
+ rows:
+ - [1,"aa",1,1,30,1.1,2.1,1590738990000,"2020-05-01","a",true]
+ - [2,"aa",4,4,33,1.4,2.4,1590738991000,"2020-05-03","c",false]
+ - [3,"aa",3,3,32,1.3,2.3,1590738992000,"2020-05-02","b",true]
+ - [4,"aa",NULL,NULL,NULL,NULL,NULL,1590738993000,NULL,NULL,NULL]
+ -
+ columns : ["id int","timecol bigint","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 string","c10 bool"]
+ indexs: ["index1:id:timecol"]
+ rows:
+ - [1,1,1,1,30,1.1,2.1,1590738990000,"2020-05-01","a",true]
+ - [2,2,1,1,30,1.1,2.1,1590738990000,"2020-05-01","a",true]
+ - [3,3,2,2,32,1.3,2.3,1590738992000,"2020-05-02","b",false]
+ - [4,4,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL]
+ dataProvider:
+ - ["{1}.c2","{1}.c3","{1}.c4","{1}.c7","{1}.c8","{1}.c9"]
+ sql: |
+ SELECT {0}.id, {0}.c1,
+ top_n_key_max_cate_where({0}.c2,{0}.c10,d[0],1) OVER w1 as m2,
+ top_n_key_max_cate_where({0}.c3,{0}.c10,d[0],1) OVER w1 as m3,
+ top_n_key_max_cate_where({0}.c4,{0}.c10,d[0],1) OVER w1 as m4,
+ top_n_key_max_cate_where({0}.c5,{0}.c10,d[0],1) OVER w1 as m5,
+ top_n_key_max_cate_where({0}.c6,{0}.c10,d[0],1) OVER w1 as m6
+ FROM {0} last join {1} ORDER BY {1}.timecol on {0}.id={1}.id
+ WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS BETWEEN 2 PRECEDING AND CURRENT ROW);
+ expect:
+ order: id
+ columns: ["id int","c1 string","m2 string","m3 string","m4 string","m5 string","m6 string"]
+ expectProvider:
+ 0:
+ rows:
+ - [1,"aa","1:1","1:1","1:30","1:1.100000","1:2.100000"]
+ - [2,"aa","1:1","1:1","1:30","1:1.100000","1:2.100000"]
+ - [3,"aa","2:3","2:3","2:32","2:1.300000","2:2.300000"]
+ - [4,"aa","2:3","2:3","2:32","2:1.300000","2:2.300000"]
+ 1:
+ rows:
+ - [1,"aa","1:1","1:1","1:30","1:1.100000","1:2.100000"]
+ - [2,"aa","1:1","1:1","1:30","1:1.100000","1:2.100000"]
+ - [3,"aa","2:3","2:3","2:32","2:1.300000","2:2.300000"]
+ - [4,"aa","2:3","2:3","2:32","2:1.300000","2:2.300000"]
+ 2:
+ rows:
+ - [1,"aa","30:1","30:1","30:30","30:1.100000","30:2.100000"]
+ - [2,"aa","30:1","30:1","30:30","30:1.100000","30:2.100000"]
+ - [3,"aa","32:3","32:3","32:32","32:1.300000","32:2.300000"]
+ - [4,"aa","32:3","32:3","32:32","32:1.300000","32:2.300000"]
+ 3:
+ rows:
+ - [1,"aa","2020-05-29 15:56:30:1","2020-05-29 15:56:30:1","2020-05-29 15:56:30:30","2020-05-29 15:56:30:1.100000","2020-05-29 15:56:30:2.100000"]
+ - [2,"aa","2020-05-29 15:56:30:1","2020-05-29 15:56:30:1","2020-05-29 15:56:30:30","2020-05-29 15:56:30:1.100000","2020-05-29 15:56:30:2.100000"]
+ - [3,"aa","2020-05-29 15:56:32:3","2020-05-29 15:56:32:3","2020-05-29 15:56:32:32","2020-05-29 15:56:32:1.300000","2020-05-29 15:56:32:2.300000"]
+ - [4,"aa","2020-05-29 15:56:32:3","2020-05-29 15:56:32:3","2020-05-29 15:56:32:32","2020-05-29 15:56:32:1.300000","2020-05-29 15:56:32:2.300000"]
+ 4:
+ rows:
+ - [1,"aa","2020-05-01:1","2020-05-01:1","2020-05-01:30","2020-05-01:1.100000","2020-05-01:2.100000"]
+ - [2,"aa","2020-05-01:1","2020-05-01:1","2020-05-01:30","2020-05-01:1.100000","2020-05-01:2.100000"]
+ - [3,"aa","2020-05-02:3","2020-05-02:3","2020-05-02:32","2020-05-02:1.300000","2020-05-02:2.300000"]
+ - [4,"aa","2020-05-02:3","2020-05-02:3","2020-05-02:32","2020-05-02:1.300000","2020-05-02:2.300000"]
+ 5:
+ rows:
+ - [1,"aa","a:1","a:1","a:30","a:1.100000","a:2.100000"]
+ - [2,"aa","a:1","a:1","a:30","a:1.100000","a:2.100000"]
+ - [3,"aa","b:3","b:3","b:32","b:1.300000","b:2.300000"]
+ - [4,"aa","b:3","b:3","b:32","b:1.300000","b:2.300000"]
+ -
+ id: 35
+ desc: top_n_key_min_cate_where-normal
+ mode: cli-unsupport
+ sqlDialect: ["HybridSQL"]
+ inputs:
+ -
+ columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 string","c10 bool"]
+ indexs: ["index1:id:c7","index2:c1:c7"]
+ rows:
+ - [1,"aa",1,1,30,1.1,2.1,1590738990000,"2020-05-01","a",true]
+ - [2,"aa",4,4,33,1.4,2.4,1590738991000,"2020-05-03","c",false]
+ - [3,"aa",3,3,32,1.3,2.3,1590738992000,"2020-05-02","b",true]
+ - [4,"aa",NULL,NULL,NULL,NULL,NULL,1590738993000,NULL,NULL,NULL]
+ -
+ columns : ["id int","timecol bigint","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 string","c10 bool"]
+ indexs: ["index1:id:timecol"]
+ rows:
+ - [1,1,1,1,30,1.1,2.1,1590738990000,"2020-05-01","a",true]
+ - [2,2,1,1,30,1.1,2.1,1590738990000,"2020-05-01","a",true]
+ - [3,3,2,2,32,1.3,2.3,1590738992000,"2020-05-02","b",false]
+ - [4,4,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL]
+ dataProvider:
+ - ["{1}.c2","{1}.c3","{1}.c4","{1}.c7","{1}.c8","{1}.c9"]
+ sql: |
+ SELECT {0}.id, {0}.c1,
+ top_n_key_min_cate_where({0}.c2,{0}.c10,d[0],1) OVER w1 as m2,
+ top_n_key_min_cate_where({0}.c3,{0}.c10,d[0],1) OVER w1 as m3,
+ top_n_key_min_cate_where({0}.c4,{0}.c10,d[0],1) OVER w1 as m4,
+ top_n_key_min_cate_where({0}.c5,{0}.c10,d[0],1) OVER w1 as m5,
+ top_n_key_min_cate_where({0}.c6,{0}.c10,d[0],1) OVER w1 as m6
+ FROM {0} last join {1} ORDER BY {1}.timecol on {0}.id={1}.id
+ WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS BETWEEN 2 PRECEDING AND CURRENT ROW);
+ expect:
+ order: id
+ columns: ["id int","c1 string","m2 string","m3 string","m4 string","m5 string","m6 string"]
+ expectProvider:
+ 0:
+ rows:
+ - [1,"aa","1:1","1:1","1:30","1:1.100000","1:2.100000"]
+ - [2,"aa","1:1","1:1","1:30","1:1.100000","1:2.100000"]
+ - [3,"aa","2:3","2:3","2:32","2:1.300000","2:2.300000"]
+ - [4,"aa","2:3","2:3","2:32","2:1.300000","2:2.300000"]
+ 1:
+ rows:
+ - [1,"aa","1:1","1:1","1:30","1:1.100000","1:2.100000"]
+ - [2,"aa","1:1","1:1","1:30","1:1.100000","1:2.100000"]
+ - [3,"aa","2:3","2:3","2:32","2:1.300000","2:2.300000"]
+ - [4,"aa","2:3","2:3","2:32","2:1.300000","2:2.300000"]
+ 2:
+ rows:
+ - [1,"aa","30:1","30:1","30:30","30:1.100000","30:2.100000"]
+ - [2,"aa","30:1","30:1","30:30","30:1.100000","30:2.100000"]
+ - [3,"aa","32:3","32:3","32:32","32:1.300000","32:2.300000"]
+ - [4,"aa","32:3","32:3","32:32","32:1.300000","32:2.300000"]
+ 3:
+ rows:
+ - [1,"aa","2020-05-29 15:56:30:1","2020-05-29 15:56:30:1","2020-05-29 15:56:30:30","2020-05-29 15:56:30:1.100000","2020-05-29 15:56:30:2.100000"]
+ - [2,"aa","2020-05-29 15:56:30:1","2020-05-29 15:56:30:1","2020-05-29 15:56:30:30","2020-05-29 15:56:30:1.100000","2020-05-29 15:56:30:2.100000"]
+ - [3,"aa","2020-05-29 15:56:32:3","2020-05-29 15:56:32:3","2020-05-29 15:56:32:32","2020-05-29 15:56:32:1.300000","2020-05-29 15:56:32:2.300000"]
+ - [4,"aa","2020-05-29 15:56:32:3","2020-05-29 15:56:32:3","2020-05-29 15:56:32:32","2020-05-29 15:56:32:1.300000","2020-05-29 15:56:32:2.300000"]
+ 4:
+ rows:
+ - [1,"aa","2020-05-01:1","2020-05-01:1","2020-05-01:30","2020-05-01:1.100000","2020-05-01:2.100000"]
+ - [2,"aa","2020-05-01:1","2020-05-01:1","2020-05-01:30","2020-05-01:1.100000","2020-05-01:2.100000"]
+ - [3,"aa","2020-05-02:3","2020-05-02:3","2020-05-02:32","2020-05-02:1.300000","2020-05-02:2.300000"]
+ - [4,"aa","2020-05-02:3","2020-05-02:3","2020-05-02:32","2020-05-02:1.300000","2020-05-02:2.300000"]
+ 5:
+ rows:
+ - [1,"aa","a:1","a:1","a:30","a:1.100000","a:2.100000"]
+ - [2,"aa","a:1","a:1","a:30","a:1.100000","a:2.100000"]
+ - [3,"aa","b:3","b:3","b:32","b:1.300000","b:2.300000"]
+ - [4,"aa","b:3","b:3","b:32","b:1.300000","b:2.300000"]
+ -
+ id: 36
+ desc: top_n_key_sum_cate_where-normal
+ mode: cli-unsupport
+ sqlDialect: ["HybridSQL"]
+ inputs:
+ -
+ columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 string","c10 bool"]
+ indexs: ["index1:id:c7","index2:c1:c7"]
+ rows:
+ - [1,"aa",1,1,30,1.1,2.1,1590738990000,"2020-05-01","a",true]
+ - [2,"aa",4,4,33,1.4,2.4,1590738991000,"2020-05-03","c",false]
+ - [3,"aa",3,3,32,1.3,2.3,1590738992000,"2020-05-02","b",true]
+ - [4,"aa",NULL,NULL,NULL,NULL,NULL,1590738993000,NULL,NULL,NULL]
+ -
+ columns : ["id int","timecol bigint","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 string","c10 bool"]
+ indexs: ["index1:id:timecol"]
+ rows:
+ - [1,1,1,1,30,1.1,2.1,1590738990000,"2020-05-01","a",true]
+ - [2,2,1,1,30,1.1,2.1,1590738990000,"2020-05-01","a",true]
+ - [3,3,2,2,32,1.3,2.3,1590738992000,"2020-05-02","b",false]
+ - [4,4,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL]
+ dataProvider:
+ - ["{1}.c2","{1}.c3","{1}.c4","{1}.c7","{1}.c8","{1}.c9"]
+ sql: |
+ SELECT {0}.id, {0}.c1,
+ top_n_key_sum_cate_where({0}.c2,{0}.c10,d[0],1) OVER w1 as m2,
+ top_n_key_sum_cate_where({0}.c3,{0}.c10,d[0],1) OVER w1 as m3,
+ top_n_key_sum_cate_where({0}.c4,{0}.c10,d[0],1) OVER w1 as m4,
+ top_n_key_sum_cate_where({0}.c5,{0}.c10,d[0],1) OVER w1 as m5,
+ top_n_key_sum_cate_where({0}.c6,{0}.c10,d[0],1) OVER w1 as m6
+ FROM {0} last join {1} ORDER BY {1}.timecol on {0}.id={1}.id
+ WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS BETWEEN 2 PRECEDING AND CURRENT ROW);
+ expect:
+ order: id
+ columns: ["id int","c1 string","m2 string","m3 string","m4 string","m5 string","m6 string"]
+ expectProvider:
+ 0:
+ rows:
+ - [1,"aa","1:1","1:1","1:30","1:1.100000","1:2.100000"]
+ - [2,"aa","1:1","1:1","1:30","1:1.100000","1:2.100000"]
+ - [3,"aa","2:3","2:3","2:32","2:1.300000","2:2.300000"]
+ - [4,"aa","2:3","2:3","2:32","2:1.300000","2:2.300000"]
+ 1:
+ rows:
+ - [1,"aa","1:1","1:1","1:30","1:1.100000","1:2.100000"]
+ - [2,"aa","1:1","1:1","1:30","1:1.100000","1:2.100000"]
+ - [3,"aa","2:3","2:3","2:32","2:1.300000","2:2.300000"]
+ - [4,"aa","2:3","2:3","2:32","2:1.300000","2:2.300000"]
+ 2:
+ rows:
+ - [1,"aa","30:1","30:1","30:30","30:1.100000","30:2.100000"]
+ - [2,"aa","30:1","30:1","30:30","30:1.100000","30:2.100000"]
+ - [3,"aa","32:3","32:3","32:32","32:1.300000","32:2.300000"]
+ - [4,"aa","32:3","32:3","32:32","32:1.300000","32:2.300000"]
+ 3:
+ rows:
+ - [1,"aa","2020-05-29 15:56:30:1","2020-05-29 15:56:30:1","2020-05-29 15:56:30:30","2020-05-29 15:56:30:1.100000","2020-05-29 15:56:30:2.100000"]
+ - [2,"aa","2020-05-29 15:56:30:1","2020-05-29 15:56:30:1","2020-05-29 15:56:30:30","2020-05-29 15:56:30:1.100000","2020-05-29 15:56:30:2.100000"]
+ - [3,"aa","2020-05-29 15:56:32:3","2020-05-29 15:56:32:3","2020-05-29 15:56:32:32","2020-05-29 15:56:32:1.300000","2020-05-29 15:56:32:2.300000"]
+ - [4,"aa","2020-05-29 15:56:32:3","2020-05-29 15:56:32:3","2020-05-29 15:56:32:32","2020-05-29 15:56:32:1.300000","2020-05-29 15:56:32:2.300000"]
+ 4:
+ rows:
+ - [1,"aa","2020-05-01:1","2020-05-01:1","2020-05-01:30","2020-05-01:1.100000","2020-05-01:2.100000"]
+ - [2,"aa","2020-05-01:1","2020-05-01:1","2020-05-01:30","2020-05-01:1.100000","2020-05-01:2.100000"]
+ - [3,"aa","2020-05-02:3","2020-05-02:3","2020-05-02:32","2020-05-02:1.300000","2020-05-02:2.300000"]
+ - [4,"aa","2020-05-02:3","2020-05-02:3","2020-05-02:32","2020-05-02:1.300000","2020-05-02:2.300000"]
+ 5:
+ rows:
+ - [1,"aa","a:1","a:1","a:30","a:1.100000","a:2.100000"]
+ - [2,"aa","a:1","a:1","a:30","a:1.100000","a:2.100000"]
+ - [3,"aa","b:3","b:3","b:32","b:1.300000","b:2.300000"]
+ - [4,"aa","b:3","b:3","b:32","b:1.300000","b:2.300000"]
+ -
+ id: 37
+ desc: top_n_key_avg_cate_where-normal
+ mode: cli-unsupport
+ sqlDialect: ["HybridSQL"]
+ inputs:
+ -
+ columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 string","c10 bool"]
+ indexs: ["index1:id:c7","index2:c1:c7"]
+ rows:
+ - [1,"aa",1,1,30,1.1,2.1,1590738990000,"2020-05-01","a",true]
+ - [2,"aa",4,4,33,1.4,2.4,1590738991000,"2020-05-03","c",false]
+ - [3,"aa",3,3,32,1.3,2.3,1590738992000,"2020-05-02","b",true]
+ - [4,"aa",NULL,NULL,NULL,NULL,NULL,1590738993000,NULL,NULL,NULL]
+ -
+ columns : ["id int","timecol bigint","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 string","c10 bool"]
+ indexs: ["index1:id:timecol"]
+ rows:
+ - [1,1,1,1,30,1.1,2.1,1590738990000,"2020-05-01","a",true]
+ - [2,2,1,1,30,1.1,2.1,1590738990000,"2020-05-01","a",true]
+ - [3,3,2,2,32,1.3,2.3,1590738992000,"2020-05-02","b",false]
+ - [4,4,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL]
+ dataProvider:
+ - ["{1}.c2","{1}.c3","{1}.c4","{1}.c7","{1}.c8","{1}.c9"]
+ sql: |
+ SELECT {0}.id, {0}.c1,
+ top_n_key_avg_cate_where({0}.c2,{0}.c10,d[0],1) OVER w1 as m2,
+ top_n_key_avg_cate_where({0}.c3,{0}.c10,d[0],1) OVER w1 as m3,
+ top_n_key_avg_cate_where({0}.c4,{0}.c10,d[0],1) OVER w1 as m4,
+ top_n_key_avg_cate_where({0}.c5,{0}.c10,d[0],1) OVER w1 as m5,
+ top_n_key_avg_cate_where({0}.c6,{0}.c10,d[0],1) OVER w1 as m6
+ FROM {0} last join {1} ORDER BY {1}.timecol on {0}.id={1}.id
+ WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS BETWEEN 2 PRECEDING AND CURRENT ROW);
+ expect:
+ order: id
+ columns: ["id int","c1 string","m2 string","m3 string","m4 string","m5 string","m6 string"]
+ expectProvider:
+ 0:
+ rows:
+ - [1,"aa","1:1.000000","1:1.000000","1:30.000000","1:1.100000","1:2.100000"]
+ - [2,"aa","1:1.000000","1:1.000000","1:30.000000","1:1.100000","1:2.100000"]
+ - [3,"aa","2:3.000000","2:3.000000","2:32.000000","2:1.300000","2:2.300000"]
+ - [4,"aa","2:3.000000","2:3.000000","2:32.000000","2:1.300000","2:2.300000"]
+ 1:
+ rows:
+ - [1,"aa","1:1.000000","1:1.000000","1:30.000000","1:1.100000","1:2.100000"]
+ - [2,"aa","1:1.000000","1:1.000000","1:30.000000","1:1.100000","1:2.100000"]
+ - [3,"aa","2:3.000000","2:3.000000","2:32.000000","2:1.300000","2:2.300000"]
+ - [4,"aa","2:3.000000","2:3.000000","2:32.000000","2:1.300000","2:2.300000"]
+ 2:
+ rows:
+ - [1,"aa","30:1.000000","30:1.000000","30:30.000000","30:1.100000","30:2.100000"]
+ - [2,"aa","30:1.000000","30:1.000000","30:30.000000","30:1.100000","30:2.100000"]
+ - [3,"aa","32:3.000000","32:3.000000","32:32.000000","32:1.300000","32:2.300000"]
+ - [4,"aa","32:3.000000","32:3.000000","32:32.000000","32:1.300000","32:2.300000"]
+ 3:
+ rows:
+ - [1,"aa","2020-05-29 15:56:30:1.000000","2020-05-29 15:56:30:1.000000","2020-05-29 15:56:30:30.000000","2020-05-29 15:56:30:1.100000","2020-05-29 15:56:30:2.100000"]
+ - [2,"aa","2020-05-29 15:56:30:1.000000","2020-05-29 15:56:30:1.000000","2020-05-29 15:56:30:30.000000","2020-05-29 15:56:30:1.100000","2020-05-29 15:56:30:2.100000"]
+ - [3,"aa","2020-05-29 15:56:32:3.000000","2020-05-29 15:56:32:3.000000","2020-05-29 15:56:32:32.000000","2020-05-29 15:56:32:1.300000","2020-05-29 15:56:32:2.300000"]
+ - [4,"aa","2020-05-29 15:56:32:3.000000","2020-05-29 15:56:32:3.000000","2020-05-29 15:56:32:32.000000","2020-05-29 15:56:32:1.300000","2020-05-29 15:56:32:2.300000"]
+ 4:
+ rows:
+ - [1,"aa","2020-05-01:1.000000","2020-05-01:1.000000","2020-05-01:30.000000","2020-05-01:1.100000","2020-05-01:2.100000"]
+ - [2,"aa","2020-05-01:1.000000","2020-05-01:1.000000","2020-05-01:30.000000","2020-05-01:1.100000","2020-05-01:2.100000"]
+ - [3,"aa","2020-05-02:3.000000","2020-05-02:3.000000","2020-05-02:32.000000","2020-05-02:1.300000","2020-05-02:2.300000"]
+ - [4,"aa","2020-05-02:3.000000","2020-05-02:3.000000","2020-05-02:32.000000","2020-05-02:1.300000","2020-05-02:2.300000"]
+ 5:
+ rows:
+ - [1,"aa","a:1.000000","a:1.000000","a:30.000000","a:1.100000","a:2.100000"]
+ - [2,"aa","a:1.000000","a:1.000000","a:30.000000","a:1.100000","a:2.100000"]
+ - [3,"aa","b:3.000000","b:3.000000","b:32.000000","b:1.300000","b:2.300000"]
+ - [4,"aa","b:3.000000","b:3.000000","b:32.000000","b:1.300000","b:2.300000"]
+ -
+ id: 38
+ desc: top_n_key_count_cate_where-normal
+ mode: cli-unsupport
+ sqlDialect: ["HybridSQL"]
+ inputs:
+ -
+ columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 string","c10 bool"]
+ indexs: ["index1:id:c7","index2:c1:c7"]
+ rows:
+ - [1,"aa",1,1,30,1.1,2.1,1590738990000,"2020-05-01","a",true]
+ - [2,"aa",4,4,33,1.4,2.4,1590738991000,"2020-05-03","c",false]
+ - [3,"aa",3,3,32,1.3,2.3,1590738992000,"2020-05-02","b",true]
+ - [4,"aa",NULL,NULL,NULL,NULL,NULL,1590738993000,NULL,NULL,NULL]
+ -
+ columns : ["id int","timecol bigint","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 string","c10 bool"]
+ indexs: ["index1:id:timecol"]
+ rows:
+ - [1,1,1,1,30,1.1,2.1,1590738990000,"2020-05-01","a",true]
+ - [2,2,1,1,30,1.1,2.1,1590738990000,"2020-05-01","a",true]
+ - [3,3,2,2,32,1.3,2.3,1590738992000,"2020-05-02","b",false]
+ - [4,4,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL]
+ dataProvider:
+ - ["{1}.c2","{1}.c3","{1}.c4","{1}.c7","{1}.c8","{1}.c9"]
+ sql: |
+ SELECT {0}.id, {0}.c1,
+ top_n_key_count_cate_where({0}.c2,{0}.c10,d[0],1) OVER w1 as m2,
+ top_n_key_count_cate_where({0}.c3,{0}.c10,d[0],1) OVER w1 as m3,
+ top_n_key_count_cate_where({0}.c4,{0}.c10,d[0],1) OVER w1 as m4,
+ top_n_key_count_cate_where({0}.c5,{0}.c10,d[0],1) OVER w1 as m5,
+ top_n_key_count_cate_where({0}.c6,{0}.c10,d[0],1) OVER w1 as m6
+ FROM {0} last join {1} ORDER BY {1}.timecol on {0}.id={1}.id
+ WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS BETWEEN 2 PRECEDING AND CURRENT ROW);
+ expect:
+ order: id
+ columns: ["id int","c1 string","m2 string","m3 string","m4 string","m5 string","m6 string"]
+ expectProvider:
+ 0:
+ rows:
+ - [1,"aa","1:1","1:1","1:1","1:1","1:1"]
+ - [2,"aa","1:1","1:1","1:1","1:1","1:1"]
+ - [3,"aa","2:1","2:1","2:1","2:1","2:1"]
+ - [4,"aa","2:1","2:1","2:1","2:1","2:1"]
+ 1:
+ rows:
+ - [1,"aa","1:1","1:1","1:1","1:1","1:1"]
+ - [2,"aa","1:1","1:1","1:1","1:1","1:1"]
+ - [3,"aa","2:1","2:1","2:1","2:1","2:1"]
+ - [4,"aa","2:1","2:1","2:1","2:1","2:1"]
+ 2:
+ rows:
+ - [1,"aa","30:1","30:1","30:1","30:1","30:1"]
+ - [2,"aa","30:1","30:1","30:1","30:1","30:1"]
+ - [3,"aa","32:1","32:1","32:1","32:1","32:1"]
+ - [4,"aa","32:1","32:1","32:1","32:1","32:1"]
+ 3:
+ rows:
+ - [1,"aa","2020-05-29 15:56:30:1","2020-05-29 15:56:30:1","2020-05-29 15:56:30:1","2020-05-29 15:56:30:1","2020-05-29 15:56:30:1"]
+ - [2,"aa","2020-05-29 15:56:30:1","2020-05-29 15:56:30:1","2020-05-29 15:56:30:1","2020-05-29 15:56:30:1","2020-05-29 15:56:30:1"]
+ - [3,"aa","2020-05-29 15:56:32:1","2020-05-29 15:56:32:1","2020-05-29 15:56:32:1","2020-05-29 15:56:32:1","2020-05-29 15:56:32:1"]
+ - [4,"aa","2020-05-29 15:56:32:1","2020-05-29 15:56:32:1","2020-05-29 15:56:32:1","2020-05-29 15:56:32:1","2020-05-29 15:56:32:1"]
+ 4:
+ rows:
+ - [1,"aa","2020-05-01:1","2020-05-01:1","2020-05-01:1","2020-05-01:1","2020-05-01:1"]
+ - [2,"aa","2020-05-01:1","2020-05-01:1","2020-05-01:1","2020-05-01:1","2020-05-01:1"]
+ - [3,"aa","2020-05-02:1","2020-05-02:1","2020-05-02:1","2020-05-02:1","2020-05-02:1"]
+ - [4,"aa","2020-05-02:1","2020-05-02:1","2020-05-02:1","2020-05-02:1","2020-05-02:1"]
+ 5:
+ rows:
+ - [1,"aa","a:1","a:1","a:1","a:1","a:1"]
+ - [2,"aa","a:1","a:1","a:1","a:1","a:1"]
+ - [3,"aa","b:1","b:1","b:1","b:1","b:1"]
+ - [4,"aa","b:1","b:1","b:1","b:1","b:1"]
+ -
+ id: 39
+ desc: "top_n_key_*_cate_where-fail1"
+ sqlDialect: ["HybridSQL"]
+ level: 5
+ inputs:
+ -
+ columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 string","c10 bool"]
+ indexs: ["index1:id:c7","index2:c1:c7"]
+ rows:
+ - [1,"aa",1,1,30,1.1,2.1,1590738990000,"2020-05-01","a",true]
+ - [2,"aa",4,4,33,1.4,2.4,1590738991000,"2020-05-03","c",false]
+ - [3,"aa",3,3,32,1.3,2.3,1590738992000,"2020-05-02","b",true]
+ - [4,"aa",NULL,NULL,NULL,NULL,NULL,1590738993000,NULL,NULL,NULL]
+ -
+ columns : ["id int","timecol bigint","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 string","c10 bool"]
+ indexs: ["index1:id:timecol"]
+ rows:
+ - [1,1,1,1,30,1.1,2.1,1590738990000,"2020-05-01","a",true]
+ - [2,2,1,1,30,1.1,2.1,1590738990000,"2020-05-01","a",true]
+ - [3,3,2,2,32,1.3,2.3,1590738992000,"2020-05-02","b",false]
+ - [4,4,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL]
+ dataProvider:
+ - ["top_n_key_count_cate_where","top_n_key_sum_cate_where","top_n_key_avg_cate_where","top_n_key_max_cate_where","top_n_key_min_cate_where"]
+ - ["{0}.c2","{0}.c3","{0}.c4","{0}.c5","{0}.c6"]
+ - ["{1}.c5","{1}.c6","{1}.c10"]
+ sql: |
+ SELECT {0}.id, {0}.c1,
+ d[0](d[1],{0}.c10,d[2],1) OVER w1 as m2
+ FROM {0} last join {1} ORDER BY {1}.timecol on {0}.id={1}.id
+ WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS BETWEEN 2 PRECEDING AND CURRENT ROW);
+ expect:
+ success: false
+ -
+ id: 40
+ desc: "top_n_key_*_cate_where-fail2"
+ sqlDialect: ["HybridSQL"]
+ level: 5
+ inputs:
+ -
+ columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 string","c10 bool"]
+ indexs: ["index1:id:c7","index2:c1:c7"]
+ rows:
+ - [1,"aa",1,1,30,1.1,2.1,1590738990000,"2020-05-01","a",true]
+ - [2,"aa",4,4,33,1.4,2.4,1590738991000,"2020-05-03","c",false]
+ - [3,"aa",3,3,32,1.3,2.3,1590738992000,"2020-05-02","b",true]
+ - [4,"aa",NULL,NULL,NULL,NULL,NULL,1590738993000,NULL,NULL,NULL]
+ -
+ columns : ["id int","timecol bigint","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 string","c10 bool"]
+ indexs: ["index1:id:timecol"]
+ rows:
+ - [1,1,1,1,30,1.1,2.1,1590738990000,"2020-05-01","a",true]
+ - [2,2,1,1,30,1.1,2.1,1590738990000,"2020-05-01","a",true]
+ - [3,3,2,2,32,1.3,2.3,1590738992000,"2020-05-02","b",false]
+ - [4,4,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL]
+ dataProvider:
+ - ["top_n_key_count_cate_where","top_n_key_sum_cate_where","top_n_key_avg_cate_where","top_n_key_max_cate_where","top_n_key_min_cate_where"]
+ - ["{0}.c7","{0}.c8","{0}.c9","{0}.c10"]
+ - ["{1}.c2","{1}.c3","{1}.c4","{1}.c5","{1}.c6","{1}.c7","{1}.c8","{1}.c9","{1}.c10"]
+ sql: |
+ SELECT {0}.id, {0}.c1,
+ d[0](d[1],{0}.c10,d[2],1) OVER w1 as m2
+ FROM {0} last join {1} ORDER BY {1}.timecol on {0}.id={1}.id
+ WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS BETWEEN 2 PRECEDING AND CURRENT ROW);
+ expect:
+ success: false
+ - id: 41
+ desc: arithmetic_and_udf_before_udaf
+ inputs:
+ - columns: ["id bigint", "c1 double", "c2 float", "c3 int"]
+ indexs: ["index1:c3:id"]
+ rows:
+ - [1, 10.0, 1.0, 5]
+ - [2, 9.0, 2.0, 5]
+ - [3, 8.0, 3.0, 5]
+ - [4, 7.0, 4.0, 2]
+ - [5, 6.0, 5.0, 2]
+ sql: |
+ SELECT {0}.id,
+ sum((c1 - c2) / c3) OVER w1 AS r1,
+ sum(log(c1 + c2) + c3) OVER w1 as r2
+ FROM {0}
+ WINDOW w1 AS (PARTITION BY {0}.c3 ORDER BY {0}.id ROWS BETWEEN 10 PRECEDING AND CURRENT ROW);
+ expect:
+ order: id
+ columns: ["id bigint","r1 double", "r2 double"]
+ rows:
+ - [1, 1.8, 7.3978952727983707]
+ - [2, 3.2, 14.795790545596741]
+ - [3, 4.2, 22.19368581839511]
+ - [4, 1.5, 4.3978952727983707]
+ - [5, 2.0, 8.7957905455967413]
+
+ - id: 42
+ desc: arithmetic_and_udf_after_udaf
+ sqlDialect: ["HybridSQL"]
+ tags: ["Currently only f(udaf()) over w is supported; otherwise the query cannot enter the window agg node"]
+ inputs:
+ - columns: ["id bigint", "c1 double", "c2 float", "c3 int"]
+ indexs: ["index1:c3:id"]
+ rows:
+ - [1, 10.0, 1.0, 5]
+ - [2, 9.0, 2.0, 5]
+ - [3, 8.0, 3.0, 5]
+ - [4, 7.0, 4.0, 2]
+ - [5, 6.0, 5.0, 2]
+ sql: |
+ SELECT {0}.id,
+ abs(sum(c3)) OVER w1 as r1,
+ log((sum(c1) + sum(c2)) / c3) OVER w1 AS r2
+ FROM {0}
+ WINDOW w1 AS (PARTITION BY {0}.c3 ORDER BY {0}.id ROWS BETWEEN 10 PRECEDING AND CURRENT ROW);
+ expect:
+ order: id
+ columns: ["id bigint","r1 int", "r2 double"]
+ rows:
+ - [1, 5, 0.78845736036427028]
+ - [2, 10, 1.4816045409242156]
+ - [3, 15, 1.8870696490323797]
+ - [4, 2, 1.7047480922384253]
+ - [5, 4, 2.3978952727983707]
+
+ - id: 43
+ desc: nested udaf
+ sqlDialect: ["HybridSQL"]
+ inputs:
+ - columns: ["id bigint", "c1 double", "c2 float", "c3 int"]
+ indexs: ["index1:c3:id"]
+ rows:
+ - [1, 10.0, 1.0, 5]
+ - [2, 9.0, 2.0, 5]
+ - [3, 8.0, 3.0, 5]
+ - [4, 7.0, 4.0, 2]
+ - [5, 6.0, 5.0, 2]
+ sql: |
+ SELECT {0}.id,
+ sum(c1 - count(c1)) OVER w1 AS r1,
+ abs(sum(log(c1) - log(count(c1)))) OVER w1 AS r2,
+ sum(c1 + sum(c2 * count(c3))) OVER w1 AS r3
+ FROM {0}
+ WINDOW w1 AS (PARTITION BY {0}.c3 ORDER BY {0}.id ROWS BETWEEN 10 PRECEDING AND CURRENT ROW);
+ expect:
+ order: id
+ columns: ["id bigint","r1 double", "r2 double", "r3 double"]
+ rows:
+ - [1, 9.0, 2.3025850929940459, 11.0]
+ - [2, 15.0, 3.1135153092103747, 31.0]
+ - [3, 18.0, 3.2834143460057721, 81.0]
+ - [4, 6.0, 1.9459101490553132, 11.0]
+ - [5, 9.0, 2.3513752571634776, 49.0]
+
+ - id: 44
+ desc: cast after udaf
+ sqlDialect: ["HybridSQL"]
+ inputs:
+ - columns: ["id bigint", "c1 double", "c2 float", "c3 int"]
+ indexs: ["index1:c3:id"]
+ rows:
+ - [1, 10.0, 1.0, 5]
+ - [2, 9.0, 2.0, 5]
+ - [3, 8.0, 3.0, 5]
+ - [4, 7.0, 4.0, 2]
+ - [5, 6.0, 5.0, 2]
+ sql: |
+ SELECT {0}.id, c3,
+ CAST(sum(c1) OVER w1 AS string) AS r1,
+ string(sum(c1) OVER w1) AS r2,
+ `string`(sum(c1) OVER w1) AS r3
+ FROM {0}
+ WINDOW w1 AS (PARTITION BY {0}.c3 ORDER BY {0}.id ROWS BETWEEN 10 PRECEDING AND CURRENT ROW);
+ expect:
+ order: id
+ columns: ["id bigint", "c3 int", "r1 string", "r2 string", "r3 string"]
+ rows:
+ - [1, 5, "10", "10", "10"]
+ - [2, 5, "19", "19", "19"]
+ - [3, 5, "27", "27", "27"]
+ - [4, 2, "7", "7", "7"]
+ - [5, 2, "13", "13", "13"]
+
+ - id: 45
+ desc: aggregate where
+ sqlDialect: ["HybridSQL"]
+ mode: request-unsupport
+ inputs:
+ - columns: ["id bigint", "c1 double", "c2 float", "c3 bigint"]
+ indexs: ["index1:c3:id"]
+ rows:
+ - [1, 1.0, 1.1, 0]
+ - [2, 2.0, 7.7, 0]
+ - [3, NULL, 0.1, 0]
+ - [4, 3.0, NULL, 0]
+ - [5, 4.0, 5.5, 0]
+ - [6, 5.0, 3.3, 1]
+ - [7, NULL, 2.2, 1]
+ - [8, 7.0, NULL, 1]
+ - [9, 8.0, 4.4, 1]
+ sql: |
+ SELECT {0}.id,
+ count_where(c1, c1 < c2) OVER w1 AS count_where_1,
+ avg_where(c1, c1 < c2) OVER w1 AS avg_where_1,
+ count_where(c2, c2 > 4) OVER w1 AS count_where_2,
+ avg_where(c2, c2 > 4) OVER w1 AS avg_where_2
+ FROM {0}
+ WINDOW w1 AS (PARTITION BY {0}.c3 ORDER BY {0}.id ROWS BETWEEN 10 PRECEDING AND CURRENT ROW);
+ expect:
+ order: id
+ columns: ["id bigint", "count_where_1 bigint", "avg_where_1 double", "count_where_2 bigint", "avg_where_2 double"]
+ rows:
+ - [1, 1, 1.0, 0, NULL]
+ - [2, 2, 1.5, 1, 7.6999998092651367]
+ - [3, 2, 1.5, 1, 7.6999998092651367]
+ - [4, 2, 1.5, 1, 7.6999998092651367]
+ - [5, 3, 2.3333333333333335, 2, 6.5999999046325684]
+ - [6, 0, NULL, 0, NULL]
+ - [7, 0, NULL, 0, NULL]
+ - [8, 0, NULL, 0, NULL]
+ - [9, 0, NULL, 1, 4.4000000953674316]
+
+ - id: 46
+ desc: window lag functions
+ sqlDialect: ["HybridSQL"]
+ inputs:
+ -
+ columns : ["id int","pk bigint","c1 string","c2 int","c3 bigint","c4 float",
+ "c5 double","c6 timestamp","c7 date","c8 bool"]
+ indexs: ["index1:pk:c6"]
+ rows:
+ - [1, 1, "a", 1, 30, 1.1, 2.1, 1590738990000, "2020-05-01", true]
+ - [2, 1, "c", 4, 33, 1.4, 2.4, 1590738991000, "2020-05-03", false]
+ - [3, 1, "b", 3, 32, 1.3, 2.3, 1590738992000, "2020-05-02", true]
+ - [4, 1, NULL, NULL, NULL, NULL, NULL, 1590738993000, NULL, NULL]
+ - [5, 1, "d", 5, 35, 1.5, 2.5, 1590738994000, "2020-05-04", false]
+ sql: |
+ SELECT {0}.id,
+ lag(c1, 0) OVER w1 as m1,
+ lag(c1, 2) OVER w1 as m2,
+ lag(c2, 0) OVER w1 as m3,
+ lag(c2, 2) OVER w1 as m4,
+ lag(c3, 0) OVER w1 as m5,
+ lag(c3, 2) OVER w1 as m6,
+ lag(c4, 0) OVER w1 as m7,
+ lag(c4, 2) OVER w1 as m8,
+ lag(c5, 0) OVER w1 as m9,
+ lag(c5, 2) OVER w1 as m10,
+ lag(c6, 0) OVER w1 as m11,
+ lag(c6, 2) OVER w1 as m12,
+ lag(c7, 0) OVER w1 as m13,
+ lag(c7, 2) OVER w1 as m14,
+ lag(c8, 0) OVER w1 as m15,
+ lag(c8, 2) OVER w1 as m16
+ FROM {0} WINDOW
+ w1 AS (PARTITION BY {0}.pk ORDER BY {0}.c6 ROWS BETWEEN 2 PRECEDING AND CURRENT ROW);
+ expect:
+ order: id
+ columns: ["id int","m1 string", "m2 string", "m3 int", "m4 int", "m5 bigint", "m6 bigint",
+ "m7 float", "m8 float", "m9 double", "m10 double",
+ "m11 timestamp", "m12 timestamp", "m13 date", "m14 date", "m15 bool", "m16 bool"]
+ rows:
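+        # note: lag(x, 0) yields the current row's value and lag(x, 2) the value two rows earlier
+        # in the partition, or NULL when no such row exists (ids 1 and 2).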
+ - [1, "a", NULL, 1, NULL, 30, NULL, 1.1, NULL, 2.1, NULL,
+ 1590738990000, NULL, "2020-05-01", NULL, true, NULL]
+ - [2, "c", NULL, 4, NULL, 33, NULL, 1.4, NULL, 2.4, NULL,
+ 1590738991000, NULL, "2020-05-03", NULL, false, NULL]
+ - [3, "b", "a", 3, 1, 32, 30, 1.3, 1.1, 2.3, 2.1,
+ 1590738992000, 1590738990000, "2020-05-02", "2020-05-01", true, true]
+ - [4, NULL, "c", NULL, 4, NULL, 33, NULL, 1.4, NULL, 2.4,
+ 1590738993000, 1590738991000, NULL, "2020-05-03", NULL, false]
+ - [5, "d", "b", 5, 3, 35, 32, 1.5, 1.3, 2.5, 2.3,
+ 1590738994000, 1590738992000, "2020-05-04", "2020-05-02", false, true]
+
+ - id: 47
+ desc: count where value equals first value
+ sqlDialect: ["HybridSQL"]
+ inputs:
+ - columns: ["id bigint", "pk bigint", "c1 string"]
+ indexs: ["index1:pk:id"]
+ rows:
+ - [1, 0, "a"]
+ - [2, 0, "b"]
+ - [3, 0, "c"]
+ - [4, 0, NULL]
+ - [5, 0, "b"]
+ - [6, 0, NULL]
+ - [7, 0, "c"]
+ - [8, 0, "a"]
+ - [9, 0, NULL]
+ - [10, 0, "c"]
+ - [11, 0, "a"]
+ - [12, 0, "b"]
+ sql: |
+ SELECT {0}.id,
+ count_where(id, ifnull(c1, "a") = ifnull(first_value(c1), "a")) OVER w1 AS count_where
+ FROM {0}
+ WINDOW w1 AS (PARTITION BY {0}.pk ORDER BY {0}.id ROWS BETWEEN 10 PRECEDING AND CURRENT ROW);
+ expect:
+ order: id
+ columns: ["id bigint", "count_where bigint"]
+ rows:
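+        # note (derived from the expected values): first_value(c1) here equals the current
+        # (newest) row's c1, so the expression counts window rows whose c1 matches the current
+        # row's c1 with NULL mapped to "a"; e.g. for id=7 the current value is "c" and only
+        # ids 3 and 7 match, giving 2.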
+ - [1, 1]
+ - [2, 1]
+ - [3, 1]
+ - [4, 2]
+ - [5, 2]
+ - [6, 3]
+ - [7, 2]
+ - [8, 4]
+ - [9, 5]
+ - [10, 3]
+ - [11, 6]
+ - [12, 3]
+ - id: 48
+ desc: count where value equals lag
+ sqlDialect: ["HybridSQL"]
+ inputs:
+ - columns: ["id bigint", "pk bigint", "c1 string"]
+ indexs: ["index1:pk:id"]
+ rows:
+ - [1, 0, "a"]
+ - [2, 0, "b"]
+ - [3, 0, "c"]
+ - [4, 0, NULL]
+ - [5, 0, "b"]
+ - [6, 0, NULL]
+ - [7, 0, "c"]
+ - [8, 0, "a"]
+ - [9, 0, NULL]
+ - [10, 0, "c"]
+ - [11, 0, "a"]
+ - [12, 0, "b"]
+ - [13, 0, "a"]
+ - [14, 0, "a"]
+ sql: |
+ SELECT {0}.id,
+ count_where(id, ifnull(c1, "a") = ifnull(lag(c1, 0), "a")) OVER w1 AS count_where_w1,
+ count_where(id, ifnull(c1, "a") = ifnull(lag(c1, 0), "a")) OVER w2 AS count_where_w2
+ FROM {0}
+ WINDOW w1 AS (PARTITION BY {0}.pk ORDER BY {0}.id ROWS BETWEEN 10 PRECEDING AND CURRENT ROW),
+ w2 AS (PARTITION BY {0}.pk ORDER BY {0}.id ROWS_RANGE BETWEEN 100s PRECEDING AND CURRENT ROW);
+ expect:
+ order: id
+ columns: ["id bigint", "count_where_w1 bigint", "count_where_w2 bigint"]
+ rows:
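+        # note: w1 (ROWS 10 PRECEDING) holds at most 11 rows while w2 (ROWS_RANGE 100s) still
+        # covers the whole partition here, so the two columns diverge from id=13 on: for id=13
+        # ("a") ids 1, 4, 6, 8, 9, 11 and 13 match in w2 (7), but w1's frame starts at id=3 and
+        # no longer sees id=1 (6).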
+ - [1, 1, 1]
+ - [2, 1, 1]
+ - [3, 1, 1]
+ - [4, 2, 2]
+ - [5, 2, 2]
+ - [6, 3, 3]
+ - [7, 2, 2]
+ - [8, 4, 4]
+ - [9, 5, 5]
+ - [10, 3, 3]
+ - [11, 6, 6]
+ - [12, 3, 3]
+ - [13, 6, 7]
+ - [14, 7, 8]
+ - id: 49
+    desc: count where value equals lag, guarded by case when
+ sqlDialect: ["HybridSQL"]
+ inputs:
+ - columns: ["id bigint", "pk bigint", "c1 string"]
+ indexs: ["index1:pk:id"]
+ rows:
+ - [1, 0, "a"]
+ - [2, 0, "b"]
+ - [3, 0, "c"]
+ - [4, 0, NULL]
+ - [5, 0, "b"]
+ - [6, 0, NULL]
+ - [7, 0, "c"]
+ - [8, 0, "a"]
+ - [9, 0, NULL]
+ - [10, 0, "c"]
+ - [11, 0, "a"]
+ - [12, 0, "b"]
+ - [13, 0, "a"]
+ - [14, 0, "a"]
+ sql: |
+ SELECT {0}.id,
+ case when !isnull(lag(c1,0)) OVER w1 then count_where(id, c1 = lag(c1, 0)) OVER w1 else null end AS count_where_w1,
+ case when !isnull(lag(c1,0)) OVER w2 then count_where(id, c1 = lag(c1, 0)) OVER w2 else null end AS count_where_w2
+ FROM {0}
+ WINDOW w1 AS (PARTITION BY {0}.pk ORDER BY {0}.id ROWS BETWEEN 10 PRECEDING AND CURRENT ROW),
+ w2 AS (PARTITION BY {0}.pk ORDER BY {0}.id ROWS_RANGE BETWEEN 100s PRECEDING AND CURRENT ROW);
+ expect:
+ order: id
+ columns: ["id bigint", "count_where_w1 bigint", "count_where_w2 bigint"]
+ rows:
+ - [1, 1, 1]
+ - [2, 1, 1]
+ - [3, 1, 1]
+ - [4, NULL, NULL]
+ - [5, 2, 2]
+ - [6, NULL, NULL]
+ - [7, 2, 2]
+ - [8, 2, 2]
+ - [9, NULL, NULL]
+ - [10, 3, 3]
+ - [11, 3, 3]
+ - [12, 3, 3]
+ - [13, 3, 4]
+ - [14, 4, 5]
+ -
+ id: 50
+    desc: duplicate aggregate expressions
+ sqlDialect: ["HybridSQL"]
+ inputs:
+ -
+ columns : ["c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"]
+ indexs: ["index1:c1:c7"]
+ rows:
+ - ["aa",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - ["aa",21,31,1.2,2.2,1590738991000,"2020-05-02"]
+ - ["aa",22,32,1.3,2.3,1590738992000,"2020-05-03"]
+ - ["aa",23,33,1.4,2.4,1590738993000,"2020-05-04"]
+ - ["bb",24,34,1.5,2.5,1590738994000,"2020-05-05"]
+ sql: |
+ SELECT c1, c3,
+ sum(c4) OVER w1 as w1_c4_sum,
+ sum(c4) OVER w1 as w1_c4_sum2
+ FROM {0} WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS_RANGE BETWEEN 2s PRECEDING AND CURRENT ROW);
+ expect:
+ order: c3
+ columns: ["c1 string","c3 int","w1_c4_sum bigint", "w1_c4_sum2 bigint"]
+ rows:
+ - ["aa",20,30, 30]
+ - ["aa",21,61, 61]
+ - ["aa",22,93, 93]
+ - ["aa",23,96, 96]
+ - ["bb",24,34, 34]
+
+ -
+ id: 51
+    desc: duplicate aggregate expressions
+ sqlDialect: ["HybridSQL"]
+ inputs:
+ -
+ columns : ["c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"]
+ indexs: ["index1:c1:c7"]
+ rows:
+ - ["aa",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - ["aa",21,31,1.2,2.2,1590738991000,"2020-05-02"]
+ - ["aa",22,32,1.3,2.3,1590738992000,"2020-05-03"]
+ - ["aa",23,33,1.4,2.4,1590738993000,"2020-05-04"]
+ - ["bb",24,34,1.5,2.5,1590738994000,"2020-05-05"]
+ sql: |
+ SELECT c1, c3,
+ sum(c4) OVER w1 as w1_c4_sum,
+ sum(c4) OVER w1 as w1_c4_sum2
+ FROM {0} WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS_RANGE BETWEEN 2s PRECEDING AND CURRENT ROW);
+ expect:
+ order: c3
+ columns: ["c1 string","c3 int","w1_c4_sum bigint", "w1_c4_sum2 bigint"]
+ rows:
+ - ["aa",20,30, 30]
+ - ["aa",21,61, 61]
+ - ["aa",22,93, 93]
+ - ["aa",23,96, 96]
+ - ["bb",24,34, 34]
+
+ - id: 52
+    desc: multiple aggregate functions over multiple mergeable windows
+ sqlDialect: ["HybridSQL"]
+ version: 0.6.0
+ sql: |
+ SELECT {0}.id, pk, col1, std_ts,
+ distinct_count(col1) OVER w1 as a1,
+ distinct_count(col1) OVER w2 as a2,
+ distinct_count(col1) OVER w3 as a3,
+ sum(col1 * 1.0) OVER w1 as b1,
+ sum(col1 * 1.0) OVER w2 as b2,
+ sum(col1 * 1.0) OVER w3 as b3
+ FROM {0} WINDOW
+ w1 AS (PARTITION BY pk ORDER BY std_ts ROWS BETWEEN 2 PRECEDING AND CURRENT ROW),
+ w2 AS (PARTITION BY pk ORDER BY std_ts ROWS BETWEEN 4 PRECEDING AND 3 PRECEDING),
+ w3 AS (PARTITION BY pk ORDER BY std_ts ROWS BETWEEN 6 PRECEDING AND 5 PRECEDING);
+ inputs:
+ -
+ columns: ["id int", "pk string", "col1 int32", "std_ts timestamp"]
+ indexs: ["index1:pk:std_ts"]
+ rows:
+ - [1, A, 1, 1590115420000]
+ - [2, A, 1, 1590115430000]
+ - [3, A, 2, 1590115440000]
+ - [4, A, 2, 1590115450000]
+ - [5, A, 2, 1590115460000]
+ - [6, A, 3, 1590115470000]
+ - [7, A, 3, 1590115480000]
+ - [8, A, 3, 1590115490000]
+ - [9, A, 3, 1590115500000]
+ - [10, B, 1, 1590115420000]
+ - [11, B, 2, 1590115430000]
+ - [12, B, 3, 1590115440000]
+ - [13, B, 4, 1590115450000]
+ - [14, B, 5, 1590115460000]
+ expect:
+ columns: ["id int32", "pk string", "col1 int32", "std_ts timestamp",
+ "a1 bigint", "a2 bigint", "a3 bigint",
+ "b1 double" ,"b2 double", "b3 double"]
+ order: id
+ rows:
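+        # note: w2 and w3 are pure-preceding frames, so they are empty for the first rows of each
+        # key; in these expectations distinct_count returns 0 over an empty frame while sum
+        # returns NULL, e.g. for id=4 w2 covers only id=1 (a2 = 1, b2 = 1.0) and w3 is still
+        # empty (a3 = 0, b3 = NULL).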
+ - [1, A, 1, 1590115420000, 1, 0, 0, 1.0, NULL, NULL]
+ - [2, A, 1, 1590115430000, 1, 0, 0, 2.0, NULL, NULL]
+ - [3, A, 2, 1590115440000, 2, 0, 0, 4.0, NULL, NULL]
+ - [4, A, 2, 1590115450000, 2, 1, 0, 5.0, 1.0, NULL]
+ - [5, A, 2, 1590115460000, 1, 1, 0, 6.0, 2.0, NULL]
+ - [6, A, 3, 1590115470000, 2, 2, 1, 7.0, 3.0, 1.0]
+ - [7, A, 3, 1590115480000, 2, 1, 1, 8.0, 4.0, 2.0]
+ - [8, A, 3, 1590115490000, 1, 1, 2, 9.0, 4.0, 3.0]
+ - [9, A, 3, 1590115500000, 1, 2, 1, 9.0, 5.0, 4.0]
+ - [10, B, 1, 1590115420000, 1, 0, 0, 1.0, NULL, NULL]
+ - [11, B, 2, 1590115430000, 2, 0, 0, 3.0, NULL, NULL]
+ - [12, B, 3, 1590115440000, 3, 0, 0, 6.0, NULL, NULL]
+ - [13, B, 4, 1590115450000, 3, 1, 0, 9.0, 1.0, NULL]
+ - [14, B, 5, 1590115460000, 3, 2, 0, 12.0, 3.0, NULL]
+
+ - id: 53
+    desc: multiple kinds of aggregate functions over the same window
+ sqlDialect: ["HybridSQL"]
+ version: 0.6.0
+ sql: |
+ SELECT {0}.id, pk, col1, std_ts,
+ sum(col1 + count(col1)) OVER w as a1,
+ distinct_count(col1) OVER w as a2,
+ sum_where(col1, std_ts > timestamp(1590115440000)) OVER w as a3,
+ count_where(col1, std_ts > timestamp(1590115440000)) OVER w as a4,
+ avg_where(col1, std_ts > timestamp(1590115440000)) OVER w as a5,
+ sum(col1) OVER w as a6,
+ count(col1) OVER w as a7,
+ fz_topn_frequency(id, 3) OVER w as a8
+ FROM {0} WINDOW
+ w AS (PARTITION BY pk ORDER BY std_ts ROWS BETWEEN 2 PRECEDING AND CURRENT ROW);
+ inputs:
+ -
+ columns: ["id int", "pk string", "col1 int32", "std_ts timestamp"]
+ indexs: ["index1:pk:std_ts"]
+ rows:
+ - [1, A, 1, 1590115420000]
+ - [2, A, 2, 1590115430000]
+ - [3, A, 3, 1590115440000]
+ - [4, A, 4, 1590115450000]
+ - [5, A, 5, 1590115460000]
+ expect:
+ columns: ["id int32", "pk string", "col1 int32", "std_ts timestamp",
+ "a1 bigint", "a2 bigint", "a3 int32", "a4 bigint",
+ "a5 double" ,"a6 int32", "a7 bigint", "a8 string"]
+ order: id
+ rows:
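+        # note: a1 = sum(col1 + count(col1)) adds the window row count to every value before
+        # summing, e.g. for id=3 (count = 3): (1+3)+(2+3)+(3+3) = 15; the *_where aggregates only
+        # see rows with std_ts > 1590115440000, so they stay NULL/0 until id=4.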
+ - [1, A, 1, 1590115420000, 2, 1, null, 0, null, 1, 1, "1,NULL,NULL"]
+ - [2, A, 2, 1590115430000, 7, 2, null, 0, null, 3, 2, "1,2,NULL"]
+ - [3, A, 3, 1590115440000, 15, 3, null, 0, null, 6, 3, "1,2,3"]
+ - [4, A, 4, 1590115450000, 18, 3, 4, 1, 4.0, 9, 3, "2,3,4"]
+ - [5, A, 5, 1590115460000, 21, 3, 9, 2, 4.5, 12, 3, "3,4,5"]
+
+ - id: 54
+    desc: max over an empty window
+ sqlDialect: ["HybridSQL"]
+ inputs:
+ -
+ columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float",
+ "c6 double","c7 timestamp","c8 date","c9 string","c10 bool"]
+ indexs: ["index1:c1:c7"]
+ rows:
+ - [1,"aa",1,1,30,1.1,2.1,1590738990000,"2020-05-01","a",true]
+ - [2,"aa",4,4,33,1.4,2.4,1590738991000,"2020-05-03","c",false]
+ - [3,"aa",3,3,32,1.3,2.3,1590738992000,"2020-05-02","b",true]
+ - [4,"aa",NULL,NULL,NULL,NULL,NULL,1590738993000,NULL,NULL,NULL]
+ sql: |
+ SELECT {0}.id, c1, max(c2) OVER w1 as m2, max(c3) OVER w1 as m3, max(c4) OVER w1 as m4,
+ max(c5) OVER w1 as m5,max(c6) OVER w1 as m6,max(c7) OVER w1 as m7,
+ max(c8) OVER w1 as m8,max(c9) OVER w1 as m9
+ FROM {0} WINDOW
+ w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS BETWEEN 4 PRECEDING AND 2 PRECEDING);
+ expect:
+ order: id
+ columns: ["id int","c1 string","m2 smallint","m3 int","m4 bigint","m5 float",
+ "m6 double","m7 timestamp","m8 date","m9 string"]
+ rows:
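+        # note: the frame 4 PRECEDING AND 2 PRECEDING excludes the current row, so ids 1 and 2
+        # see an empty frame and every max is NULL; for id=3 the frame is row 1 only and for
+        # id=4 it is rows 1-2.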
+ - [1,"aa",NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL]
+ - [2,"aa",NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL]
+ - [3,"aa",1,1,30,1.1,2.1,1590738990000,"2020-05-01","a"]
+ - [4,"aa",4,4,33,1.4,2.4,1590738991000,"2020-05-03","c"]
+
+ - id: 55
+    desc: min over an empty window
+ sqlDialect: ["HybridSQL"]
+ inputs:
+ -
+ columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 string","c10 bool"]
+ indexs: ["index1:c1:c7"]
+ rows:
+ - [1,"aa",1,1,30,1.1,2.1,1590738990000,"2020-05-01","a",true]
+ - [2,"aa",4,4,33,1.4,2.4,1590738991000,"2020-05-03","c",false]
+ - [3,"aa",3,3,32,1.3,2.3,1590738992000,"2020-05-02","b",true]
+ - [4,"aa",NULL,NULL,NULL,NULL,NULL,1590738993000,NULL,NULL,NULL]
+ sql: |
+ SELECT {0}.id, c1, min(c2) OVER w1 as m2,min(c3) OVER w1 as m3,min(c4) OVER w1 as m4,min(c5) OVER w1 as m5,min(c6) OVER w1 as m6,min(c7) OVER w1 as m7,min(c8) OVER w1 as m8,min(c9) OVER w1 as m9 FROM {0} WINDOW
+ w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS BETWEEN 4 PRECEDING AND 2 PRECEDING);
+ expect:
+ order: id
+ columns: ["id int","c1 string","m2 smallint","m3 int","m4 bigint","m5 float","m6 double","m7 timestamp","m8 date","m9 string"]
+ rows:
+ - [1,"aa",NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL]
+ - [2,"aa",NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL]
+ - [3,"aa",1,1,30,1.1,2.1,1590738990000,"2020-05-01","a"]
+ - [4,"aa",1,1,30,1.1,2.1,1590738990000,"2020-05-01","a"]
+ - id: 56
+    desc: window at functions, at is a synonym for lag
+ sqlDialect: ["HybridSQL"]
+ inputs:
+ -
+ columns : ["id int","pk bigint","c1 string","c2 int","c3 bigint","c4 float",
+ "c5 double","c6 timestamp","c7 date","c8 bool"]
+ indexs: ["index1:pk:c6"]
+ rows:
+ - [1, 1, "a", 1, 30, 1.1, 2.1, 1590738990000, "2020-05-01", true]
+ - [2, 1, "c", 4, 33, 1.4, 2.4, 1590738991000, "2020-05-03", false]
+          - [3, 1, "b", 3, 32, 1.3, 2.3, 1590738992000, "2020-05-02", true]
+ - [4, 1, NULL, NULL, NULL, NULL, NULL, 1590738993000, NULL, NULL]
+ - [5, 1, "d", 5, 35, 1.5, 2.5, 1590738994000, "2020-05-04", false]
+ sql: |
+ SELECT {0}.id,
+ at(c1, 0) OVER w1 as m1,
+ at(c1, 2) OVER w1 as m2,
+ at(c2, 0) OVER w1 as m3,
+ at(c2, 2) OVER w1 as m4,
+ at(c3, 0) OVER w1 as m5,
+ at(c3, 2) OVER w1 as m6,
+ at(c4, 0) OVER w1 as m7,
+ at(c4, 2) OVER w1 as m8,
+ at(c5, 0) OVER w1 as m9,
+ at(c5, 2) OVER w1 as m10,
+ at(c6, 0) OVER w1 as m11,
+ at(c6, 2) OVER w1 as m12,
+ at(c7, 0) OVER w1 as m13,
+ at(c7, 2) OVER w1 as m14,
+ at(c8, 0) OVER w1 as m15,
+ at(c8, 2) OVER w1 as m16
+ FROM {0} WINDOW
+ w1 AS (PARTITION BY {0}.pk ORDER BY {0}.c6 ROWS BETWEEN 2 PRECEDING AND CURRENT ROW);
+ expect:
+ order: id
+ columns: ["id int","m1 string", "m2 string", "m3 int", "m4 int", "m5 bigint", "m6 bigint",
+ "m7 float", "m8 float", "m9 double", "m10 double",
+ "m11 timestamp", "m12 timestamp", "m13 date", "m14 date", "m15 bool", "m16 bool"]
+ rows:
+ - [1, "a", NULL, 1, NULL, 30, NULL, 1.1, NULL, 2.1, NULL,
+ 1590738990000, NULL, "2020-05-01", NULL, true, NULL]
+ - [2, "c", NULL, 4, NULL, 33, NULL, 1.4, NULL, 2.4, NULL,
+ 1590738991000, NULL, "2020-05-03", NULL, false, NULL]
+ - [3, "b", "a", 3, 1, 32, 30, 1.3, 1.1, 2.3, 2.1,
+ 1590738992000, 1590738990000, "2020-05-02", "2020-05-01", true, true]
+ - [4, NULL, "c", NULL, 4, NULL, 33, NULL, 1.4, NULL, 2.4,
+ 1590738993000, 1590738991000, NULL, "2020-05-03", NULL, false]
+ - [5, "d", "b", 5, 3, 35, 32, 1.5, 1.3, 2.5, 2.3,
+ 1590738994000, 1590738992000, "2020-05-04", "2020-05-02", false, true]
+
+ - id: 57
+ desc: |
+      correctness for at/lag when the offset falls outside the rows_range window frame bound.
+      key note: lag returns the value evaluated at the row that is offset rows before the current row within the partition.
+      refer to https://github.com/4paradigm/OpenMLDB/issues/1554
+ inputs:
+ - columns: [ "id int","ts timestamp","group1 string","val1 int" ]
+ indexs: [ "index1:group1:ts" ]
+ name: t1
+ data: |
+ 1, 1612130400000, g1, 1
+ 2, 1612130401000, g1, 2
+ 3, 1612130402000, g1, 3
+ 4, 1612130403000, g1, 4
+ 5, 1612130404000, g1, 5
+ 6, 1612130404000, g2, 4
+ 7, 1612130405000, g2, 3
+ 8, 1612130406000, g2, 2
+ sql: |
+ select
+ `id`,
+ `val1`,
+ lag(val1, 0) over w1 as agg1,
+ lag(val1, 1) over w1 as agg2,
+ lag(val1, 3) over w1 as agg3
+ from `t1` WINDOW
+ w1 as (partition by `group1` order by `ts` rows_range between 2s preceding and 1s preceding MAXSIZE 10);
+ expect:
+ columns: ["id int", "val1 int", "agg1 int", "agg2 int", "agg3 int"]
+ order: id
+ rows:
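+        # note: lag counts back over the partition regardless of the frame bounds or MAXSIZE,
+        # e.g. for id=4 lag(val1, 3) = 1 (row id=1) even though the 2s-1s frame covers only
+        # ids 2 and 3.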
+ - [1, 1, 1, NULL, NULL]
+ - [2, 2, 2, 1, NULL]
+ - [3, 3, 3, 2, NULL]
+ - [4, 4, 4, 3, 1]
+ - [5, 5, 5, 4, 2]
+ - [6, 4, 4, NULL, NULL]
+ - [7, 3, 3, 4, NULL]
+ - [8, 2, 2, 3, NULL]
+
+ - id: 58
+ desc: |
+      correctness for at/lag when the offset falls outside the rows_range window frame bound, together with another window function.
+      refer to https://github.com/4paradigm/OpenMLDB/issues/1554
+ inputs:
+ - columns: [ "id int","ts timestamp","group1 string","val1 int" ]
+ indexs: [ "index1:group1:ts" ]
+ name: t1
+ data: |
+ 1, 1612130400000, g1, 1
+ 2, 1612130401000, g1, 2
+ 3, 1612130402000, g1, 3
+ 4, 1612130403000, g1, 4
+ 5, 1612130404000, g1, 5
+ 6, 1612130405000, g2, 4
+ 7, 1612130406000, g2, 3
+ 8, 1612130407000, g2, 2
+ sql: |
+ select
+ `id`,
+ `val1`,
+ lag(val1, 0) over w1 as agg1,
+ lag(val1, 3) over w1 as agg2,
+ first_value(val1) over w1 as agg3
+ from `t1` WINDOW
+ w1 as (partition by `group1` order by `ts` rows_range between 2s preceding and 1s preceding MAXSIZE 10);
+ expect:
+ columns: ["id int", "val1 int", "agg1 int", "agg2 int", "agg3 int"]
+ order: id
+ rows:
+ - [1, 1, 1, NULL, NULL]
+ - [2, 2, 2, NULL, 1]
+ - [3, 3, 3, NULL, 2]
+ - [4, 4, 4, 1, 3]
+ - [5, 5, 5, 2, 4]
+ - [6, 4, 4, NULL, NULL]
+ - [7, 3, 3, NULL, 4]
+ - [8, 2, 2, NULL, 3]
+
+ - id: 59
+ desc: |
+      correctness for at/lag when the offset falls outside the window frame bound.
+      key note: lag returns the value evaluated at the row that is offset rows before the current row within the partition.
+      refer to https://github.com/4paradigm/OpenMLDB/issues/1554
+ inputs:
+ - columns: [ "id int","ts timestamp","group1 string","val1 int" ]
+ indexs: [ "index1:group1:ts" ]
+ name: t1
+ data: |
+ 1, 1612130400000, g1, 1
+ 2, 1612130401000, g1, 2
+ 3, 1612130402000, g1, 3
+ 4, 1612130403000, g1, 4
+ 5, 1612130404000, g1, 5
+ 6, 1612130405000, g2, 4
+ 7, 1612130406000, g2, 3
+ 8, 1612130407000, g2, 2
+ sql: |
+ select
+ `id`,
+ `val1`,
+ lag(val1, 0) over w1 as agg1,
+ lag(val1, 1) over w1 as agg2,
+ lag(val1, 3) over w1 as agg3
+ from `t1` WINDOW
+ w1 as (partition by `group1` order by `ts` rows between 2 preceding and 1 preceding);
+ expect:
+ columns: ["id int", "val1 int", "agg1 int", "agg2 int", "agg3 int"]
+ order: id
+ rows:
+ - [1, 1, 1, NULL, NULL]
+ - [2, 2, 2, 1, NULL]
+ - [3, 3, 3, 2, NULL]
+ - [4, 4, 4, 3, 1]
+ - [5, 5, 5, 4, 2]
+ - [6, 4, 4, NULL, NULL]
+ - [7, 3, 3, 4, NULL]
+ - [8, 2, 2, 3, NULL]
+
+ - id: 60
+ desc: |
+      correctness for at/lag when the offset falls outside the rows window frame bound.
+      refer to https://github.com/4paradigm/OpenMLDB/issues/1554
+ inputs:
+ - columns: [ "id int","ts timestamp","group1 string","val1 int" ]
+ indexs: [ "index1:group1:ts" ]
+ name: t1
+ data: |
+ 1, 1612130400000, g1, 1
+ 2, 1612130401000, g1, 2
+ 3, 1612130402000, g1, 3
+ 4, 1612130403000, g1, 4
+ 5, 1612130404000, g1, 5
+ 6, 1612130405000, g2, 4
+ 7, 1612130406000, g2, 3
+ 8, 1612130407000, g2, 2
+ sql: |
+ select
+ `id`,
+ `val1`,
+ lag(val1, 0) over w1 as agg1,
+ lag(val1, 3) over w1 as agg2,
+ first_value(val1) over w1 as agg3
+ from `t1` WINDOW
+ w1 as (partition by `group1` order by `ts` rows between 2 preceding and 1 preceding);
+ expect:
+ columns: ["id int", "val1 int", "agg1 int", "agg2 int", "agg3 int"]
+ order: id
+ rows:
+ - [1, 1, 1, NULL, NULL]
+ - [2, 2, 2, NULL, 1]
+ - [3, 3, 3, NULL, 2]
+ - [4, 4, 4, 1, 3]
+ - [5, 5, 5, 2, 4]
+ - [6, 4, 4, NULL, NULL]
+ - [7, 3, 3, NULL, 4]
+ - [8, 2, 2, NULL, 3]
+
+ - id: 61
+ desc: median
+ sqlDialect: ["HybridSQL"]
+ version: 0.6.0
+ inputs:
+ -
+ columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 string","c10 bool"]
+ indexs: ["index1:c1:c7"]
+ rows:
+ - [1,"aa",1,1,30,1.1,2.1,1590738990000,"2020-05-01","a",true]
+ - [2,"aa",4,4,33,1.4,2.4,1590738991000,"2020-05-03","c",false]
+ - [3,"aa",1,1,33,1.1,2.1,1590738992000,"2020-05-02","b",true]
+ - [4,"aa",NULL,NULL,NULL,NULL,NULL,1590738993000,NULL,NULL,NULL]
+ sql: |
+ SELECT {0}.id, c1, median(c2) OVER w1 as m2,median(c3) OVER w1 as m3,median(c4) OVER w1 as m4,median(c5) OVER w1 as m5,median(c6) OVER w1 as m6 FROM {0} WINDOW
+ w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS BETWEEN 2 PRECEDING AND CURRENT ROW);
+ expect:
+ order: id
+ columns: ["id int","c1 string","m2 double","m3 double","m4 double","m5 double","m6 double"]
+ rows:
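+        # note: median skips NULLs and averages the two middle values for an even count,
+        # e.g. for id=2 (c2 values 1 and 4) m2 = 2.5, and for id=4 (c4 values 33, 33, NULL)
+        # m4 = 33.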
+ - [1,"aa",1,1,30,1.1000000238418579,2.1]
+ - [2,"aa",2.5,2.5,31.5,1.25,2.25]
+ - [3,"aa",1,1,33,1.1000000238418579,2.1]
+ - [4,"aa",2.5,2.5,33,1.25,2.25]
diff --git a/cases/integration_test/function/test_udaf_table.yaml b/cases/integration_test/function/test_udaf_table.yaml
new file mode 100644
index 00000000000..b7771321e39
--- /dev/null
+++ b/cases/integration_test/function/test_udaf_table.yaml
@@ -0,0 +1,114 @@
+# Copyright 2021 4Paradigm
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+db: test_zw
+debugs: []
+sqlDialect: ["HybridSQL"]
+version: 0.5.0
+cases:
+ - id: 0
+ desc: "count(*)"
+ mode: request-unsupport
+ inputs:
+ -
+ columns : ["id bigint","c1 string","c7 timestamp"]
+ indexs: ["index1:c1:c7"]
+ rows:
+ - [1,"aa",1590738990000]
+ - [2,"bb",1590738991000]
+ - [3,"aa",1590738992000]
+ - [4,"a%",1590738993000]
+ - [5,"bb",1590738994000]
+ sql: select count(*) as v1 from {0};
+ expect:
+ columns: ["v1 bigint"]
+ rows:
+ - [5]
+ - id: 1
+ desc: "count(1)"
+ mode: request-unsupport
+ inputs:
+ -
+ columns : ["id bigint","c1 string","c7 timestamp"]
+ indexs: ["index1:c1:c7"]
+ rows:
+ - [1,"aa",1590738990000]
+ - [2,"bb",1590738991000]
+ - [3,"aa",1590738992000]
+ - [4,"a%",1590738993000]
+ - [5,"bb",1590738994000]
+ sql: select count(1) as v1 from {0};
+ expect:
+ success: false
+ - id: 2
+ desc: "count/sum/max/min/avg一个列"
+ mode: request-unsupport
+ inputs:
+ -
+ columns : ["id bigint","c1 string","c2 int","c7 timestamp"]
+ indexs: ["index1:c1:c7"]
+ rows:
+ - [1,"aa",1,1590738990000]
+ - [2,"bb",2,1590738991000]
+ - [3,"aa",3,1590738992000]
+ - [4,"cc",4,1590738993000]
+ - [5,"bb",5,1590738994000]
+ - [6,"aa",6,1590738995000]
+ sql: select count(c2) as v1,max(c2) as v2,min(c2) as v3,avg(c2) as v4,sum(c2) as v5 from {0};
+ expect:
+ order: c1
+ columns: ["v1 bigint","v2 int","v3 int","v4 double","v5 int"]
+ rows:
+ - [6,6,1,3.5,21]
+ - id: 3
+ desc: "表是空的"
+ tags: ["TODO","@chengjing,bug,"]
+ mode: request-unsupport
+ inputs:
+ -
+ columns : ["id bigint","c1 string","c2 int","c7 timestamp"]
+ indexs: ["index1:c1:c7"]
+ sql: select count(c2) as v1,max(c2) as v2,min(c2) as v3,avg(c2) as v4,sum(c2) as v5 from {0};
+ expect:
+ order: c1
+ columns: ["v1 int","v2 int","v3 int","v4 double","v5 int"]
+ rows:
+ - [0,0,0,0,0]
+ - id: 4
+ desc: "列有null和空串"
+ mode: request-unsupport
+ inputs:
+ -
+ columns : ["id bigint","c1 string","c2 int","c7 timestamp"]
+ indexs: ["index1:c1:c7"]
+ rows:
+ - [1,"aa",1,1590738990000]
+ - [2,"bb",2,1590738991000]
+ - [3,"aa",null,1590738992000]
+ - [4,null,4,1590738993000]
+ - [5,"",5,1590738994000]
+ - [6,"aa",6,1590738995000]
+ sql: select count(c1) as v1,max(c2) as v2,min(c2) as v3,avg(c2) as v4,sum(c2) as v5 from {0};
+ expect:
+ order: c1
+ columns: ["v1 bigint","v2 int","v3 int","v4 double","v5 int"]
+ rows:
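+        # note: count(c1) = 5 because the NULL in row 4 is skipped while the empty string in
+        # row 5 is counted; the NULL c2 in row 3 is likewise skipped, so avg = (1+2+4+5+6)/5 = 3.6
+        # and sum = 18.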
+ - [5,6,1,3.6,18]
+
+
+
+
+
+
+
diff --git a/cases/integration_test/function/test_udf_function.yaml b/cases/integration_test/function/test_udf_function.yaml
new file mode 100644
index 00000000000..7165f09182a
--- /dev/null
+++ b/cases/integration_test/function/test_udf_function.yaml
@@ -0,0 +1,89 @@
+# Copyright 2021 4Paradigm
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+db: test_zw
+debugs: []
+sqlDialect: ["HybridSQL"]
+version: 0.5.0
+cases:
+ - id: 0
+    desc: "default udf null handling: return null"
+ inputs:
+ - columns: ["id int64", "c1 string", "c2 int", "c3 double",
+ "c4 date", "c5 timestamp", "std_ts timestamp"]
+ indexs: ["index1:id:std_ts"]
+ rows:
+ - [1, NULL, 42, 3.14, "2020-05-20", 1590738989000, 1590738989000]
+ - [2, "hello world", NULL, NULL, NULL, NULL, 1590738989000]
+ sql: select id,
+ substring(c1, 1, 5) as r1,
+ substring(c1, 1, c2) as r2,
+ pow(c2, 2) as r3,
+ floor(c3) as r4,
+ dayofweek(c4) as r5,
+ dayofweek(c5) as r6
+ from {0};
+ expect:
+ order: id
+ columns: ["id int64", "r1 string", "r2 string", "r3 double", "r4 double",
+ "r5 int", "r6 int"]
+ rows:
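+        # note: a NULL argument makes each of these calls return NULL, as the case describes;
+        # with non-NULL inputs pow(42, 2) = 1764 and floor(3.14) = 3.0, and dayofweek appears to
+        # count Sunday as 1, so 2020-05-20 (a Wednesday) gives 4.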
+ - [1, NULL, NULL, 1764, 3.00, 4, 6]
+ - [2, "hello", NULL, NULL, NULL, NULL, NULL]
+
+ - id: 1
+    desc: udf consuming null intermediate results
+ inputs:
+ - columns: ["id int64", "c1 string", "c2 int", "c3 double",
+ "c4 date", "c5 timestamp", "std_ts timestamp"]
+ indexs: ["index1:id:std_ts"]
+ rows:
+ - [1, NULL, 42, 3.14, "2020-05-20", 1590738989000, 1590738989000]
+ - [2, "hello world", NULL, NULL, NULL, NULL, 1590738989000]
+ sql: select id,
+ substring(substring(c1, 1, 5), 1, 1) as r1,
+ substring(substring(c1, 1, c2), c2, 1) as r2,
+ abs(pow(c2, 2)) as r3,
+ abs(floor(c3)) as r4,
+ abs(dayofweek(c4)) as r5,
+ abs(dayofweek(c5)) as r6
+ from {0};
+ expect:
+ order: id
+ columns: ["id int64", "r1 string", "r2 string", "r3 double", "r4 double",
+ "r5 int", "r6 int"]
+ rows:
+ - [1, NULL, NULL, 1764, 3.00, 4, 6]
+ - [2, "h", NULL, NULL, NULL, NULL, NULL]
+
+ - id: 2
+    desc: function names are case-insensitive
+ inputs:
+ - columns: ["id int64", "c1 double", "c2 timestamp"]
+ indexs: ["index1:id:c2"]
+ rows:
+ - [1, 1.0, 1590738989000]
+ sql: select id,
+ SUM(c1) over w as r1, sUm(c1) over w as r2, sum(c1) over w as r3, log(c1) as r4
+ from {0} window w as (PARTITION BY id ORDER BY c2 ROWS BETWEEN 2 PRECEDING AND CURRENT ROW);
+ expect:
+ order: id
+ columns: ["id int64", "r1 double", "r2 double", "r3 double", "r4 double"]
+ rows:
+ - [1, 1, 1, 1, 0]
+
+
+
+
+
diff --git a/cases/integration_test/fz_ddl/test_bank.yaml b/cases/integration_test/fz_ddl/test_bank.yaml
new file mode 100644
index 00000000000..4b725afd22c
--- /dev/null
+++ b/cases/integration_test/fz_ddl/test_bank.yaml
@@ -0,0 +1,151 @@
+# Copyright 2021 4Paradigm
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+db: bank
+version: 0.5.0
+cases:
+ - desc: bank test
+ id: 0
+ inputs:
+ - columns: [ reqId string, eventTime timestamp, main_id string, new_user_id string,
+ loan_ts bigint, split_id int, time1 string ]
+ indexs: [ "index1:new_user_id:eventTime" ]
+ name: flattenRequest
+ - columns: [reqId string, eventTime timestamp, ingestionTime timestamp, actionValue
+ int]
+ indexs: ["index1:reqId:eventTime"]
+ name: action
+ - columns: [ingestionTime timestamp, new_user_id string, trx_ts bigint, trx_typ
+ string, trx_amt double, is_slry string]
+ indexs: ["index1:new_user_id:ingestionTime"]
+ name: bo_detail
+ - columns: [ingestionTime timestamp, new_user_id string, bill_ts bigint, bank_id string,
+ lst_bill_amt double, lst_repay_amt double, card_limit double, cur_blc double, cur_bill_min_repay double,
+ buy_cnt double, cur_bill_amt double, adj_amt double, rev_credit double, avl_amt double, advc_limit double, repay_status string]
+ indexs: ["index1:new_user_id:ingestionTime"]
+ name: bo_bill_detail
+ - columns: [ingestionTime timestamp, new_user_id string, sex string, prof string,
+ edu string, marriage string, hukou_typ string]
+ indexs: ["index1:new_user_id:ingestionTime"]
+ name: bo_user
+ - columns: [ingestionTime timestamp, new_user_id string, bws_ts bigint, action string,
+ subaction string]
+ indexs: ["index1:new_user_id:ingestionTime"]
+ name: bo_browse_history
+ batch_request:
+ columns: [reqId string, eventTime timestamp, main_id string, new_user_id string,
+ loan_ts bigint, split_id int, time1 string]
+ indexs: ["index1:new_user_id:eventTime"]
+ common_column_indices: [1, 2, 3, 4, 5]
+ rows:
+ - [reqId1, 1609894067190, "main_id1", "new_user_id1", 1609894067190, 1, "time1_1"]
+ expect:
+ success: true
+ sql: "select * from \n(\nselect\n reqId as reqId_1,\n `reqId` as flattenRequest_reqId_original_0,\n\
+ \ `eventTime` as flattenRequest_eventTime_original_1,\n `main_id` as flattenRequest_main_id_original_2,\n\
+ \ `new_user_id` as flattenRequest_new_user_id_original_3\nfrom\n `flattenRequest`\n\
+ \ )\nas out0\nlast join\n(\nselect\n flattenRequest.reqId as reqId_5,\n\
+ \ `action_reqId`.`actionValue` as action_actionValue_multi_direct_4,\n `bo_user_new_user_id`.`edu`\
+ \ as bo_user_edu_multi_direct_5,\n `bo_user_new_user_id`.`hukou_typ` as bo_user_hukou_typ_multi_direct_6,\n\
+ \ `bo_user_new_user_id`.`ingestionTime` as bo_user_ingestionTime_multi_direct_7,\n\
+ \ `bo_user_new_user_id`.`marriage` as bo_user_marriage_multi_direct_8,\n \
+ \ `bo_user_new_user_id`.`prof` as bo_user_prof_multi_direct_9,\n `bo_user_new_user_id`.`sex`\
+ \ as bo_user_sex_multi_direct_10\nfrom\n `flattenRequest`\n last join `action`\
+ \ as `action_reqId` on `flattenRequest`.`reqId` = `action_reqId`.`reqId`\n \
+ \ last join `bo_user` as `bo_user_new_user_id` on `flattenRequest`.`new_user_id`\
+ \ = `bo_user_new_user_id`.`new_user_id`)\nas out1\non out0.reqId_1 = out1.reqId_5\n\
+ last join\n(\nselect\n reqId as reqId_12,\n max(`adj_amt`) over bo_bill_detail_new_user_id_ingestionTime_0s_2764801s\
+ \ as bo_bill_detail_adj_amt_multi_max_11,\n min(`adj_amt`) over bo_bill_detail_new_user_id_ingestionTime_0s_2764801s\
+ \ as bo_bill_detail_adj_amt_multi_min_12,\n max(`advc_limit`) over bo_bill_detail_new_user_id_ingestionTime_0s_2764801s\
+ \ as bo_bill_detail_advc_limit_multi_max_13,\n avg(`advc_limit`) over bo_bill_detail_new_user_id_ingestionTime_0s_2764801s\
+ \ as bo_bill_detail_advc_limit_multi_avg_14,\n min(`avl_amt`) over bo_bill_detail_new_user_id_ingestionTime_0s_2764801s\
+ \ as bo_bill_detail_avl_amt_multi_min_15,\n avg(`avl_amt`) over bo_bill_detail_new_user_id_ingestionTime_0s_2764801s\
+ \ as bo_bill_detail_avl_amt_multi_avg_16,\n min(`buy_cnt`) over bo_bill_detail_new_user_id_ingestionTime_0s_2764801s\
+ \ as bo_bill_detail_buy_cnt_multi_min_17,\n min(`buy_cnt`) over bo_bill_detail_new_user_id_ingestionTime_0s_5529601s\
+ \ as bo_bill_detail_buy_cnt_multi_min_18,\n max(`card_limit`) over bo_bill_detail_new_user_id_ingestionTime_0s_2764801s\
+ \ as bo_bill_detail_card_limit_multi_max_19,\n min(`card_limit`) over bo_bill_detail_new_user_id_ingestionTime_0s_2764801s\
+ \ as bo_bill_detail_card_limit_multi_min_20,\n max(`cur_bill_amt`) over bo_bill_detail_new_user_id_ingestionTime_0s_2764801s\
+ \ as bo_bill_detail_cur_bill_amt_multi_max_21,\n max(`cur_bill_amt`) over bo_bill_detail_new_user_id_ingestionTime_0s_5529601s\
+ \ as bo_bill_detail_cur_bill_amt_multi_max_22,\n min(`cur_bill_min_repay`)\
+ \ over bo_bill_detail_new_user_id_ingestionTime_0s_2764801s as bo_bill_detail_cur_bill_min_repay_multi_min_23,\n\
+ \ max(`cur_bill_min_repay`) over bo_bill_detail_new_user_id_ingestionTime_0s_2764801s\
+ \ as bo_bill_detail_cur_bill_min_repay_multi_max_24,\n max(`cur_blc`) over\
+ \ bo_bill_detail_new_user_id_ingestionTime_0s_2764801s as bo_bill_detail_cur_blc_multi_max_25,\n\
+ \ max(`cur_blc`) over bo_bill_detail_new_user_id_ingestionTime_0s_5529601s\
+ \ as bo_bill_detail_cur_blc_multi_max_26,\n max(`lst_bill_amt`) over bo_bill_detail_new_user_id_ingestionTime_0s_2764801s\
+ \ as bo_bill_detail_lst_bill_amt_multi_max_27,\n avg(`lst_bill_amt`) over bo_bill_detail_new_user_id_ingestionTime_0s_5529601s\
+ \ as bo_bill_detail_lst_bill_amt_multi_avg_28,\n avg(`lst_repay_amt`) over\
+ \ bo_bill_detail_new_user_id_ingestionTime_0s_2764801s as bo_bill_detail_lst_repay_amt_multi_avg_29,\n\
+ \ max(`lst_repay_amt`) over bo_bill_detail_new_user_id_ingestionTime_0s_2764801s\
+ \ as bo_bill_detail_lst_repay_amt_multi_max_30,\n min(`rev_credit`) over bo_bill_detail_new_user_id_ingestionTime_0s_2764801s\
+ \ as bo_bill_detail_rev_credit_multi_min_31,\n avg(`rev_credit`) over bo_bill_detail_new_user_id_ingestionTime_0s_2764801s\
+ \ as bo_bill_detail_rev_credit_multi_avg_32,\n fz_topn_frequency(`bank_id`,\
+ \ 3) over bo_bill_detail_new_user_id_ingestionTime_0s_2764801s as bo_bill_detail_bank_id_multi_top3frequency_33,\n\
+ \ distinct_count(`bank_id`) over bo_bill_detail_new_user_id_ingestionTime_0s_2764801s\
+ \ as bo_bill_detail_bank_id_multi_unique_count_34,\n fz_topn_frequency(`repay_status`,\
+ \ 3) over bo_bill_detail_new_user_id_ingestionTime_0s_2764801s as bo_bill_detail_repay_status_multi_top3frequency_35,\n\
+ \ distinct_count(`repay_status`) over bo_bill_detail_new_user_id_ingestionTime_0s_2764801s\
+ \ as bo_bill_detail_repay_status_multi_unique_count_36\nfrom\n (select `eventTime`\
+ \ as `ingestionTime`, `new_user_id` as `new_user_id`, bigint(0) as `bill_ts`,\
+ \ '' as `bank_id`, double(0) as `lst_bill_amt`, double(0) as `lst_repay_amt`,\
+ \ double(0) as `card_limit`, double(0) as `cur_blc`, double(0) as `cur_bill_min_repay`,\
+ \ double(0) as `buy_cnt`, double(0) as `cur_bill_amt`, double(0) as `adj_amt`,\
+ \ double(0) as `rev_credit`, double(0) as `avl_amt`, double(0) as `advc_limit`,\
+ \ '' as `repay_status`, reqId from `flattenRequest`)\n window bo_bill_detail_new_user_id_ingestionTime_0s_2764801s\
+ \ as (\nUNION (select `ingestionTime`, `new_user_id`, `bill_ts`, `bank_id`, `lst_bill_amt`,\
+ \ `lst_repay_amt`, `card_limit`, `cur_blc`, `cur_bill_min_repay`, `buy_cnt`, `cur_bill_amt`,\
+ \ `adj_amt`, `rev_credit`, `avl_amt`, `advc_limit`, `repay_status`, '' as reqId\
+ \ from `bo_bill_detail`) partition by `new_user_id` order by `ingestionTime` rows_range\
+ \ between 2764801s preceding and 0s preceding INSTANCE_NOT_IN_WINDOW),\n bo_bill_detail_new_user_id_ingestionTime_0s_5529601s\
+ \ as (\nUNION (select `ingestionTime`, `new_user_id`, `bill_ts`, `bank_id`, `lst_bill_amt`,\
+ \ `lst_repay_amt`, `card_limit`, `cur_blc`, `cur_bill_min_repay`, `buy_cnt`, `cur_bill_amt`,\
+ \ `adj_amt`, `rev_credit`, `avl_amt`, `advc_limit`, `repay_status`, '' as reqId\
+ \ from `bo_bill_detail`) partition by `new_user_id` order by `ingestionTime` rows_range\
+ \ between 5529601s preceding and 0s preceding INSTANCE_NOT_IN_WINDOW))\nas out2\n\
+ on out0.reqId_1 = out2.reqId_12\nlast join\n(\nselect\n reqId as reqId_38,\n\
+ \ distinct_count(`action`) over bo_browse_history_new_user_id_ingestionTime_0s_5529601s\
+ \ as bo_browse_history_action_multi_unique_count_37,\n distinct_count(`action`)\
+ \ over bo_browse_history_new_user_id_ingestionTime_0_10 as bo_browse_history_action_multi_unique_count_38,\n\
+ \ distinct_count(`subaction`) over bo_browse_history_new_user_id_ingestionTime_0s_5529601s\
+ \ as bo_browse_history_subaction_multi_unique_count_39,\n distinct_count(`subaction`)\
+ \ over bo_browse_history_new_user_id_ingestionTime_0_10 as bo_browse_history_subaction_multi_unique_count_40\n\
+ from\n (select `eventTime` as `ingestionTime`, `new_user_id` as `new_user_id`,\
+ \ bigint(0) as `bws_ts`, '' as `action`, '' as `subaction`, reqId from `flattenRequest`)\n\
+ \ window bo_browse_history_new_user_id_ingestionTime_0s_5529601s as (\nUNION\
+ \ (select `ingestionTime`, `new_user_id`, `bws_ts`, `action`, `subaction`, ''\
+ \ as reqId from `bo_browse_history`) partition by `new_user_id` order by `ingestionTime`\
+ \ rows_range between 5529601s preceding and 0s preceding INSTANCE_NOT_IN_WINDOW),\n\
+ \ bo_browse_history_new_user_id_ingestionTime_0_10 as (\nUNION (select `ingestionTime`,\
+ \ `new_user_id`, `bws_ts`, `action`, `subaction`, '' as reqId from `bo_browse_history`)\
+ \ partition by `new_user_id` order by `ingestionTime` rows_range between 10 preceding\
+ \ and 0 preceding INSTANCE_NOT_IN_WINDOW))\nas out3\non out0.reqId_1 = out3.reqId_38\n\
+ last join\n(\nselect\n reqId as reqId_42,\n max(`trx_amt`) over bo_detail_new_user_id_ingestionTime_0s_5529601s\
+ \ as bo_detail_trx_amt_multi_max_41,\n avg(`trx_amt`) over bo_detail_new_user_id_ingestionTime_0s_5529601s\
+ \ as bo_detail_trx_amt_multi_avg_42,\n distinct_count(`is_slry`) over bo_detail_new_user_id_ingestionTime_0_10\
+ \ as bo_detail_is_slry_multi_unique_count_43,\n distinct_count(`is_slry`) over\
+ \ bo_detail_new_user_id_ingestionTime_0s_5529601s as bo_detail_is_slry_multi_unique_count_44,\n\
+ \ distinct_count(`trx_typ`) over bo_detail_new_user_id_ingestionTime_0_10 as\
+ \ bo_detail_trx_typ_multi_unique_count_45,\n distinct_count(`trx_typ`) over\
+ \ bo_detail_new_user_id_ingestionTime_0s_5529601s as bo_detail_trx_typ_multi_unique_count_46\n\
+ from\n (select `eventTime` as `ingestionTime`, `new_user_id` as `new_user_id`,\
+ \ bigint(0) as `trx_ts`, '' as `trx_typ`, double(0) as `trx_amt`, '' as `is_slry`,\
+ \ reqId from `flattenRequest`)\n window bo_detail_new_user_id_ingestionTime_0s_5529601s\
+ \ as (\nUNION (select `ingestionTime`, `new_user_id`, `trx_ts`, `trx_typ`, `trx_amt`,\
+ \ `is_slry`, '' as reqId from `bo_detail`) partition by `new_user_id` order by\
+ \ `ingestionTime` rows_range between 5529601s preceding and 0s preceding INSTANCE_NOT_IN_WINDOW),\n\
+ \ bo_detail_new_user_id_ingestionTime_0_10 as (\nUNION (select `ingestionTime`,\
+ \ `new_user_id`, `trx_ts`, `trx_typ`, `trx_amt`, `is_slry`, '' as reqId from `bo_detail`)\
+ \ partition by `new_user_id` order by `ingestionTime` rows_range between 10 preceding\
+ \ and 0 preceding INSTANCE_NOT_IN_WINDOW))\nas out4\non out0.reqId_1 = out4.reqId_42\n\
+ ;"
diff --git a/cases/integration_test/fz_ddl/test_luoji.yaml b/cases/integration_test/fz_ddl/test_luoji.yaml
new file mode 100644
index 00000000000..65b8056909f
--- /dev/null
+++ b/cases/integration_test/fz_ddl/test_luoji.yaml
@@ -0,0 +1,293 @@
+# Copyright 2021 4Paradigm
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+db: luoji
+version: 0.5.0
+cases:
+- id: 0
+ desc: luoji test
+ mode: rtidb-batch-unsupport
+ inputs:
+ - columns: [
+ reqId string,
+ eventTime timestamp,
+ f_requestId string,
+ f_cId string,
+ f_uId string,
+ f_cSrc string,
+ f_cLength double]
+ indexs: [
+ index1:f_requestId:eventTime,
+ index2:f_uId:eventTime]
+ repeat: 100
+ name: flattenRequest
+ rows:
+ - [reqId1, 1609894067190, f_requestId1, f_cId1-1, f_uId1, f_cSrc1-1, 1.0]
+ - [reqId1, 1609894067190, f_requestId1, f_cId1-1, f_uId1, f_cSrc1-1, 1.0]
+ - [reqId1, 1609894067190, f_requestId1, f_cId1-2, f_uId1, f_cSrc1-2, 1.0]
+ - [reqId2, 1609894067190, f_requestId2, f_cId2-1, f_uId2, f_cSrc2-1, 2.0]
+ - [reqId2, 1609894067190, f_requestId2, f_cId2-2, f_uId2, f_cSrc2-2, 2.0]
+ - [NULL, 1609894067190, f_requestIdNull, f_cIdNull, f_uIdNull, f_cSrcNul, 3.0]
+ - columns: [
+ reqId string,
+ eventTime timestamp,
+ ingestionTime timestamp,
+ actionValue int,
+ ]
+ indexs: [index1:reqId:null:1:latest]
+ name: action
+ rows:
+ - [reqId1, 1609894067191, 1609894067191, 1]
+ - [NULL, 1609894067191, 1609894067191, 3]
+ sql: |
+ select * from
+ (
+ select
+ reqId as reqId_1,
+ `reqId` as flattenRequest_reqId_original_0,
+ `eventTime` as flattenRequest_eventTime_original_1,
+ `f_requestId` as flattenRequest_f_requestId_original_2,
+ `f_cId` as flattenRequest_f_cId_original_3,
+ `f_cSrc` as flattenRequest_f_cSrc_original_8,
+ `f_uId` as flattenRequest_f_uId_original_17,
+ `f_cLength` as flattenRequest_f_cLength_original_10,
+ sum(`f_cLength`) over flattenRequest_f_requestId_eventTime_0s_604801s as flattenRequest_f_cLength_window_sum_32,
+ distinct_count(`f_cId`) over flattenRequest_f_uId_eventTime_0_10 as flattenRequest_f_cId_window_unique_count_38,
+ fz_top1_ratio(`f_cId`) over flattenRequest_f_requestId_eventTime_0_10 as flattenRequest_f_cId_window_top1_ratio_39,
+ fz_top1_ratio(`f_cId`) over flattenRequest_f_requestId_eventTime_0s_604801s as flattenRequest_f_cId_window_top1_ratio_40,
+ sum(`f_cLength`) over flattenRequest_f_requestId_eventTime_0s_432001s as flattenRequest_f_cLength_window_sum_41,
+ case when !isnull(lag(`f_cSrc`, 0)) over flattenRequest_f_requestId_eventTime_0s_604801s then count(`f_cSrc`) over flattenRequest_f_requestId_eventTime_0s_604801s else null end as flattenRequest_f_cSrc_window_count_42,
+ case when !isnull(lag(`f_cSrc`, 0)) over flattenRequest_f_uId_eventTime_0s_604801s then count(`f_cSrc`) over flattenRequest_f_uId_eventTime_0s_604801s else null end as flattenRequest_f_cSrc_window_count_43,
+ fz_top1_ratio(`f_cId`) over flattenRequest_f_uId_eventTime_0_10 as flattenRequest_f_cId_window_top1_ratio_44,
+ fz_top1_ratio(`f_cId`) over flattenRequest_f_uId_eventTime_0s_604801s as flattenRequest_f_cId_window_top1_ratio_45,
+ fz_top1_ratio(`f_cId`) over flattenRequest_f_uId_eventTime_0s_432001s as flattenRequest_f_cId_window_top1_ratio_46,
+ case when !isnull(lag(`f_cId`, 0)) over flattenRequest_f_requestId_eventTime_0s_604801s then count(`f_cId`) over flattenRequest_f_requestId_eventTime_0s_604801s else null end as flattenRequest_f_cId_window_count_47,
+ case when !isnull(lag(`f_cId`, 0)) over flattenRequest_f_uId_eventTime_0s_432001s then count(`f_cId`) over flattenRequest_f_uId_eventTime_0s_432001s else null end as flattenRequest_f_cId_window_count_48,
+ case when !isnull(lag(`f_cId`, 0)) over flattenRequest_f_uId_eventTime_0s_604801s then count(`f_cId`) over flattenRequest_f_uId_eventTime_0s_604801s else null end as flattenRequest_f_cId_window_count_49
+ from
+ `flattenRequest`
+ window flattenRequest_f_requestId_eventTime_0s_604801s as (partition by `f_requestId` order by `eventTime` rows_range between 604801s preceding and 0s preceding),
+ flattenRequest_f_uId_eventTime_0_10 as (partition by `f_uId` order by `eventTime` rows_range between 10 preceding and 0 preceding),
+ flattenRequest_f_requestId_eventTime_0_10 as (partition by `f_requestId` order by `eventTime` rows_range between 10 preceding and 0 preceding),
+ flattenRequest_f_requestId_eventTime_0s_432001s as (partition by `f_requestId` order by `eventTime` rows_range between 432001s preceding and 0s preceding),
+ flattenRequest_f_uId_eventTime_0s_604801s as (partition by `f_uId` order by `eventTime` rows_range between 604801s preceding and 0s preceding),
+ flattenRequest_f_uId_eventTime_0s_432001s as (partition by `f_uId` order by `eventTime` rows_range between 432001s preceding and 0s preceding))
+ as out0
+ last join
+ (
+ select
+ flattenRequest.reqId as reqId_32,
+ `action_reqId`.`actionValue` as action_actionValue_multi_direct_31
+ from
+ `flattenRequest`
+ last join `action` as `action_reqId` on `flattenRequest`.`reqId` = `action_reqId`.`reqId`)
+ as out1
+ on out0.reqId_1 = out1.reqId_32;
+ batch_request:
+ columns: [
+ reqId string,
+ eventTime timestamp,
+ f_requestId string,
+ f_cId string,
+ f_uId string,
+ f_cSrc string,
+ f_cLength double ]
+ rows:
+ - [reqId1, 1609894067190, f_requestId1, f_cId1, f_uId1, f_cSrc1, 1.0]
+ - [reqId1, 1609894067190, f_requestId1, f_cId1, f_uId1, NULL, 1.0]
+ - [reqId1, 1609894067190, f_requestId1, NULL, f_uId1, f_cSrc1, 1.0]
+ - [reqId2, 1609894067190, f_requestId2, f_cId2, f_uId2, f_cSrc2, 2.0]
+ - [NULL, 1609894067190, f_requestIdNull, f_cIdNull, f_uIdNull, f_cSrcNul, 3.0]
+ expect:
+ success: true
+ schema: reqId_1:string, flattenRequest_reqId_original_0:string, flattenRequest_eventTime_original_1:timestamp, flattenRequest_f_requestId_original_2:string, flattenRequest_f_cId_original_3:string, flattenRequest_f_cSrc_original_8:string, flattenRequest_f_uId_original_17:string, flattenRequest_f_cLength_original_10:double, flattenRequest_f_cLength_window_sum_32:double, flattenRequest_f_cId_window_unique_count_38:bigint, flattenRequest_f_cId_window_top1_ratio_39:double, flattenRequest_f_cId_window_top1_ratio_40:double, flattenRequest_f_cLength_window_sum_41:double, flattenRequest_f_cSrc_window_count_42:bigint, flattenRequest_f_cSrc_window_count_43:bigint, flattenRequest_f_cId_window_top1_ratio_44:double, flattenRequest_f_cId_window_top1_ratio_45:double, flattenRequest_f_cId_window_top1_ratio_46:double, flattenRequest_f_cId_window_count_47:bigint, flattenRequest_f_cId_window_count_48:bigint, flattenRequest_f_cId_window_count_49:bigint, reqId_32:string, action_actionValue_multi_direct_31:int
+ rows:
+ - [ reqId1, reqId1, 1609894067190, f_requestId1, f_cId1, f_cSrc1, f_uId1, 1.000000,
+ 301.000000, # flattenRequest_f_cLength_window_sum_32
+ 3, # distinct_count f_cId1, f_cId1-1 f_cId1-2
+ 0.66445182724252494, # fz_top1_ratio f_cId1-1:200, f_cId1-2:100 f_cId1:1 -> 200/301
+ 0.66445182724252494,
+ 301.000000,
+ 301,
+ 301,
+ 0.66445182724252494,
+ 0.66445182724252494,
+ 0.66445182724252494,
+ 301, 301, 301, reqId1, 1 ]
+ - [ reqId1, reqId1, 1609894067190, f_requestId1, f_cId1, NULL, f_uId1, 1.000000,
+ 301.000000,
+ 3,
+ 0.66445182724252494,
+ 0.66445182724252494,
+ 301.000000,
+ NULL, # case when !isnull(lag(`f_cSrc`, 0)) ... else NULL end
+ NULL, # case when !isnull(lag(`f_cSrc`, 0)) ... else NULL end
+ 0.66445182724252494,
+ 0.66445182724252494,
+ 0.66445182724252494,
+ 301, 301, 301, reqId1, 1 ]
+ - [ reqId1, reqId1, 1609894067190, f_requestId1, NULL, f_cSrc1, f_uId1, 1.000000,
+ 301.000000, # flattenRequest_f_cLength_window_sum_32
+ 3, # distinct_count f_cId1, f_cId1-1 f_cId1-2
+ 0.66666666666666663,
+ 0.66666666666666663,
+ 301.000000,
+ 301,
+ 301,
+ 0.66666666666666663,
+ 0.66666666666666663,
+ 0.66666666666666663,
+ NULL, NULL, NULL, # case when !isnull(lag(`f_cId`, 0)) then ... else NULL
+ reqId1, 1 ]
+ - [reqId2, reqId2, 1609894067190, f_requestId2, f_cId2, f_cSrc2, f_uId2, 2.000000,
+ 402.000000,
+ 3,
+ 0.49751243781094528, 0.49751243781094528,
+ 402.000000,
+ 201, 201,
+ 0.49751243781094528, 0.49751243781094528, 0.49751243781094528,
+ 201, 201, 201,
+ reqId2, NULL]
+ - [ NULL, NULL, 1609894067190, f_requestIdNull, f_cIdNull, f_cSrcNul, f_uIdNull, 3.000000,
+ 303.000000,
+ 1,
+ 1.000000, 1.000000,
+ 303.000000,
+ 101, 101,
+ 1.000000, 1.000000, 1.000000,
+ 101, 101, 101,
+ NULL, 3 ]
+
+- id: 1
+ desc: luoji test window flattenRequest_f_requestId_eventTime_0s_604801s without ttl
+ mode: rtidb-batch-unsupport
+ inputs:
+ - columns: [
+ reqId string,
+ eventTime timestamp,
+ f_requestId string,
+ f_cId string,
+ f_uId string,
+ f_cSrc string,
+ f_cLength double]
+ indexs: [
+ index1:f_requestId:eventTime,
+ index2:f_uId:eventTime]
+ repeat: 100
+ name: flattenRequest
+ rows:
+ - [reqId1, 1609894067190, f_requestId1, f_cId1-1, f_uId1, f_cSrc1-1, 1.0]
+ - [reqId1, 1609894067190, f_requestId1, f_cId1-1, f_uId1, f_cSrc1-1, 1.0]
+ - [reqId1, 1609894067190, f_requestId1, f_cId1-2, f_uId1, f_cSrc1-2, 1.0]
+ - [reqId2, 1609894067190, f_requestId2, f_cId2-1, f_uId2, f_cSrc2-1, 2.0]
+ - [reqId2, 1609894067190, f_requestId2, f_cId2-2, f_uId2, f_cSrc2-2, 2.0]
+ - [NULL, 1609894067190, f_requestIdNull, f_cIdNull, f_uIdNull, f_cSrcNul, 3.0]
+ - columns: [
+ reqId string,
+ eventTime timestamp,
+ ingestionTime timestamp,
+ actionValue int,
+ ]
+ indexs: [index1:reqId:null:1:latest]
+ name: action
+ rows:
+ - [reqId1, 1609894067191, 1609894067191, 1]
+ - [NULL, 1609894067191, 1609894067191, 3]
+ sql: |
+ select * from
+ (
+ select
+ reqId as reqId_1,
+ `reqId` as flattenRequest_reqId_original_0,
+ `eventTime` as flattenRequest_eventTime_original_1,
+ `f_requestId` as flattenRequest_f_requestId_original_2,
+ `f_cId` as flattenRequest_f_cId_original_3,
+ `f_cSrc` as flattenRequest_f_cSrc_original_8,
+ `f_uId` as flattenRequest_f_uId_original_17,
+ `f_cLength` as flattenRequest_f_cLength_original_10,
+ sum(`f_cLength`) over flattenRequest_f_requestId_eventTime_0s_604801s as flattenRequest_f_cLength_window_sum_32,
+ fz_top1_ratio(`f_cId`) over flattenRequest_f_requestId_eventTime_0s_604801s as flattenRequest_f_cId_window_top1_ratio_40,
+ case when !isnull(lag(`f_cSrc`, 0)) over flattenRequest_f_requestId_eventTime_0s_604801s then count(`f_cSrc`) over flattenRequest_f_requestId_eventTime_0s_604801s else null end as flattenRequest_f_cSrc_window_count_42,
+ case when !isnull(lag(`f_cId`, 0)) over flattenRequest_f_requestId_eventTime_0s_604801s then count(`f_cId`) over flattenRequest_f_requestId_eventTime_0s_604801s else null end as flattenRequest_f_cId_window_count_47
+ from
+ `flattenRequest`
+ window flattenRequest_f_requestId_eventTime_0s_604801s as (partition by `f_requestId` order by `eventTime` rows_range between 604801s preceding and 0s preceding))
+ as out0
+ last join
+ (
+ select
+ flattenRequest.reqId as reqId_32,
+ `action_reqId`.`actionValue` as action_actionValue_multi_direct_31
+ from
+ `flattenRequest`
+ last join `action` as `action_reqId` on `flattenRequest`.`reqId` = `action_reqId`.`reqId`)
+ as out1
+ on out0.reqId_1 = out1.reqId_32;
+ batch_request:
+ columns: [
+ reqId string,
+ eventTime timestamp,
+ f_requestId string,
+ f_cId string,
+ f_uId string,
+ f_cSrc string,
+ f_cLength double ]
+ rows:
+ - [reqId1, 1609894067190, f_requestId1, f_cId1, f_uId1, f_cSrc1, 1.0]
+ - [reqId1, 1609894067190, f_requestId1, f_cId1, f_uId1, NULL, 1.0]
+ - [reqId1, 1609894067190, f_requestId1, NULL, f_uId1, f_cSrc1, 1.0]
+ - [reqId2, 1609894067190, f_requestId2, f_cId2, f_uId2, f_cSrc2, 2.0]
+ - [NULL, 1609894067190, f_requestIdNull, f_cIdNull, f_uIdNull, f_cSrcNul, 3.0]
+ expect:
+ success: true
+ schema: reqId_1:string, flattenRequest_reqId_original_0:string, flattenRequest_eventTime_original_1:timestamp, flattenRequest_f_requestId_original_2:string, flattenRequest_f_cId_original_3:string, flattenRequest_f_cSrc_original_8:string, flattenRequest_f_uId_original_17:string, flattenRequest_f_cLength_original_10:double, flattenRequest_f_cLength_window_sum_32:double, flattenRequest_f_cId_window_top1_ratio_40:double, flattenRequest_f_cSrc_window_count_42:bigint, flattenRequest_f_cId_window_count_47:bigint, reqId_32:string, action_actionValue_multi_direct_31:int
+ rows:
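+      # note: each input row is repeated 100 times (repeat: 100), so the f_requestId1 window
+      # holds 300 stored rows plus the request row: count = 301 and sum(f_cLength) = 301.0;
+      # the NULL cells below come from the case-when-isnull(lag(...)) guards when the probed
+      # column is NULL in the request row.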
+ - [ reqId1, reqId1, 1609894067190, f_requestId1, f_cId1, f_cSrc1, f_uId1, 1.000000, 301.000000, 0.66445182724252494, 301, 301, reqId1, 1 ]
+ - [ reqId1, reqId1, 1609894067190, f_requestId1, f_cId1, NULL, f_uId1, 1.000000, 301.000000, 0.66445182724252494, NULL, 301, reqId1, 1 ]
+ - [ reqId1, reqId1, 1609894067190, f_requestId1, NULL, f_cSrc1, f_uId1, 1.000000, 301.000000, 0.66666666666666663, 301, NULL, reqId1, 1 ]
+ - [ reqId2, reqId2, 1609894067190, f_requestId2, f_cId2, f_cSrc2, f_uId2, 2.000000, 402.000000, 0.49751243781094528, 201, 201, reqId2, NULL ]
+ - [ NULL, NULL, 1609894067190, f_requestIdNull, f_cIdNull, f_cSrcNul, f_uIdNull, 3.000000, 303.000000, 1.000000, 101, 101, NULL, 3 ]
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
diff --git a/cases/integration_test/fz_ddl/test_myhug.yaml b/cases/integration_test/fz_ddl/test_myhug.yaml
new file mode 100644
index 00000000000..02d0f971040
--- /dev/null
+++ b/cases/integration_test/fz_ddl/test_myhug.yaml
@@ -0,0 +1,314 @@
+# Copyright 2021 4Paradigm
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+db: mybug
+version: 0.5.0
+cases:
+- id: 0
+ desc: mybug test
+ mode: rtidb-batch-unsupport
+ inputs:
+ -
+ columns: ["reqId string","eventTime timestamp","uUserId string","zUserId string",
+ "uSex string","zSex string","zChannel string","uPlayGame string",
+ "uHasJoinedGroup string","uWatchMorning double","uWatchEvening double",
+ "uWatchAvgLength double","zSWihsperNum double" ]
+ indexs: [
+ "index1:uUserId:eventTime",
+ "index2:zChannel:eventTime",
+ "index3:uSex:eventTime",
+ "index4:zUserId:eventTime",
+ "index5:uPlayGame:eventTime",
+ "index6:uHasJoinedGroup:eventTime",
+ "index7:zUserId|uUserId:eventTime" ]
+ repeat: 100
+ name: flattenRequest
+ rows:
+ - [reqId1, 1609894067190, uUserId1, zUserId1, uSex1, zSex1, zChannel1, uPlayGame1, uHasJoinedGroup1, 1.0, 2.0, 3.0, 4.0]
+ - [reqId2, 1609894067190, uUserId2, zUserId2, uSex2, zSex2, zChannel2, uPlayGame2, uHasJoinedGroup2, 1.0, 2.0, 3.0, 4.0]
+ - [NULL, 1609894067190, uUserIdNull, zUserIdNull, uSexNull, zSexNull, zChannelNull, uPlayGameNull, uHasJoinedGroupNull, 1.0, 2.0, 3.0, 4.0]
+ - columns: [
+ "reqId string",
+ "eventTime timestamp",
+ "ingestionTime timestamp",
+ "actionValue double"]
+ indexs: ["index1:reqId:eventTime"]
+ name: action
+ rows:
+ - [reqId1, 1609894067191, 1609894067191, 1.1]
+ - [NULL, 1609894067191, 1609894067191, 3.3]
+ - columns: [ "ingestionTime timestamp",
+ "zUserId string",
+ "uUserId string",
+ "nRequestTime timestamp",
+ "fWatchedTimeLen double" ]
+ indexs: [ "index1:zUserId|uUserId:ingestionTime" ]
+ name: bo_hislabel
+ rows:
+ - [ 1609894067191, zUserId1, uUserId1, 1609894067191, 1.0 ]
+ - [ 1609894067191, zUserId2, uUserId2, 1609894067191, 1.0 ]
+ - [ 1609894067191, zUserIdNull, uUserIdNull, 1609894067191, 1.0 ]
+ sql: |
+ select * from
+ (
+ select
+ `reqId` as reqId_1,
+ `reqId` as flattenRequest_reqId_original_0,
+ `eventTime` as flattenRequest_eventTime_original_1,
+ `uUserId` as flattenRequest_uUserId_original_2,
+ `zUserId` as flattenRequest_zUserId_original_3,
+ `uSex` as flattenRequest_uSex_combine_77,
+ `zSex` as flattenRequest_zSex_original_8,
+ `zChannel` as flattenRequest_zChannel_original_14,
+ `uPlayGame` as flattenRequest_uPlayGame_original_67,
+ `uHasJoinedGroup` as flattenRequest_uHasJoinedGroup_original_46,
+
+ `uWatchMorning` as flattenRequest_uWatchMorning_original_60,
+ `uWatchEvening` as flattenRequest_uWatchEvening_original_62,
+ `uWatchAvgLength` as flattenRequest_uWatchAvgLength_original_63,
+ `zSWihsperNum` as flattenRequest_zSWihsperNum_original_23,
+
+ sum(`uWatchAvgLength`) over flattenRequest_uUserId_eventTime_0_10 as flattenRequest_uWatchAvgLength_window_sum_76,
+ avg(`uWatchMorning`) over flattenRequest_uUserId_eventTime_0_10 as flattenRequest_uWatchMorning_window_avg_78,
+ avg(`uWatchEvening`) over flattenRequest_uUserId_eventTime_0_10 as flattenRequest_uWatchEvening_window_avg_79,
+ sum(`zSWihsperNum`) over flattenRequest_zChannel_eventTime_0s_172801s as flattenRequest_zSWihsperNum_window_sum_80,
+ avg(`uWatchAvgLength`) over flattenRequest_uUserId_eventTime_0_10 as flattenRequest_uWatchAvgLength_window_avg_81,
+
+ case when !isnull(lag(`zUserId`, 0)) over flattenRequest_uUserId_eventTime_0s_36001s then count(`zUserId`) over flattenRequest_uUserId_eventTime_0s_36001s else null end as flattenRequest_zUserId_window_count_82,
+ case when !isnull(lag(`zUserId`, 0)) over flattenRequest_uUserId_eventTime_0s_172801s then count(`zUserId`) over flattenRequest_uUserId_eventTime_0s_172801s else null end as flattenRequest_zUserId_window_count_83,
+ case when !isnull(lag(`zUserId`, 0)) over flattenRequest_uSex_eventTime_0_10 then count(`zUserId`) over flattenRequest_uSex_eventTime_0_10 else null end as flattenRequest_zUserId_window_count_84,
+ case when !isnull(lag(`zUserId`, 0)) over flattenRequest_uUserId_eventTime_0_10 then count(`zUserId`) over flattenRequest_uUserId_eventTime_0_10 else null end as flattenRequest_zUserId_window_count_85,
+ case when !isnull(lag(`uUserId`, 0)) over flattenRequest_zUserId_eventTime_0s_36001s then count(`uUserId`) over flattenRequest_zUserId_eventTime_0s_36001s else null end as flattenRequest_uUserId_window_count_86,
+ case when !isnull(lag(`uUserId`, 0)) over flattenRequest_zUserId_eventTime_0s_172801s then count(`uUserId`) over flattenRequest_zUserId_eventTime_0s_172801s else null end as flattenRequest_uUserId_window_count_87,
+ case when !isnull(lag(`uUserId`, 0)) over flattenRequest_uPlayGame_eventTime_0s_36001s then count(`uUserId`) over flattenRequest_uPlayGame_eventTime_0s_36001s else null end as flattenRequest_uUserId_window_count_88,
+ case when !isnull(lag(`uUserId`, 0)) over flattenRequest_uHasJoinedGroup_eventTime_0s_36001s then count(`uUserId`) over flattenRequest_uHasJoinedGroup_eventTime_0s_36001s else null end as flattenRequest_uUserId_window_count_89,
+ case when !isnull(lag(`uUserId`, 0)) over flattenRequest_uHasJoinedGroup_eventTime_0s_172801s then count(`uUserId`) over flattenRequest_uHasJoinedGroup_eventTime_0s_172801s else null end as flattenRequest_uUserId_window_count_90,
+ case when !isnull(lag(`uUserId`, 0)) over flattenRequest_uSex_eventTime_0s_172801s then count(`uUserId`) over flattenRequest_uSex_eventTime_0s_172801s else null end as flattenRequest_uUserId_window_count_91,
+ case when !isnull(lag(`uUserId`, 0)) over flattenRequest_uSex_eventTime_0s_36001s then count(`uUserId`) over flattenRequest_uSex_eventTime_0s_36001s else null end as flattenRequest_uUserId_window_count_92
+ from
+ `flattenRequest`
+ window flattenRequest_uUserId_eventTime_0_10 as (partition by `uUserId` order by `eventTime` rows between 10 preceding and 0 preceding),
+ flattenRequest_zChannel_eventTime_0s_172801s as (partition by `zChannel` order by `eventTime` rows_range between 172801s preceding and 0s preceding),
+ flattenRequest_uUserId_eventTime_0s_36001s as (partition by `uUserId` order by `eventTime` rows_range between 36001s preceding and 0s preceding),
+ flattenRequest_uUserId_eventTime_0s_172801s as (partition by `uUserId` order by `eventTime` rows_range between 172801s preceding and 0s preceding),
+ flattenRequest_uSex_eventTime_0_10 as (partition by `uSex` order by `eventTime` rows between 10 preceding and 0 preceding),
+ flattenRequest_zUserId_eventTime_0s_36001s as (partition by `zUserId` order by `eventTime` rows_range between 36001s preceding and 0s preceding),
+ flattenRequest_zUserId_eventTime_0s_172801s as (partition by `zUserId` order by `eventTime` rows_range between 172801s preceding and 0s preceding),
+ flattenRequest_uPlayGame_eventTime_0s_36001s as (partition by `uPlayGame` order by `eventTime` rows_range between 36001s preceding and 0s preceding),
+ flattenRequest_uHasJoinedGroup_eventTime_0s_36001s as (partition by `uHasJoinedGroup` order by `eventTime` rows_range between 36001s preceding and 0s preceding),
+ flattenRequest_uHasJoinedGroup_eventTime_0s_172801s as (partition by `uHasJoinedGroup` order by `eventTime` rows_range between 172801s preceding and 0s preceding),
+ flattenRequest_uSex_eventTime_0s_172801s as (partition by `uSex` order by `eventTime` rows_range between 172801s preceding and 0s preceding),
+ flattenRequest_uSex_eventTime_0s_36001s as (partition by `uSex` order by `eventTime` rows_range between 36001s preceding and 0s preceding))
+ as out0
+ last join
+ (
+ select
+ flattenRequest.reqId as reqId_74,
+ `action_reqId`.`actionValue` as action_actionValue_multi_direct_73
+ from
+ `flattenRequest`
+ last join `action` as `action_reqId` on `flattenRequest`.`reqId` = `action_reqId`.`reqId`)
+ as out1
+ on out0.reqId_1 = out1.reqId_74
+ last join
+ (
+ select
+ reqId as reqId_75,
+ max(`fWatchedTimeLen`) over bo_hislabel_zUserId_uUserId_ingestionTime_1s_172801s as bo_hislabel_fWatchedTimeLen_multi_max_74,
+ avg(`fWatchedTimeLen`) over bo_hislabel_zUserId_uUserId_ingestionTime_1s_172801s as bo_hislabel_fWatchedTimeLen_multi_avg_75
+ from
+ (select `eventTime` as `ingestionTime`, `zUserId` as `zUserId`, `uUserId` as `uUserId`, timestamp('2019-07-18 09:20:20') as `nRequestTime`, double(0) as `fWatchedTimeLen`, reqId from `flattenRequest`)
+ window bo_hislabel_zUserId_uUserId_ingestionTime_1s_172801s as (
+ UNION (select `ingestionTime`, `zUserId`, `uUserId`, `nRequestTime`, `fWatchedTimeLen`, '' as reqId from `bo_hislabel`) partition by `zUserId`,`uUserId` order by `ingestionTime` rows_range between 172801s preceding and 1s preceding INSTANCE_NOT_IN_WINDOW))
+ as out2
+ on out0.reqId_1 = out2.reqId_75
+ ;
+ batch_request:
+ columns: [
+ "reqId string",
+ "eventTime timestamp",
+ "uUserId string",
+ "zUserId string",
+ "uSex string",
+ "zSex string",
+ "zChannel string",
+ "uPlayGame string",
+ "uHasJoinedGroup string",
+ "uWatchMorning double",
+ "uWatchEvening double",
+ "uWatchAvgLength double",
+ "zSWihsperNum double"]
+ rows:
+ - [reqId1, 1609894067191, uUserId1, zUserId1, uSex1, zSex1, zChannel1, uPlayGame1, uHasJoinedGroup1, 1.0, 2.0, 3.0, 4.0]
+ - [reqId2, 1609894068191, uUserId2, zUserId2, uSex2, zSex2, zChannel2, uPlayGame2, uHasJoinedGroup2, 1.0, 2.0, 3.0, 4.0]
+ - [reqId2, 1609894068191, uUserId2, NULL, uSex2, zSex2, zChannel2, uPlayGame2, uHasJoinedGroup2, 1.0, 2.0, 3.0, 4.0]
+ - [NULL, 1609894068191, uUserIdNull, zUserIdNull, uSexNull, zSexNull, zChannelNull, uPlayGameNull, uHasJoinedGroupNull, 1.0, 2.0, 3.0, 4.0]
+ expect:
+ schema: >
+ reqId_1:string, flattenRequest_reqId_original_0:string, flattenRequest_eventTime_original_1:timestamp, flattenRequest_uUserId_original_2:string,
+ flattenRequest_zUserId_original_3:string, flattenRequest_uSex_combine_77:string, flattenRequest_zSex_original_8:string,
+ flattenRequest_zChannel_original_14:string, flattenRequest_uPlayGame_original_67:string, flattenRequest_uHasJoinedGroup_original_46:string,
+ flattenRequest_uWatchMorning_original_60:double, flattenRequest_uWatchEvening_original_62:double, flattenRequest_uWatchAvgLength_original_63:double,
+ flattenRequest_zSWihsperNum_original_23:double, flattenRequest_uWatchAvgLength_window_sum_76:double, flattenRequest_uWatchMorning_window_avg_78:double,
+ flattenRequest_uWatchEvening_window_avg_79:double, flattenRequest_zSWihsperNum_window_sum_80:double, flattenRequest_uWatchAvgLength_window_avg_81:double,
+ flattenRequest_zUserId_window_count_82:bigint, flattenRequest_zUserId_window_count_83:bigint, flattenRequest_zUserId_window_count_84:bigint,
+ flattenRequest_zUserId_window_count_85:bigint, flattenRequest_uUserId_window_count_86:bigint, flattenRequest_uUserId_window_count_87:bigint,
+ flattenRequest_uUserId_window_count_88:bigint, flattenRequest_uUserId_window_count_89:bigint, flattenRequest_uUserId_window_count_90:bigint,
+ flattenRequest_uUserId_window_count_91:bigint, flattenRequest_uUserId_window_count_92:bigint, reqId_74:string, action_actionValue_multi_direct_73:double,
+ reqId_75:string, bo_hislabel_fWatchedTimeLen_multi_max_74:double, bo_hislabel_fWatchedTimeLen_multi_avg_75:double
+ rows:
+ - [ reqId1, reqId1, 1609894067191, uUserId1, zUserId1, uSex1, zSex1, zChannel1, uPlayGame1, uHasJoinedGroup1,
+ 1.000000, 2.000000, 3.000000, 4.000000,
+ 33.000000, 1.000000, 2.000000, 404.000000, 3.000000,
+ 101, 101, 11, 11, 101, 101, 101, 101, 101, 101, 101,
+ reqId1, 1.1, reqId1, NULL, NULL ]
+ - [ reqId2, reqId2, 1609894068191, uUserId2, zUserId2, uSex2, zSex2, zChannel2, uPlayGame2, uHasJoinedGroup2,
+ 1.000000, 2.000000, 3.000000, 4.000000,
+ 33.000000, 1.000000, 2.000000, 404.000000, 3.000000,
+ 101, 101, 11, 11, 101, 101, 101, 101, 101, 101, 101,
+ reqId2, NULL, reqId2, 1.000000, 1.000000 ]
+ - [ reqId2, reqId2, 1609894068191, uUserId2, NULL, uSex2, zSex2, zChannel2, uPlayGame2, uHasJoinedGroup2,
+ 1.000000, 2.000000, 3.000000, 4.000000,
+ 33.000000, 1.000000, 2.000000, 404.000000, 3.000000,
+ NULL, NULL, NULL, NULL, 1, 1, 101, 101, 101, 101, 101,
+ reqId2, NULL, reqId2, NULL, NULL ]
+ - [ NULL, NULL, 1609894068191, uUserIdNull, zUserIdNull, uSexNull, zSexNull, zChannelNull, uPlayGameNull, uHasJoinedGroupNull,
+ 1.000000, 2.000000, 3.000000, 4.000000,
+ 33.000000, 1.000000, 2.000000, 404.000000, 3.000000,
+ 101, 101, 11, 11, 101, 101, 101, 101, 101, 101, 101,
+ NULL, 3.3, NULL, 1.000000, 1.000000 ]
+- id: 1
+ desc: mybug bo_hislabel_fWatchedTimeLen_multi_max_74
+ mode: rtidb-batch-unsupport
+ inputs:
+ - columns: [ "reqId string",
+ "eventTime timestamp",
+ "uUserId string",
+ "zUserId string",
+ "uSex string",
+ "zSex string",
+ "zChannel string",
+ "uPlayGame string",
+ "uHasJoinedGroup string",
+ "uWatchMorning double",
+ "uWatchEvening double",
+ "uWatchAvgLength double",
+ "zSWihsperNum double" ]
+ indexs: [
+ "index1:uUserId:eventTime",
+ "index2:zChannel:eventTime",
+ "index3:uSex:eventTime",
+ "index4:zUserId:eventTime",
+ "index5:uPlayGame:eventTime",
+ "index6:uHasJoinedGroup:eventTime",
+ "index7:zUserId|uUserId:eventTime" ]
+ name: flattenRequest
+ rows:
+ - [reqId1, 1609894067191, uUserId1, zUserId1, uSex1, zSex1, zChannel1, uPlayGame1, uHasJoinedGroup1, 1.0, 2.0, 3.0, 4.0]
+ - columns: [
+ "reqId string",
+ "eventTime timestamp",
+ "ingestionTime timestamp",
+ "actionValue double"]
+ indexs: ["index1:reqId:eventTime"]
+ name: action
+ rows:
+ - [reqId1, 1609894067191, 1609894067191, 1.1]
+ - [NULL, 1609894067191, 1609894067191, 3.3]
+ - columns: [ "ingestionTime timestamp",
+ "zUserId string",
+ "uUserId string",
+ "nRequestTime timestamp",
+ "fWatchedTimeLen double"]
+ indexs: ["index1:zUserId|uUserId:ingestionTime"]
+ name: bo_hislabel
+ repeat: 100
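+      # Note (assumption based on the expected values below): repeat: 100 appears to load each
+      # bo_hislabel row 100 times, which would account for the expected window sum of 100.0.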
+ rows:
+ - [ 1609894067191, zUserId1, uUserId1, 1609894067191, 1.0 ]
+ - [ 1609894067191, zUserId2, uUserId2, 1609894067191, 1.0 ]
+ - [ 1609894067191, NULL, NULL, 1609894067191, 1.0 ]
+ sql: |-
+ select * from
+ (
+ select
+ `reqId` as reqId_1
+ from `flattenRequest`) as out0
+ last join
+ (
+ select
+ flattenRequest.reqId as reqId_74,
+ `action_reqId`.`actionValue` as action_actionValue_multi_direct_73
+ from
+ `flattenRequest`
+ last join `action` as `action_reqId` on `flattenRequest`.`reqId` = `action_reqId`.`reqId`)
+ as out1
+ on out0.reqId_1 = out1.reqId_74
+ last join
+ (
+ select
+ reqId as reqId_75,
+ sum(`fWatchedTimeLen`) over bo_hislabel_zUserId_uUserId_ingestionTime_1s_172801s as bo_hislabel_fWatchedTimeLen_multi_sum_73,
+ max(`fWatchedTimeLen`) over bo_hislabel_zUserId_uUserId_ingestionTime_1s_172801s as bo_hislabel_fWatchedTimeLen_multi_max_74,
+ avg(`fWatchedTimeLen`) over bo_hislabel_zUserId_uUserId_ingestionTime_1s_172801s as bo_hislabel_fWatchedTimeLen_multi_avg_75
+ from
+ (select `eventTime` as `ingestionTime`, `zUserId` as `zUserId`, `uUserId` as `uUserId`, timestamp('2019-07-18 09:20:20') as `nRequestTime`, double(0) as `fWatchedTimeLen`, reqId from `flattenRequest`)
+ window bo_hislabel_zUserId_uUserId_ingestionTime_1s_172801s as (
+ UNION (select `ingestionTime`, `zUserId`, `uUserId`, `nRequestTime`, `fWatchedTimeLen`, '' as reqId from `bo_hislabel`) partition by `zUserId`,`uUserId` order by `ingestionTime` rows_range between 172801s preceding and 1s preceding INSTANCE_NOT_IN_WINDOW))
+ as out2
+ on out0.reqId_1 = out2.reqId_75
+ ;
+    tags: ["@baoxinqi, avg handling of empty tables needs to be aligned with feql/mysql"]
+ batch_request:
+ columns: [
+ "reqId string",
+ "eventTime timestamp",
+ "uUserId string",
+ "zUserId string",
+ "uSex string",
+ "zSex string",
+ "zChannel string",
+ "uPlayGame string",
+ "uHasJoinedGroup string",
+ "uWatchMorning double",
+ "uWatchEvening double",
+ "uWatchAvgLength double",
+ "zSWihsperNum double" ]
+ indexs: [
+ "index1:uUserId:eventTime",
+ "index2:zChannel:eventTime",
+ "index3:uSex:eventTime",
+ "index4:zUserId:eventTime",
+ "index5:uPlayGame:eventTime",
+ "index6:uHasJoinedGroup:eventTime",
+ "index7:zUserId|uUserId:eventTime",
+ "index8:uUserId:eventTime",
+ "index9:uUserId:eventTime" ]
+ name: flattenRequest
+ rows:
+ # pure history window is empty: rows out of time range
+ - [reqId1, 1609894067191, uUserId1, zUserId1, uSex1, zSex1, zChannel1, uPlayGame1, uHasJoinedGroup1, 1.0, 2.0, 3.0, 4.0]
+ # pure history window isn't empty
+ - [reqId2, 1609894068191, uUserId2, zUserId2, uSex2, zSex2, zChannel2, uPlayGame2, uHasJoinedGroup2, 1.0, 2.0, 3.0, 4.0]
+ # last join key is NULL
+ - [NULL, 1609894068191, uUserIdNull, zUserIdNull, uSexNull, zSexNull, zChannelNull, uPlayGameNull, uHasJoinedGroupNull, 1.0, 2.0, 3.0, 4.0]
+ - [reqId2, 1609894068191, uUserId2, NULL, uSex2, zSex2, zChannel2, uPlayGame2, uHasJoinedGroup2, 1.0, 2.0, 3.0, 4.0]
+ - [reqId2, 1609894068191, NULL, NULL, uSex2, zSex2, zChannel2, uPlayGame2, uHasJoinedGroup2, 1.0, 2.0, 3.0, 4.0]
+ expect:
+ schema: reqId_1:string, reqId_74:string, action_actionValue_multi_direct_73:double, reqId_75:string, bo_hislabel_fWatchedTimeLen_multi_sum_73:double, bo_hislabel_fWatchedTimeLen_multi_max_74:double, bo_hislabel_fWatchedTimeLen_multi_avg_75:double
+ rows:
+ - [ reqId1, reqId1, 1.1, reqId1, NULL, NULL, NULL ]
+ - [ reqId2, reqId2, NULL, reqId2, 100.0, 1.0, 1.0 ]
+ - [ NULL, NULL, 3.3, NULL, NULL, NULL, NULL]
+ - [ reqId2, reqId2, NULL, reqId2, NULL, NULL, NULL ]
+ - [ reqId2, reqId2, NULL, reqId2, 100.0, 1.0, 1.0 ]
diff --git a/cases/integration_test/join/test_lastjoin_complex.yaml b/cases/integration_test/join/test_lastjoin_complex.yaml
new file mode 100644
index 00000000000..01781421fbc
--- /dev/null
+++ b/cases/integration_test/join/test_lastjoin_complex.yaml
@@ -0,0 +1,1156 @@
+# Copyright 2021 4Paradigm
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+db: test_zw
+debugs: []
+version: 0.5.0
+cases:
+ - id: 0
+    desc: lastjoin + window
+ inputs:
+ -
+ columns : ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"]
+ indexs: ["index1:c1:c7"]
+ rows:
+ - [1,"aa",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"aa",21,31,1.2,2.2,1590738990001,"2020-05-02"]
+ - [3,"aa",22,32,1.3,2.3,1590738990002,"2020-05-03"]
+ - [4,"bb",23,33,1.4,2.4,1590738990003,"2020-05-04"]
+ - [5,"bb",24,34,1.5,2.5,1590738990004,"2020-05-05"]
+ -
+ columns: ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"]
+ indexs: ["index1:c1:c7"]
+ rows:
+ - [1,"aa",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"aa",20,31,1.1,2.1,1590738990001,"2020-05-01"]
+ - [3,"aa",20,32,1.1,2.1,1590738990002,"2020-05-01"]
+ - [4,"bb",20,33,1.1,2.1,1590738990003,"2020-05-01"]
+ - [5,"bb",21,34,1.2,2.2,1590738990004,"2020-05-02"]
+ dataProvider:
+ - ["ROWS","ROWS_RANGE"]
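+    # Presumably each dataProvider value is substituted into the d[0] placeholder in the SQL
+    # below, so this case runs once with ROWS and once with ROWS_RANGE as the frame type.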
+ sql: |
+ select {0}.id,{0}.c1,{0}.c3,{1}.c4,
+ sum({1}.c4) OVER w1 as w1_c4_sum
+ from {0}
+ last join {1} ORDER BY {1}.c7 on {0}.c1={1}.c1
+ WINDOW
+ w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 d[0] BETWEEN 1 PRECEDING AND CURRENT ROW)
+ ;
+ expect:
+ order: id
+ columns: ["id int","c1 string","c3 int","c4 bigint","w1_c4_sum bigint"]
+ rows:
+ - [1,"aa",20,32,32]
+ - [2,"aa",21,32,64]
+ - [3,"aa",22,32,64]
+ - [4,"bb",23,34,34]
+ - [5,"bb",24,34,68]
+ - id: 1
+    desc: lastjoin + window - no matching rows for some keys
+ version: 0.6.0
+ inputs:
+ -
+ columns : ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"]
+ indexs: ["index1:c1:c7"]
+ rows:
+ - [1,"aa",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"aa",21,31,1.2,2.2,1590738990001,"2020-05-02"]
+ - [3,"aa",22,32,1.3,2.3,1590738990002,"2020-05-03"]
+ - [4,"bb",23,33,1.4,2.4,1590738990003,"2020-05-04"]
+ - [5,"bb",24,34,1.5,2.5,1590738990004,"2020-05-05"]
+ -
+ columns: ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"]
+ indexs: ["index1:c1:c7"]
+ rows:
+ - [1,"aa",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"aa",20,31,1.1,2.1,1590738990001,"2020-05-01"]
+ - [3,"aa",20,32,1.1,2.1,1590738990002,"2020-05-01"]
+ - [4,"cc",20,33,1.1,2.1,1590738990003,"2020-05-01"]
+ - [5,"cc",21,34,1.2,2.2,1590738990004,"2020-05-02"]
+ dataProvider:
+ - ["ROWS","ROWS_RANGE"]
+ sql: |
+ select {0}.id,{0}.c1,{0}.c3,{1}.c4,
+ sum({1}.c4) OVER w1 as w1_c4_sum,
+ count({1}.c4) OVER w1 as w1_c4_count
+ from {0}
+ last join {1} ORDER BY {1}.c7 on {0}.c1={1}.c1
+ WINDOW
+ w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 d[0] BETWEEN 1 PRECEDING AND CURRENT ROW)
+ ;
+ expect:
+ order: id
+ columns: ["id int","c1 string","c3 int","c4 bigint","w1_c4_sum bigint","w1_c4_count bigint"]
+ rows:
+ - [1,"aa",20,32,32,1]
+ - [2,"aa",21,32,64,2]
+ - [3,"aa",22,32,64,2]
+ - [4,"bb",23,NULL,NULL,0]
+ - [5,"bb",24,NULL,NULL,0]
+ - id: 2
+    desc: lastjoin + window + union
+    tags: ["TODO","lastjoin window + union is not supported together yet"]
+ inputs:
+ -
+ columns : ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"]
+ indexs: ["index1:c1:c7"]
+ rows:
+ - [1,"aa",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [3,"aa",22,32,1.3,2.3,1590738990002,"2020-05-03"]
+ - [5,"bb",24,34,1.5,2.5,1590738990004,"2020-05-05"]
+ -
+ columns: ["d1 string","d4 bigint","d7 timestamp"]
+ indexs: ["index1:d1:d7"]
+ rows:
+ - ["aa",30,1590738990000]
+ - ["aa",32,1590738990002]
+ - ["bb",34,1590738990004]
+ -
+ columns : ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","d1 string","d4 bigint","d7 timestamp"]
+ indexs: ["index1:c1:c7"]
+ rows:
+ - [2,"aa",21,31,1.2,2.2,1590738990001,"2020-05-02","aa",31,1590738990001]
+ - [4,"bb",23,33,1.4,2.4,1590738990003,"2020-05-04","bb",32,1590738990003]
+ dataProvider:
+ - ["ROWS","ROWS_RANGE"]
+ sql: |
+ select id,{0}.c1,{0}.c3,{1}.d4,
+ sum({1}.d4) OVER w1 as w1_c4_sum
+ from {0}
+ last join {1} ORDER BY {1}.d7 on {0}.c1={1}.d1
+ WINDOW
+ w1 AS (UNION {2} PARTITION BY {0}.c1 ORDER BY {0}.c7 d[0] BETWEEN 1 PRECEDING AND CURRENT ROW)
+ ;
+ expect:
+ order: id
+ columns: ["id int","c1 string","c3 int","d4 bigint","w1_c4_sum bigint"]
+ rows:
+ - [1,"aa",20,32,32]
+ - [3,"aa",22,32,63]
+ - [5,"bb",24,34,67]
+ - id: 3
+    desc: lastjoin + window + union subquery
+    tags: ["TODO","lastjoin window + union is not supported together yet"]
+ inputs:
+ -
+ columns : ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"]
+ indexs: ["index1:c1:c7"]
+ rows:
+ - [1,"aa",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [3,"aa",22,32,1.3,2.3,1590738990002,"2020-05-03"]
+ - [5,"bb",24,34,1.5,2.5,1590738990004,"2020-05-05"]
+ -
+ columns: ["d1 string","d4 bigint","d7 timestamp"]
+ indexs: ["index1:d1:d7"]
+ rows:
+ - ["aa",30,1590738990000]
+ - ["aa",32,1590738990002]
+ - ["bb",34,1590738990004]
+ -
+ columns : ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","d4 bigint"]
+ indexs: ["index1:c1:c7"]
+ rows:
+ - [2,"aa",21,31,1.2,2.2,1590738990001,"2020-05-02",31]
+ - [4,"bb",23,33,1.4,2.4,1590738990003,"2020-05-04",32]
+ -
+ columns: ["d1 string","d7 timestamp"]
+ indexs: ["index1:d1:d7"]
+ rows:
+ - ["aa",1590738990001]
+ - ["bb",1590738990003]
+ dataProvider:
+ - ["ROWS","ROWS_RANGE"]
+ sql: |
+ select id,{0}.c1,{0}.c3,{1}.d4,
+ sum({1}.d4) OVER w1 as w1_c4_sum
+ from {0}
+ last join {1} ORDER BY {1}.d7 on {0}.c1={1}.d1
+ WINDOW
+ w1 AS (UNION
+ (select id,c1,c3,c4,c5,c6,c7,c8,d1,d4,d7 from {2} last join {3} ORDER BY {3}.d7 on {2}.c1={3}.d1)
+ PARTITION BY {0}.c1 ORDER BY {0}.c7 d[0] BETWEEN 1 PRECEDING AND CURRENT ROW)
+ ;
+ expect:
+ order: id
+ columns: ["id int","c1 string","c3 int","d4 bigint","w1_c4_sum bigint"]
+ rows:
+ - [1,"aa",20,32,32]
+ - [3,"aa",22,32,63]
+ - [5,"bb",24,34,67]
+ - id: 4
+    desc: lastjoin - one subquery
+ inputs:
+ -
+ columns : ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"]
+ indexs: ["index1:c1:c7"]
+ rows:
+ - [1,"aa",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"aa",21,31,1.2,2.2,1590738990001,"2020-05-02"]
+ - [3,"aa",22,32,1.3,2.3,1590738990002,"2020-05-03"]
+ - [4,"bb",23,33,1.4,2.4,1590738990003,"2020-05-04"]
+ - [5,"bb",24,34,1.5,2.5,1590738990004,"2020-05-05"]
+ -
+ columns: ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"]
+ indexs: ["index1:c1:c7"]
+ rows:
+ - [1,"aa",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"aa",20,31,1.1,2.1,1590738990001,"2020-05-01"]
+ - [3,"aa",20,32,1.1,2.1,1590738990002,"2020-05-01"]
+ - [4,"bb",20,33,1.1,2.1,1590738990003,"2020-05-01"]
+ - [5,"bb",21,34,1.2,2.2,1590738990004,"2020-05-02"]
+ dataProvider:
+ - ["ROWS","ROWS_RANGE"]
+ sql: |
+ select id,{0}.c1,{0}.c3,t1.c4,
+ sum(t1.c4) OVER w1 as w1_c4_sum
+ from {0}
+ last join (select c1,c4,c7 from {1}) as t1 ORDER BY t1.c7 on {0}.c1=t1.c1
+ WINDOW
+ w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 d[0] BETWEEN 1 PRECEDING AND CURRENT ROW)
+ ;
+ expect:
+ order: id
+ columns: ["id int","c1 string","c3 int","c4 bigint","w1_c4_sum bigint"]
+ rows:
+ - [1,"aa",20,32,32]
+ - [2,"aa",21,32,64]
+ - [3,"aa",22,32,64]
+ - [4,"bb",23,34,34]
+ - [5,"bb",24,34,68]
+ - id: 5
+    desc: window over a subquery - outside the supported feature boundary
+ tags: ["TODO","client core"]
+ inputs:
+ -
+ columns : ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"]
+ indexs: ["index1:c1:c7"]
+ rows:
+ - [1,"aa",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"aa",21,31,1.2,2.2,1590738990001,"2020-05-02"]
+ - [3,"aa",22,32,1.3,2.3,1590738990002,"2020-05-03"]
+ - [4,"bb",23,33,1.4,2.4,1590738990003,"2020-05-04"]
+ - [5,"bb",24,34,1.5,2.5,1590738990004,"2020-05-05"]
+ -
+ columns: ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"]
+ indexs: ["index1:c1:c7"]
+ rows:
+ - [1,"aa",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"aa",20,31,1.1,2.1,1590738990001,"2020-05-01"]
+ - [3,"aa",20,32,1.1,2.1,1590738990002,"2020-05-01"]
+ - [4,"bb",20,33,1.1,2.1,1590738990003,"2020-05-01"]
+ - [5,"bb",21,34,1.2,2.2,1590738990004,"2020-05-02"]
+ dataProvider:
+ - ["ROWS","ROWS_RANGE"]
+ sql: |
+ select t2.id,t2.c1,t2.c3,t1.c4,
+ sum(t1.c4) OVER w1 as w1_c4_sum
+ from (select id,c1,c3,c4,c7 from {0}) as t2
+ last join (select c1,c4,c7 from {1}) as t1 ORDER BY t1.c7 on t2.c1=t1.c1
+ WINDOW
+ w1 AS (PARTITION BY t2.c1 ORDER BY t2.c7 d[0] BETWEEN 1 PRECEDING AND CURRENT ROW)
+ ;
+ expect:
+ success: false
+ - id: 6-1
+    desc: lastjoin of two subqueries - subqueries contain windows - not supported by rtidb
+ mode: offline-unsupport
+ inputs:
+ -
+ columns : ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"]
+ indexs: ["index1:c1:c7"]
+ rows:
+ - [1,"aa",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"aa",21,31,1.2,2.2,1590738990001,"2020-05-02"]
+ - [3,"aa",22,32,1.3,2.3,1590738990002,"2020-05-03"]
+ - [4,"bb",23,33,1.4,2.4,1590738990003,"2020-05-04"]
+ - [5,"bb",24,34,1.5,2.5,1590738990004,"2020-05-05"]
+ -
+ columns: ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"]
+ indexs: ["index1:c1:c7"]
+ rows:
+ - [1,"aa",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"aa",21,31,1.1,2.1,1590738990001,"2020-05-01"]
+ - [3,"aa",22,32,1.1,2.1,1590738990002,"2020-05-01"]
+ - [4,"bb",23,33,1.1,2.1,1590738990003,"2020-05-01"]
+ - [5,"bb",24,34,1.2,2.2,1590738990004,"2020-05-02"]
+ dataProvider:
+ - ["ROWS","ROWS_RANGE"]
+ sql: |
+ select id,t2.c1,t2.c3,t1.c4,
+ sum(t1.c4) OVER w1 as w1_c4_sum,
+ sum(t2.w2_c3_sum) OVER w1 as w2_c3_sum,
+ sum(t1.w3_c4_sum) OVER w1 as w3_c4_sum
+ from (select id,c1,c3,c4,c7,sum({0}.c3) OVER w2 as w2_c3_sum from {0} WINDOW w2 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS BETWEEN 1 PRECEDING AND CURRENT ROW)) as t2
+ last join (select c1,c4,c7,sum({1}.c4) OVER w3 as w3_c4_sum from {1} WINDOW w3 AS (PARTITION BY {1}.c1 ORDER BY {1}.c7 ROWS_RANGE BETWEEN 1 PRECEDING AND CURRENT ROW)) as t1
+ ORDER BY t1.c7 on t2.c1=t1.c1
+ WINDOW
+ w1 AS (PARTITION BY t2.c1 ORDER BY t2.c7 d[0] BETWEEN 1 PRECEDING AND CURRENT ROW)
+ ;
+ expect:
+ success: false
+ - id: 6-2
+    desc: lastjoin of two subqueries - subqueries contain windows - offline scenario
+ tags: ["TODO", "@chenjing", "0.3.0", ""]
+# mode: rtidb-unsupport
+ inputs:
+ -
+ columns : ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"]
+ indexs: ["index1:c1:c7"]
+ rows:
+ - [1,"aa",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"aa",21,31,1.2,2.2,1590738990001,"2020-05-02"]
+ - [3,"aa",22,32,1.3,2.3,1590738990002,"2020-05-03"]
+ - [4,"bb",23,33,1.4,2.4,1590738990003,"2020-05-04"]
+ - [5,"bb",24,34,1.5,2.5,1590738990004,"2020-05-05"]
+ -
+ columns: ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"]
+ indexs: ["index1:c1:c7"]
+ rows:
+ - [1,"aa",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"aa",21,31,1.1,2.1,1590738990001,"2020-05-01"]
+ - [3,"aa",22,32,1.1,2.1,1590738990002,"2020-05-01"]
+ - [4,"bb",23,33,1.1,2.1,1590738990003,"2020-05-01"]
+ - [5,"bb",24,34,1.2,2.2,1590738990004,"2020-05-02"]
+ dataProvider:
+ - ["ROWS","ROWS_RANGE"]
+ sql: |
+ select id,t2.c1,t2.c3,t1.c4,
+ sum(t1.c4) OVER w1 as w1_c4_sum,
+ sum(t2.w2_c3_sum) OVER w1 as w2_c3_sum,
+ sum(t1.w3_c4_sum) OVER w1 as w3_c4_sum
+ from (select id,c1,c3,c4,c7,sum({0}.c3) OVER w2 as w2_c3_sum from {0} WINDOW w2 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS BETWEEN 1 PRECEDING AND CURRENT ROW)) as t2
+ last join (select c1,c4,c7,sum({1}.c4) OVER w3 as w3_c4_sum from {1} WINDOW w3 AS (PARTITION BY {1}.c1 ORDER BY {1}.c7 ROWS_RANGE BETWEEN 1 PRECEDING AND CURRENT ROW)) as t1
+ ORDER BY t1.c7 on t2.c1=t1.c1
+ WINDOW
+ w1 AS (PARTITION BY t2.c1 ORDER BY t2.c7 d[0] BETWEEN 1 PRECEDING AND CURRENT ROW)
+ ;
+ expect:
+ success: true
+ - id: 8
+    desc: lastjoin over three tables
+ inputs:
+ -
+ columns : ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"]
+ indexs: ["index1:c1:c7"]
+ rows:
+ - [1,"aa",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",21,31,1.2,2.2,1590738991000,"2020-05-02"]
+ - [3,"cc",22,32,1.3,2.3,1590738992000,"2020-05-03"]
+ - [4,"dd",23,33,1.4,2.4,1590738993000,"2020-05-04"]
+ - [5,"ee",24,34,1.5,2.5,1590738994000,"2020-05-05"]
+ -
+ columns: ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"]
+ indexs: ["index1:c1:c7"]
+ rows:
+ - [1,"aa",20,30,1.1,2.1,1606755660000,"2020-05-01"]
+ - [2,"aa",20,31,1.1,2.1,1606755720000,"2020-05-01"]
+ - [3,"cc",20,32,1.1,2.1,1606755780000,"2020-05-01"]
+ - [4,"dd",20,33,1.1,2.1,1606755840000,"2020-05-01"]
+ - [5,"ee",21,34,1.2,2.2,1606755660000,"2020-05-02"]
+ -
+ columns : ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"]
+ indexs: ["index1:c1:c7"]
+ rows:
+ - [1,"aa",20,30,1.1,2.1,1606755600000,"2020-05-01"]
+ - [2,"bb",20,31,1.2,2.1,1606759200000,"2020-05-01"]
+ - [3,"bb",20,32,1.3,2.1,1606762800000,"2020-05-01"]
+ - [4,"bb",20,33,1.4,2.1,1606766400000,"2020-05-01"]
+ - [5,"ee",21,34,1.5,2.2,1606766400000,"2020-05-02"]
+ -
+ columns : ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"]
+ indexs: ["index1:c1:c7"]
+ rows:
+ - [1,"ee",20,30,1.1,2.1,1606752000000,"2020-05-01"]
+ - [2,"bb",20,31,1.1,2.2,1606838400000,"2020-05-01"]
+ - [3,"ee",20,32,1.1,2.3,1606924800000,"2020-05-01"]
+ - [4,"ee",20,33,1.1,2.4,1607011200000,"2020-05-01"]
+ - [5,"ee",21,34,1.2,2.5,1606752000000,"2020-05-02"]
+ sql: |
+ select {0}.id,{0}.c1,{0}.c3,{1}.c4,{2}.c5,{3}.c6
+ from {0}
+ last join {1} ORDER BY {1}.c7 on {0}.c1={1}.c1
+ last join {2} ORDER BY {2}.c7 on {0}.c1={2}.c1
+ last join {3} ORDER BY {3}.c7 on {0}.c1={3}.c1
+ ;
+ expect:
+ order: id
+ columns: ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double"]
+ rows:
+ - [1,"aa",20,31,1.1,null]
+ - [2,"bb",21,null,1.4,2.2]
+ - [3,"cc",22,32,null,null]
+ - [4,"dd",23,33,null,null]
+ - [5,"ee",24,34,1.5,2.4]
+ - id: 9-1
+    desc: lastjoin over three tables - 5 windows, not supported in rtidb mode
+ mode: offline-unsupport
+    tags: ["TODO","out of supported boundary", "@zhaowei", "window over a lastjoin of multiple tables needs to be supported later"]
+ inputs:
+ -
+ columns : ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"]
+ indexs: ["index1:c1:c7"]
+ rows:
+ - [1,"aa",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"aa",21,31,1.2,2.2,1590738991000,"2020-05-02"]
+ - [3,"aa",22,32,1.3,2.3,1590738992000,"2020-05-03"]
+ - [4,"bb",23,33,1.4,2.4,1590738993000,"2020-05-04"]
+ - [5,"bb",24,34,1.5,2.5,1590738994000,"2020-05-05"]
+ -
+ columns: ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"]
+ indexs: ["index1:c1:c7"]
+ rows:
+ - [1,"aa",20,30,1.1,2.1,1606755660000,"2020-05-01"]
+ - [2,"aa",20,31,1.1,2.1,1606755720000,"2020-05-01"]
+ - [3,"aa",20,32,1.1,2.1,1606755780000,"2020-05-01"]
+ - [4,"bb",20,33,1.1,2.1,1606755840000,"2020-05-01"]
+ - [5,"bb",21,34,1.2,2.2,1606755660000,"2020-05-02"]
+ -
+ columns : ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"]
+ indexs: ["index1:c1:c7"]
+ rows:
+ - [1,"aa",20,30,1.1,2.1,1606755600000,"2020-05-01"]
+ - [2,"aa",20,31,1.2,2.1,1606759200000,"2020-05-01"]
+ - [3,"bb",20,32,1.3,2.1,1606762800000,"2020-05-01"]
+ - [4,"bb",20,33,1.4,2.1,1606766400000,"2020-05-01"]
+ - [5,"bb",21,34,1.5,2.2,1606766400000,"2020-05-02"]
+ -
+ columns : ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"]
+ indexs: ["index1:c1:c7"]
+ rows:
+ - [1,"bb",20,30,1.1,2.1,1606752000000,"2020-05-01"]
+ - [2,"bb",20,31,1.1,2.2,1606838400000,"2020-05-01"]
+ - [3,"cc",20,32,1.1,2.3,1606924800000,"2020-05-01"]
+ - [4,"ee",20,33,1.1,2.4,1607011200000,"2020-05-01"]
+ - [5,"ee",21,34,1.2,2.5,1606752000000,"2020-05-02"]
+ sql: |
+ select id,{0}.c1,{0}.c3,{1}.c4,{2}.c5,{3}.c6,
+ sum({0}.c4) OVER w1 as w1_c4_sum,
+ sum({1}.c4) OVER w2 as w2_c4_sum,
+ sum({2}.c4) OVER w3 as w3_c4_sum,
+ sum({3}.c4) OVER w4 as w4_c4_sum,
+ count({3}.c4) OVER w5 as w5_c4_count
+ from {0}
+ last join {1} ORDER BY {1}.c7 on {0}.c1={1}.c1
+ last join {2} ORDER BY {2}.c7 on {0}.c1={2}.c1
+ last join {3} ORDER BY {3}.c7 on {0}.c1={3}.c1
+ WINDOW
+ w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS_RANGE BETWEEN 1s PRECEDING AND CURRENT ROW),
+ w2 AS (PARTITION BY {0}.c1 ORDER BY {1}.c7 ROWS_RANGE BETWEEN 1m PRECEDING AND CURRENT ROW),
+ w3 AS (PARTITION BY {0}.c1 ORDER BY {2}.c7 ROWS_RANGE BETWEEN 1h PRECEDING AND CURRENT ROW),
+ w4 AS (PARTITION BY {0}.c1 ORDER BY {3}.c7 ROWS_RANGE BETWEEN 1d PRECEDING AND CURRENT ROW),
+ w5 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS BETWEEN 2 PRECEDING AND CURRENT ROW);
+ expect:
+ success: false
+ - id: 9-2
+    desc: lastjoin over three tables - 5 windows - supported in the offline scenario
+# mode: rtidb-unsupport
+    tags: ["TODO", "@chendihao", "window after a multi-table last join has issues in offline support"]
+ inputs:
+ -
+ columns : ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"]
+ indexs: ["index1:c1:c7"]
+ rows:
+ - [1,"aa",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"aa",21,31,1.2,2.2,1590738991000,"2020-05-02"]
+ - [3,"aa",22,32,1.3,2.3,1590738992000,"2020-05-03"]
+ - [4,"bb",23,33,1.4,2.4,1590738993000,"2020-05-04"]
+ - [5,"bb",24,34,1.5,2.5,1590738994000,"2020-05-05"]
+ -
+ columns: ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"]
+ indexs: ["index1:c1:c7"]
+ rows:
+ - [1,"aa",20,30,1.1,2.1,1606755660000,"2020-05-01"]
+ - [2,"aa",20,31,1.1,2.1,1606755720000,"2020-05-01"]
+ - [3,"aa",20,32,1.1,2.1,1606755780000,"2020-05-01"]
+ - [4,"bb",20,33,1.1,2.1,1606755840000,"2020-05-01"]
+ - [5,"bb",21,34,1.2,2.2,1606755660000,"2020-05-02"]
+ -
+ columns : ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"]
+ indexs: ["index1:c1:c7"]
+ rows:
+ - [1,"aa",20,30,1.1,2.1,1606755600000,"2020-05-01"]
+ - [2,"aa",20,31,1.2,2.1,1606759200000,"2020-05-01"]
+ - [3,"bb",20,32,1.3,2.1,1606762800000,"2020-05-01"]
+ - [4,"bb",20,33,1.4,2.1,1606766400000,"2020-05-01"]
+ - [5,"bb",21,34,1.5,2.2,1606766400000,"2020-05-02"]
+ -
+ columns : ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"]
+ indexs: ["index1:c1:c7"]
+ rows:
+ - [1,"bb",20,30,1.1,2.1,1606752000000,"2020-05-01"]
+ - [2,"bb",20,31,1.1,2.2,1606838400000,"2020-05-01"]
+ - [3,"cc",20,32,1.1,2.3,1606924800000,"2020-05-01"]
+ - [4,"ee",20,33,1.1,2.4,1607011200000,"2020-05-01"]
+ - [5,"ee",21,34,1.2,2.5,1606752000000,"2020-05-02"]
+ sql: |
+ select id,{0}.c1,{0}.c3,{1}.c4,{2}.c5,{3}.c6,
+ sum({0}.c4) OVER w1 as w1_c4_sum,
+ sum({1}.c4) OVER w2 as w2_c4_sum,
+ sum({2}.c4) OVER w3 as w3_c4_sum,
+ sum({3}.c4) OVER w4 as w4_c4_sum,
+ count({3}.c4) OVER w5 as w5_c4_count
+ from {0}
+ last join {1} ORDER BY {1}.c7 on {0}.c1={1}.c1
+ last join {2} ORDER BY {2}.c7 on {0}.c1={2}.c1
+ last join {3} ORDER BY {3}.c7 on {0}.c1={3}.c1
+ WINDOW
+ w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS_RANGE BETWEEN 1s PRECEDING AND CURRENT ROW),
+ w2 AS (PARTITION BY {0}.c1 ORDER BY {1}.c7 ROWS_RANGE BETWEEN 1m PRECEDING AND CURRENT ROW),
+ w3 AS (PARTITION BY {0}.c1 ORDER BY {2}.c7 ROWS_RANGE BETWEEN 1h PRECEDING AND CURRENT ROW),
+ w4 AS (PARTITION BY {0}.c1 ORDER BY {3}.c7 ROWS_RANGE BETWEEN 1d PRECEDING AND CURRENT ROW),
+ w5 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS BETWEEN 2 PRECEDING AND CURRENT ROW);
+ expect:
+ success: true
+# order: id
+# columns: ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","w1_c4_sum bigint","w2_c4_sum bigint","w3_c4_sum bigint","w4_c4_sum bigint","w5_c4_count bigint"]
+# rows:
+# - [1,"aa",20,31,1.1,null,30,32,31,null,0]
+# - [2,"aa",21,null,1.4,2.2,61,64,62,null,0]
+# - [3,"aa",22,32,null,null,63,64,62,null,0]
+# - [4,"bb",23,33,null,null,33,34,34,31,1]
+# - [5,"bb",24,34,1.5,2.4,67,68,68,62,2]
+ - id: 10
+    desc: t1 join t2 join t3, the key produced by t2 is null
+ mode: offline-unsupport
+#    tags: ["@chendihao", "the offline expectation for this case is incorrect and needs Dihao to check"]
+ inputs:
+ -
+ columns : ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"]
+ indexs: ["index1:c1:c7"]
+ rows:
+ - [1,"aa",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",21,31,1.2,2.2,1590738991000,"2020-05-02"]
+ - [3,"cc",22,32,1.3,2.3,1590738992000,"2020-05-03"]
+ - [4,"dd",23,33,1.4,2.4,1590738993000,"2020-05-04"]
+ - [5,"ee",24,34,1.5,2.5,1590738994000,"2020-05-05"]
+ -
+ columns: ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"]
+ indexs: ["index1:c1:c7","index2:c3:c7"]
+ rows:
+ - [1,"aa",20,30,1.1,2.1,1606755660000,"2020-05-01"]
+ - [2,"aa",21,31,1.1,2.1,1606755720000,"2020-05-01"]
+ - [3,"cc",21,32,1.1,2.1,1606755780000,"2020-05-01"]
+ - [4,"dd",21,33,1.1,2.1,1606755840000,"2020-05-01"]
+ - [5,"ee",24,34,1.2,2.2,1606755660000,"2020-05-02"]
+ -
+ columns : ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"]
+ indexs: ["index1:c1:c7","index2:c3:c7"]
+ rows:
+ - [1,"aa",20,30,1.1,2.1,1606755600000,"2020-05-01"]
+ - [2,"bb",20,31,1.2,2.1,1606759200000,"2020-05-01"]
+ - [3,"bb",null,32,1.3,2.1,1606762800000,"2020-05-01"]
+ - [4,"bb",21,33,1.4,2.1,1606766400000,"2020-05-01"]
+ - [5,"ee",21,34,1.5,2.2,1606766401000,"2020-05-02"]
+ sql: |
+ select {0}.id,{0}.c1,{0}.c3,{1}.c3,{2}.c4
+ from {0}
+ last join {1} ORDER BY {1}.c7 on {0}.c1={1}.c1
+ last join {2} ORDER BY {2}.c7 on {1}.c3={2}.c3
+ ;
+ expect:
+ order: id
+ columns: ["id int","c1 string","c3 int","c3 int","c4 bigint"]
+ rows:
+ - [1,"aa",20,21,34]
+ - [2,"bb",21,null,32]
+ - [3,"cc",22,21,34]
+ - [4,"dd",23,21,34]
+ - [5,"ee",24,24,null]
+ - id: 11
+ desc: (t1 join t2) join t3
+ mode: offline-unsupport
+ inputs:
+ -
+ columns : ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"]
+ indexs: ["index1:c1:c7"]
+ rows:
+ - [1,"aa",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",21,31,1.2,2.2,1590738991000,"2020-05-02"]
+ - [3,"cc",22,32,1.3,2.3,1590738992000,"2020-05-03"]
+ - [4,"dd",23,33,1.4,2.4,1590738993000,"2020-05-04"]
+ - [5,"ee",24,34,1.5,2.5,1590738994000,"2020-05-05"]
+ -
+ columns: ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"]
+ indexs: ["index1:c1:c7","index2:c3:c7"]
+ rows:
+ - [1,"aa",20,30,1.1,2.1,1606755660000,"2020-05-01"]
+ - [2,"aa",21,31,1.1,2.1,1606755720000,"2020-05-01"]
+ - [3,"cc",21,32,1.1,2.1,1606755780000,"2020-05-01"]
+ - [4,"dd",21,33,1.1,2.1,1606755840000,"2020-05-01"]
+ - [5,"ee",24,34,1.2,2.2,1606755660000,"2020-05-02"]
+ -
+ columns : ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"]
+ indexs: ["index1:c1:c7","index2:c3:c7"]
+ rows:
+ - [1,"aa",20,30,1.1,2.1,1606755600000,"2020-05-01"]
+ - [2,"bb",20,31,1.2,2.1,1606759200000,"2020-05-01"]
+ - [3,"bb",null,32,1.3,2.1,1606762800000,"2020-05-01"]
+ - [4,"bb",21,33,1.4,2.1,1606766400000,"2020-05-01"]
+ - [5,"ee",21,34,1.5,2.2,1606766401000,"2020-05-02"]
+ sql: |
+ select
+ t1.id,t1.c1,t1.c3,{2}.c4
+ from (
+ select {0}.id,{0}.c1,{1}.c3
+ from {0}
+ last join {1} ORDER BY {1}.c7 on {0}.c1={1}.c1
+ ) as t1 last join {2} ORDER BY {2}.c7 on t1.c3={2}.c3
+ ;
+ expect:
+ order: id
+ columns: ["id int","c1 string","c3 int","c4 bigint"]
+ rows:
+ - [1,"aa",21,34]
+ - [2,"bb",null,32]
+ - [3,"cc",21,34]
+ - [4,"dd",21,34]
+ - [5,"ee",24,null]
+ - id: 11-2
+    desc: (t1 join t2) join t3 - column resolution error
+ inputs:
+ -
+ columns : ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"]
+ indexs: ["index1:c1:c7"]
+ rows:
+ - [1,"aa",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",21,31,1.2,2.2,1590738991000,"2020-05-02"]
+ - [3,"cc",22,32,1.3,2.3,1590738992000,"2020-05-03"]
+ - [4,"dd",23,33,1.4,2.4,1590738993000,"2020-05-04"]
+ - [5,"ee",24,34,1.5,2.5,1590738994000,"2020-05-05"]
+ -
+ columns: ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"]
+ indexs: ["index1:c1:c7","index2:c3:c7"]
+ rows:
+ - [1,"aa",20,30,1.1,2.1,1606755660000,"2020-05-01"]
+ - [2,"aa",21,31,1.1,2.1,1606755720000,"2020-05-01"]
+ - [3,"cc",21,32,1.1,2.1,1606755780000,"2020-05-01"]
+ - [4,"dd",21,33,1.1,2.1,1606755840000,"2020-05-01"]
+ - [5,"ee",24,34,1.2,2.2,1606755660000,"2020-05-02"]
+ -
+ columns : ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"]
+ indexs: ["index1:c1:c7","index2:c3:c7"]
+ rows:
+ - [1,"aa",20,30,1.1,2.1,1606755600000,"2020-05-01"]
+ - [2,"bb",20,31,1.2,2.1,1606759200000,"2020-05-01"]
+ - [3,"bb",null,32,1.3,2.1,1606762800000,"2020-05-01"]
+ - [4,"bb",21,33,1.4,2.1,1606766400000,"2020-05-01"]
+ - [5,"ee",21,34,1.5,2.2,1606766401000,"2020-05-02"]
+ sql: |
+ select
+ id,t1.c1,t1.c3,{2}.c4
+ from (
+ select id,{0}.c1,{1}.c3
+ from {0}
+ last join {1} ORDER BY {1}.c7 on {0}.c1={1}.c1
+ ) as t1 last join {2} ORDER BY {2}.c7 on t1.c3={2}.c3
+ ;
+ expect:
+ success: false
+ - id: 12
+ desc: t1 join (t2 join t3)
+ mode: rtidb-unsupport
+    tags: ["@zhaowei outside RTIDB's capability boundary: a join can only have one primary table","http://jira.4paradigm.com/browse/FEX-1014"]
+ inputs:
+ -
+ columns : ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"]
+ indexs: ["index1:c1:c7"]
+ rows:
+ - [1,"aa",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",21,31,1.2,2.2,1590738991000,"2020-05-02"]
+ - [3,"cc",22,32,1.3,2.3,1590738992000,"2020-05-03"]
+ - [4,"dd",23,33,1.4,2.4,1590738993000,"2020-05-04"]
+ - [5,"ee",24,34,1.5,2.5,1590738994000,"2020-05-05"]
+ -
+ columns: ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"]
+ indexs: ["index1:c1:c7","index2:c3:c7"]
+ rows:
+ - [1,"aa",20,30,1.1,2.1,1606755660000,"2020-05-01"]
+ - [2,"aa",21,31,1.1,2.1,1606755720000,"2020-05-01"]
+ - [3,"cc",21,32,1.1,2.1,1606755780000,"2020-05-01"]
+ - [4,"dd",21,33,1.1,2.1,1606755840000,"2020-05-01"]
+ - [5,"ee",24,34,1.2,2.2,1606755660000,"2020-05-02"]
+ -
+ columns : ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"]
+ indexs: ["index1:c1:c7","index2:c3:c7"]
+ rows:
+ - [1,"aa",20,30,1.1,2.1,1606755600000,"2020-05-01"]
+ - [2,"bb",20,31,1.2,2.1,1606759200000,"2020-05-01"]
+ - [3,"bb",null,32,1.3,2.1,1606762800000,"2020-05-01"]
+ - [4,"bb",21,33,1.4,2.1,1606766400000,"2020-05-01"]
+ - [5,"ee",21,34,1.5,2.2,1606766401000,"2020-05-02"]
+ sql: |
+ select
+ {0}.id,{0}.c1,t1.c3,t1.c4
+ from
+ {0} last join
+ (select {1}.c1,{1}.c3,{1}.c7,{2}.c4 from {1} last join {2} order by {2}.c7 on {1}.c3={2}.c3) as t1
+ order by t1.c7 on {0}.c1=t1.c1;
+ expect:
+ order: id
+ columns: ["id int","c1 string","c3 int","c4 bigint"]
+ rows:
+ - [1,"aa",21,34]
+ - [2,"bb",null,null]
+ - [3,"cc",21,34]
+ - [4,"dd",21,34]
+ - [5,"ee",24,null]
+ - id: 13-1
+    desc: t1 join (t2 join t3) - query outside rtidb's feature boundary, the join contains two primary tables
+ mode: request-unsupport
+ inputs:
+ -
+ columns : ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"]
+ indexs: ["index1:c1:c7"]
+ rows:
+ - [1,"aa",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",21,31,1.2,2.2,1590738991000,"2020-05-02"]
+ - [3,"cc",22,32,1.3,2.3,1590738992000,"2020-05-03"]
+ - [4,"dd",23,33,1.4,2.4,1590738993000,"2020-05-04"]
+ - [5,"ee",24,34,1.5,2.5,1590738994000,"2020-05-05"]
+ -
+ columns: ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"]
+ indexs: ["index1:c1:c7","index2:c3:c7"]
+ rows:
+ - [1,"aa",20,30,1.1,2.1,1606755660000,"2020-05-01"]
+ - [2,"aa",21,31,1.1,2.1,1606755720000,"2020-05-01"]
+ - [3,"cc",21,32,1.1,2.1,1606755780000,"2020-05-01"]
+ - [4,"dd",21,33,1.1,2.1,1606755840000,"2020-05-01"]
+ - [5,"ee",24,34,1.2,2.2,1606755660000,"2020-05-02"]
+ -
+ columns : ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"]
+ indexs: ["index1:c1:c7","index2:c3:c7"]
+ rows:
+ - [1,"aa",20,30,1.1,2.1,1606755600000,"2020-05-01"]
+ - [2,"bb",20,31,1.2,2.1,1606759200000,"2020-05-01"]
+ - [3,"bb",null,32,1.3,2.1,1606762800000,"2020-05-01"]
+ - [4,"bb",21,33,1.4,2.1,1606766400000,"2020-05-01"]
+ - [5,"ee",21,34,1.5,2.2,1606766401000,"2020-05-02"]
+ sql: |
+ select
+ {0}.id,{0}.c1,t1.c3,t1.c4
+ from
+ {0} last join
+ (select {1}.c1,{1}.c3,{2}.c7,{2}.c4 from {1} last join {2} order by {2}.c7 on {1}.c3={2}.c3) as t1
+ order by t1.c7 on {0}.c1=t1.c1;
+ expect:
+ success: true
+ order: id
+ columns: [ "id int", "c1 string", "c3 int", "c4 bigint"]
+ rows:
+ - [ 1, aa, 21, 34 ]
+ - [ 2, bb, NULL, NULL ]
+ - [ 3, cc, 21, 34 ]
+ - [ 4, dd, 21, 34 ]
+ - [ 5, ee, 24, NULL ]
+
+ - id: 13-2
+    desc: t1 join (t2 join t3) - key and ts do not come from the same primary table
+ inputs:
+ -
+ columns : ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"]
+ indexs: ["index1:c1:c7"]
+ rows:
+ - [1,"aa",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",21,31,1.2,2.2,1590738991000,"2020-05-02"]
+ - [3,"cc",22,32,1.3,2.3,1590738992000,"2020-05-03"]
+ - [4,"dd",23,33,1.4,2.4,1590738993000,"2020-05-04"]
+ - [5,"ee",24,34,1.5,2.5,1590738994000,"2020-05-05"]
+ -
+ columns: ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"]
+ indexs: ["index1:c1:c7","index2:c3:c7"]
+ rows:
+ - [1,"aa",20,30,1.1,2.1,1606755660000,"2020-05-01"]
+ - [2,"aa",21,31,1.1,2.1,1606755720000,"2020-05-01"]
+ - [3,"cc",21,32,1.1,2.1,1606755780000,"2020-05-01"]
+ - [4,"dd",21,33,1.1,2.1,1606755840000,"2020-05-01"]
+ - [5,"ee",24,34,1.2,2.2,1606755660000,"2020-05-02"]
+ -
+ columns : ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"]
+ indexs: ["index1:c1:c7","index2:c3:c7"]
+ rows:
+ - [1,"aa",20,30,1.1,2.1,1606755600000,"2020-05-01"]
+ - [2,"bb",20,31,1.2,2.1,1606759200000,"2020-05-01"]
+ - [3,"bb",null,32,1.3,2.1,1606762800000,"2020-05-01"]
+ - [4,"bb",21,33,1.4,2.1,1606766400000,"2020-05-01"]
+ - [5,"ee",21,34,1.5,2.2,1606766401000,"2020-05-02"]
+ sql: |
+ select
+ {0}.id,{0}.c1,t1.c3,t1.c4
+ from
+ {0} last join
+ (select {1}.c1,{1}.c3,{2}.c7,{2}.c4 from {1} last join {2} order by {2}.c7 on {1}.c3={2}.c3) as t1
+ order by t1.c7 on {0}.c1=t1.c1;
+ expect:
+ success: false
+ - id: 14
+    desc: lastjoin - duplicate column names
+ inputs:
+ -
+ columns : ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"]
+ indexs: ["index1:c1:c7"]
+ rows:
+ - [1,"aa",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"aa",21,31,1.2,2.2,1590738990001,"2020-05-02"]
+ - [3,"aa",22,32,1.3,2.3,1590738990002,"2020-05-03"]
+ - [4,"bb",23,33,1.4,2.4,1590738990003,"2020-05-04"]
+ - [5,"bb",24,34,1.5,2.5,1590738990004,"2020-05-05"]
+ -
+ columns: ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"]
+ indexs: ["index1:c1:c7"]
+ rows:
+ - [1,"aa",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"aa",20,31,1.1,2.1,1590738990001,"2020-05-01"]
+ - [3,"aa",20,32,1.1,2.1,1590738990002,"2020-05-01"]
+ - [4,"bb",20,33,1.1,2.1,1590738990003,"2020-05-01"]
+ - [5,"bb",21,34,1.2,2.2,1590738990004,"2020-05-02"]
+ sql: |
+ select {0}.id,{0}.c1,c3,c3,{1}.c4
+ from {0}
+ last join {1} ORDER BY {1}.c7 on {0}.c1={1}.c1
+ ;
+ expect:
+ success: false
+ - id: 15
+    desc: lastjoin - duplicate column names, disambiguated by table name - online scenario
+ mode: offline-unsupport
+ inputs:
+ -
+ columns : ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"]
+ indexs: ["index1:c1:c7"]
+ rows:
+ - [1,"aa",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"aa",21,31,1.2,2.2,1590738990001,"2020-05-02"]
+ - [3,"aa",22,32,1.3,2.3,1590738990002,"2020-05-03"]
+ - [4,"bb",23,33,1.4,2.4,1590738990003,"2020-05-04"]
+ - [5,"bb",24,34,1.5,2.5,1590738990004,"2020-05-05"]
+ -
+ columns: ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"]
+ indexs: ["index1:c1:c7"]
+ rows:
+ - [1,"aa",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"aa",20,31,1.1,2.1,1590738990001,"2020-05-01"]
+ - [3,"aa",20,32,1.1,2.1,1590738990002,"2020-05-01"]
+ - [4,"bb",20,33,1.1,2.1,1590738990003,"2020-05-01"]
+ - [5,"bb",21,34,1.2,2.2,1590738990004,"2020-05-02"]
+ sql: |
+ select {0}.id,{0}.c1,{0}.c3,{1}.c3,{1}.c4
+ from {0}
+ last join {1} ORDER BY {1}.c7 on {0}.c1={1}.c1
+ ;
+ expect:
+ order: id
+ columns: ["id int","c1 string","c3 int","c3 int","c4 bigint"]
+ rows:
+ - [1,"aa",20,20,32]
+ - [2,"aa",21,20,32]
+ - [3,"aa",22,20,32]
+ - [4,"bb",23,21,34]
+ - [5,"bb",24,21,34]
+
+ - id: 16
+    desc: lastjoin of two subqueries, join condition is not an index of the primary table
+ inputs:
+ -
+ columns : ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"]
+ indexs: ["index1:c1:c7"]
+ rows:
+ - [1,"aa",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ -
+ columns: ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"]
+ indexs: ["index1:c1:c7"]
+ rows:
+ - [1,"aa",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ sql: |
+ select t1.id,t2.c8,t2.c3,t1.c4
+ from
+ (select id,c1,c3,c4,c7,c8 from {0}) as t2
+ last join
+ (select c1,c4,c7,c8 from {1}) as t1
+ ORDER BY t1.c7 on t2.c8=t1.c8
+ ;
+ expect:
+ success: false
+ - id: 17-1
+    desc: lastjoin of two subqueries, order column is not the primary table's ts - not supported by rtidb
+ mode: request-unsupport
+ inputs:
+ -
+ columns : ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"]
+ indexs: ["index1:c1:c7"]
+ rows:
+ - [1,"aa",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ -
+ columns: ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"]
+ indexs: ["index1:c1:c7"]
+ rows:
+ - [1,"aa",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ sql: |
+ select id,t2.c8,t2.c3,t1.c4
+ from
+ (select id,c1,c3,c4,c7,c8 from {0}) as t2
+ last join
+ (select c1,c4,c7,c8 from {1}) as t1
+ ORDER BY t1.c4 on t2.c1=t1.c1
+ ;
+ expect:
+ success: true
+ columns: [ "id int", "c8 date", "c3 int", "c4 bigint" ]
+ rows:
+ - [ 1, '2020-05-01', 20, 30 ]
+
+ - id: 17-2
+    desc: lastjoin of two subqueries, order column is not the primary table's ts - supported offline
+ mode: rtidb-unsupport
+ inputs:
+ -
+ columns : ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"]
+ indexs: ["index1:c1:c7"]
+ rows:
+ - [1,"aa",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ -
+ columns: ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"]
+ indexs: ["index1:c1:c7"]
+ rows:
+ - [1,"aa",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ sql: |
+ select id,t2.c8,t2.c3,t1.c4
+ from
+ (select id,c1,c3,c4,c7,c8 from {0}) as t2
+ last join
+ (select c1,c4,c7,c8 from {1}) as t1
+ ORDER BY t1.c4 on t2.c1=t1.c1
+ ;
+ expect:
+ success: true
+ - id: 18-1
+    desc: lastjoin of two subqueries, join condition is not an index of the primary table - without order by - outside rtidb boundary
+ mode: request-unsupport
+ inputs:
+ -
+ columns : ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"]
+ indexs: ["index1:c1:c7"]
+ rows:
+ - [1,"aa",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ -
+ columns: ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"]
+ indexs: ["index1:c1:c7"]
+ rows:
+ - [1,"aa",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ sql: |
+ select id,t2.c8,t2.c3,t1.c4
+ from
+ (select id,c1,c3,c4,c7,c8 from {0}) as t2
+ last join
+ (select c1,c4,c7,c8 from {1}) as t1
+ on t2.c8=t1.c8
+ ;
+ expect:
+ success: true
+ columns: [ "id int", "c8 date", "c3 int", "c4 bigint" ]
+ rows:
+ - [ 1, '2020-05-01', 20, 30 ]
+
+ - id: 18-2
+    desc: lastjoin of two subqueries, join condition is not an index of the primary table - without order by - supported offline
+ mode: request-unsupport
+ inputs:
+ -
+ columns : ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"]
+ indexs: ["index1:c1:c7"]
+ rows:
+ - [1,"aa",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ -
+ columns: ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"]
+ indexs: ["index1:c1:c7"]
+ rows:
+ - [1,"aa",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ sql: |
+ select id,t2.c8,t2.c3,t1.c4
+ from
+ (select id,c1,c3,c4,c7,c8 from {0}) as t2
+ last join
+ (select c1,c4,c7,c8 from {1}) as t1
+ on t2.c8=t1.c8
+ ;
+ expect:
+ success: true
+ columns: [ "id int", "c8 date", "c3 int", "c4 bigint" ]
+ rows:
+ - [ 1, '2020-05-01', 20, 30 ]
+
+ - id: 19-1
+    desc: lastjoin of two subqueries - subqueries contain windows - no index used - without order by
+ mode: request-unsupport
+ inputs:
+ -
+ columns : ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"]
+ indexs: ["index1:c1:c7"]
+ rows:
+ - [1,"aa",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"aa",21,31,1.2,2.2,1590738990001,"2020-05-01"]
+ - [3,"aa",22,32,1.3,2.3,1590738990002,"2020-05-01"]
+ - [4,"bb",23,33,1.4,2.4,1590738990003,"2020-05-02"]
+ - [5,"bb",24,34,1.5,2.5,1590738990004,"2020-05-02"]
+ sql: |
+ select id,t2.c1,t2.c3,t1.c4, t2.w2_c3_sum, t1.w3_c4_sum
+ from (select id,c1,c3,c4,c7,c8,sum({0}.c3) OVER w2 as w2_c3_sum from {0} WINDOW w2 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS BETWEEN 2 PRECEDING AND CURRENT ROW)) as t2
+ last join (select c1,c4,c7,c8,sum({0}.c4) OVER w3 as w3_c4_sum from {0} WINDOW w3 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS_RANGE BETWEEN 1 PRECEDING AND CURRENT ROW)) as t1
+ on t2.c7=t1.c7
+ ;
+ expect:
+ order: id
+ columns: ["id int","c1 string","c3 int","c4 bigint", "w2_c3_sum int", "w3_c4_sum bigint"]
+ rows:
+ - [1,"aa",20,30, 20, 30]
+ - [2,"aa",21,31, 41, 61]
+ - [3,"aa",22,32, 63, 63]
+ - [4,"bb",23,33, 23, 33]
+ - [5,"bb",24,34, 47, 67]
+ - id: 20
+    desc: lastjoin of two subqueries - subqueries contain windows - no index used - with order by
+ mode: request-unsupport
+ inputs:
+ -
+ columns : ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"]
+ indexs: ["index1:c1:c7"]
+ rows:
+ - [1,"aa",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ -
+ columns: ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"]
+ indexs: ["index1:c1:c7"]
+ rows:
+ - [1,"aa",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ sql: |
+ select id,t2.c1,t2.c3,t1.c4
+ from (select id,c1,c3,c4,c7,c8,sum({0}.c3) OVER w2 as w2_c3_sum from {0} WINDOW w2 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS BETWEEN 1 PRECEDING AND CURRENT ROW)) as t2
+ last join (select c1,c4,c7,c8,sum({1}.c4) OVER w3 as w3_c4_sum from {1} WINDOW w3 AS (PARTITION BY {1}.c1 ORDER BY {1}.c7 ROWS_RANGE BETWEEN 1 PRECEDING AND CURRENT ROW)) as t1
+ ORDER BY t1.c7 on t2.c8=t1.c8
+ ;
+ expect:
+ success: true
+ columns: [ "id int", "c1 string", "c3 int", "c4 bigint" ]
+ rows:
+ - [ 1, aa, 20, 30 ]
+
+ - id: 21
+    desc: lastjoin with duplicate column names - window does not qualify the table name
+ inputs:
+ -
+ columns : ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"]
+ indexs: ["index1:c1:c7"]
+ rows:
+ - [1,"aa",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"aa",21,31,1.2,2.2,1590738990001,"2020-05-02"]
+ - [3,"aa",22,32,1.3,2.3,1590738990002,"2020-05-03"]
+ - [4,"bb",23,33,1.4,2.4,1590738990003,"2020-05-04"]
+ - [5,"bb",24,34,1.5,2.5,1590738990004,"2020-05-05"]
+ -
+ columns: ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"]
+ indexs: ["index1:c1:c7"]
+ rows:
+ - [1,"aa",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"aa",20,31,1.1,2.1,1590738990001,"2020-05-01"]
+ - [3,"bb",20,32,1.1,2.1,1590738990002,"2020-05-01"]
+ - [4,"bb",20,33,1.1,2.1,1590738990003,"2020-05-01"]
+ - [5,"bb",21,34,1.2,2.2,1590738990004,"2020-05-02"]
+ dataProvider:
+ - ["ROWS","ROWS_RANGE"]
+ sql: |
+ select {0}.id,{0}.c1,{0}.c3,{1}.c4,
+ sum({1}.c4) OVER w1 as w1_c4_sum
+ from {0}
+ last join {1} ORDER BY {1}.c7 on {0}.c1={1}.c1
+ WINDOW
+ w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 d[0] BETWEEN 1 PRECEDING AND CURRENT ROW)
+ ;
+ expect:
+ order: id
+ columns: ["id int","c1 string","c3 int","c4 bigint","w1_c4_sum bigint"]
+ rows:
+ - [1,"aa",20,31,31]
+ - [2,"aa",21,31,62]
+ - [3,"aa",22,31,62]
+ - [4,"bb",23,34,34]
+ - [5,"bb",24,34,68]
+ - id: 22
+    desc: group by after lastjoin
+ mode: request-unsupport, cluster-unsupport
+ inputs:
+ - columns: [ "c1 string","c2 int","c3 bigint","c4 timestamp" ]
+ indexs: [ "index1:c1:c4" ]
+ rows:
+ - [ "aa",2,3,1590738989000 ]
+ - [ "aa",21,31,1590738990000 ]
+ - [ "cc",41,51,1590738991000 ]
+ - columns: [ "c1 string","c2 int","c3 bigint","c4 timestamp" ]
+ indexs: [ "index1:c1:c3" ]
+ rows:
+ - [ "aa",2,13,1590738989000 ]
+ - [ "bb",21,131,1590738990000 ]
+ - [ "cc",41,151,1590738992000 ]
+ sql: select {0}.c1,sum({1}.c3) as v1 from {0} last join {1} ORDER BY {1}.c3 on {0}.c1={1}.c1 group by {0}.c1;
+ expect:
+ order: c1
+ columns: [ "c1 string","v1 bigint" ]
+ rows:
+ - [ "aa",26 ]
+ - [ "cc",151 ]
+ - id: 23
+    desc: group by after lastjoin, left key matches the left table index
+ mode: request-unsupport, cluster-unsupport
+ inputs:
+ - columns: [ "c1 string","c2 int","c3 bigint","c4 timestamp" ]
+ indexs: [ "index1:c2:c4" ]
+ rows:
+ - [ "aa",2,3,1590738989000 ]
+ - [ "aa",21,31,1590738990000 ]
+ - [ "cc",41,51,1590738991000 ]
+ - columns: [ "c1 string","c2 int","c3 bigint","c4 timestamp" ]
+ indexs: [ "index1:c1:c3" ]
+ rows:
+ - [ "aa",2,13,1590738989000 ]
+ - [ "bb",21,131,1590738990000 ]
+ - [ "cc",41,151,1590738992000 ]
+ sql: select {0}.c1,sum({1}.c3) as v1 from {0} last join {1} ORDER BY {1}.c3 on {0}.c1={1}.c1 group by {0}.c1;
+ expect:
+ order: c1
+ columns: [ "c1 string","v1 bigint" ]
+ rows:
+ - [ "aa",26 ]
+ - [ "cc",151 ]
+ - id: 24
+    desc: group by after lastjoin with left key and index key
+ mode: request-unsupport, cluster-unsupport
+ inputs:
+ - columns: [ "c1 string","c2 int","c3 bigint","c4 timestamp" ]
+ indexs: [ "index1:c1:c4" ]
+ rows:
+ - [ "aa",2,3,1590738989000 ]
+ - [ "aa",21,31,1590738990000 ]
+ - [ "cc",41,51,1590738991000 ]
+ - columns: [ "c1 string","c2 int","c3 bigint","c4 timestamp" ]
+ indexs: [ "index1:c1:c3" ]
+ rows:
+ - [ "aa",2,13,1590738989000 ]
+ - [ "bb",21,131,1590738990000 ]
+ - [ "cc",41,151,1590738992000 ]
+ sql: |
+ select {0}.c1,sum({1}.c3) as v1 from {0} last join {1} ORDER BY {1}.c3 on {0}.c1={1}.c1 and {0}.c2 =
+ {1}.c2 group by {0}.c1;
+ expect:
+ order: c1
+ columns: [ "c1 string","v1 bigint" ]
+ rows:
+ - [ "aa",13 ]
+ - [ "cc",151 ]
diff --git a/cases/integration_test/join/test_lastjoin_simple.yaml b/cases/integration_test/join/test_lastjoin_simple.yaml
new file mode 100644
index 00000000000..9bf50f39cf8
--- /dev/null
+++ b/cases/integration_test/join/test_lastjoin_simple.yaml
@@ -0,0 +1,1065 @@
+# Copyright 2021 4Paradigm
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+db: test_zw
+debugs: []
+version: 0.5.0
+cases:
+ - id: 1
+    desc: normal join
+ inputs:
+ - columns: [ "c1 string","c2 int","c3 bigint","c4 timestamp" ]
+ indexs: [ "index1:c1:c4" ]
+ rows:
+ - [ "aa",2,3,1590738989000 ]
+ - [ "bb",21,31,1590738990000 ]
+ - [ "cc",41,51,1590738991000 ]
+ - columns: [ "c1 string","c2 int","c3 bigint","c4 timestamp" ]
+ indexs: [ "index1:c1:c3" ]
+ rows:
+ - [ "aa",2,13,1590738989000 ]
+ - [ "bb",21,131,1590738990000 ]
+ - [ "cc",41,151,1590738992000 ]
+ sql: select {0}.c1,{0}.c2,{1}.c3,{1}.c4 from {0} last join {1} ORDER BY {1}.c3 on {0}.c1={1}.c1;
+ expect:
+ order: c1
+ columns: [ "c1 string","c2 int","c3 bigint","c4 timestamp" ]
+ rows:
+ - [ "aa",2,13,1590738989000 ]
+ - [ "bb",21,131,1590738990000 ]
+ - [ "cc",41,151,1590738992000 ]
+ - id: 2
+    desc: no match in the right table
+ inputs:
+ - columns: [ "c1 string","c2 int","c3 bigint","c4 timestamp" ]
+ indexs: [ "index1:c1:c4" ]
+ rows:
+ - [ "aa",2,3,1590738989000 ]
+ - [ "bb",21,31,1590738990000 ]
+ - [ "cc",41,51,1590738991000 ]
+ - columns: [ "c1 string","c2 int","c3 bigint","c4 timestamp" ]
+ indexs: [ "index1:c1:c3" ]
+ rows:
+ - [ "aa",2,13,1590738989000 ]
+ - [ "bb",21,131,1590738990000 ]
+ - [ "dd",41,151,1590738991000 ]
+ sql: select {0}.c1,{0}.c2,{1}.c3,{1}.c4 from {0} last join {1} ORDER BY {1}.c3 on {0}.c1={1}.c1;
+ expect:
+ columns: [ "c1 string","c2 int","c3 bigint","c4 timestamp" ]
+ order: c1
+ rows:
+ - [ "aa",2,13,1590738989000 ]
+ - [ "bb",21,131,1590738990000 ]
+ - [ "cc",41,null,null ]
+ - id: 3
+    desc: right table matches multiple rows - bigint
+ inputs:
+ - columns: [ "c1 string","c2 int","c3 bigint","c4 timestamp" ]
+ indexs: [ "index1:c1:c4" ]
+ rows:
+ - [ "aa",2,3,1590738989000 ]
+ - [ "bb",21,31,1590738990000 ]
+ - columns: [ "c1 string","c2 int","c3 bigint","c4 timestamp" ]
+ indexs: [ "index1:c1:c3" ]
+ rows:
+ - [ "aa",2,13,1590738989000 ]
+ - [ "bb",21,131,1590738990000 ]
+ - [ "bb",41,121,1590738991000 ]
+ sql: select {0}.c1,{0}.c2,{1}.c3,{1}.c4 from {0} last join {1} ORDER BY {1}.c3 on {0}.c1={1}.c1;
+ expect:
+ columns: [ "c1 string","c2 int","c3 bigint","c4 timestamp" ]
+ order: c1
+ rows:
+ - [ "aa",2,13,1590738989000 ]
+ - [ "bb",21,131,1590738990000 ]
+ - id: 4-1
+    desc: Last Join without order by, join condition hits the index
+ inputs:
+ - columns: [ "c1 string","c2 int","c3 bigint","c4 timestamp" ]
+ indexs: [ "index1:c1" ]
+ rows:
+ - [ "aa",2,3,1590738989000 ]
+ - [ "bb",21,31,1590738990000 ]
+ - [ "dd",41,51,1590738990000 ]
+ - columns: [ "c1 string","c2 int","c3 bigint","c4 timestamp" ]
+ indexs: [ "index1:c1" ]
+ rows:
+ - [ "aa",2,13,1590738989000 ]
+ - [ "bb",21,131,1590738990000 ]
+ - [ "cc",41,121,1590738991000 ]
+ sql: select {0}.c1,{0}.c2,{1}.c3,{1}.c4 from {0} last join {1} on {0}.c1={1}.c1;
+ expect:
+ columns: [ "c1 string","c2 int","c3 bigint","c4 timestamp" ]
+ order: c1
+ rows:
+ - [ "aa",2,13,1590738989000 ]
+ - [ "bb",21,131,1590738990000 ]
+ - [ "dd", 41, NULL, NULL ]
+ - id: 4-2
+    desc: Last Join without order by, join condition misses the index - compilation fails in a performance-sensitive environment
+ mode: non-performance-sensitive-unsupport, offline-unsupport
+ inputs:
+ - columns: [ "c1 string","c2 int","c3 bigint","c4 timestamp" ]
+ indexs: [ "index1:c1:c4" ]
+ rows:
+ - [ "aa",2,3,1590738989000 ]
+ - [ "bb",21,31,1590738990000 ]
+ - columns: [ "c1 string","c2 int","c3 bigint","c4 timestamp" ]
+ indexs: [ "index1:c2:c4" ]
+ rows:
+ - [ "aa",2,13,1590738989000 ]
+ - [ "bb",21,131,1590738990000 ]
+ - [ "cc",41,121,1590738991000 ]
+ sql: select {0}.c1,{0}.c2,{1}.c3,{1}.c4 from {0} last join {1} on {0}.c1={1}.c1;
+ expect:
+ success: false
+ - id: 4-2
+    desc: Last Join without order by, part of the join condition hits the index
+ inputs:
+ - columns: [ "c1 string","c2 int","c3 bigint","c4 timestamp" ]
+ indexs: [ "index1:c1:c3" ]
+ rows:
+ - [ "aa",2,3,1590738989000 ]
+ - [ "aa",20,30,1590738991000 ]
+ - [ "bb",21,31,1590738990000 ]
+ - [ "dd",41,51,1590738990000 ]
+ - columns: [ "c1 string","c2 int","c3 bigint","c4 timestamp" ]
+ indexs: [ "index1:c1:c3" ]
+ rows:
+ - [ "aa",2,13,1590738989000 ]
+ - [ "aa",3,14,1590738990000 ]
+ - [ "aa",4,15,1590738991000 ]
+ - [ "bb",21,131,1590738990000 ]
+ - [ "cc",41,121,1590738991000 ]
+ sql: select {0}.c1,{0}.c2,{1}.c3,{1}.c4 from {0} last join {1} on {0}.c1={1}.c1 and {0}.c4={1}.c4;
+ expect:
+ columns: [ "c1 string","c2 int","c3 bigint","c4 timestamp" ]
+ order: c2
+ rows:
+ - [ "aa",2,13,1590738989000 ]
+ - [ "aa",20,15,1590738991000 ]
+ - [ "bb",21,131,1590738990000 ]
+ - [ "dd", 41, NULL, NULL ]
+ - id: 4-3
+    desc: Last Join without order by, join condition hits part of a composite index (prefix index), fails under performance-sensitive
+ mode: non-performance-sensitive-unsupport, offline-unsupport
+ inputs:
+ - columns: [ "c1 string","c2 int","c3 bigint","c4 timestamp" ]
+ indexs: [ "index1:c1:c4" ]
+ rows:
+ - [ "aa",2,3,1590738989000 ]
+ - [ "aa",20,30,1590738991000 ]
+ - [ "bb",21,31,1590738990000 ]
+ - [ "dd",41,51,1590738990000 ]
+ - columns: [ "c1 string","c2 int","c3 bigint","c4 timestamp" ]
+ indexs: [ "index1:c1|c2:c4" ]
+ rows:
+ - [ "aa",2,13,1590738989000 ]
+ - [ "aa",3,14,1590738990000 ]
+ - [ "aa",4,15,1590738991000 ]
+ - [ "bb",21,131,1590738990000 ]
+ - [ "cc",41,121,1590738991000 ]
+ sql: select {0}.c1,{0}.c2,{1}.c3,{1}.c4 from {0} last join {1} on {0}.c1={1}.c1 and {0}.c4={1}.c4;
+ expect:
+ success: false
+ - id: 4-4
+    desc: Last Join without order by, join condition hits part of a composite index (suffix index)
+ mode: non-performance-sensitive-unsupport, offline-unsupport
+ inputs:
+ - columns: [ "c1 string","c2 int","c3 bigint","c4 timestamp" ]
+ indexs: [ "index1:c1:c4" ]
+ rows:
+ - [ "aa",2,3,1590738989000 ]
+ - [ "aa",20,30,1590738991000 ]
+ - [ "bb",21,31,1590738990000 ]
+ - [ "dd",41,51,1590738990000 ]
+ - columns: [ "c1 string","c2 int","c3 bigint","c4 timestamp" ]
+ indexs: [ "index1:c2|c1:c4" ]
+ rows:
+ - [ "aa",2,13,1590738989000 ]
+ - [ "aa",3,14,1590738990000 ]
+ - [ "aa",4,15,1590738991000 ]
+ - [ "bb",21,131,1590738990000 ]
+ - [ "cc",41,121,1590738991000 ]
+ sql: select {0}.c1,{0}.c2,{1}.c3,{1}.c4 from {0} last join {1} on {0}.c1={1}.c1 and {0}.c4={1}.c4;
+ expect:
+ success: false
+ - id: 4-5
+    desc: Last Join without order by, join condition hits the index, multiple matches in the right table
+ tags: [ "注意offline随机拼接最后一条,改变结果顺序可能导致Spark结果不符合预期" ]
+ inputs:
+ - columns: [ "c1 string","c2 int","c3 bigint","c4 timestamp" ]
+ indexs: [ "index1:c1:c4" ]
+ rows:
+ - [ "aa",2,3,1590738989000 ]
+ - [ "bb",21,31,1590738990000 ]
+ - [ "dd",41,51,1590738990000 ]
+ - columns: [ "c1 string","c2 int","c3 bigint","c4 timestamp" ]
+ indexs: [ "index1:c1:c3" ]
+ rows:
+ - [ "aa",2,13,1590738989000 ]
+ - [ "bb",21,131,1590738990000 ]
+ - [ "bb",51,130,1590738992000 ]
+ - [ "bb",31,132,1590738989000 ]
+ - [ "cc",41,121,1590738991000 ]
+ sql: select {0}.c1,{0}.c2,{1}.c3,{1}.c4 from {0} last join {1} on {0}.c1={1}.c1;
+ expect:
+ columns: [ "c1 string","c2 int","c3 bigint","c4 timestamp" ]
+ order: c1
+ rows:
+ - [ "aa",2,13,1590738989000 ]
+ - [ "bb",21,132,1590738989000 ]
+ - [ "dd", 41, NULL, NULL ]
+ - id: 4-6
+    desc: Last Join without order by, join condition misses the index - supported offline
+ mode: rtidb-unsupport,cli-unsupport
+ inputs:
+ - columns: [ "c1 string","c2 int","c3 bigint","c4 timestamp" ]
+ indexs: [ "index1:c1:c4" ]
+ rows:
+ - [ "aa",2,3,1590738989000 ]
+ - [ "bb",21,31,1590738990000 ]
+ - columns: [ "c1 string","c2 int","c3 bigint","c4 timestamp" ]
+ indexs: [ "index1:c2:c4" ]
+ rows:
+ - [ "aa",2,13,1590738989000 ]
+ - [ "bb",21,131,1590738990000 ]
+ - [ "cc",41,121,1590738991000 ]
+ sql: select {0}.c1,{0}.c2,{1}.c3,{1}.c4 from {0} last join {1} on {0}.c1={1}.c1;
+ expect:
+ columns: [ "c1 string", "c2 int", "c3 bigint", "c4 timestamp" ]
+ rows:
+ - [ "aa", 2, 13, 1590738989000 ]
+ - [ "bb", 21, 131, 1590738990000 ]
+ - id: 4-7
+    desc: Last Join without order by, part of the join condition hits the index (constant condition = right index key)
+ inputs:
+ - columns: [ "c1 string","c2 int","c3 bigint","c4 timestamp" ]
+ indexs: [ "index1:c1:c3" ]
+ rows:
+ - [ "aa",2,3,1590738989000 ]
+ - [ "aa",20,30,1590738991000 ]
+ - [ "bb",21,31,1590738990000 ]
+ - [ "dd",41,51,1590738990000 ]
+ - columns: [ "c1 string","c2 int","c3 bigint","c4 timestamp" ]
+ indexs: [ "index1:c1:c3" ]
+ rows:
+ - [ "aa",2,13,1590738989000 ]
+ - [ "aa",3,14,1590738990000 ]
+ - [ "aa",4,15,1590738991000 ]
+ - [ "bb",21,131,1590738990000 ]
+ - [ "cc",41,121,1590738991000 ]
+ sql: select {0}.c1,{0}.c2, {1}.c1 as t2_c1, {1}.c3,{1}.c4 from {0} last join {1} on {1}.c1="aa" and {0}.c4={1}.c4;
+ expect:
+ columns: [ "c1 string","c2 int", "t2_c1 string", "c3 bigint","c4 timestamp" ]
+ order: c2
+ rows:
+ - [ "aa",2, "aa", 13,1590738989000 ]
+ - [ "aa",20,"aa", 15,1590738991000 ]
+ - [ "bb",21, "aa", 14,1590738990000 ]
+ - [ "dd", 41, "aa", 14, 1590738990000 ]
+ - id: 5
+ desc: orderby-timestamp
+ inputs:
+ - columns: [ "c1 string","c2 int","c3 bigint","c4 timestamp" ]
+ indexs: [ "index1:c1:c4" ]
+ rows:
+ - [ "aa",2,3,1590738989000 ]
+ - [ "bb",21,31,1590738990000 ]
+ - columns: [ "c1 string","c2 int","c3 bigint","c4 timestamp" ]
+ indexs: [ "index1:c1:c4" ]
+ rows:
+ - [ "aa",2,13,1590738989000 ]
+ - [ "bb",21,131,1590738990000 ]
+ - [ "bb",41,121,1590738992000 ]
+ - [ "bb",41,141,1590738991000 ]
+ sql: select {0}.c1,{0}.c2,{1}.c3,{1}.c4 from {0} last join {1} ORDER BY {1}.c4 on {0}.c1={1}.c1;
+ expect:
+ columns: [ "c1 string","c2 int","c3 bigint","c4 timestamp" ]
+ order: c1
+ rows:
+ - [ "aa",2,13,1590738989000 ]
+ - [ "bb",21,121,1590738992000 ]
+ - id: 6
+    desc: orderby-int without index optimization, request-unsupport
+ mode: request-unsupport
+ inputs:
+ - columns: [ "c1 string","c2 int","c3 bigint","c4 timestamp" ]
+ indexs: [ "index1:c1:c4" ]
+ rows:
+ - [ "aa",2,3,1590738989000 ]
+ - [ "bb",21,31,1590738990000 ]
+ - columns: [ "c1 string","c2 int","c3 bigint","c4 timestamp" ]
+ indexs: [ "index1:c1:c4" ]
+ rows:
+ - [ "aa",2,13,1590738989000 ]
+ - [ "bb",21,131,1590738990000 ]
+ - [ "bb",41,121,1590738991000 ]
+ - [ "bb",31,141,1590738992000 ]
+ sql: select {0}.c1,{0}.c2,{1}.c3,{1}.c4 from {0} last join {1} ORDER BY {1}.c2 on {0}.c1={1}.c1;
+ expect:
+ success: true
+ columns: [ "c1 string", "c2 int", "c3 bigint", "c4 timestamp" ]
+ rows:
+ - [ aa, 2, 13, 1590738989000 ]
+ - [ bb, 21, 121, 1590738991000 ]
+
+ - id: 6
+    desc: orderby-int - supported offline
+ mode: rtidb-unsupport,cli-unsupport
+ inputs:
+ - columns: [ "c1 string","c2 int","c3 bigint","c4 timestamp" ]
+ indexs: [ "index1:c1:c4" ]
+ rows:
+ - [ "aa",2,3,1590738989000 ]
+ - [ "bb",21,31,1590738990000 ]
+ - columns: [ "c1 string","c2 int","c3 bigint","c4 timestamp" ]
+ indexs: [ "index1:c1:c4" ]
+ rows:
+ - [ "aa",2,13,1590738989000 ]
+ - [ "bb",21,131,1590738990000 ]
+ - [ "bb",41,121,1590738991000 ]
+ - [ "bb",31,141,1590738992000 ]
+ sql: select {0}.c1,{0}.c2,{1}.c3,{1}.c4 from {0} last join {1} ORDER BY {1}.c2 on {0}.c1={1}.c1;
+ expect:
+ columns: [ "c1 string","c2 int","c3 bigint","c4 timestamp" ]
+ order: c1
+ rows:
+ - [ "aa",2,13,1590738989000 ]
+ - [ "bb",21,121,1590738991000 ]
+ - id: 7
+ desc: orderby-float
+ inputs:
+ - columns: [ "c1 string","c2 int","c3 bigint","c4 timestamp" ]
+ indexs: [ "index1:c1:c4" ]
+ rows:
+ - [ "aa",2,3,1590738989000 ]
+ - [ "bb",21,31,1590738990000 ]
+ - columns: [ "c1 string","c2 float","c3 double","c4 timestamp","c5 date","c6 string" ]
+ indexs: [ "index1:c1:c4" ]
+ rows:
+ - [ "aa",2.1,3.1,1590738989000,"2020-05-01","aa" ]
+ - [ "bb",2.2,3.3,1590738990000,"2020-05-03","ab" ]
+ - [ "bb",2.5,3.6,1590738991000,"2020-05-04","bc" ]
+ - [ "bb",2.4,3.1,1590738992000,"2020-05-02","bb" ]
+ sql: select {0}.c1,{0}.c2,{1}.c3,{1}.c4 from {0} last join {1} ORDER BY {1}.c2 on {0}.c1={1}.c1;
+ expect:
+ success: false
+ - id: 8
+ desc: orderby-double
+ inputs:
+ - columns: [ "c1 string","c2 int","c3 bigint","c4 timestamp" ]
+ indexs: [ "index1:c1:c4" ]
+ rows:
+ - [ "aa",2,3,1590738989000 ]
+ - [ "bb",21,31,1590738990000 ]
+ - columns: [ "c1 string","c2 float","c3 double","c4 timestamp","c5 date","c6 string" ]
+ indexs: [ "index1:c1:c4" ]
+ rows:
+ - [ "aa",2.1,3.1,1590738989000,"2020-05-01","aa" ]
+ - [ "bb",2.2,3.3,1590738990000,"2020-05-03","ab" ]
+ - [ "bb",2.5,3.6,1590738991000,"2020-05-04","bc" ]
+ - [ "bb",2.4,3.1,1590738992000,"2020-05-02","bb" ]
+ sql: select {0}.c1,{0}.c2,{1}.c3,{1}.c4 from {0} last join {1} ORDER BY {1}.c3 on {0}.c1={1}.c1;
+ expect:
+ success: false
+ - id: 9
+ desc: orderby-date
+ inputs:
+ - columns: [ "c1 string","c2 int","c3 bigint","c4 timestamp" ]
+ indexs: [ "index1:c1:c4" ]
+ rows:
+ - [ "aa",2,3,1590738989000 ]
+ - [ "bb",21,31,1590738990000 ]
+ - columns: [ "c1 string","c2 float","c3 double","c4 timestamp","c5 date","c6 string" ]
+ indexs: [ "index1:c1:c4" ]
+ rows:
+ - [ "aa",2.1,3.1,1590738989000,"2020-05-01","aa" ]
+ - [ "bb",2.2,3.3,1590738990000,"2020-05-03","ab" ]
+ - [ "bb",2.5,3.6,1590738991000,"2020-05-04","bc" ]
+ - [ "bb",2.4,3.1,1590738992000,"2020-05-02","bb" ]
+ sql: select {0}.c1,{0}.c2,{1}.c3,{1}.c4 from {0} last join {1} ORDER BY {1}.c5 on {0}.c1={1}.c1;
+ expect:
+ success: false
+ - id: 10
+ desc: orderby-string
+ inputs:
+ - columns: [ "c1 string","c2 int","c3 bigint","c4 timestamp" ]
+ indexs: [ "index1:c1:c4" ]
+ rows:
+ - [ "aa",2,3,1590738989000 ]
+ - [ "bb",21,31,1590738990000 ]
+ - columns: [ "c1 string","c2 float","c3 double","c4 timestamp","c5 date","c6 string" ]
+ indexs: [ "index1:c1:c4" ]
+ rows:
+ - [ "aa",2.1,3.1,1590738989000,"2020-05-01","aa" ]
+ - [ "bb",2.2,3.3,1590738990000,"2020-05-03","ab" ]
+ - [ "bb",2.5,3.6,1590738991000,"2020-05-04","bc" ]
+ - [ "bb",2.4,3.1,1590738992000,"2020-05-02","bb" ]
+ sql: select {0}.c1,{0}.c2,{1}.c3,{1}.c4 from {0} last join {1} ORDER BY {1}.c6 on {0}.c1={1}.c1;
+ expect:
+ success: false
+ - id: 11
+    desc: join condition - bigint
+ inputs:
+ - columns: [ "c1 string","c2 int","c3 bigint","c4 timestamp" ]
+ indexs: [ "index1:c3:c4" ]
+ rows:
+ - [ "aa",2,3,1590738989000 ]
+ - [ "bb",21,31,1590738990000 ]
+ - columns: [ "c1 string","c2 int","c3 bigint","c4 timestamp" ]
+ indexs: [ "index1:c3:c4" ]
+ rows:
+ - [ "aa",2,3,1590738989000 ]
+ - [ "bb",21,31,1590738990000 ]
+ - [ "bb",41,31,1590738992000 ]
+ - [ "bb",41,31,1590738991000 ]
+ sql: select {0}.c1,{0}.c2,{1}.c3,{1}.c4 from {0} last join {1} ORDER BY {1}.c4 on {0}.c3={1}.c3;
+ expect:
+ columns: [ "c1 string","c2 int","c3 bigint","c4 timestamp" ]
+ order: c1
+ rows:
+ - [ "aa",2,3,1590738989000 ]
+ - [ "bb",21,31,1590738992000 ]
+ - id: 12
+    desc: join condition - int
+ inputs:
+ - columns: [ "c1 string","c2 int","c3 bigint","c4 timestamp" ]
+ indexs: [ "index1:c2:c4" ]
+ rows:
+ - [ "aa",2,3,1590738989000 ]
+ - [ "bb",21,31,1590738990000 ]
+ - columns: [ "c1 string","c2 int","c3 bigint","c4 timestamp" ]
+ indexs: [ "index1:c2:c4" ]
+ rows:
+ - [ "aa",2,3,1590738989000 ]
+ - [ "bb",21,31,1590738990000 ]
+ - [ "bb",21,31,1590738992000 ]
+ - [ "bb",21,31,1590738991000 ]
+ sql: select {0}.c1,{0}.c2,{1}.c3,{1}.c4 from {0} last join {1} ORDER BY {1}.c4 on {0}.c2={1}.c2;
+ expect:
+ columns: [ "c1 string","c2 int","c3 bigint","c4 timestamp" ]
+ order: c1
+ rows:
+ - [ "aa",2,3,1590738989000 ]
+ - [ "bb",21,31,1590738992000 ]
+ - id: 13
+    desc: join condition - float - index not hit
+ mode: rtidb-unsupport, performance-sensitive-unsupport
+# tags: ["TODO", "v0.3.0", "@chenjing, fix join on double/float equal condition"]
+ inputs:
+ - columns: [ "c1 string","c2 float","c3 double","c4 timestamp","c5 date","c6 string" ]
+ indexs: [ "index1:c1:c4" ]
+ rows:
+ - [ "aa",2.1,3.1,1590738989000,"2020-05-01","aa" ]
+ - [ "bb",2.2,3.2,1590738990000,"2020-05-02","aa" ]
+ - columns: [ "c1 string","c2 float","c3 double","c4 timestamp","c5 date","c6 string" ]
+ indexs: [ "index1:c1:c4" ]
+ rows:
+ - [ "aa",2.1,3.1,1590738989000,"2020-05-01","aa" ]
+ - [ "bb",2.2,3.2,1590738990000,"2020-05-03","ab" ]
+ - [ "bb",2.2,3.2,1590738992000,"2020-05-04","bc" ]
+ - [ "bb",2.2,3.2,1590738991000,"2020-05-02","bb" ]
+ sql: select {0}.c1,{0}.c2,{1}.c3,{1}.c4 from {0} last join {1} ORDER BY {1}.c4 on {0}.c2={1}.c2;
+ expect:
+ columns: [ "c1 string","c2 float","c3 double","c4 timestamp" ]
+ order: c1
+ rows:
+ - [ "aa",2.1,3.1,1590738989000 ]
+ - [ "bb",2.2,3.2,1590738992000 ]
+ - id: 13-2
+    desc: join condition - double
+ mode: rtidb-unsupport, performance-sensitive-unsupport
+# tags: ["TODO", "v0.3.0", "@chenjing, fix join on double/float equal condition"]
+ inputs:
+ - columns: [ "c1 string","c2 float","c3 double","c4 timestamp","c5 date","c6 string" ]
+ indexs: [ "index1:c1:c4" ]
+ rows:
+ - [ "aa",2.1,3.1,1590738989000,"2020-05-01","aa" ]
+ - [ "bb",2.2,3.2,1590738990000,"2020-05-02","aa" ]
+ - columns: [ "c1 string","c2 float","c3 double","c4 timestamp","c5 date","c6 string" ]
+ indexs: [ "index1:c1:c4" ]
+ rows:
+ - [ "aa",2.1,3.1,1590738989000,"2020-05-01","aa" ]
+ - [ "bb",2.2,3.2,1590738990000,"2020-05-03","ab" ]
+ - [ "bb",2.2,3.2,1590738992000,"2020-05-04","bc" ]
+ - [ "bb",2.2,3.2,1590738991000,"2020-05-02","bb" ]
+ sql: select {0}.c1,{0}.c2,{1}.c3,{1}.c4 from {0} last join {1} ORDER BY {1}.c4 on {0}.c3={1}.c3;
+ expect:
+ columns: [ "c1 string","c2 float","c3 double","c4 timestamp" ]
+ order: c1
+ rows:
+ - [ "aa",2.1,3.1,1590738989000 ]
+ - [ "bb",2.2,3.2,1590738992000 ]
+ - id: 14
+    desc: join condition - date
+ inputs:
+ - columns: [ "c1 string","c2 float","c3 double","c4 timestamp","c5 date","c6 string" ]
+ indexs: [ "index1:c5:c4" ]
+ rows:
+ - [ "aa",2.1,3.1,1590738989000,"2020-05-01","aa" ]
+ - [ "bb",2.2,3.2,1590738990000,"2020-05-02","aa" ]
+ - columns: [ "c1 string","c2 float","c3 double","c4 timestamp","c5 date","c6 string" ]
+ indexs: [ "index1:c5:c4" ]
+ rows:
+ - [ "aa",2.1,3.1,1590738989000,"2020-05-01","aa" ]
+ - [ "bb",2.2,3.2,1590738990000,"2020-05-02","ab" ]
+ - [ "bb",2.2,3.2,1590738992000,"2020-05-02","bc" ]
+ - [ "bb",2.2,3.2,1590738991000,"2020-05-02","bb" ]
+ sql: select {0}.c1,{0}.c2,{1}.c3,{1}.c4 from {0} last join {1} ORDER BY {1}.c4 on {0}.c5={1}.c5;
+ expect:
+ columns: [ "c1 string","c2 float","c3 double","c4 timestamp" ]
+ order: c1
+ rows:
+ - [ "aa",2.1,3.1,1590738989000 ]
+ - [ "bb",2.2,3.2,1590738992000 ]
+ - id: 14
+    desc: join condition - timestamp
+ inputs:
+ - columns: [ "c1 string","c2 float","c3 double","c4 timestamp","c5 date","c6 timestamp" ]
+ indexs: [ "index1:c6:c4" ]
+ rows:
+ - [ "aa",2.1,3.1,1590738989000,"2020-05-01",1590738989000 ]
+ - [ "bb",2.2,3.2,1590738990000,"2020-05-02",1590738990000 ]
+ - columns: [ "c1 string","c2 float","c3 double","c4 timestamp","c5 date","c6 timestamp" ]
+ indexs: [ "index1:c6:c4" ]
+ rows:
+ - [ "aa",2.1,3.1,1590738989000,"2020-05-01",1590738989000 ]
+ - [ "bb",2.2,3.2,1590738990000,"2020-05-02",1590738990000 ]
+ - [ "bb",2.2,3.2,1590738992000,"2020-05-02",1590738990000 ]
+ - [ "bb",2.2,3.2,1590738991000,"2020-05-02",1590738990000 ]
+ sql: select {0}.c1,{0}.c2,{1}.c3,{1}.c4 from {0} last join {1} ORDER BY {1}.c4 on {0}.c6={1}.c6;
+ expect:
+ columns: [ "c1 string","c2 float","c3 double","c4 timestamp" ]
+ order: c1
+ rows:
+ - [ "aa",2.1,3.1,1590738989000 ]
+ - [ "bb",2.2,3.2,1590738992000 ]
+ - id: 15
+    desc: columns of different types as the join condition
+ inputs:
+ - columns: [ "c1 string","c2 int","c3 bigint","c4 timestamp" ]
+ indexs: [ "index1:c2:c4" ]
+ rows:
+ - [ "aa",2,3,1590738989000 ]
+ - [ "bb",21,31,1590738990000 ]
+ - columns: [ "c1 string","c2 int","c3 bigint","c4 timestamp" ]
+ indexs: [ "index1:c3:c4" ]
+ rows:
+ - [ "aa",2,2,1590738989000 ]
+ - [ "bb",21,21,1590738990000 ]
+ - [ "bb",21,21,1590738992000 ]
+ - [ "bb",21,21,1590738991000 ]
+ sql: select {0}.c1,{0}.c2,{1}.c3,{1}.c4 from {0} last join {1} ORDER BY {1}.c4 on {0}.c2={1}.c3;
+ expect:
+ columns: [ "c1 string","c2 int","c3 bigint","c4 timestamp" ]
+ order: c1
+ rows:
+ - [ "aa",2,2,1590738989000 ]
+ - [ "bb",21,21,1590738992000 ]
+ - id: 16
+    desc: multiple join conditions
+ inputs:
+ - columns: [ "c1 string","c2 int","c3 bigint","c4 timestamp" ]
+ indexs: [ "index1:c2:c4" ]
+ rows:
+ - [ "aa",2,3,1590738989000 ]
+ - [ "bb",21,31,1590738990000 ]
+ - columns: [ "c1 string","c2 int","c3 bigint","c4 timestamp" ]
+ indexs: [ "index1:c2:c4" ]
+ rows:
+ - [ "aa",2,3,1590738989000 ]
+ - [ "bb",21,31,1590738990000 ]
+ - [ "bb",21,32,1590738993000 ]
+ - [ "bb",21,31,1590738992000 ]
+ - [ "bb",21,31,1590738991000 ]
+ sql: select {0}.c1,{0}.c2,{1}.c3,{1}.c4 from {0} last join {1} ORDER BY {1}.c4 on {0}.c2={1}.c2 and {0}.c3={1}.c3;
+ expect:
+ columns: [ "c1 string","c2 int","c3 bigint","c4 timestamp" ]
+ order: c1
+ rows:
+ - [ "aa",2,3,1590738989000 ]
+ - [ "bb",21,31,1590738992000 ]
+ - id: 17
+    desc: non-equality join
+ inputs:
+ - columns: [ "c1 string","c2 int","c3 bigint","c4 timestamp" ]
+ indexs: [ "index1:c2:c4" ]
+ rows:
+ - [ "aa",2,3,1590738989000 ]
+ - [ "bb",21,31,1590738990000 ]
+ - columns: [ "c1 string","c2 int","c3 bigint","c4 timestamp" ]
+ indexs: [ "index1:c2:c4" ]
+ rows:
+ - [ "aa",2,3,1590738989000 ]
+ - [ "bb",21,31,1590738990000 ]
+ - [ "bb",21,32,1590738993000 ]
+ - [ "bb",21,31,1590738992000 ]
+ - [ "bb",21,31,1590738991000 ]
+ sql: select {0}.c1,{0}.c2,{1}.c3,{1}.c4 from {0} last join {1} ORDER BY {1}.c4 on {0}.c2 = {1}.c2 and {0}.c3 <= {1}.c3;
+ expect:
+ columns: [ "c1 string","c2 int","c3 bigint","c4 timestamp" ]
+ order: c1
+ rows:
+ - [ "aa",2,3,1590738989000 ]
+ - [ "bb",21,32,1590738993000 ]
+ - id: 17-1
+    desc: non-equality join - index not hit
+ mode: rtidb-unsupport
+ inputs:
+ - columns: [ "c1 string","c2 int","c3 bigint","c4 timestamp" ]
+ indexs: [ "index1:c2:c4" ]
+ rows:
+ - [ "aa",2,3,1590738989000 ]
+ - [ "bb",21,31,1590738990000 ]
+ - columns: [ "c1 string","c2 int","c3 bigint","c4 timestamp" ]
+ indexs: [ "index1:c2:c4" ]
+ rows:
+ - [ "aa",2,3,1590738989000 ]
+ - [ "bb",21,31,1590738990000 ]
+ - [ "bb",21,32,1590738993000 ]
+ - [ "bb",21,31,1590738992000 ]
+ - [ "bb",21,31,1590738991000 ]
+ sql: select {0}.c1,{0}.c2,{1}.c3,{1}.c4 from {0} last join {1} ORDER BY {1}.c4 on {0}.c3<{1}.c3;
+ expect:
+ columns: [ "c1 string","c2 int","c3 bigint","c4 timestamp" ]
+ order: c1
+ rows:
+ - [ "aa",2,32,1590738993000 ]
+ - [ "bb",21,32,1590738993000 ]
+ - id: 17-2
+    desc: order by restricts the column range - constant
+ inputs:
+ - columns: [ "c1 string","c2 int","c3 bigint","c4 timestamp" ]
+ indexs: [ "index1:c2:c4" ]
+ rows:
+ - [ "aa",2,3,1590738989000 ]
+ - [ "bb",21,31,1590738990000 ]
+ - columns: [ "c1 string","c2 int","c3 bigint","c4 timestamp" ]
+ indexs: [ "index1:c3:c4" ]
+ rows:
+ - [ "aa",2,3,1590738989000 ]
+ - [ "bb",21,31,1590738990000 ]
+ - [ "bb",21,32,1590738993000 ]
+ - [ "bb",21,31,1590738992000 ]
+ - [ "bb",21,31,1590738991000 ]
+ sql: select {0}.c1,{0}.c2,{1}.c3,{1}.c4 from {0} last join {1} ORDER BY {1}.c4 on {0}.c3={1}.c3 and {1}.c3>10;
+ expect:
+ columns: [ "c1 string","c2 int","c3 bigint","c4 timestamp" ]
+ order: c1
+ rows:
+ - [ "aa",2,null,null ]
+ - [ "bb",21,31,1590738992000 ]
+ - id: 18
+    desc: order by restricts the column range - variable
+ inputs:
+ - columns: [ "c1 string","c2 int","c3 bigint","c4 timestamp" ]
+ indexs: [ "index1:c2:c4" ]
+ rows:
+ - [ "aa",2,3,1590738989000 ]
+ - [ "bb",21,31,1590738990000 ]
+ - columns: [ "c1 string","c2 int","c3 bigint","c4 timestamp" ]
+ indexs: [ "index1:c3:c4" ]
+ rows:
+ - [ "aa",2,3,1590738989000 ]
+ - [ "bb",22,31,1590738990000 ]
+ - [ "bb",21,32,1590738993000 ]
+ - [ "bb",22,31,1590738992000 ]
+ - [ "bb",22,31,1590738991000 ]
+ sql: select {0}.c1,{0}.c2,{1}.c3,{1}.c4 from {0} last join {1} ORDER BY {1}.c4 on {0}.c3={1}.c3 and {0}.c2<{1}.c2;
+ expect:
+ columns: [ "c1 string","c2 int","c3 bigint","c4 timestamp" ]
+ order: c1
+ rows:
+ - [ "aa",2,null,null ]
+ - [ "bb",21,31,1590738992000 ]
+ - id: 19
+    desc: join condition contains an empty string
+ mode: cli-unsupport
+ inputs:
+ - columns: [ "c1 string","c2 int","c3 bigint","c4 timestamp" ]
+ indexs: [ "index1:c1:c4" ]
+ rows:
+ - [ "",2,3,1590738989000 ]
+ - [ "bb",21,31,1590738990000 ]
+ - columns: [ "c1 string","c2 int","c3 bigint","c4 timestamp" ]
+ indexs: [ "index1:c1:c4" ]
+ rows:
+ - [ "",2,3,1590738989000 ]
+ - [ "bb",22,31,1590738990000 ]
+ - [ "ab",21,32,1590738993000 ]
+ - [ "bb",22,31,1590738992000 ]
+ - [ "bb",22,31,1590738991000 ]
+ sql: select {0}.c1,{0}.c2,{1}.c3,{1}.c4 from {0} last join {1} ORDER BY {1}.c4 on {0}.c1={1}.c1;
+ expect:
+ columns: [ "c1 string","c2 int","c3 bigint","c4 timestamp" ]
+ order: c1
+ rows:
+ - [ "",2,3,1590738989000 ]
+ - [ "bb",21,31,1590738992000 ]
+ - id: 19
+    desc: join condition contains null
+ inputs:
+ - columns: [ "c1 string","c2 int","c3 bigint","c4 timestamp" ]
+ indexs: [ "index1:c1:c4" ]
+ rows:
+ - [ NULL,2,3,1590738989000 ]
+ - [ "bb",21,31,1590738990000 ]
+ - columns: [ "c1 string","c2 int","c3 bigint","c4 timestamp" ]
+ indexs: [ "index1:c1:c4" ]
+ rows:
+ - [ NULL,2,3,1590738989000 ]
+ - [ "bb",22,31,1590738990000 ]
+ - [ "ab",21,32,1590738993000 ]
+ - [ "bb",22,31,1590738992000 ]
+ - [ "bb",22,31,1590738991000 ]
+ sql: select {0}.c1,{0}.c2,{1}.c3,{1}.c4 from {0} last join {1} ORDER BY {1}.c4 on {0}.c1={1}.c1;
+ expect:
+ columns: [ "c1 string","c2 int","c3 bigint","c4 timestamp" ]
+ order: c1
+ rows:
+ - [ null,2,3,1590738989000 ]
+ - [ "bb",21,31,1590738992000 ]
+ - id: 20
+    desc: combined with limit
+ tags: [ "TODO", "remove @zhaowei" ]
+ inputs:
+ - columns: [ "c1 string","c2 int","c3 bigint","c4 timestamp" ]
+ indexs: [ "index1:c3:c4" ]
+ rows:
+ - [ "aa",2,3,1590738989000 ]
+ - [ "bb",21,31,1590738990000 ]
+ - columns: [ "c1 string","c2 int","c3 bigint","c4 timestamp" ]
+ indexs: [ "index1:c3:c4" ]
+ rows:
+ - [ "aa",2,3,1590738989000 ]
+ - [ "bb",21,31,1590738990000 ]
+ - [ "bb",41,31,1590738992000 ]
+ - [ "bb",41,31,1590738991000 ]
+ sql: select {0}.c1,{0}.c2,{1}.c3,{1}.c4 from {0} last join {1} ORDER BY {1}.c4 on {0}.c3={1}.c3 limit 1;
+ expect:
+ rows:
+ - [ "aa",2,3,1590738989000 ]
+ - id: 21
+    desc: three-table join
+ inputs:
+ - columns: [ "c1 string","c2 int","c3 bigint","c4 timestamp" ]
+ indexs: [ "index1:c1:c4" ]
+ rows:
+ - [ "aa",2,3,1590738989000 ]
+ - [ "bb",21,31,1590738990000 ]
+ - columns: [ "c1 string","c2 int","c3 bigint","c4 timestamp" ]
+ indexs: [ "index1:c1:c4" ]
+ rows:
+ - [ "aa",2,13,1590738989000 ]
+ - [ "bb",21,131,1590738990000 ]
+ - [ "bb",41,121,1590738992000 ]
+ - [ "bb",41,121,1590738991000 ]
+ - columns: [ "c1 string","c2 int","c3 bigint","c4 timestamp" ]
+ indexs: [ "index1:c1:c4" ]
+ rows:
+ - [ "aa",2,13,1590738989000 ]
+ - [ "aa",21,131,1590738992000 ]
+ - [ "aa",41,121,1590738991000 ]
+ - [ "bb",41,121,1590738991000 ]
+ sql: select {0}.c1,{0}.c2,{1}.c4,{2}.c4 from {0} last join {1} ORDER BY {1}.c4 on {0}.c1={1}.c1 last join {2} order by {2}.c4 on {0}.c1={2}.c1;
+ expect:
+ columns: [ "c1 string","c2 int","c4 timestamp","c4 timestamp" ]
+ order: c1
+ rows:
+ - [ "aa",2,1590738989000,1590738992000 ]
+ - [ "bb",21,1590738992000,1590738991000 ]
+ - id: 22
+    desc: join condition is not an index column
+ mode: rtidb-unsupport
+ inputs:
+ - columns: [ "c1 string","c2 int","c3 bigint","c4 timestamp" ]
+ indexs: [ "index1:c1:c4" ]
+ rows:
+ - [ "aa",2,3,1590738989000 ]
+ - [ "bb",21,31,1590738990000 ]
+ - columns: [ "c1 string","c2 int","c3 bigint","c4 timestamp" ]
+ indexs: [ "index1:c1:c4" ]
+ rows:
+ - [ "aa",2,3,1590738989000 ]
+ - [ "bb",21,31,1590738990000 ]
+ - [ "bb",21,31,1590738992000 ]
+ - [ "bb",21,31,1590738991000 ]
+ sql: select {0}.c1,{0}.c2,{1}.c3,{1}.c4 from {0} last join {1} ORDER BY {1}.c4 on {0}.c2={1}.c2;
+ expect:
+ columns: [ "c1 string","c2 int","c3 bigint","c4 timestamp" ]
+ order: c1
+ rows:
+ - [ "aa",2,3,1590738989000 ]
+ - [ "bb",21,31,1590738992000 ]
+ - id: 23
+    desc: using table aliases
+ inputs:
+ - columns: [ "c1 string","c2 bigint","c3 bigint","c4 timestamp" ]
+ indexs: [ "index1:c1:c4" ]
+ rows:
+ - [ "aa",2,3,1590738989000 ]
+ - [ "bb",21,31,1590738990000 ]
+ - columns: [ "c1 string","c2 bigint","c3 bigint","c4 timestamp" ]
+ indexs: [ "index1:c1:c2" ]
+ rows:
+ - [ "aa",2,13,1590738989000 ]
+ - [ "bb",21,131,1590738990000 ]
+ - [ "bb",41,121,1590738991000 ]
+ - [ "bb",31,141,1590738992000 ]
+ sql: select t1.c1,t1.c2,t2.c3,t2.c4 from {0} as t1 last join {1} as t2 ORDER BY t2.c2 on t1.c1=t2.c1;
+ expect:
+ columns: [ "c1 string","c2 bigint","c3 bigint","c4 timestamp" ]
+ order: c1
+ rows:
+ - [ "aa",2,13,1590738989000 ]
+ - [ "bb",21,121,1590738991000 ]
+ - id: 25
+ desc: LAST JOIN with rename table
+ mode: python-unsupport
+ inputs:
+ - columns: [ "c1 string","c2 int","c3 bigint","c4 timestamp" ]
+ indexs: [ "index1:c2:c4" ]
+ rows:
+ - [ "aa",2,3,1590738989000 ]
+ - [ "bb",21,31,1590738990000 ]
+ - columns: [ "c1 string","c2 int","c3 bigint","c4 timestamp" ]
+ indexs: [ "index1:c2:c4", "index2:c1:c4" ]
+ rows:
+ - [ "aa",2,3,1590738989000 ]
+ - [ "bb",21,31,1590738990000 ]
+ - [ "cc",21,32,1590738993000 ]
+ - [ "cc",21,31,1590738992000 ]
+ - [ "dd",21,31,1590738991000 ]
+ sql: |
+ select
+ {0}.c1, {0}.c2, {0}.c3,
+ t2.c1 as t2_c1, t2.c2 as t2_c2, t2.c3 as t2_c3,t2.c4 as t2_c4,
+ t3.c1 as t3_c1, t3.c4 as t3_c4 from {0}
+ last join {1} as t2 ORDER BY t2.c4 on {0}.c2 = t2.c2 and {0}.c3 <= t2.c3
+ last join {1} as t3 ORDER BY t3.c4 on {0}.c1 = t3.c1 and {0}.c3 <= t3.c3;
+ expect:
+ columns: [ "c1 string","c2 int", "c3 bigint", "t2_c1 string", "t2_c2 int", "t2_c3 bigint","t2_c4 timestamp", "t3_c1 string", "t3_c4 timestamp" ]
+ order: c1
+ rows:
+ - [ "aa",2,3, "aa", 2, 3,1590738989000, "aa", 1590738989000 ]
+ - [ "bb",21, 31, "cc", 21, 32,1590738993000, "bb", 1590738990000 ]
+ - id: 26
+ desc: LAST JOIN subquery with rename table
+ mode: python-unsupport
+ inputs:
+ - columns: [ "c1 string","c2 int","c3 bigint","c4 timestamp" ]
+ indexs: [ "index1:c2:c4" ]
+ rows:
+ - [ "aa",2,3,1590738989000 ]
+ - [ "bb",21,31,1590738990000 ]
+ - columns: [ "col1 string","col2 int","col3 bigint","col4 timestamp" ]
+ indexs: [ "index1:col2:col4", "index2:col1:col4" ]
+ rows:
+ - [ "aa",2,3,1590738989000 ]
+ - [ "bb",21,31,1590738990000 ]
+ - [ "cc",21,32,1590738993000 ]
+ - [ "cc",21,31,1590738992000 ]
+ - [ "dd",21,31,1590738991000 ]
+ sql: |
+ select
+ {0}.c1, {0}.c2, {0}.c3,
+ t2.c1 as t2_c1, t2.c2 as t2_c2, t2.c3 as t2_c3,t2.c4 as t2_c4,
+ t3.c1 as t3_c1, t3.c4 as t3_c4 from {0}
+ last join (select col1 as c1, col2 as c2, col3 as c3, col4 as c4 from {1}) as t2 ORDER BY t2.c4 on {0}.c2 = t2.c2 and {0}.c3 <= t2.c3
+ last join (select col1 as c1, col3 as c3, col4 as c4 from {1}) as t3 ORDER BY t3.c4 on {0}.c1 = t3.c1 and {0}.c3 <= t3.c3;
+ expect:
+ columns: [ "c1 string","c2 int", "c3 bigint", "t2_c1 string", "t2_c2 int", "t2_c3 bigint","t2_c4 timestamp", "t3_c1 string", "t3_c4 timestamp" ]
+ order: c1
+ rows:
+ - [ "aa",2,3, "aa", 2, 3,1590738989000, "aa", 1590738989000 ]
+ - [ "bb",21, 31, "cc", 21, 32,1590738993000, "bb", 1590738990000 ]
+ - id: 27
+ desc: LAST JOIN subquery with rename table 2
+ mode: python-unsupport
+ inputs:
+ - columns: [ "c1 string","c2 int","c3 bigint","c4 timestamp" ]
+ indexs: [ "index1:c2:c4" ]
+ rows:
+ - [ "aa",2,3,1590738989000 ]
+ - [ "bb",21,31,1590738990000 ]
+ - columns: [ "col1 string","col2 int","col3 bigint","col4 timestamp" ]
+ indexs: [ "index1:col2:col4", "index2:col1:col4" ]
+ rows:
+ - [ "aa",2,3,1590738989000 ]
+ - [ "bb",21,31,1590738990000 ]
+ - [ "cc",21,32,1590738993000 ]
+ - [ "cc",21,31,1590738992000 ]
+ - [ "dd",21,31,1590738991000 ]
+ sql: |
+ select
+ {0}.c1, {0}.c2, {0}.c3,
+ t2.c1 as t2_c1, t2.c2 as t2_c2, t2.c3 as t2_c3,t2.c4 as t2_c4,
+ t3.c1 as t3_c1, t3.c4 as t3_c4 from {0}
+ last join (select col1 as c1, col2 as c2, col3 as c3, col4 as c4 from {1}) as t2 ORDER BY t2.c4 on {0}.c2 = t2.c2 and {0}.c3 <= t2.c3
+ last join (select col1 as c1, col2 as c2, col3 as c3, col4 as c4 from {1}) as t3 ORDER BY t3.c4 on {0}.c1 = t3.c1 and {0}.c3 <= t3.c3;
+ expect:
+ columns: [ "c1 string","c2 int", "c3 bigint", "t2_c1 string", "t2_c2 int", "t2_c3 bigint","t2_c4 timestamp", "t3_c1 string", "t3_c4 timestamp" ]
+ order: c1
+ rows:
+ - ["aa",2,3, "aa", 2, 3,1590738989000, "aa", 1590738989000]
+ - ["bb",21, 31, "cc", 21, 32,1590738993000, "bb", 1590738990000]
+
+ - id: 28
+ desc: orderby-smallint
+ inputs:
+ - columns: ["c1 string","c2 int","c3 bigint","c4 timestamp"]
+ indexs: ["index1:c1:c4"]
+ rows:
+ - ["aa",2,3,1590738989000]
+ - ["bb",21,31,1590738990000]
+ - columns: ["c1 string","c2 smallint","c3 double","c4 timestamp","c5 date","c6 string"]
+ indexs: ["index1:c1:c4"]
+ rows:
+ - ["aa",1,3.1,1590738989000,"2020-05-01","aa"]
+ - ["bb",2,3.3,1590738990000,"2020-05-03","ab"]
+ - ["bb",5,3.6,1590738991000,"2020-05-04","bc"]
+ - ["bb",4,3.1,1590738992000,"2020-05-02","bb"]
+ sql: select {0}.c1,{0}.c2,{1}.c3,{1}.c4 from {0} last join {1} ORDER BY {1}.c2 on {0}.c1={1}.c1;
+ expect:
+ success: false
+
+ - id: 29
+ desc: orderby-bool
+ inputs:
+ - columns: ["c1 string","c2 int","c3 bigint","c4 timestamp"]
+ indexs: ["index1:c1:c4"]
+ rows:
+ - ["aa",2,3,1590738989000]
+ - ["bb",21,31,1590738990000]
+ - columns: ["c1 string","c2 bool","c3 double","c4 timestamp","c5 date","c6 string"]
+ indexs: ["index1:c1:c4"]
+ rows:
+ - ["aa",true,3.1,1590738989000,"2020-05-01","aa"]
+ - ["bb",true,3.3,1590738990000,"2020-05-03","ab"]
+ - ["bb",false,3.6,1590738991000,"2020-05-04","bc"]
+ - ["bb",true,3.1,1590738992000,"2020-05-02","bb"]
+ sql: select {0}.c1,{0}.c2,{1}.c3,{1}.c4 from {0} last join {1} ORDER BY {1}.c2 on {0}.c1={1}.c1;
+ expect:
+ success: false
+ - id: 30
+    desc: join condition - smallint
+ inputs:
+ - columns: ["c1 string","c2 smallint","c3 bigint","c4 timestamp"]
+ indexs: ["index1:c2:c4"]
+ rows:
+ - ["aa",2,3,1590738989000]
+ - ["bb",21,31,1590738990000]
+ - columns: ["c1 string","c2 smallint","c3 bigint","c4 timestamp"]
+ indexs: ["index1:c2:c4"]
+ rows:
+ - ["aa",2,3,1590738989000]
+ - ["bb",21,31,1590738990000]
+ - ["bb",21,31,1590738992000]
+ - ["bb",21,31,1590738991000]
+ sql: select {0}.c1,{0}.c2,{1}.c3,{1}.c4 from {0} last join {1} ORDER BY {1}.c4 on {0}.c2={1}.c2;
+ expect:
+ columns: ["c1 string","c2 smallint","c3 bigint","c4 timestamp"]
+ order: c1
+ rows:
+ - ["aa",2,3,1590738989000]
+ - ["bb",21,31,1590738992000]
+ - id: 31
+    desc: join condition - bool
+ inputs:
+ - columns: ["c1 string","c2 bool","c3 bigint","c4 timestamp"]
+ indexs: ["index1:c2:c4"]
+ rows:
+ - ["aa",true,3,1590738989000]
+ - ["bb",false,31,1590738990000]
+ - columns: ["c1 string","c2 bool","c3 bigint","c4 timestamp"]
+ indexs: ["index1:c2:c4"]
+ rows:
+ - ["aa",true,3,1590738989000]
+ - ["bb",false,31,1590738990000]
+ - ["bb",false,31,1590738992000]
+ - ["bb",false,31,1590738991000]
+ sql: select {0}.c1,{0}.c2,{1}.c3,{1}.c4 from {0} last join {1} ORDER BY {1}.c4 on {0}.c2={1}.c2;
+ expect:
+ columns: ["c1 string","c2 bool","c3 bigint","c4 timestamp"]
+ order: c1
+ rows:
+ - ["aa",true,3,1590738989000]
+ - ["bb",false,31,1590738992000]
+ - id: 4-6
+    desc: lastjoin - join condition misses the index
+ mode: performance-sensitive-unsupport,cli-unsupport
+ inputs:
+ - columns: [ "c1 string","c2 int","c3 bigint","c4 timestamp" ]
+ indexs: [ "index1:c1:c4" ]
+ rows:
+ - [ "aa",2,3,1590738989000 ]
+ - [ "bb",21,31,1590738990000 ]
+ - columns: [ "c1 string","c2 int","c3 bigint","c4 timestamp" ]
+ indexs: [ "index1:c2:c4" ]
+ rows:
+ - [ "aa",2,13,1590738989000 ]
+ - [ "bb",21,131,1590738990000 ]
+ - [ "cc",41,121,1590738991000 ]
+ sql: select {0}.c1,{0}.c2,{1}.c3,{1}.c4 from {0} last join {1} order by {1}.c4 on {0}.c1={1}.c1;
+ expect:
+ columns: [ "c1 string", "c2 int", "c3 bigint", "c4 timestamp" ]
+ order: c1
+ rows:
+ - [ "aa", 2, 13, 1590738989000 ]
+ - [ "bb", 21, 131, 1590738990000 ]
+ -
+ id: 12
+      desc: lastjoin without specifying an index
+ inputs:
+ - columns: [ "c1 string","c2 int","c3 bigint","c4 timestamp" ]
+ rows:
+ - [ "aa",2,3,1590738989000 ]
+ - [ "bb",21,31,1590738990000 ]
+ - [ "dd",41,51,1590738990000 ]
+ - columns: [ "c1 string","c2 int","c3 bigint","c4 timestamp" ]
+ rows:
+ - [ "aa",2,13,1590738989000 ]
+ - [ "bb",21,131,1590738990000 ]
+ - [ "cc",41,121,1590738991000 ]
+ sql: select {0}.c1,{0}.c2,{1}.c3,{1}.c4 from {0} last join {1} on {0}.c1={1}.c1;
+ expect:
+ columns: [ "c1 string","c2 int","c3 bigint","c4 timestamp" ]
+ order: c1
+ rows:
+ - [ "aa",2,13,1590738989000 ]
+ - [ "bb",21,131,1590738990000 ]
+ - [ "dd", 41, NULL, NULL ]
+ -
+ id: 13
+      desc: lastjoin without specifying an index, multiple rows match
+ inputs:
+ - columns: [ "c1 string","c2 int","c3 bigint","c4 timestamp" ]
+ rows:
+ - [ "aa",2,3,1590738989000 ]
+ - [ "bb",21,31,1590738990000 ]
+ - [ "dd",41,51,1590738990000 ]
+ - columns: [ "c1 string","c2 int","c3 bigint","c4 timestamp" ]
+ rows:
+ - [ "aa",2,13,1590738989000 ]
+ - [ "aa",21,131,1590738990000 ]
+ - [ "cc",41,121,1590738991000 ]
+ sql: select {0}.c1,{0}.c2,{1}.c3,{1}.c4 from {0} last join {1} on {0}.c1={1}.c1;
+ expect:
+ columns: [ "c1 string","c2 int","c3 bigint","c4 timestamp" ]
+ order: c1
+ rows:
+ - [ "aa",2,131,1590738990000 ]
+ - [ "bb",21,NULL,NULL ]
+ - [ "dd", 41, NULL, NULL ]
\ No newline at end of file
diff --git a/cases/integration_test/long_window/test_count_where.yaml b/cases/integration_test/long_window/test_count_where.yaml
new file mode 100644
index 00000000000..e2ac7304c72
--- /dev/null
+++ b/cases/integration_test/long_window/test_count_where.yaml
@@ -0,0 +1,811 @@
+# Copyright 2021 4Paradigm
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+db: test_zw
+debugs: []
+version: 0.6.0
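+# Note on the failing cases further down: with a long window, the second
+# argument of count_where is expected to be a single "column <op> constant"
+# comparison. Conditions that pass a bare bool column, combine predicates with
+# "and", compare two columns, nest function calls, or compare timestamp/date
+# columns are rejected in these cases (see ids 10, 17, 18, 19, 26, 27).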
+cases:
+ -
+ id: 0
+      desc: long window count_where, date type
+ longWindow: w1:2s
+ inputs:
+ -
+ columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"]
+ index: ["index1:c1:c7"]
+ storage: memory
+ rows:
+ - [1,"aa",1,1,30,1.1,2.1,1590738990000,"2020-05-01",true]
+ - [2,"aa",1,2,31,1.2,2.2,1590738991000,"2020-05-02",true]
+ - [3,"aa",1,3,32,1.3,2.3,1590738992000,"2020-05-03",true]
+ - [4,"aa",4,4,33,1.4,2.4,1590738993000,"2020-05-04",true]
+ - [5,"aa",5,5,34,1.5,2.5,1590738994000,"2020-05-05",false]
+ sql: |
+ SELECT id, c1, count_where(c8,c2<4) OVER w1 as w1_count FROM {0} WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS_RANGE BETWEEN 2s PRECEDING AND CURRENT ROW);
+ expect:
+ order: id
+ columns: ["id int","c1 string","w1_count bigint"]
+ rows:
+ - [1,"aa",1]
+ - [2,"aa",2]
+ - [3,"aa",3]
+ - [4,"aa",2]
+ - [5,"aa",1]
+ preAgg:
+ name: pre_{db_name}_{sp_name}_w1_count_where_c8_c2
+ type: bigint
+ rows:
+ - ["aa",1590738990000,1590738991999,2,2,1]
+ -
+ id: 1
+      desc: long window count_where, smallint type
+ longWindow: w1:2s
+ inputs:
+ -
+ columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"]
+ index: ["index1:c1:c7"]
+ storage: memory
+ rows:
+ - [1,"aa",1,20,30,1.1,2.1,1590738990000,"2020-05-01",true]
+ - [2,"aa",2,21,31,1.2,2.2,1590738991000,"2020-05-02",true]
+ - [3,"aa",3,22,32,1.3,2.3,1590738992000,"2020-05-03",true]
+ - [4,"aa",4,23,33,1.4,2.4,1590738993000,"2020-05-04",true]
+ - [5,"aa",5,24,34,1.5,2.5,1590738994000,"2020-05-05",false]
+ sql: |
+ SELECT id, c1, count_where(c2,c2<4) OVER w1 as w1_count FROM {0} WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS_RANGE BETWEEN 2s PRECEDING AND CURRENT ROW);
+ expect:
+ order: id
+ columns: ["id int","c1 string","w1_count bigint"]
+ rows:
+ - [1,"aa",1]
+ - [2,"aa",2]
+ - [3,"aa",3]
+ - [4,"aa",2]
+ - [5,"aa",1]
+ -
+ id: 2
+      desc: long window count_where, int type
+ longWindow: w1:2s
+ inputs:
+ -
+ columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"]
+ index: ["index1:c1:c7"]
+ storage: memory
+ rows:
+ - [1,"aa",1,20,30,1.1,2.1,1590738990000,"2020-05-01",true]
+ - [2,"aa",2,21,31,1.2,2.2,1590738991000,"2020-05-02",true]
+ - [3,"aa",3,22,32,1.3,2.3,1590738992000,"2020-05-03",true]
+ - [4,"aa",4,23,33,1.4,2.4,1590738993000,"2020-05-04",true]
+ - [5,"aa",5,24,34,1.5,2.5,1590738994000,"2020-05-05",false]
+ sql: |
+ SELECT id, c1, count_where(c3,c2<4) OVER w1 as w1_count FROM {0} WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS_RANGE BETWEEN 2s PRECEDING AND CURRENT ROW);
+ expect:
+ order: id
+ columns: ["id int","c1 string","w1_count bigint"]
+ rows:
+ - [1,"aa",1]
+ - [2,"aa",2]
+ - [3,"aa",3]
+ - [4,"aa",2]
+ - [5,"aa",1]
+ -
+ id: 3
+      desc: long window count_where, bigint type
+ longWindow: w1:2s
+ inputs:
+ -
+ columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"]
+ index: ["index1:c1:c7"]
+ storage: memory
+ rows:
+ - [1,"aa",1,20,30,1.1,2.1,1590738990000,"2020-05-01",true]
+ - [2,"aa",2,21,31,1.2,2.2,1590738991000,"2020-05-02",true]
+ - [3,"aa",3,22,32,1.3,2.3,1590738992000,"2020-05-03",true]
+ - [4,"aa",4,23,33,1.4,2.4,1590738993000,"2020-05-04",true]
+ - [5,"aa",5,24,34,1.5,2.5,1590738994000,"2020-05-05",false]
+ sql: |
+ SELECT id, c1, count_where(c4,c2<4) OVER w1 as w1_count FROM {0} WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS_RANGE BETWEEN 2s PRECEDING AND CURRENT ROW);
+ expect:
+ order: id
+ columns: ["id int","c1 string","w1_count bigint"]
+ rows:
+ - [1,"aa",1]
+ - [2,"aa",2]
+ - [3,"aa",3]
+ - [4,"aa",2]
+ - [5,"aa",1]
+ -
+ id: 4
+      desc: long window count_where, string type
+ longWindow: w1:2s
+ inputs:
+ -
+ columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"]
+ index: ["index1:c1:c7"]
+ storage: memory
+ rows:
+ - [1,"aa",1,20,30,1.1,2.1,1590738990000,"2020-05-01",true]
+ - [2,"aa",2,21,31,1.2,2.2,1590738991000,"2020-05-02",true]
+ - [3,"aa",3,22,32,1.3,2.3,1590738992000,"2020-05-03",true]
+ - [4,"aa",4,23,33,1.4,2.4,1590738993000,"2020-05-04",true]
+ - [5,"aa",5,24,34,1.5,2.5,1590738994000,"2020-05-05",false]
+ sql: |
+ SELECT id, c1, count_where(c1,c2<4) OVER w1 as w1_count FROM {0} WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS_RANGE BETWEEN 2s PRECEDING AND CURRENT ROW);
+ expect:
+ order: id
+ columns: ["id int","c1 string","w1_count bigint"]
+ rows:
+ - [1,"aa",1]
+ - [2,"aa",2]
+ - [3,"aa",3]
+ - [4,"aa",2]
+ - [5,"aa",1]
+ -
+ id: 5
+      desc: long window count_where, timestamp type
+ longWindow: w1:2s
+ inputs:
+ -
+ columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"]
+ index: ["index1:c1:c7"]
+ storage: memory
+ rows:
+ - [1,"aa",1,20,30,1.1,2.1,1590738990000,"2020-05-01",true]
+ - [2,"aa",2,21,31,1.2,2.2,1590738991000,"2020-05-02",true]
+ - [3,"aa",3,22,32,1.3,2.3,1590738992000,"2020-05-03",true]
+ - [4,"aa",4,23,33,1.4,2.4,1590738993000,"2020-05-04",true]
+ - [5,"aa",5,24,34,1.5,2.5,1590738994000,"2020-05-05",false]
+ sql: |
+ SELECT id, c1, count_where(c7,c2<4) OVER w1 as w1_count FROM {0} WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS_RANGE BETWEEN 2s PRECEDING AND CURRENT ROW);
+ expect:
+ order: id
+ columns: ["id int","c1 string","w1_count bigint"]
+ rows:
+ - [1,"aa",1]
+ - [2,"aa",2]
+ - [3,"aa",3]
+ - [4,"aa",2]
+ - [5,"aa",1]
+ -
+ id: 6
+      desc: long window count_where, row type
+ longWindow: w1:2s
+ inputs:
+ -
+ columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"]
+ index: ["index1:c1:c7"]
+ storage: memory
+ rows:
+ - [1,"aa",1,20,30,1.1,2.1,1590738990000,"2020-05-01",true]
+ - [2,"aa",2,21,31,1.2,2.2,1590738991000,"2020-05-02",true]
+ - [3,"aa",3,22,32,1.3,2.3,1590738992000,"2020-05-03",true]
+ - [4,"aa",4,23,33,1.4,2.4,1590738993000,"2020-05-04",true]
+ - [5,"aa",5,24,34,1.5,2.5,1590738994000,"2020-05-05",false]
+ sql: |
+ SELECT id, c1, count_where(*,c2<4) OVER w1 as w1_count FROM {0} WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS_RANGE BETWEEN 2s PRECEDING AND CURRENT ROW);
+ expect:
+ order: id
+ columns: ["id int","c1 string","w1_count bigint"]
+ rows:
+ - [1,"aa",1]
+ - [2,"aa",2]
+ - [3,"aa",3]
+ - [4,"aa",2]
+ - [5,"aa",1]
+ -
+ id: 7
+      desc: long window count_where, bool type
+ tags: ["TODO","bug,下个版本修复后测试,@qiliguo"]
+ longWindow: w1:2s
+ inputs:
+ -
+ columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"]
+ index: ["index1:c1:c7"]
+ storage: memory
+ rows:
+ - [1,"aa",1,20,30,1.1,2.1,1590738990000,"2020-05-01",true]
+ - [2,"aa",2,21,31,1.2,2.2,1590738991000,"2020-05-02",true]
+ - [3,"aa",3,22,32,1.3,2.3,1590738992000,"2020-05-03",true]
+ - [4,"aa",4,23,33,1.4,2.4,1590738993000,"2020-05-04",true]
+ - [5,"aa",5,24,34,1.5,2.5,1590738994000,"2020-05-05",false]
+ sql: |
+ SELECT id, c1, count_where(c9,c2<4) OVER w1 as w1_count FROM {0} WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS_RANGE BETWEEN 2s PRECEDING AND CURRENT ROW);
+ expect:
+ order: id
+ columns: ["id int","c1 string","w1_count bigint"]
+ rows:
+ - [1,"aa",1]
+ - [2,"aa",2]
+ - [3,"aa",3]
+ - [4,"aa",2]
+ - [5,"aa",1]
+ -
+ id: 8
+      desc: long window count_where, float type
+ longWindow: w1:2s
+ inputs:
+ -
+ columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"]
+ index: ["index1:c1:c7"]
+ storage: memory
+ rows:
+ - [1,"aa",1,20,30,1.1,2.1,1590738990000,"2020-05-01",true]
+ - [2,"aa",2,21,31,1.2,2.2,1590738991000,"2020-05-02",true]
+ - [3,"aa",3,22,32,1.3,2.3,1590738992000,"2020-05-03",true]
+ - [4,"aa",4,23,33,1.4,2.4,1590738993000,"2020-05-04",true]
+ - [5,"aa",5,24,34,1.5,2.5,1590738994000,"2020-05-05",false]
+ sql: |
+ SELECT id, c1, count_where(c5,c2<4) OVER w1 as w1_count FROM {0} WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS_RANGE BETWEEN 2s PRECEDING AND CURRENT ROW);
+ expect:
+ order: id
+ columns: ["id int","c1 string","w1_count bigint"]
+ rows:
+ - [1,"aa",1]
+ - [2,"aa",2]
+ - [3,"aa",3]
+ - [4,"aa",2]
+ - [5,"aa",1]
+ -
+ id: 9
+      desc: long window count_where, double type
+ longWindow: w1:2s
+ inputs:
+ -
+ columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"]
+ index: ["index1:c1:c7"]
+ storage: memory
+ rows:
+ - [1,"aa",1,20,30,1.1,2.1,1590738990000,"2020-05-01",true]
+ - [2,"aa",2,21,31,1.2,2.2,1590738991000,"2020-05-02",true]
+ - [3,"aa",3,22,32,1.3,2.3,1590738992000,"2020-05-03",true]
+ - [4,"aa",4,23,33,1.4,2.4,1590738993000,"2020-05-04",true]
+ - [5,"aa",5,24,34,1.5,2.5,1590738994000,"2020-05-05",false]
+ sql: |
+ SELECT id, c1, count_where(c6,c2<4) OVER w1 as w1_count FROM {0} WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS_RANGE BETWEEN 2s PRECEDING AND CURRENT ROW);
+ expect:
+ order: id
+ columns: ["id int","c1 string","w1_count bigint"]
+ rows:
+ - [1,"aa",1]
+ - [2,"aa",2]
+ - [3,"aa",3]
+ - [4,"aa",2]
+ - [5,"aa",1]
+ -
+ id: 10
+      desc: long window count_where, second argument is a bool column
+ longWindow: w1:2s
+ inputs:
+ -
+ columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"]
+ index: ["index1:c1:c7"]
+ storage: memory
+ rows:
+ - [1,"aa",1,20,30,1.1,2.1,1590738990000,"2020-05-01",true]
+ - [2,"aa",2,21,31,1.2,2.2,1590738991000,"2020-05-02",true]
+ - [3,"aa",3,22,32,1.3,2.3,1590738992000,"2020-05-03",true]
+ - [4,"aa",4,23,33,1.4,2.4,1590738993000,"2020-05-04",true]
+ - [5,"aa",5,24,34,1.5,2.5,1590738994000,"2020-05-05",false]
+ sql: |
+ SELECT id, c1, count_where(c8,c9) OVER w1 as w1_count FROM {0} WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS_RANGE BETWEEN 2s PRECEDING AND CURRENT ROW);
+ expect:
+ success: false
+ msg: fail
+ -
+ id: 11
+      desc: long window count_where, second argument uses =
+ longWindow: w1:2s
+ inputs:
+ -
+ columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"]
+ index: ["index1:c1:c7"]
+ storage: memory
+ rows:
+ - [1,"aa",1,20,30,1.1,2.1,1590738990000,"2020-05-01",true]
+ - [2,"aa",2,21,31,1.2,2.2,1590738991000,"2020-05-02",true]
+ - [3,"aa",3,22,32,1.3,2.3,1590738992000,"2020-05-03",true]
+ - [4,"aa",4,23,33,1.4,2.4,1590738993000,"2020-05-04",true]
+ - [5,"aa",5,24,34,1.5,2.5,1590738994000,"2020-05-05",false]
+ sql: |
+ SELECT id, c1, count_where(c8,c2=4) OVER w1 as w1_count FROM {0} WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS_RANGE BETWEEN 2s PRECEDING AND CURRENT ROW);
+ expect:
+ order: id
+ columns: ["id int","c1 string","w1_count bigint"]
+ rows:
+ - [1,"aa",0]
+ - [2,"aa",0]
+ - [3,"aa",0]
+ - [4,"aa",1]
+ - [5,"aa",1]
+ -
+ id: 12
+      desc: long window count_where, second argument uses !=
+ longWindow: w1:2s
+ inputs:
+ -
+ columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"]
+ index: ["index1:c1:c7"]
+ storage: memory
+ rows:
+ - [1,"aa",1,20,30,1.1,2.1,1590738990000,"2020-05-01",true]
+ - [2,"aa",2,21,31,1.2,2.2,1590738991000,"2020-05-02",true]
+ - [3,"aa",3,22,32,1.3,2.3,1590738992000,"2020-05-03",true]
+ - [4,"aa",4,23,33,1.4,2.4,1590738993000,"2020-05-04",true]
+ - [5,"aa",5,24,34,1.5,2.5,1590738994000,"2020-05-05",false]
+ sql: |
+ SELECT id, c1, count_where(c8,c2!=4) OVER w1 as w1_count FROM {0} WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS_RANGE BETWEEN 2s PRECEDING AND CURRENT ROW);
+ expect:
+ order: id
+ columns: ["id int","c1 string","w1_count bigint"]
+ rows:
+ - [1,"aa",1]
+ - [2,"aa",2]
+ - [3,"aa",3]
+ - [4,"aa",2]
+ - [5,"aa",2]
+ -
+ id: 13
+      desc: long window count_where, second argument uses >=
+ longWindow: w1:2s
+ inputs:
+ -
+ columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"]
+ index: ["index1:c1:c7"]
+ storage: memory
+ rows:
+ - [1,"aa",1,20,30,1.1,2.1,1590738990000,"2020-05-01",true]
+ - [2,"aa",2,21,31,1.2,2.2,1590738991000,"2020-05-02",true]
+ - [3,"aa",3,22,32,1.3,2.3,1590738992000,"2020-05-03",true]
+ - [4,"aa",4,23,33,1.4,2.4,1590738993000,"2020-05-04",true]
+ - [5,"aa",5,24,34,1.5,2.5,1590738994000,"2020-05-05",false]
+ sql: |
+ SELECT id, c1, count_where(c8,c2>=2) OVER w1 as w1_count FROM {0} WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS_RANGE BETWEEN 2s PRECEDING AND CURRENT ROW);
+ expect:
+ order: id
+ columns: ["id int","c1 string","w1_count bigint"]
+ rows:
+ - [1,"aa",0]
+ - [2,"aa",1]
+ - [3,"aa",2]
+ - [4,"aa",3]
+ - [5,"aa",3]
+ -
+ id: 14
+      desc: long window count_where, second argument uses <=
+ longWindow: w1:2s
+ inputs:
+ -
+ columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"]
+ index: ["index1:c1:c7"]
+ storage: memory
+ rows:
+ - [1,"aa",1,20,30,1.1,2.1,1590738990000,"2020-05-01",true]
+ - [2,"aa",2,21,31,1.2,2.2,1590738991000,"2020-05-02",true]
+ - [3,"aa",3,22,32,1.3,2.3,1590738992000,"2020-05-03",true]
+ - [4,"aa",4,23,33,1.4,2.4,1590738993000,"2020-05-04",true]
+ - [5,"aa",5,24,34,1.5,2.5,1590738994000,"2020-05-05",false]
+ sql: |
+ SELECT id, c1, count_where(c8,c2<=3) OVER w1 as w1_count FROM {0} WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS_RANGE BETWEEN 2s PRECEDING AND CURRENT ROW);
+ expect:
+ order: id
+ columns: ["id int","c1 string","w1_count bigint"]
+ rows:
+ - [1,"aa",1]
+ - [2,"aa",2]
+ - [3,"aa",3]
+ - [4,"aa",2]
+ - [5,"aa",1]
+ -
+ id: 15
+      desc: long window count_where, second argument uses >
+ longWindow: w1:2s
+ inputs:
+ -
+ columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"]
+ index: ["index1:c1:c7"]
+ storage: memory
+ rows:
+ - [1,"aa",1,20,30,1.1,2.1,1590738990000,"2020-05-01",true]
+ - [2,"aa",2,21,31,1.2,2.2,1590738991000,"2020-05-02",true]
+ - [3,"aa",3,22,32,1.3,2.3,1590738992000,"2020-05-03",true]
+ - [4,"aa",4,23,33,1.4,2.4,1590738993000,"2020-05-04",true]
+ - [5,"aa",5,24,34,1.5,2.5,1590738994000,"2020-05-05",false]
+ sql: |
+ SELECT id, c1, count_where(c8,c2>1) OVER w1 as w1_count FROM {0} WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS_RANGE BETWEEN 2s PRECEDING AND CURRENT ROW);
+ expect:
+ order: id
+ columns: ["id int","c1 string","w1_count bigint"]
+ rows:
+ - [1,"aa",0]
+ - [2,"aa",1]
+ - [3,"aa",2]
+ - [4,"aa",3]
+ - [5,"aa",3]
+ -
+ id: 17
+      desc: long window count_where, second argument uses and
+ longWindow: w1:2s
+ inputs:
+ -
+ columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"]
+ index: ["index1:c1:c7"]
+ storage: memory
+ rows:
+ - [1,"aa",1,20,30,1.1,2.1,1590738990000,"2020-05-01",true]
+ - [2,"aa",2,21,31,1.2,2.2,1590738991000,"2020-05-02",true]
+ - [3,"aa",3,22,32,1.3,2.3,1590738992000,"2020-05-03",true]
+ - [4,"aa",4,23,33,1.4,2.4,1590738993000,"2020-05-04",true]
+ - [5,"aa",5,24,34,1.5,2.5,1590738994000,"2020-05-05",false]
+ sql: |
+ SELECT id, c1, count_where(c8,c2<4 and c2>1) OVER w1 as w1_count FROM {0} WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS_RANGE BETWEEN 2s PRECEDING AND CURRENT ROW);
+ expect:
+ success: false
+ msg: fail
+ -
+ id: 18
+      desc: long window count_where, second argument uses two columns
+ longWindow: w1:2s
+ inputs:
+ -
+ columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"]
+ index: ["index1:c1:c7"]
+ storage: memory
+ rows:
+ - [1,"aa",1,20,30,1.1,2.1,1590738990000,"2020-05-01",true]
+ - [2,"aa",2,21,31,1.2,2.2,1590738991000,"2020-05-02",true]
+ - [3,"aa",3,22,32,1.3,2.3,1590738992000,"2020-05-03",true]
+ - [4,"aa",4,23,33,1.4,2.4,1590738993000,"2020-05-04",true]
+ - [5,"aa",5,24,34,1.5,2.5,1590738994000,"2020-05-05",false]
+ sql: |
+ SELECT id, c1, count_where(c8,c3>c2) OVER w1 as w1_count FROM {0} WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS_RANGE BETWEEN 2s PRECEDING AND CURRENT ROW);
+ expect:
+ success: false
+ msg: fail
+ -
+ id: 19
+      desc: long window count_where, second argument uses a nested expression
+ longWindow: w1:2s
+ inputs:
+ -
+ columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"]
+ index: ["index1:c1:c7"]
+ storage: memory
+ rows:
+ - [1,"aa",1,20,30,1.1,2.1,1590738990000,"2020-05-01",true]
+ - [2,"aa",2,21,31,1.2,2.2,1590738991000,"2020-05-02",true]
+ - [3,"aa",3,22,32,1.3,2.3,1590738992000,"2020-05-03",true]
+ - [4,"aa",4,23,33,1.4,2.4,1590738993000,"2020-05-04",true]
+ - [5,"aa",5,24,34,1.5,2.5,1590738994000,"2020-05-05",false]
+ sql: |
+ SELECT id, c1, count_where(c8,if_null(c2,0)>4) OVER w1 as w1_count FROM {0} WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS_RANGE BETWEEN 2s PRECEDING AND CURRENT ROW);
+ expect:
+ success: false
+ msg: fail
+ -
+ id: 20
+      desc: long window count_where, second argument with the constant first
+ longWindow: w1:2s
+ inputs:
+ -
+ columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"]
+ index: ["index1:c1:c7"]
+ storage: memory
+ rows:
+ - [1,"aa",1,20,30,1.1,2.1,1590738990000,"2020-05-01",true]
+ - [2,"aa",2,21,31,1.2,2.2,1590738991000,"2020-05-02",true]
+ - [3,"aa",3,22,32,1.3,2.3,1590738992000,"2020-05-03",true]
+ - [4,"aa",4,23,33,1.4,2.4,1590738993000,"2020-05-04",true]
+ - [5,"aa",5,24,34,1.5,2.5,1590738994000,"2020-05-05",false]
+ sql: |
+ SELECT id, c1, count_where(c8,4>c2) OVER w1 as w1_count FROM {0} WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS_RANGE BETWEEN 2s PRECEDING AND CURRENT ROW);
+ expect:
+ order: id
+ columns: ["id int","c1 string","w1_count bigint"]
+ rows:
+ - [1,"aa",1]
+ - [2,"aa",2]
+ - [3,"aa",3]
+ - [4,"aa",2]
+ - [5,"aa",1]
+ -
+ id: 21
+      desc: long window count_where, rows window
+ longWindow: w1:2s
+ inputs:
+ -
+ columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"]
+ index: ["index1:c1:c7:0:latest"]
+ storage: memory
+ rows:
+ - [1,"aa",1,20,30,1.1,2.1,1590738990000,"2020-05-01",true]
+ - [2,"aa",2,21,31,1.2,2.2,1590738991000,"2020-05-02",true]
+ - [3,"aa",3,22,32,1.3,2.3,1590738992000,"2020-05-03",true]
+ - [4,"aa",4,23,33,1.4,2.4,1590738993000,"2020-05-04",true]
+ - [5,"aa",5,24,34,1.5,2.5,1590738994000,"2020-05-05",false]
+ sql: |
+ SELECT id, c1, count_where(c8,c2<4) OVER w1 as w1_count FROM {0} WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS BETWEEN 2 PRECEDING AND CURRENT ROW);
+ expect:
+ order: id
+ columns: ["id int","c1 string","w1_count bigint"]
+ rows:
+ - [1,"aa",1]
+ - [2,"aa",2]
+ - [3,"aa",3]
+ - [4,"aa",2]
+ - [5,"aa",1]
+ -
+ id: 22
+      desc: long window count_where, second argument type is int
+ longWindow: w1:2s
+ inputs:
+ -
+ columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"]
+ index: ["index1:c1:c7:0:latest"]
+ storage: memory
+ rows:
+ - [1,"aa",1,20,30,1.1,2.1,1590738990000,"2020-05-01",true]
+ - [2,"aa",2,21,31,1.2,2.2,1590738991000,"2020-05-02",true]
+ - [3,"aa",3,22,32,1.3,2.3,1590738992000,"2020-05-03",true]
+ - [4,"aa",4,23,33,1.4,2.4,1590738993000,"2020-05-04",true]
+ - [5,"aa",5,24,34,1.5,2.5,1590738994000,"2020-05-05",false]
+ sql: |
+ SELECT id, c1, count_where(c8,c3<23) OVER w1 as w1_count FROM {0} WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS BETWEEN 2 PRECEDING AND CURRENT ROW);
+ expect:
+ order: id
+ columns: ["id int","c1 string","w1_count bigint"]
+ rows:
+ - [1,"aa",1]
+ - [2,"aa",2]
+ - [3,"aa",3]
+ - [4,"aa",2]
+ - [5,"aa",1]
+ -
+ id: 23
+      desc: long window count_where, second argument type is bigint
+ longWindow: w1:2s
+ inputs:
+ -
+ columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"]
+ index: ["index1:c1:c7:0:latest"]
+ storage: memory
+ rows:
+ - [1,"aa",1,20,30,1.1,2.1,1590738990000,"2020-05-01",true]
+ - [2,"aa",2,21,31,1.2,2.2,1590738991000,"2020-05-02",true]
+ - [3,"aa",3,22,32,1.3,2.3,1590738992000,"2020-05-03",true]
+ - [4,"aa",4,23,33,1.4,2.4,1590738993000,"2020-05-04",true]
+ - [5,"aa",5,24,34,1.5,2.5,1590738994000,"2020-05-05",false]
+ sql: |
+ SELECT id, c1, count_where(c8,c4<33) OVER w1 as w1_count FROM {0} WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS BETWEEN 2 PRECEDING AND CURRENT ROW);
+ expect:
+ order: id
+ columns: ["id int","c1 string","w1_count bigint"]
+ rows:
+ - [1,"aa",1]
+ - [2,"aa",2]
+ - [3,"aa",3]
+ - [4,"aa",2]
+ - [5,"aa",1]
+ -
+ id: 24
+      desc: long window count_where, second argument type is float
+ longWindow: w1:2s
+ inputs:
+ -
+ columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"]
+ index: ["index1:c1:c7:0:latest"]
+ storage: memory
+ rows:
+ - [1,"aa",1,20,30,1.1,2.1,1590738990000,"2020-05-01",true]
+ - [2,"aa",2,21,31,1.2,2.2,1590738991000,"2020-05-02",true]
+ - [3,"aa",3,22,32,1.3,2.3,1590738992000,"2020-05-03",true]
+ - [4,"aa",4,23,33,1.4,2.4,1590738993000,"2020-05-04",true]
+ - [5,"aa",5,24,34,1.5,2.5,1590738994000,"2020-05-05",false]
+ sql: |
+ SELECT id, c1, count_where(c8,c5<1.35) OVER w1 as w1_count FROM {0} WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS BETWEEN 2 PRECEDING AND CURRENT ROW);
+ expect:
+ order: id
+ columns: ["id int","c1 string","w1_count bigint"]
+ rows:
+ - [1,"aa",1]
+ - [2,"aa",2]
+ - [3,"aa",3]
+ - [4,"aa",2]
+ - [5,"aa",1]
+ -
+ id: 25
+      desc: long window count_where, second argument type is double
+ longWindow: w1:2s
+ inputs:
+ -
+ columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"]
+ index: ["index1:c1:c7:0:latest"]
+ storage: memory
+ rows:
+ - [1,"aa",1,20,30,1.1,2.1,1590738990000,"2020-05-01",true]
+ - [2,"aa",2,21,31,1.2,2.2,1590738991000,"2020-05-02",true]
+ - [3,"aa",3,22,32,1.3,2.3,1590738992000,"2020-05-03",true]
+ - [4,"aa",4,23,33,1.4,2.4,1590738993000,"2020-05-04",true]
+ - [5,"aa",5,24,34,1.5,2.5,1590738994000,"2020-05-05",false]
+ sql: |
+ SELECT id, c1, count_where(c8,c6<2.4) OVER w1 as w1_count FROM {0} WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS BETWEEN 2 PRECEDING AND CURRENT ROW);
+ expect:
+ order: id
+ columns: ["id int","c1 string","w1_count bigint"]
+ rows:
+ - [1,"aa",1]
+ - [2,"aa",2]
+ - [3,"aa",3]
+ - [4,"aa",2]
+ - [5,"aa",1]
+ -
+ id: 26
+      desc: long window count_where, second parameter type is timestamp
+ longWindow: w1:2s
+ inputs:
+ -
+ columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"]
+ index: ["index1:c1:c7:0:latest"]
+ storage: memory
+ rows:
+ - [1,"aa",1,20,30,1.1,2.1,1590738990000,"2020-05-01",true]
+ - [2,"aa",2,21,31,1.2,2.2,1590738991000,"2020-05-02",true]
+ - [3,"aa",3,22,32,1.3,2.3,1590738992000,"2020-05-03",true]
+ - [4,"aa",4,23,33,1.4,2.4,1590738993000,"2020-05-04",true]
+ - [5,"aa",5,24,34,1.5,2.5,1590738994000,"2020-05-05",false]
+ sql: |
+ SELECT id, c1, count_where(c8,c7<1590738993000) OVER w1 as w1_count FROM {0} WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS BETWEEN 2 PRECEDING AND CURRENT ROW);
+ expect:
+ success: false
+ msg: fail
+ -
+ id: 27
+      desc: long window count_where, second parameter type is date
+ longWindow: w1:2s
+ inputs:
+ -
+ columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"]
+ index: ["index1:c1:c7:0:latest"]
+ storage: memory
+ rows:
+ - [1,"aa",1,20,30,1.1,2.1,1590738990000,"2020-05-01",true]
+ - [2,"aa",2,21,31,1.2,2.2,1590738991000,"2020-05-02",true]
+ - [3,"aa",3,22,32,1.3,2.3,1590738992000,"2020-05-03",true]
+ - [4,"aa",4,23,33,1.4,2.4,1590738993000,"2020-05-04",true]
+ - [5,"aa",5,24,34,1.5,2.5,1590738994000,"2020-05-05",false]
+ sql: |
+ SELECT id, c1, count_where(c8,c8<"2020-05-04") OVER w1 as w1_count FROM {0} WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS BETWEEN 2 PRECEDING AND CURRENT ROW);
+ expect:
+ success: false
+ msg: fail
+ -
+ id: 28
+      desc: long window count_where, second parameter type is bool
+ longWindow: w1:2s
+ inputs:
+ -
+ columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"]
+ index: ["index1:c1:c7:0:latest"]
+ storage: memory
+ rows:
+ - [1,"aa",1,20,30,1.1,2.1,1590738990000,"2020-05-01",true]
+ - [2,"aa",2,21,31,1.2,2.2,1590738991000,"2020-05-02",true]
+ - [3,"aa",3,22,32,1.3,2.3,1590738992000,"2020-05-03",true]
+ - [4,"aa",4,23,33,1.4,2.4,1590738993000,"2020-05-04",false]
+ - [5,"aa",5,24,34,1.5,2.5,1590738994000,"2020-05-05",false]
+ sql: |
+ SELECT id, c1, count_where(c8,c9=true) OVER w1 as w1_count FROM {0} WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS BETWEEN 2 PRECEDING AND CURRENT ROW);
+ expect:
+ order: id
+ columns: ["id int","c1 string","w1_count bigint"]
+ rows:
+ - [1,"aa",1]
+ - [2,"aa",2]
+ - [3,"aa",3]
+ - [4,"aa",2]
+ - [5,"aa",1]
+ -
+ id: 29
+      desc: long window count_where, w1:2
+ longWindow: w1:2
+ inputs:
+ -
+ columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"]
+ index: ["index1:c1:c7:0:latest"]
+ storage: memory
+ rows:
+ - [1,"aa",1,20,30,1.1,2.1,1590738990000,"2020-05-01",true]
+ - [2,"aa",2,21,31,1.2,2.2,1590738991000,"2020-05-02",true]
+ - [3,"aa",3,22,32,1.3,2.3,1590738992000,"2020-05-03",true]
+ - [4,"aa",4,23,33,1.4,2.4,1590738993000,"2020-05-04",true]
+ - [5,"aa",5,24,34,1.5,2.5,1590738994000,"2020-05-05",false]
+ sql: |
+ SELECT id, c1, count_where(c8,c2<4) OVER w1 as w1_count FROM {0} WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS BETWEEN 2 PRECEDING AND CURRENT ROW);
+ expect:
+ success: false
+ msg: fail
+ -
+ id: 30
+      desc: long window count_where, disk table
+ longWindow: w1:2s
+ inputs:
+ -
+ columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"]
+ index: ["index1:c1:c7"]
+ storage: SSD
+ rows:
+ - [1,"aa",1,20,30,1.1,2.1,1590738990000,"2020-05-01",true]
+ - [2,"aa",2,21,31,1.2,2.2,1590738991000,"2020-05-02",true]
+ - [3,"aa",3,22,32,1.3,2.3,1590738992000,"2020-05-03",true]
+ - [4,"aa",4,23,33,1.4,2.4,1590738993000,"2020-05-04",true]
+ - [5,"aa",5,24,34,1.5,2.5,1590738994000,"2020-05-05",false]
+ sql: |
+ SELECT id, c1, count_where(c3,c2<4) OVER w1 as w1_count FROM {0} WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS_RANGE BETWEEN 2s PRECEDING AND CURRENT ROW);
+ expect:
+ success: false
+ msg: fail
+ -
+ id: 31
+      desc: long window count_where, second parameter type is string
+ longWindow: w1:2s
+ inputs:
+ -
+ columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 string"]
+ index: ["index1:c1:c7:0:latest"]
+ storage: memory
+ rows:
+ - [1,"aa",1,20,30,1.1,2.1,1590738990000,"2020-05-01","true"]
+ - [2,"aa",2,21,31,1.2,2.2,1590738991000,"2020-05-02","true"]
+ - [3,"aa",3,22,32,1.3,2.3,1590738992000,"2020-05-03","true"]
+ - [4,"aa",4,23,33,1.4,2.4,1590738993000,"2020-05-04","false"]
+ - [5,"aa",5,24,34,1.5,2.5,1590738994000,"2020-05-05","false"]
+ sql: |
+ SELECT id, c1, count_where(c8,c9="true") OVER w1 as w1_count FROM {0} WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS BETWEEN 2 PRECEDING AND CURRENT ROW);
+ expect:
+ order: id
+ columns: ["id int","c1 string","w1_count bigint"]
+ rows:
+ - [1,"aa",1]
+ - [2,"aa",2]
+ - [3,"aa",3]
+ - [4,"aa",2]
+ - [5,"aa",1]
+ -
+ id: 32
+      desc: long window count_where, verify the pre-aggregation table
+ longWindow: w1:2s
+ inputs:
+ -
+ columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"]
+ index: ["index1:c1:c7"]
+ storage: memory
+ rows:
+ - [1,"aa",1,1,30,1.1,2.1,1590738990000,"2020-05-01",true]
+ - [2,"aa",2,2,31,1.2,2.2,1590738991000,"2020-05-02",true]
+ - [3,"aa",3,3,32,1.3,2.3,1590738992000,"2020-05-03",true]
+ - [4,"aa",4,4,33,1.4,2.4,1590738993000,"2020-05-04",true]
+ - [5,"aa",5,5,34,1.5,2.5,1590738994000,"2020-05-05",false]
+ sql: |
+ SELECT id, c1, count_where(c8,c2<4) OVER w1 as w1_count FROM {0} WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS_RANGE BETWEEN 2s PRECEDING AND CURRENT ROW);
+ expect:
+ order: id
+ columns: ["id int","c1 string","w1_count bigint"]
+ rows:
+ - [1,"aa",1]
+ - [2,"aa",2]
+ - [3,"aa",3]
+ - [4,"aa",2]
+ - [5,"aa",1]
+
+
+
diff --git a/cases/integration_test/long_window/test_long_window.yaml b/cases/integration_test/long_window/test_long_window.yaml
new file mode 100644
index 00000000000..75f6f6193a5
--- /dev/null
+++ b/cases/integration_test/long_window/test_long_window.yaml
@@ -0,0 +1,397 @@
+# Copyright 2021 4Paradigm
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+db: test_zw
+debugs: []
+version: 0.5.0
+cases:
+ -
+ id: 0
+ desc: options(long_window='w1:2y')
+ longWindow: w1:2y
+ inputs:
+ -
+ columns : ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"]
+ indexs: ["index1:c1:c7:0:latest"]
+ rows:
+ - [1,"aa",20,30,1.1,2.1,1262278860000,"2020-05-01"]
+ - [2,"aa",21,31,1.2,2.2,1293814860000,"2020-05-02"]
+ - [3,"aa",22,32,1.3,2.3,1325350860000,"2020-05-03"]
+ - [4,"aa",23,33,1.4,2.4,1356973260000,"2020-05-04"]
+ - [5,"aa",24,34,1.5,2.5,1356973260000,"2020-05-05"]
+ sql: |
+ SELECT id, c1, count(c4) OVER w1 as w1_long FROM {0} WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS BETWEEN 2 PRECEDING AND CURRENT ROW);
+ expect:
+ success: false
+ msg: create aggregator failed
+ -
+ id: 1
+ desc: options(long_window='w1:2d')
+ longWindow: w1:2d
+ inputs:
+ -
+ columns : ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"]
+ indexs: ["index1:c1:c7"]
+ rows:
+ - [1,"aa",20,30,1.1,2.1,1577811660000,"2020-05-01"]
+ - [2,"aa",21,31,1.2,2.2,1577898060000,"2020-05-02"]
+ - [3,"aa",22,32,1.3,2.3,1577984460000,"2020-05-03"]
+ - [4,"aa",23,33,1.4,2.4,1578070860000,"2020-05-04"]
+ - [5,"aa",24,34,1.5,2.5,1578157260000,"2020-05-05"]
+ sql: |
+ SELECT id, c1, count(c4) OVER w1 as w1_long FROM {0} WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS_RANGE BETWEEN 2d PRECEDING AND CURRENT ROW);
+ expect:
+ order: id
+ columns: ["id int","c1 string","w1_long bigint"]
+ rows:
+ - [1,"aa",1]
+ - [2,"aa",2]
+ - [3,"aa",3]
+ - [4,"aa",3]
+ - [5,"aa",3]
+ preAgg:
+ name: pre_{db_name}_{sp_name}_w1_count_c4
+ type: bigint
+ rows:
+ - ["aa",1577664000000,1577836799999,1,1,null]
+ - ["aa",1577836800000,1578009599999,2,2,null]
+ -
+ id: 2
+ desc: options(long_window='w1:2h')
+ longWindow: w1:2h
+ inputs:
+ -
+ columns : ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"]
+ indexs: ["index1:c1:c7"]
+ rows:
+ - [1,"aa",20,30,1.1,2.1,1577811661000,"2020-05-01"]
+ - [2,"aa",21,31,1.2,2.2,1577815261000,"2020-05-02"]
+ - [3,"aa",22,32,1.3,2.3,1577818861000,"2020-05-03"]
+ - [4,"aa",23,33,1.4,2.4,1577822461000,"2020-05-04"]
+ - [5,"aa",24,34,1.5,2.5,1577826061000,"2020-05-05"]
+ sql: |
+ SELECT id, c1, count(c4) OVER w1 as w1_long FROM {0} WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS_RANGE BETWEEN 2h PRECEDING AND CURRENT ROW);
+ expect:
+ order: id
+ columns: ["id int","c1 string","w1_long bigint"]
+ rows:
+ - [1,"aa",1]
+ - [2,"aa",2]
+ - [3,"aa",3]
+ - [4,"aa",3]
+ - [5,"aa",3]
+ preAgg:
+ name: pre_{db_name}_{sp_name}_w1_count_c4
+ type: bigint
+ rows:
+ - ["aa",1577808000000,1577815199999,1,1,null]
+ - ["aa",1577815200000,1577822399999,2,2,null]
+ -
+ id: 3
+ desc: options(long_window='w1:2m')
+ longWindow: w1:2m
+ inputs:
+ -
+ columns : ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"]
+ indexs: ["index1:c1:c7"]
+ rows:
+ - [1,"aa",20,30,1.1,2.1,1577812141000,"2020-05-01"]
+ - [2,"aa",21,31,1.2,2.2,1577812201000,"2020-05-02"]
+ - [3,"aa",22,32,1.3,2.3,1577812261000,"2020-05-03"]
+ - [4,"aa",23,33,1.4,2.4,1577812321000,"2020-05-04"]
+ - [5,"aa",24,34,1.5,2.5,1577812381000,"2020-05-05"]
+ sql: |
+ SELECT id, c1, sum(c4) OVER w1 as w1_long FROM {0} WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS_RANGE BETWEEN 2m PRECEDING AND CURRENT ROW);
+ expect:
+ order: id
+ columns: ["id int","c1 string","w1_long bigint"]
+ rows:
+ - [1,"aa",30]
+ - [2,"aa",61]
+ - [3,"aa",93]
+ - [4,"aa",96]
+ - [5,"aa",99]
+ preAgg:
+ name: pre_{db_name}_{sp_name}_w1_sum_c4
+ type: bigint
+ rows:
+ - ["aa",1577812080000,1577812199999,1,30,null]
+ - ["aa",1577812200000,1577812319999,2,63,null]
+ -
+ id: 4
+ desc: options(long_window='w1:2s')
+ longWindow: w1:2s
+ inputs:
+ -
+ columns : ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"]
+ indexs: ["index1:c1:c7"]
+ rows:
+ - [1,"aa",20,30,1.1,2.1,1590738991000,"2020-05-01"]
+ - [2,"aa",21,31,1.2,2.2,1590738992000,"2020-05-02"]
+ - [3,"aa",22,32,1.3,2.3,1590738993000,"2020-05-03"]
+ - [4,"aa",23,33,1.4,2.4,1590738994000,"2020-05-04"]
+ - [5,"aa",24,34,1.5,2.5,1590738995000,"2020-05-05"]
+ sql: |
+ SELECT id, c1, sum(c4) OVER w1 as w1_long FROM {0} WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS_RANGE BETWEEN 2s PRECEDING AND CURRENT ROW);
+ expect:
+ order: id
+ columns: ["id int","c1 string","w1_long bigint"]
+ rows:
+ - [1,"aa",30]
+ - [2,"aa",61]
+ - [3,"aa",93]
+ - [4,"aa",96]
+ - [5,"aa",99]
+ preAgg:
+ name: pre_{db_name}_{sp_name}_w1_sum_c4
+ type: bigint
+ rows:
+ - ["aa",1590738990000,1590738991999,1,30,null]
+ - ["aa",1590738992000,1590738993999,2,63,null]
+ -
+ id: 5
+    desc: same PARTITION BY and ORDER BY, long window and short window can be merged
+ longWindow: w1:2
+ inputs:
+ -
+ columns : ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"]
+ indexs: ["index1:c1:c7"]
+ rows:
+ - [1,"aa",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"aa",21,31,1.2,2.2,1590738990001,"2020-05-02"]
+ - [3,"aa",22,32,1.3,2.3,1590738990002,"2020-05-03"]
+ - [4,"aa",23,33,1.4,2.4,1590738990003,"2020-05-04"]
+ - [5,"aa",24,34,1.5,2.5,1590738990004,"2020-05-05"]
+ sql: |
+ SELECT id, c1, sum(c4) OVER w1 as w1_long, count(c4) OVER w2 as w2_long from {0}
+ WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS_RANGE BETWEEN 2 PRECEDING AND CURRENT ROW),
+ w2 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS_RANGE BETWEEN 3 PRECEDING AND CURRENT ROW);
+ expect:
+ order: id
+ columns: ["id int","c1 string","w1_long bigint","w2_long bigint"]
+ rows:
+ - [1,"aa",30,1]
+ - [2,"aa",61,2]
+ - [3,"aa",93,3]
+ - [4,"aa",96,4]
+ - [5,"aa",99,4]
+ preAgg:
+ name: pre_{db_name}_{sp_name}_w1_sum_c4
+ type: bigint
+ rows:
+ - ["aa",1590738990000,1590738990001,2,61,null]
+ - ["aa",1590738990002,1590738990003,2,65,null]
+ -
+ id: 6
+    desc: same PARTITION BY and ORDER BY, long windows can be merged with each other
+ longWindow: w1:2,w2:2
+ inputs:
+ -
+ columns : ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"]
+ indexs: ["index1:c1:c7:0:latest"]
+ rows:
+ - [1,"aa",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"aa",21,31,1.2,2.2,1590738990001,"2020-05-02"]
+ - [3,"aa",22,32,1.3,2.3,1590738990002,"2020-05-03"]
+ - [4,"aa",23,33,1.4,2.4,1590738990003,"2020-05-04"]
+ - [5,"aa",24,34,1.5,2.5,1590738990004,"2020-05-05"]
+ sql: |
+ SELECT id, c1, sum(c4) OVER w1 as w1_long, min(c3) OVER w2 as w2_long from {0}
+ WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS BETWEEN 2 PRECEDING AND CURRENT ROW),
+ w2 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS BETWEEN 3 PRECEDING AND CURRENT ROW);
+ expect:
+ order: id
+ columns: ["id int","c1 string","w1_long bigint","w2_long int"]
+ rows:
+ - [1,"aa",30,20]
+ - [2,"aa",61,20]
+ - [3,"aa",93,20]
+ - [4,"aa",96,20]
+ - [5,"aa",99,21]
+ preAggList:
+ -
+ name: pre_{db_name}_{sp_name}_w1_sum_c4
+ type: bigint
+ rows:
+ - ["aa",1590738990000,1590738990001,2,61,null]
+ - ["aa",1590738990002,1590738990003,2,65,null]
+ -
+ name: pre_{db_name}_{sp_name}_w2_min_c3
+ type: bigint
+ rows:
+ - ["aa",1590738990000,1590738990001,2,20,null]
+ - ["aa",1590738990002,1590738990003,2,22,null]
+ -
+ id: 7
+    desc: same PARTITION BY and ORDER BY, short windows can be merged with each other (three windows, one long window and two short windows)
+ longWindow: w1:2
+ inputs:
+ -
+ columns : ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"]
+ indexs: ["index1:c1:c7:0:latest"]
+ rows:
+ - [1,"aa",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"aa",21,31,1.2,2.2,1590738990001,"2020-05-02"]
+ - [3,"aa",22,32,1.3,2.3,1590738990002,"2020-05-03"]
+ - [4,"aa",23,33,1.4,2.4,1590738990003,"2020-05-04"]
+ - [5,"aa",24,34,1.5,2.5,1590738990004,"2020-05-05"]
+ sql: |
+ SELECT id, c1, sum(c4) OVER w1 as w1_long, avg(c3) OVER w2 as w2_c3_avg, count(c3) OVER w3 as w3_c3_count from {0}
+ WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS BETWEEN 2 PRECEDING AND CURRENT ROW),
+ w2 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS BETWEEN 1 PRECEDING AND CURRENT ROW),
+ w3 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS BETWEEN 3 PRECEDING AND CURRENT ROW);
+ expect:
+ order: id
+ columns: ["id int","c1 string","w1_long bigint","w2_c3_avg double","w3_c3_count bigint"]
+ rows:
+ - [1,"aa",30,20,1]
+ - [2,"aa",61,20.5,2]
+ - [3,"aa",93,21.5,3]
+ - [4,"aa",96,22.5,4]
+ - [5,"aa",99,23.5,4]
+ preAgg:
+ name: pre_{db_name}_{sp_name}_w1_sum_c4
+ type: bigint
+ rows:
+ - ["aa",1590738990000,1590738990001,2,61,null]
+ - ["aa",1590738990002,1590738990003,2,65,null]
+ -
+ id: 8
+    desc: different PARTITION BY and ORDER BY, mix of long and short windows, windows cannot be merged
+ longWindow: w1:2
+ inputs:
+ -
+ columns : ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"]
+ indexs: ["index1:c1:c7:0:latest"]
+ rows:
+ - [1,"aa",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"aa",20,31,1.2,2.2,1590738990001,"2020-05-02"]
+ - [3,"aa",20,32,1.3,2.3,1590738990002,"2020-05-03"]
+ - [4,"aa",20,33,1.4,2.4,1590738990003,"2020-05-04"]
+ - [5,"aa",24,34,1.5,2.5,1590738990004,"2020-05-05"]
+ sql: |
+ SELECT id, c1,c3, sum(c4) OVER w1 as w1_long,count(c5) OVER w2 as w2_c5_count from {0}
+ WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS BETWEEN 2 PRECEDING AND CURRENT ROW),
+ w2 AS (PARTITION BY {0}.c3 ORDER BY {0}.c7 ROWS BETWEEN 2 PRECEDING AND CURRENT ROW);
+ expect:
+ order: id
+ columns: ["id int","c1 string","c3 int","w1_long bigint","w2_c5_count bigint"]
+ rows:
+ - [1,"aa",20,30,1]
+ - [2,"aa",20,61,2]
+ - [3,"aa",20,93,3]
+ - [4,"aa",20,96,3]
+ - [5,"aa",24,99,1]
+ preAgg:
+ name: pre_{db_name}_{sp_name}_w1_sum_c4
+ type: bigint
+ rows:
+ - ["aa",1590738990000,1590738990001,2,61,null]
+ - ["aa",1590738990002,1590738990003,2,65,null]
+ -
+ id: 9
+    desc: window name does not exist
+ longWindow: w2:2
+ inputs:
+ -
+ columns : ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"]
+ indexs: ["index1:c1:c7:0:latest"]
+ rows:
+ - [1,"aa",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"aa",21,31,1.2,2.2,1590738990001,"2020-05-02"]
+ - [3,"aa",22,32,1.3,2.3,1590738990002,"2020-05-03"]
+ - [4,"aa",23,33,1.4,2.4,1590738990003,"2020-05-04"]
+ - [5,"aa",24,34,1.5,2.5,1590738990004,"2020-05-05"]
+ sql: |
+ SELECT id, c1, sum(c4) OVER w1 as w1_c4_sum FROM {0} WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS BETWEEN 2 PRECEDING AND CURRENT ROW);
+ expect:
+ success: false
+ msg: long_windows option doesn't match window in sql
+ -
+ id: 10
+ version: 0.6.1
+ desc: delete pk
+ longWindow: w1:2s
+ inputs:
+ -
+ columns : ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"]
+ indexs: ["index1:c1:c7"]
+ rows:
+ - [1,"aa",20,30,1.1,2.1,1590738991000,"2020-05-01"]
+ - [2,"aa",21,31,1.2,2.2,1590738992000,"2020-05-02"]
+ - [3,"aa",22,32,1.3,2.3,1590738993000,"2020-05-03"]
+ - [4,"aa",23,33,1.4,2.4,1590738994000,"2020-05-04"]
+ - [5,"aa",24,34,1.5,2.5,1590738995000,"2020-05-05"]
+ steps:
+ - sql: SELECT id, c1, sum(c4) OVER w1 as w1_long FROM {0} WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS_RANGE BETWEEN 2s PRECEDING AND CURRENT ROW);
+ expect:
+ order: id
+ columns: ["id int","c1 string","w1_long bigint"]
+ rows:
+ - [1,"aa",30]
+ - [2,"aa",61]
+ - [3,"aa",93]
+ - [4,"aa",96]
+ - [5,"aa",99]
+ preAgg:
+ name: pre_{db_name}_{sp_name}_w1_sum_c4
+ type: bigint
+ rows:
+ - ["aa",1590738990000,1590738991999,1,30,null]
+ - ["aa",1590738992000,1590738993999,2,63,null]
+ - sql: delete from {0} where c1='aa';
+ expect:
+ preAgg:
+ name: pre_{db_name}_{sp_name}_w1_sum_c4
+ count: 0
+ -
+ id: 11
+ version: 0.6.1
+    desc: delete with composite index
+ longWindow: w1:2s
+ inputs:
+ -
+ columns : ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"]
+ indexs: ["index1:c1|c3:c7"]
+ rows:
+ - [1,"aa",20,30,1.1,2.1,1590738991000,"2020-05-01"]
+ - [2,"aa",20,31,1.2,2.2,1590738992000,"2020-05-02"]
+ - [3,"aa",20,32,1.3,2.3,1590738993000,"2020-05-03"]
+ - [4,"aa",20,33,1.4,2.4,1590738994000,"2020-05-04"]
+ - [5,"aa",20,34,1.5,2.5,1590738995000,"2020-05-05"]
+ steps:
+ - sql: SELECT id, c1,c3, sum(c4) OVER w1 as w1_long FROM {0} WINDOW w1 AS (PARTITION BY {0}.c1,{0}.c3 ORDER BY {0}.c7 ROWS_RANGE BETWEEN 2s PRECEDING AND CURRENT ROW);
+ expect:
+ order: id
+ columns: ["id int","c1 string","c3 int","w1_long bigint"]
+ rows:
+ - [1,"aa",20,30]
+ - [2,"aa",20,61]
+ - [3,"aa",20,93]
+ - [4,"aa",20,96]
+ - [5,"aa",20,99]
+ preAgg:
+ name: pre_{db_name}_{sp_name}_w1_sum_c4
+ type: bigint
+ rows:
+ - ["aa|20",1590738990000,1590738991999,1,30,null]
+ - ["aa|20",1590738992000,1590738993999,2,63,null]
+ - sql: delete from {0} where c1='aa' and c3=20;
+ expect:
+ preAgg:
+ name: pre_{db_name}_{sp_name}_w1_sum_c4
+ count: 0
+
+
+
diff --git a/cases/integration_test/long_window/test_long_window_batch.yaml b/cases/integration_test/long_window/test_long_window_batch.yaml
new file mode 100644
index 00000000000..60c938490d4
--- /dev/null
+++ b/cases/integration_test/long_window/test_long_window_batch.yaml
@@ -0,0 +1,35 @@
+# Copyright 2021 4Paradigm
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+db: test_zw
+debugs: []
+version: 0.5.0
+cases:
+ -
+ id: 0
+    desc: options format error
+ inputs:
+ -
+ columns : ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"]
+ indexs: ["index1:c1:c7"]
+ rows:
+ - [1,"aa",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"aa",21,31,1.2,2.2,1590738990001,"2020-05-02"]
+ - [3,"aa",22,32,1.3,2.3,1590738990002,"2020-05-03"]
+ - [4,"aa",23,33,1.4,2.4,1590738990003,"2020-05-04"]
+ - [5,"aa",24,34,1.5,2.5,1590738990004,"2020-05-05"]
+ sql: |
+ deploy {0} options(long_windows='w1:100') SELECT id, c1, avg(c5) OVER w1 as w1_c4_avg FROM {0} WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS_RANGE BETWEEN 2 PRECEDING AND CURRENT ROW);
+ expect:
+ success: true
diff --git a/cases/integration_test/long_window/test_udaf.yaml b/cases/integration_test/long_window/test_udaf.yaml
new file mode 100644
index 00000000000..1eb2778c6e5
--- /dev/null
+++ b/cases/integration_test/long_window/test_udaf.yaml
@@ -0,0 +1,788 @@
+# Copyright 2021 4Paradigm
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+db: test_zw
+debugs: []
+version: 0.5.0
+cases:
+ -
+ id: 0
+    desc: long window count/avg/sum/max/min, date type
+ longWindow: w1:2
+ inputs:
+ -
+ columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"]
+ index: ["index1:c1:c7"]
+ rows:
+ - [1,"aa",1,1,30,1.1,2.1,1590738990000,"2020-05-01",true]
+ - [2,"aa",2,2,31,1.2,2.2,1590738991000,"2020-05-02",true]
+ - [3,"aa",3,3,32,1.3,2.3,1590738992000,"2020-05-03",true]
+ - [4,"aa",4,4,33,1.4,2.4,1590738993000,"2020-05-04",true]
+ - [5,"aa",5,5,34,1.5,2.5,1590738994000,"2020-05-05",false]
+ sql: |
+ SELECT id, c1, d[0](c8) OVER w1 as w1_udaf FROM {0} WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS_RANGE BETWEEN 2s PRECEDING AND CURRENT ROW);
+ dataProvider:
+ - ["min","max","sum","avg","count"]
+ expectProvider:
+ 0:
+ order: id
+ columns: [ "id int","c1 string","w1_udaf date" ]
+ rows:
+ - [1,"aa","2020-05-01"]
+ - [2,"aa","2020-05-01"]
+ - [3,"aa","2020-05-01"]
+ - [4,"aa","2020-05-02"]
+ - [5,"aa","2020-05-03"]
+ preAgg:
+ name: pre_{db_name}_{sp_name}_w1_min_c8
+ type: date
+ rows:
+ - ["aa",1590738990000,1590738991000,2,"2020-05-01",null]
+ - ["aa",1590738992000,1590738993000,2,"2020-05-03",null]
+ 1:
+ order: id
+ columns: [ "id int","c1 string","w1_udaf date" ]
+ rows:
+ - [1,"aa","2020-05-01"]
+ - [2,"aa","2020-05-02"]
+ - [3,"aa","2020-05-03"]
+ - [4,"aa","2020-05-04"]
+ - [5,"aa","2020-05-05"]
+ preAgg:
+ name: pre_{db_name}_{sp_name}_w1_max_c8
+ type: date
+ rows:
+ - ["aa",1590738990000,1590738991000,2,"2020-05-02",null]
+ - ["aa",1590738992000,1590738993000,2,"2020-05-04",null]
+ 2:
+ success: false
+ msg: fail
+ 3:
+ success: false
+ msg: fail
+ 4:
+ order: id
+ columns: ["id int","c1 string","w1_udaf bigint"]
+ rows:
+ - [1,"aa",1]
+ - [2,"aa",2]
+ - [3,"aa",3]
+ - [4,"aa",3]
+ - [5,"aa",3]
+ preAgg:
+ name: pre_{db_name}_{sp_name}_w1_count_c8
+ type: bigint
+ rows:
+ - ["aa",1590738990000,1590738991000,2,2,null]
+ - ["aa",1590738992000,1590738993000,2,2,null]
+ -
+ id: 1
+    desc: long window count/avg/sum/max/min, smallint type
+ longWindow: w1:2
+ inputs:
+ -
+ columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"]
+ index: ["index1:c1:c7"]
+ rows:
+ - [1,"aa",1,20,30,1.1,2.1,1590738990000,"2020-05-01",true]
+ - [2,"aa",2,21,31,1.2,2.2,1590738991000,"2020-05-02",true]
+ - [3,"aa",3,22,32,1.3,2.3,1590738992000,"2020-05-03",true]
+ - [4,"aa",4,23,33,1.4,2.4,1590738993000,"2020-05-04",true]
+ - [5,"aa",5,24,34,1.5,2.5,1590738994000,"2020-05-05",false]
+ dataProvider:
+ - ["min","max","sum","avg","count"]
+ sql: |
+ SELECT id, c1, d[0](c2) OVER w1 as w1_udaf FROM {0} WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS_RANGE BETWEEN 2s PRECEDING AND CURRENT ROW);
+ expect:
+ order: id
+ columns: ["id int","c1 string","w1_udaf smallint"]
+ expectProvider:
+ 0:
+ rows:
+ - [1,"aa",1]
+ - [2,"aa",1]
+ - [3,"aa",1]
+ - [4,"aa",2]
+ - [5,"aa",3]
+ preAgg:
+ name: pre_{db_name}_{sp_name}_w1_min_c2
+ type: smallint
+ rows:
+ - ["aa",1590738990000,1590738991000,2,1,null]
+ - ["aa",1590738992000,1590738993000,2,3,null]
+ 1:
+ rows:
+ - [1,"aa",1]
+ - [2,"aa",2]
+ - [3,"aa",3]
+ - [4,"aa",4]
+ - [5,"aa",5]
+ preAgg:
+ name: pre_{db_name}_{sp_name}_w1_max_c2
+ type: smallint
+ rows:
+ - ["aa",1590738990000,1590738991000,2,2,null]
+ - ["aa",1590738992000,1590738993000,2,4,null]
+ 2:
+ rows:
+ - [1,"aa",1]
+ - [2,"aa",3]
+ - [3,"aa",6]
+ - [4,"aa",9]
+ - [5,"aa",12]
+ preAgg:
+ name: pre_{db_name}_{sp_name}_w1_sum_c2
+ type: smallint
+ rows:
+ - ["aa",1590738990000,1590738991000,2,3,null]
+ - ["aa",1590738992000,1590738993000,2,7,null]
+ 3:
+ columns: ["id int","c1 string","w1_udaf double"]
+ rows:
+ - [1,"aa",1]
+ - [2,"aa",1.5]
+ - [3,"aa",2]
+ - [4,"aa",3]
+ - [5,"aa",4]
+ 4:
+ columns: ["id int","c1 string","w1_udaf bigint"]
+ rows:
+ - [1,"aa",1]
+ - [2,"aa",2]
+ - [3,"aa",3]
+ - [4,"aa",3]
+ - [5,"aa",3]
+ preAgg:
+ name: pre_{db_name}_{sp_name}_w1_count_c2
+ type: bigint
+ rows:
+ - ["aa",1590738990000,1590738991000,2,2,null]
+ - ["aa",1590738992000,1590738993000,2,2,null]
+ -
+ id: 2
+    desc: long window count/avg/sum/max/min, int type # pre_{db_name}_{table_name}_{window_name}_{function_name}_{column_name};
+ longWindow: w1:2s
+ inputs:
+ -
+ columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"]
+ index: ["index1:c1:c7"]
+ rows:
+ - [1,"aa",1,20,30,1.1,2.1,1590738990000,"2020-05-01",true]
+ - [2,"aa",2,21,31,1.2,2.2,1590738991000,"2020-05-02",true]
+ - [3,"aa",3,22,32,1.3,2.3,1590738992000,"2020-05-03",true]
+ - [4,"aa",4,23,33,1.4,2.4,1590738993000,"2020-05-04",true]
+ - [5,"aa",5,24,34,1.5,2.5,1590738994000,"2020-05-05",false]
+ dataProvider:
+ - ["min","max","sum","avg","count"]
+ sql: |
+ SELECT id, c1, d[0](c3) OVER w1 as w1_udaf FROM {0} WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS_RANGE BETWEEN 2s PRECEDING AND CURRENT ROW);
+ expect:
+ order: id
+ columns: ["id int","c1 string","w1_udaf int"]
+ expectProvider:
+ 0:
+ rows:
+ - [1,"aa",20]
+ - [2,"aa",20]
+ - [3,"aa",20]
+ - [4,"aa",21]
+ - [5,"aa",22]
+ preAgg:
+ name: pre_{db_name}_{sp_name}_w1_min_c3
+ type: int
+ rows:
+ - ["aa",1590738990000,1590738991999,2,20,null]
+ - ["aa",1590738992000,1590738993999,2,22,null]
+ 1:
+ rows:
+ - [1,"aa",20]
+ - [2,"aa",21]
+ - [3,"aa",22]
+ - [4,"aa",23]
+ - [5,"aa",24]
+ preAgg:
+ name: pre_{db_name}_{sp_name}_w1_max_c3
+ type: int
+ rows:
+ - ["aa",1590738990000,1590738991999,2,21,null]
+ - ["aa",1590738992000,1590738993999,2,23,null]
+ 2:
+ rows:
+ - [1,"aa",20]
+ - [2,"aa",41]
+ - [3,"aa",63]
+ - [4,"aa",66]
+ - [5,"aa",69]
+ preAgg:
+ name: pre_{db_name}_{sp_name}_w1_sum_c3
+ type: bigint
+ rows:
+ - ["aa",1590738990000,1590738991999,2,41,null]
+ - ["aa",1590738992000,1590738993999,2,45,null]
+ 3:
+ columns: ["id int","c1 string","w1_udaf double"]
+ rows:
+ - [1,"aa",20]
+ - [2,"aa",20.5]
+ - [3,"aa",21]
+ - [4,"aa",22]
+ - [5,"aa",23]
+# preAgg:
+# name: pre_{db_name}_{sp_name}_w1_avg_c3;
+# rows:
+# - ["aa",1590738990000,1590738991999,2,20.5,null]
+# - ["aa",1590738992000,1590738993999,2,22.5,null]
+ 4:
+ columns: ["id int","c1 string","w1_udaf bigint"]
+ rows:
+ - [1,"aa",1]
+ - [2,"aa",2]
+ - [3,"aa",3]
+ - [4,"aa",3]
+ - [5,"aa",3]
+ preAgg:
+ name: pre_{db_name}_{sp_name}_w1_count_c3
+ type: bigint
+ rows:
+ - ["aa",1590738990000,1590738991999,2,2,null]
+ - ["aa",1590738992000,1590738993999,2,2,null]
+ -
+ id: 3
+    desc: long window count/avg/sum/max/min, bigint type
+ longWindow: w1:2s
+ inputs:
+ -
+ columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"]
+ index: ["index1:c1:c7"]
+ rows:
+ - [1,"aa",1,20,30,1.1,2.1,1590738990000,"2020-05-01",true]
+ - [2,"aa",2,21,31,1.2,2.2,1590738991000,"2020-05-02",true]
+ - [3,"aa",3,22,32,1.3,2.3,1590738992000,"2020-05-03",true]
+ - [4,"aa",4,23,33,1.4,2.4,1590738993000,"2020-05-04",true]
+ - [5,"aa",5,24,34,1.5,2.5,1590738994000,"2020-05-05",false]
+ dataProvider:
+ - ["min","max","sum","avg","count"]
+ sql: |
+ SELECT id, c1, d[0](c4) OVER w1 as w1_udaf FROM {0} WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS_RANGE BETWEEN 2s PRECEDING AND CURRENT ROW);
+ expect:
+ order: id
+ columns: ["id int","c1 string","w1_udaf bigint"]
+ expectProvider:
+ 0:
+ rows:
+ - [1,"aa",30]
+ - [2,"aa",30]
+ - [3,"aa",30]
+ - [4,"aa",31]
+ - [5,"aa",32]
+ preAgg:
+ name: pre_{db_name}_{sp_name}_w1_min_c4
+ type: bigint
+ rows:
+ - ["aa",1590738990000,1590738991999,2,30,null]
+ - ["aa",1590738992000,1590738993999,2,32,null]
+ 1:
+ rows:
+ - [1,"aa",30]
+ - [2,"aa",31]
+ - [3,"aa",32]
+ - [4,"aa",33]
+ - [5,"aa",34]
+ preAgg:
+ name: pre_{db_name}_{sp_name}_w1_max_c4
+ type: bigint
+ rows:
+ - ["aa",1590738990000,1590738991999,2,31,null]
+ - ["aa",1590738992000,1590738993999,2,33,null]
+ 2:
+ rows:
+ - [1,"aa",30]
+ - [2,"aa",61]
+ - [3,"aa",93]
+ - [4,"aa",96]
+ - [5,"aa",99]
+ preAgg:
+            name: pre_{db_name}_{sp_name}_w1_sum_c4
+ type: bigint
+ rows:
+ - ["aa",1590738990000,1590738991999,2,61,null]
+ - ["aa",1590738992000,1590738993999,2,65,null]
+ 3:
+ columns: ["id int","c1 string","w1_udaf double"]
+ rows:
+ - [1,"aa",30]
+ - [2,"aa",30.5]
+ - [3,"aa",31]
+ - [4,"aa",32]
+ - [5,"aa",33]
+ 4:
+ columns: ["id int","c1 string","w1_udaf bigint"]
+ rows:
+ - [1,"aa",1]
+ - [2,"aa",2]
+ - [3,"aa",3]
+ - [4,"aa",3]
+ - [5,"aa",3]
+ preAgg:
+ name: pre_{db_name}_{sp_name}_w1_count_c4
+ type: bigint
+ rows:
+ - ["aa",1590738990000,1590738991999,2,2,null]
+ - ["aa",1590738992000,1590738993999,2,2,null]
+ -
+ id: 4
+    desc: long window count/avg/sum/max/min, string type
+ longWindow: w1:2s
+ inputs:
+ -
+ columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 string","c9 bool"]
+ index: ["index1:c1:c7"]
+ rows:
+ - [1,"aa",1,20,30,1.1,2.1,1590738990000,"2020-05-01",true]
+ - [2,"aa",2,21,31,1.2,2.2,1590738991000,"2020-05-02",true]
+ - [3,"aa",3,22,32,1.3,2.3,1590738992000,"2020-05-03",true]
+ - [4,"aa",4,23,33,1.4,2.4,1590738993000,"2020-05-04",true]
+ - [5,"aa",5,24,34,1.5,2.5,1590738994000,"2020-05-05",false]
+ dataProvider:
+ - ["min","max","sum","avg","count"]
+ sql: |
+ SELECT id, c1, d[0](c8) OVER w1 as w1_udaf FROM {0} WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS_RANGE BETWEEN 2s PRECEDING AND CURRENT ROW);
+ expectProvider:
+ 0:
+ order: id
+ columns: [ "id int","c1 string","w1_udaf string" ]
+ rows:
+ - [1,"aa","2020-05-01"]
+ - [2,"aa","2020-05-01"]
+ - [3,"aa","2020-05-01"]
+ - [4,"aa","2020-05-02"]
+ - [5,"aa","2020-05-03"]
+ preAgg:
+ name: pre_{db_name}_{sp_name}_w1_min_c8
+ type: string
+ rows:
+ - ["aa",1590738990000,1590738991999,2,"2020-05-01",null]
+ - ["aa",1590738992000,1590738993999,2,"2020-05-03",null]
+ 1:
+ order: id
+ columns: [ "id int","c1 string","w1_udaf string" ]
+ rows:
+ - [1,"aa","2020-05-01"]
+ - [2,"aa","2020-05-02"]
+ - [3,"aa","2020-05-03"]
+ - [4,"aa","2020-05-04"]
+ - [5,"aa","2020-05-05"]
+ preAgg:
+ name: pre_{db_name}_{sp_name}_w1_max_c8
+ type: string
+ rows:
+ - ["aa",1590738990000,1590738991999,2,"2020-05-02",null]
+ - ["aa",1590738992000,1590738993999,2,"2020-05-04",null]
+ 2:
+ success: false
+ msg: fail
+ 3:
+ success: false
+ msg: fail
+ 4:
+ order: id
+ columns: ["id int","c1 string","w1_udaf bigint"]
+ rows:
+ - [1,"aa",1]
+ - [2,"aa",2]
+ - [3,"aa",3]
+ - [4,"aa",3]
+ - [5,"aa",3]
+ preAgg:
+ name: pre_{db_name}_{sp_name}_w1_count_c8
+ type: bigint
+ rows:
+ - ["aa",1590738990000,1590738991999,2,2,null]
+ - ["aa",1590738992000,1590738993999,2,2,null]
+ -
+ id: 5
+    desc: long window count/avg/sum/max/min, timestamp type
+ longWindow: w1:2s
+ inputs:
+ -
+ columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"]
+ index: ["index1:c1:c7"]
+ rows:
+ - [1,"aa",1,20,30,1.1,2.1,1590738990000,"2020-05-01",true]
+ - [2,"aa",2,21,31,1.2,2.2,1590738991000,"2020-05-02",true]
+ - [3,"aa",3,22,32,1.3,2.3,1590738992000,"2020-05-03",true]
+ - [4,"aa",4,23,33,1.4,2.4,1590738993000,"2020-05-04",true]
+ - [5,"aa",5,24,34,1.5,2.5,1590738994000,"2020-05-05",false]
+ dataProvider:
+ - ["min","max","sum","avg","count"]
+ sql: |
+ SELECT id, c1, d[0](c7) OVER w1 as w1_udaf FROM {0} WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS_RANGE BETWEEN 2s PRECEDING AND CURRENT ROW);
+ expectProvider:
+ 0:
+ order: id
+ columns: [ "id int","c1 string","w1_udaf timestamp" ]
+ rows:
+ - [1,"aa",1590738990000]
+ - [2,"aa",1590738990000]
+ - [3,"aa",1590738990000]
+ - [4,"aa",1590738991000]
+ - [5,"aa",1590738992000]
+# preAgg:
+# name: pre_{db_name}_{sp_name}_w1_min_c7
+# type: timestamp
+# rows:
+# - ["aa",1590738990000,1590738991999,2,1590738990000,null] # 101110010 01011111 01101110 10110011 10110000
+# - ["aa",1590738992000,1590738993999,2,1590738992000,null]
+ 1:
+ order: id
+ columns: [ "id int","c1 string","w1_udaf timestamp" ]
+ rows:
+ - [1,"aa",1590738990000]
+ - [2,"aa",1590738991000]
+ - [3,"aa",1590738992000]
+ - [4,"aa",1590738993000]
+ - [5,"aa",1590738994000]
+# preAgg:
+# name: pre_{db_name}_{sp_name}_w1_max_c7
+# type: timestamp
+# rows:
+# - ["aa",1590738990000,1590738991999,2,1590738993000,null]
+# - ["aa",1590738992000,1590738993999,2,1590738994000,null]
+ 2:
+ order: id
+ columns: [ "id int","c1 string","w1_udaf timestamp" ]
+ rows:
+ - [1,"aa",1590738990000]
+ - [2,"aa",3181477981000]
+ - [3,"aa",4772216973000]
+ - [4,"aa",4772216976000]
+ - [5,"aa",4772216979000]
+# preAgg:
+# name: pre_{db_name}_{sp_name}_w1_sum_c7
+# type: bigtimestampint
+# rows:
+# - ["aa",1590738990000,1590738991999,2,3181477981000,null]
+# - ["aa",1590738992000,1590738993999,2,3181477985000,null]
+ 3:
+ success: false
+ msg: fail
+ 4:
+ order: id
+ columns: ["id int","c1 string","w1_udaf bigint"]
+ rows:
+ - [1,"aa",1]
+ - [2,"aa",2]
+ - [3,"aa",3]
+ - [4,"aa",3]
+ - [5,"aa",3]
+ preAgg:
+ name: pre_{db_name}_{sp_name}_w1_count_c7
+ type: bigint
+ rows:
+ - ["aa",1590738990000,1590738991999,2,2,null]
+ - ["aa",1590738992000,1590738993999,2,2,null]
+ -
+ id: 6
+    desc: long window count/avg/sum/max/min, row type
+ longWindow: w1:2s
+ inputs:
+ -
+ columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"]
+ index: ["index1:c1:c7"]
+ rows:
+ - [1,"aa",1,20,30,1.1,2.1,1590738990000,"2020-05-01",true]
+ - [2,"aa",2,21,31,1.2,2.2,1590738991000,"2020-05-02",true]
+ - [3,"aa",3,22,32,1.3,2.3,1590738992000,"2020-05-03",true]
+ - [4,"aa",4,23,33,1.4,2.4,1590738993000,"2020-05-04",true]
+ - [5,"aa",5,24,34,1.5,2.5,1590738994000,"2020-05-05",false]
+ dataProvider:
+ - ["min","max","sum","avg","count"]
+ sql: |
+ SELECT id, c1, d[0](*) OVER w1 as w1_udaf FROM {0} WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS_RANGE BETWEEN 2s PRECEDING AND CURRENT ROW);
+ expectProvider:
+ 0:
+ success: false
+ msg: fail
+ 1:
+ success: false
+ msg: fail
+ 2:
+ success: false
+ msg: fail
+ 3:
+ success: false
+ msg: fail
+ 4:
+ order: id
+ columns: ["id int","c1 string","w1_udaf bigint"]
+ rows:
+ - [1,"aa",1]
+ - [2,"aa",2]
+ - [3,"aa",3]
+ - [4,"aa",3]
+ - [5,"aa",3]
+ preAgg:
+ name: pre_{db_name}_{sp_name}_w1_count_
+ type: bigint
+ rows:
+ - ["aa",1590738990000,1590738991999,2,2,null]
+ - ["aa",1590738992000,1590738993999,2,2,null]
+ -
+ id: 7
+    desc: long window count/avg/sum/max/min, bool type
+ longWindow: w1:2s
+ inputs:
+ -
+ columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"]
+ index: ["index1:c1:c7"]
+ rows:
+ - [1,"aa",1,20,30,1.1,2.1,1590738990000,"2020-05-01",true]
+ - [2,"aa",2,21,31,1.2,2.2,1590738991000,"2020-05-02",true]
+ - [3,"aa",3,22,32,1.3,2.3,1590738992000,"2020-05-03",true]
+ - [4,"aa",4,23,33,1.4,2.4,1590738993000,"2020-05-04",true]
+ - [5,"aa",5,24,34,1.5,2.5,1590738994000,"2020-05-05",false]
+ sql: |
+ SELECT id, c1, d[0](c9) OVER w1 as w1_udaf FROM {0} WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS_RANGE BETWEEN 2s PRECEDING AND CURRENT ROW);
+ dataProvider:
+ - ["min","max","sum","avg","count"]
+ expectProvider:
+ 0:
+ success: false
+ msg: fail
+ 1:
+ success: false
+ msg: fail
+ 2:
+ success: false
+ msg: fail
+ 3:
+ success: false
+ msg: fail
+ 4:
+ order: id
+ columns: ["id int","c1 string","w1_udaf bigint"]
+ rows:
+ - [1,"aa",1]
+ - [2,"aa",2]
+ - [3,"aa",3]
+ - [4,"aa",3]
+ - [5,"aa",3]
+ preAgg:
+        name: pre_{db_name}_{sp_name}_w1_count_c9
+ type: bigint
+ rows:
+ - ["aa",1590738990000,1590738991999,2,2,null]
+ - ["aa",1590738992000,1590738993999,2,2,null]
+ -
+ id: 8
+    desc: long window count/avg/sum/max/min, float type
+ longWindow: w1:2s
+ inputs:
+ -
+ columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"]
+ index: ["index1:c1:c7"]
+ rows:
+ - [1,"aa",1,20,30,1.1,2.1,1590738990000,"2020-05-01",true]
+ - [2,"aa",2,21,31,1.2,2.2,1590738991000,"2020-05-02",true]
+ - [3,"aa",3,22,32,1.3,2.3,1590738992000,"2020-05-03",true]
+ - [4,"aa",4,23,33,1.4,2.4,1590738993000,"2020-05-04",true]
+ - [5,"aa",5,24,34,1.5,2.5,1590738994000,"2020-05-05",false]
+ sql: |
+ SELECT id, c1, d[0](c5) OVER w1 as w1_udaf FROM {0} WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS_RANGE BETWEEN 2s PRECEDING AND CURRENT ROW);
+ dataProvider:
+ - ["min","max","sum","avg","count"]
+ expect:
+ order: id
+ columns: ["id int","c1 string","w1_udaf float"]
+ expectProvider:
+ 0:
+ rows:
+ - [1,"aa",1.1]
+ - [2,"aa",1.1]
+ - [3,"aa",1.1]
+ - [4,"aa",1.2]
+ - [5,"aa",1.3]
+# preAgg:
+# name: pre_{db_name}_{sp_name}_w1_min_c5;
+# type: float
+# rows:
+# - ["aa",1590738990000,1590738991999,2,1.1,null]
+# - ["aa",1590738992000,1590738993999,2,1.3,null]
+ 1:
+ rows:
+ - [1,"aa",1.1]
+ - [2,"aa",1.2]
+ - [3,"aa",1.3]
+ - [4,"aa",1.4]
+ - [5,"aa",1.5]
+ 2:
+ rows:
+ - [1,"aa",1.1]
+ - [2,"aa",2.3]
+ - [3,"aa",3.6]
+ - [4,"aa",3.9]
+ - [5,"aa",4.2]
+ 3:
+ columns: ["id int","c1 string","w1_udaf double"]
+ rows:
+ - [1,"aa",1.1]
+ - [2,"aa",1.15]
+ - [3,"aa",1.2]
+ - [4,"aa",1.3]
+ - [5,"aa",1.4]
+ 4:
+ columns: ["id int","c1 string","w1_udaf bigint"]
+ rows:
+ - [1,"aa",1]
+ - [2,"aa",2]
+ - [3,"aa",3]
+ - [4,"aa",3]
+ - [5,"aa",3]
+ preAgg:
+        name: pre_{db_name}_{sp_name}_w1_count_c5
+ type: bigint
+ rows:
+ - ["aa",1590738990000,1590738991999,2,2,null]
+ - ["aa",1590738992000,1590738993999,2,2,null]
+ -
+ id: 9
+    desc: long window count/avg/sum/max/min, double type
+ longWindow: w1:2s
+ inputs:
+ -
+ columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"]
+ index: ["index1:c1:c7"]
+ rows:
+ - [1,"aa",1,20,30,1.1,2.1,1590738990000,"2020-05-01",true]
+ - [2,"aa",2,21,31,1.2,2.2,1590738991000,"2020-05-02",true]
+ - [3,"aa",3,22,32,1.3,2.3,1590738992000,"2020-05-03",true]
+ - [4,"aa",4,23,33,1.4,2.4,1590738993000,"2020-05-04",true]
+ - [5,"aa",5,24,34,1.5,2.5,1590738994000,"2020-05-05",false]
+ sql: |
+ SELECT id, c1, d[0](c6) OVER w1 as w1_udaf FROM {0} WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS_RANGE BETWEEN 2s PRECEDING AND CURRENT ROW);
+ dataProvider:
+ - ["min","max","sum","avg","count"]
+ expect:
+ order: id
+ columns: ["id int","c1 string","w1_udaf double"]
+ expectProvider:
+ 0:
+ rows:
+ - [1,"aa",2.1]
+ - [2,"aa",2.1]
+ - [3,"aa",2.1]
+ - [4,"aa",2.2]
+ - [5,"aa",2.3]
+ 1:
+ rows:
+ - [1,"aa",2.1]
+ - [2,"aa",2.2]
+ - [3,"aa",2.3]
+ - [4,"aa",2.4]
+ - [5,"aa",2.5]
+ 2:
+ rows:
+ - [1,"aa",2.1]
+ - [2,"aa",4.3]
+ - [3,"aa",6.6]
+ - [4,"aa",6.9]
+ - [5,"aa",7.2]
+ 3:
+ columns: ["id int","c1 string","w1_udaf double"]
+ rows:
+ - [1,"aa",2.1]
+ - [2,"aa",2.15]
+ - [3,"aa",2.2]
+ - [4,"aa",2.3]
+ - [5,"aa",2.4]
+ 4:
+ columns: ["id int","c1 string","w1_udaf bigint"]
+ rows:
+ - [1,"aa",1]
+ - [2,"aa",2]
+ - [3,"aa",3]
+ - [4,"aa",3]
+ - [5,"aa",3]
+ preAgg:
+        name: pre_{db_name}_{sp_name}_w1_count_c6
+ type: bigint
+ rows:
+ - ["aa",1590738990000,1590738991999,2,2,null]
+ - ["aa",1590738992000,1590738993999,2,2,null]
+ -
+ id: 10
+    desc: long window count/avg/sum/max/min, rows window
+ longWindow: w1:2s
+ inputs:
+ -
+ columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"]
+ index: ["index1:c1:c7:0:latest"]
+ rows:
+ - [1,"aa",1,20,30,1.1,2.1,1590738990000,"2020-05-01",true]
+ - [2,"aa",2,21,31,1.2,2.2,1590738991000,"2020-05-02",true]
+ - [3,"aa",3,22,32,1.3,2.3,1590738992000,"2020-05-03",true]
+ - [4,"aa",4,23,33,1.4,2.4,1590738993000,"2020-05-04",true]
+ - [5,"aa",5,24,34,1.5,2.5,1590738994000,"2020-05-05",false]
+ sql: |
+ SELECT id, c1, d[0](c3) OVER w1 as w1_udaf FROM {0} WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS BETWEEN 2 PRECEDING AND CURRENT ROW);
+ dataProvider:
+ - ["min","max","sum","avg","count"]
+ expect:
+ order: id
+ columns: ["id int","c1 string","w1_udaf int"]
+ expectProvider:
+ 0:
+ rows:
+ - [1,"aa",20]
+ - [2,"aa",20]
+ - [3,"aa",20]
+ - [4,"aa",21]
+ - [5,"aa",22]
+ 1:
+ rows:
+ - [1,"aa",20]
+ - [2,"aa",21]
+ - [3,"aa",22]
+ - [4,"aa",23]
+ - [5,"aa",24]
+ 2:
+ rows:
+ - [1,"aa",20]
+ - [2,"aa",41]
+ - [3,"aa",63]
+ - [4,"aa",66]
+ - [5,"aa",69]
+ 3:
+ columns: ["id int","c1 string","w1_udaf double"]
+ rows:
+ - [1,"aa",20]
+ - [2,"aa",20.5]
+ - [3,"aa",21]
+ - [4,"aa",22]
+ - [5,"aa",23]
+ 4:
+ columns: ["id int","c1 string","w1_udaf bigint"]
+ rows:
+ - [1,"aa",1]
+ - [2,"aa",2]
+ - [3,"aa",3]
+ - [4,"aa",3]
+ - [5,"aa",3]
+ preAgg:
+        name: pre_{db_name}_{sp_name}_w1_count_c3
+ type: bigint
+ rows:
+ - ["aa",1590738990000,1590738991999,2,2,null]
+ - ["aa",1590738992000,1590738993999,2,2,null]
+
diff --git a/cases/integration_test/long_window/test_xxx_where.yaml b/cases/integration_test/long_window/test_xxx_where.yaml
new file mode 100644
index 00000000000..7915ceb3e2b
--- /dev/null
+++ b/cases/integration_test/long_window/test_xxx_where.yaml
@@ -0,0 +1,1210 @@
+# Copyright 2021 4Paradigm
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+db: test_zw
+debugs: []
+version: 0.6.0
+cases:
+ -
+ id: 0
+    desc: long window xxx_where, date type
+ longWindow: w1:2s
+ inputs:
+ -
+ columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"]
+ index: ["index1:c1:c7"]
+ storage: memory
+ rows:
+ - [1,"aa",1,1,30,1.1,2.1,1590738990000,"2020-05-01",true]
+ - [2,"aa",2,2,31,1.2,2.2,1590738991000,"2020-05-02",true]
+ - [3,"aa",3,3,32,1.3,2.3,1590738992000,"2020-05-03",true]
+ - [4,"aa",4,4,33,1.4,2.4,1590738993000,"2020-05-04",true]
+ - [5,"aa",5,5,34,1.5,2.5,1590738994000,"2020-05-05",false]
+ sql: |
+ SELECT id, c1, d[0](c8,c2<4) OVER w1 as w1_where FROM {0} WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS_RANGE BETWEEN 2s PRECEDING AND CURRENT ROW);
+ dataProvider:
+ - ["min_where","max_where","sum_where","avg_where"]
+ expect:
+ success: false
+ msg: fail
+ -
+ id: 1
+    desc: long window xxx_where, smallint type
+ longWindow: w1:2s
+ inputs:
+ -
+ columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"]
+ index: ["index1:c1:c7"]
+ storage: memory
+ rows:
+ - [1,"aa",1,20,30,1.1,2.1,1590738990000,"2020-05-01",true]
+ - [2,"aa",2,21,31,1.2,2.2,1590738991000,"2020-05-02",true]
+ - [3,"aa",3,22,32,1.3,2.3,1590738992000,"2020-05-03",true]
+ - [4,"aa",4,23,33,1.4,2.4,1590738993000,"2020-05-04",true]
+ - [5,"aa",5,24,34,1.5,2.5,1590738994000,"2020-05-05",false]
+ dataProvider:
+ - ["min_where","max_where","sum_where","avg_where"]
+ sql: |
+ SELECT id, c1, d[0](c2,c2>2) OVER w1 as w1_where FROM {0} WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS_RANGE BETWEEN 2s PRECEDING AND CURRENT ROW);
+ expect:
+ order: id
+ columns: ["id int","c1 string","w1_where smallint"]
+ expectProvider:
+ 0:
+ rows:
+ - [1,"aa",null]
+ - [2,"aa",null]
+ - [3,"aa",3]
+ - [4,"aa",3]
+ - [5,"aa",3]
+ 1:
+ rows:
+ - [1,"aa",null]
+ - [2,"aa",null]
+ - [3,"aa",3]
+ - [4,"aa",4]
+ - [5,"aa",5]
+ 2:
+ rows:
+ - [1,"aa",null]
+ - [2,"aa",null]
+ - [3,"aa",3]
+ - [4,"aa",7]
+ - [5,"aa",12]
+ 3:
+ columns: ["id int","c1 string","w1_where double"]
+ rows:
+ - [1,"aa",null]
+ - [2,"aa",null]
+ - [3,"aa",3]
+ - [4,"aa",3.5]
+ - [5,"aa",4]
+ -
+ id: 2
+    desc: long window xxx_where, int type
+ longWindow: w1:2s
+ inputs:
+ -
+ columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"]
+ index: ["index1:c1:c7"]
+ storage: memory
+ rows:
+ - [1,"aa",1,20,30,1.1,2.1,1590738990000,"2020-05-01",true]
+ - [2,"aa",1,21,31,1.2,2.2,1590738991000,"2020-05-02",true]
+ - [3,"aa",1,22,32,1.3,2.3,1590738992000,"2020-05-03",true]
+ - [4,"aa",4,23,33,1.4,2.4,1590738993000,"2020-05-04",true]
+ - [5,"aa",5,24,34,1.5,2.5,1590738994000,"2020-05-05",false]
+ dataProvider:
+ - ["min_where","max_where","sum_where","avg_where"]
+ sql: |
+ SELECT id, c1, d[0](c3,c2<4) OVER w1 as w1_where FROM {0} WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS_RANGE BETWEEN 2s PRECEDING AND CURRENT ROW);
+ expect:
+ order: id
+ columns: ["id int","c1 string","w1_where int"]
+ expectProvider:
+ 0:
+ rows:
+ - [1,"aa",20]
+ - [2,"aa",20]
+ - [3,"aa",20]
+ - [4,"aa",21]
+ - [5,"aa",22]
+ preAgg:
+ name: pre_{db_name}_{sp_name}_w1_min_where_c3_c2
+ type: int
+ rows:
+ - ["aa",1590738990000,1590738991999,2,20,1]
+ 1:
+ rows:
+ - [1,"aa",20]
+ - [2,"aa",21]
+ - [3,"aa",22]
+ - [4,"aa",22]
+ - [5,"aa",22]
+ preAgg:
+ name: pre_{db_name}_{sp_name}_w1_max_where_c3_c2
+ type: int
+ rows:
+ - ["aa",1590738990000,1590738991999,2,21,1]
+ 2:
+ rows:
+ - [1,"aa",20]
+ - [2,"aa",41]
+ - [3,"aa",63]
+ - [4,"aa",43]
+ - [5,"aa",22]
+ preAgg:
+ name: pre_{db_name}_{sp_name}_w1_sum_where_c3_c2
+ type: int
+ rows:
+ - ["aa",1590738990000,1590738991999,2,41,1]
+ 3:
+ columns: ["id int","c1 string","w1_where double"]
+ rows:
+ - [1,"aa",20]
+ - [2,"aa",20.5]
+ - [3,"aa",21]
+ - [4,"aa",21.5]
+ - [5,"aa",22]
+# preAgg:
+# name: pre_{db_name}_{sp_name}_w1_avg_where_c3_c2
+# type: int
+# rows:
+# - ["aa",1590738990000,1590738991999,2,20,1]
+ -
+ id: 3
+    desc: long window xxx_where, bigint type
+ longWindow: w1:2s
+ inputs:
+ -
+ columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"]
+ index: ["index1:c1:c7"]
+ storage: memory
+ rows:
+ - [1,"aa",1,20,30,1.1,2.1,1590738990000,"2020-05-01",true]
+ - [2,"aa",2,21,31,1.2,2.2,1590738991000,"2020-05-02",true]
+ - [3,"aa",3,22,32,1.3,2.3,1590738992000,"2020-05-03",true]
+ - [4,"aa",4,23,33,1.4,2.4,1590738993000,"2020-05-04",true]
+ - [5,"aa",5,24,34,1.5,2.5,1590738994000,"2020-05-05",false]
+ dataProvider:
+ - ["min_where","max_where","sum_where","avg_where"]
+ sql: |
+ SELECT id, c1, d[0](c4,c2<4) OVER w1 as w1_where FROM {0} WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS_RANGE BETWEEN 2s PRECEDING AND CURRENT ROW);
+ expect:
+ order: id
+ columns: ["id int","c1 string","w1_where bigint"]
+ expectProvider:
+ 0:
+ rows:
+ - [1,"aa",30]
+ - [2,"aa",30]
+ - [3,"aa",30]
+ - [4,"aa",31]
+ - [5,"aa",32]
+ 1:
+ rows:
+ - [1,"aa",30]
+ - [2,"aa",31]
+ - [3,"aa",32]
+ - [4,"aa",32]
+ - [5,"aa",32]
+ 2:
+ rows:
+ - [1,"aa",30]
+ - [2,"aa",61]
+ - [3,"aa",93]
+ - [4,"aa",63]
+ - [5,"aa",32]
+ 3:
+ columns: ["id int","c1 string","w1_where double"]
+ rows:
+ - [1,"aa",30]
+ - [2,"aa",30.5]
+ - [3,"aa",31]
+ - [4,"aa",31.5]
+ - [5,"aa",32]
+ -
+ id: 4
+    desc: long window xxx_where, string type
+ longWindow: w1:2s
+ inputs:
+ -
+ columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"]
+ index: ["index1:c1:c7"]
+ storage: memory
+ rows:
+ - [1,"aa",1,20,30,1.1,2.1,1590738990000,"2020-05-01",true]
+ - [2,"aa",2,21,31,1.2,2.2,1590738991000,"2020-05-02",true]
+ - [3,"aa",3,22,32,1.3,2.3,1590738992000,"2020-05-03",true]
+ - [4,"aa",4,23,33,1.4,2.4,1590738993000,"2020-05-04",true]
+ - [5,"aa",5,24,34,1.5,2.5,1590738994000,"2020-05-05",false]
+ dataProvider:
+ - ["min_where","max_where","sum_where","avg_where"]
+ sql: |
+ SELECT id, c1, d[0](c1,c2<4) OVER w1 as w1_where FROM {0} WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS_RANGE BETWEEN 2s PRECEDING AND CURRENT ROW);
+ expect:
+ success: false
+ msg: fail
+ -
+ id: 5
+    desc: long window xxx_where, timestamp type
+ longWindow: w1:2s
+ inputs:
+ -
+ columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"]
+ index: ["index1:c1:c7"]
+ storage: memory
+ rows:
+ - [1,"aa",1,20,30,1.1,2.1,1590738990000,"2020-05-01",true]
+ - [2,"aa",2,21,31,1.2,2.2,1590738991000,"2020-05-02",true]
+ - [3,"aa",3,22,32,1.3,2.3,1590738992000,"2020-05-03",true]
+ - [4,"aa",4,23,33,1.4,2.4,1590738993000,"2020-05-04",true]
+ - [5,"aa",5,24,34,1.5,2.5,1590738994000,"2020-05-05",false]
+ dataProvider:
+ - ["min_where","max_where","sum_where","avg_where"]
+ sql: |
+ SELECT id, c1, d[0](c7,c2<4) OVER w1 as w1_where FROM {0} WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS_RANGE BETWEEN 2s PRECEDING AND CURRENT ROW);
+ expect:
+ success: false
+ msg: fail
+ -
+ id: 6
+    desc: long window xxx_where, row type
+ longWindow: w1:2s
+ inputs:
+ -
+ columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"]
+ index: ["index1:c1:c7"]
+ storage: memory
+ rows:
+ - [1,"aa",1,20,30,1.1,2.1,1590738990000,"2020-05-01",true]
+ - [2,"aa",2,21,31,1.2,2.2,1590738991000,"2020-05-02",true]
+ - [3,"aa",3,22,32,1.3,2.3,1590738992000,"2020-05-03",true]
+ - [4,"aa",4,23,33,1.4,2.4,1590738993000,"2020-05-04",true]
+ - [5,"aa",5,24,34,1.5,2.5,1590738994000,"2020-05-05",false]
+ dataProvider:
+ - ["min_where","max_where","sum_where","avg_where"]
+ sql: |
+ SELECT id, c1, d[0](*,c2<4) OVER w1 as w1_where FROM {0} WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS_RANGE BETWEEN 2s PRECEDING AND CURRENT ROW);
+ expect:
+ success: false
+ msg: fail
+ -
+ id: 7
+    desc: long window xxx_where, bool type
+ longWindow: w1:2s
+ inputs:
+ -
+ columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"]
+ index: ["index1:c1:c7"]
+ storage: memory
+ rows:
+ - [1,"aa",1,20,30,1.1,2.1,1590738990000,"2020-05-01",true]
+ - [2,"aa",2,21,31,1.2,2.2,1590738991000,"2020-05-02",true]
+ - [3,"aa",3,22,32,1.3,2.3,1590738992000,"2020-05-03",true]
+ - [4,"aa",4,23,33,1.4,2.4,1590738993000,"2020-05-04",true]
+ - [5,"aa",5,24,34,1.5,2.5,1590738994000,"2020-05-05",false]
+ sql: |
+ SELECT id, c1, d[0](c9,c2<4) OVER w1 as w1_where FROM {0} WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS_RANGE BETWEEN 2s PRECEDING AND CURRENT ROW);
+ dataProvider:
+ - ["min_where","max_where","sum_where","avg_where"]
+ expect:
+ success: false
+ msg: fail
+ -
+ id: 8
+    desc: long window xxx_where, float type
+ longWindow: w1:2s
+ inputs:
+ -
+ columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"]
+ index: ["index1:c1:c7"]
+ storage: memory
+ rows:
+ - [1,"aa",1,20,30,1.1,2.1,1590738990000,"2020-05-01",true]
+ - [2,"aa",2,21,31,1.2,2.2,1590738991000,"2020-05-02",true]
+ - [3,"aa",3,22,32,1.3,2.3,1590738992000,"2020-05-03",true]
+ - [4,"aa",4,23,33,1.4,2.4,1590738993000,"2020-05-04",true]
+ - [5,"aa",5,24,34,1.5,2.5,1590738994000,"2020-05-05",false]
+ sql: |
+ SELECT id, c1, d[0](c5,c2>2) OVER w1 as w1_where FROM {0} WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS_RANGE BETWEEN 2s PRECEDING AND CURRENT ROW);
+ dataProvider:
+ - ["min_where","max_where","sum_where","avg_where"]
+ expect:
+ order: id
+ columns: ["id int","c1 string","w1_where float"]
+ expectProvider:
+ 0:
+ rows:
+ - [1,"aa",null]
+ - [2,"aa",null]
+ - [3,"aa",1.3]
+ - [4,"aa",1.3]
+ - [5,"aa",1.3]
+ 1:
+ rows:
+ - [1,"aa",null]
+ - [2,"aa",null]
+ - [3,"aa",1.3]
+ - [4,"aa",1.4]
+ - [5,"aa",1.5]
+ 2:
+ rows:
+ - [1,"aa",null]
+ - [2,"aa",null]
+ - [3,"aa",1.3]
+ - [4,"aa",2.7]
+ - [5,"aa",4.2]
+ 3:
+ columns: ["id int","c1 string","w1_where double"]
+ rows:
+ - [1,"aa",null]
+ - [2,"aa",null]
+ - [3,"aa",1.3]
+ - [4,"aa",1.35]
+ - [5,"aa",1.4]
+ -
+ id: 9
+    desc: long window xxx_where, double type
+ longWindow: w1:2s
+ inputs:
+ -
+ columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"]
+ index: ["index1:c1:c7"]
+ storage: memory
+ rows:
+ - [1,"aa",1,20,30,1.1,2.1,1590738990000,"2020-05-01",true]
+ - [2,"aa",2,21,31,1.2,2.2,1590738991000,"2020-05-02",true]
+ - [3,"aa",3,22,32,1.3,2.3,1590738992000,"2020-05-03",true]
+ - [4,"aa",4,23,33,1.4,2.4,1590738993000,"2020-05-04",true]
+ - [5,"aa",5,24,34,1.5,2.5,1590738994000,"2020-05-05",false]
+ sql: |
+ SELECT id, c1, d[0](c6,c2>2) OVER w1 as w1_where FROM {0} WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS_RANGE BETWEEN 2s PRECEDING AND CURRENT ROW);
+ dataProvider:
+ - ["min_where","max_where","sum_where","avg_where"]
+ expect:
+ order: id
+ columns: ["id int","c1 string","w1_where double"]
+ expectProvider:
+ 0:
+ rows:
+ - [1,"aa",null]
+ - [2,"aa",null]
+ - [3,"aa",2.3]
+ - [4,"aa",2.3]
+ - [5,"aa",2.3]
+ 1:
+ rows:
+ - [1,"aa",null]
+ - [2,"aa",null]
+ - [3,"aa",2.3]
+ - [4,"aa",2.4]
+ - [5,"aa",2.5]
+ 2:
+ rows:
+ - [1,"aa",null]
+ - [2,"aa",null]
+ - [3,"aa",2.3]
+ - [4,"aa",4.7]
+ - [5,"aa",7.2]
+ 3:
+ columns: ["id int","c1 string","w1_where double"]
+ rows:
+ - [1,"aa",null]
+ - [2,"aa",null]
+ - [3,"aa",2.3]
+ - [4,"aa",2.35]
+ - [5,"aa",2.4]
+ -
+ id: 10
+    desc: long window xxx_where, second parameter is a bool column
+ longWindow: w1:2s
+ inputs:
+ -
+ columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"]
+ index: ["index1:c1:c7"]
+ storage: memory
+ rows:
+ - [1,"aa",1,20,30,1.1,2.1,1590738990000,"2020-05-01",true]
+ - [2,"aa",2,21,31,1.2,2.2,1590738991000,"2020-05-02",true]
+ - [3,"aa",3,22,32,1.3,2.3,1590738992000,"2020-05-03",true]
+ - [4,"aa",4,23,33,1.4,2.4,1590738993000,"2020-05-04",true]
+ - [5,"aa",5,24,34,1.5,2.5,1590738994000,"2020-05-05",false]
+ sql: |
+ SELECT id, c1, d[0](c3,c9) OVER w1 as w1_where FROM {0} WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS_RANGE BETWEEN 2s PRECEDING AND CURRENT ROW);
+ dataProvider:
+ - ["min_where","max_where","sum_where","avg_where"]
+ expect:
+ success: false
+ msg: fail
+ -
+ id: 11
+    desc: long window xxx_where, second parameter uses =
+ longWindow: w1:2s
+ inputs:
+ -
+ columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"]
+ index: ["index1:c1:c7"]
+ storage: memory
+ rows:
+ - [1,"aa",1,20,30,1.1,2.1,1590738990000,"2020-05-01",true]
+ - [2,"aa",2,21,31,1.2,2.2,1590738991000,"2020-05-02",true]
+ - [3,"aa",3,22,32,1.3,2.3,1590738992000,"2020-05-03",true]
+ - [4,"aa",4,23,33,1.4,2.4,1590738993000,"2020-05-04",true]
+ - [5,"aa",5,24,34,1.5,2.5,1590738994000,"2020-05-05",false]
+ sql: |
+ SELECT id, c1, d[0](c2,c2=4) OVER w1 as w1_where FROM {0} WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS_RANGE BETWEEN 2s PRECEDING AND CURRENT ROW);
+ dataProvider:
+ - ["min_where","max_where","sum_where","avg_where"]
+ expect:
+ order: id
+ columns: ["id int","c1 string","w1_where smallint"]
+ expectProvider:
+ 0:
+ rows:
+ - [1,"aa",null]
+ - [2,"aa",null]
+ - [3,"aa",null]
+ - [4,"aa",4]
+ - [5,"aa",4]
+ 1:
+ rows:
+ - [1,"aa",null]
+ - [2,"aa",null]
+ - [3,"aa",null]
+ - [4,"aa",4]
+ - [5,"aa",4]
+ 2:
+ rows:
+ - [1,"aa",null]
+ - [2,"aa",null]
+ - [3,"aa",null]
+ - [4,"aa",4]
+ - [5,"aa",4]
+ 3:
+ columns: ["id int","c1 string","w1_where double"]
+ rows:
+ - [1,"aa",null]
+ - [2,"aa",null]
+ - [3,"aa",null]
+ - [4,"aa",4]
+ - [5,"aa",4]
+ -
+ id: 12
+      desc: long window xxx_where, second argument uses !=
+ longWindow: w1:2s
+ inputs:
+ -
+ columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"]
+ index: ["index1:c1:c7"]
+ storage: memory
+ rows:
+ - [1,"aa",1,20,30,1.1,2.1,1590738990000,"2020-05-01",true]
+ - [2,"aa",2,21,31,1.2,2.2,1590738991000,"2020-05-02",true]
+ - [3,"aa",3,22,32,1.3,2.3,1590738992000,"2020-05-03",true]
+ - [4,"aa",4,23,33,1.4,2.4,1590738993000,"2020-05-04",true]
+ - [5,"aa",5,24,34,1.5,2.5,1590738994000,"2020-05-05",false]
+ sql: |
+ SELECT id, c1, d[0](c2,c2!=4) OVER w1 as w1_where FROM {0} WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS_RANGE BETWEEN 2s PRECEDING AND CURRENT ROW);
+ dataProvider:
+ - ["min_where","max_where","sum_where","avg_where"]
+ expect:
+ order: id
+ columns: ["id int","c1 string","w1_where smallint"]
+ expectProvider:
+ 0:
+ rows:
+ - [1,"aa",1]
+ - [2,"aa",1]
+ - [3,"aa",1]
+ - [4,"aa",2]
+ - [5,"aa",3]
+ 1:
+ rows:
+ - [1,"aa",1]
+ - [2,"aa",2]
+ - [3,"aa",3]
+ - [4,"aa",3]
+ - [5,"aa",5]
+ 2:
+ rows:
+ - [1,"aa",1]
+ - [2,"aa",3]
+ - [3,"aa",6]
+ - [4,"aa",5]
+ - [5,"aa",8]
+ 3:
+ columns: ["id int","c1 string","w1_where double"]
+ rows:
+ - [1,"aa",1]
+ - [2,"aa",1.5]
+ - [3,"aa",2]
+ - [4,"aa",2.5]
+ - [5,"aa",4]
+ -
+ id: 13
+      desc: long window xxx_where, second argument uses >=
+ longWindow: w1:2s
+ inputs:
+ -
+ columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"]
+ index: ["index1:c1:c7"]
+ storage: memory
+ rows:
+ - [1,"aa",1,20,30,1.1,2.1,1590738990000,"2020-05-01",true]
+ - [2,"aa",2,21,31,1.2,2.2,1590738991000,"2020-05-02",true]
+ - [3,"aa",3,22,32,1.3,2.3,1590738992000,"2020-05-03",true]
+ - [4,"aa",4,23,33,1.4,2.4,1590738993000,"2020-05-04",true]
+ - [5,"aa",5,24,34,1.5,2.5,1590738994000,"2020-05-05",false]
+ sql: |
+ SELECT id, c1, d[0](c2,c2>=3) OVER w1 as w1_where FROM {0} WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS_RANGE BETWEEN 2s PRECEDING AND CURRENT ROW);
+ dataProvider:
+ - ["min_where","max_where","sum_where","avg_where"]
+ expect:
+ order: id
+ columns: ["id int","c1 string","w1_where smallint"]
+ expectProvider:
+ 0:
+ rows:
+ - [1,"aa",null]
+ - [2,"aa",null]
+ - [3,"aa",3]
+ - [4,"aa",3]
+ - [5,"aa",3]
+ 1:
+ rows:
+ - [1,"aa",null]
+ - [2,"aa",null]
+ - [3,"aa",3]
+ - [4,"aa",4]
+ - [5,"aa",5]
+ 2:
+ rows:
+ - [1,"aa",null]
+ - [2,"aa",null]
+ - [3,"aa",3]
+ - [4,"aa",7]
+ - [5,"aa",12]
+ 3:
+ columns: ["id int","c1 string","w1_where double"]
+ rows:
+ - [1,"aa",null]
+ - [2,"aa",null]
+ - [3,"aa",3]
+ - [4,"aa",3.5]
+ - [5,"aa",4]
+ -
+ id: 14
+      desc: long window xxx_where, second argument uses <=
+ longWindow: w1:2s
+ inputs:
+ -
+ columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"]
+ index: ["index1:c1:c7"]
+ storage: memory
+ rows:
+ - [1,"aa",1,20,30,1.1,2.1,1590738990000,"2020-05-01",true]
+ - [2,"aa",2,21,31,1.2,2.2,1590738991000,"2020-05-02",true]
+ - [3,"aa",3,22,32,1.3,2.3,1590738992000,"2020-05-03",true]
+ - [4,"aa",4,23,33,1.4,2.4,1590738993000,"2020-05-04",true]
+ - [5,"aa",5,24,34,1.5,2.5,1590738994000,"2020-05-05",false]
+ sql: |
+ SELECT id, c1, d[0](c3,c2<=3) OVER w1 as w1_where FROM {0} WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS_RANGE BETWEEN 2s PRECEDING AND CURRENT ROW);
+ dataProvider:
+ - ["min_where","max_where","sum_where","avg_where"]
+ expect:
+ order: id
+ columns: ["id int","c1 string","w1_where int"]
+ expectProvider:
+ 0:
+ rows:
+ - [1,"aa",20]
+ - [2,"aa",20]
+ - [3,"aa",20]
+ - [4,"aa",21]
+ - [5,"aa",22]
+ 1:
+ rows:
+ - [1,"aa",20]
+ - [2,"aa",21]
+ - [3,"aa",22]
+ - [4,"aa",22]
+ - [5,"aa",22]
+ 2:
+ rows:
+ - [1,"aa",20]
+ - [2,"aa",41]
+ - [3,"aa",63]
+ - [4,"aa",43]
+ - [5,"aa",22]
+ 3:
+ columns: ["id int","c1 string","w1_where double"]
+ rows:
+ - [1,"aa",20]
+ - [2,"aa",20.5]
+ - [3,"aa",21]
+ - [4,"aa",21.5]
+ - [5,"aa",22]
+ -
+ id: 17
+      desc: long window xxx_where, second argument uses and
+ longWindow: w1:2s
+ inputs:
+ -
+ columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"]
+ index: ["index1:c1:c7"]
+ storage: memory
+ rows:
+ - [1,"aa",1,20,30,1.1,2.1,1590738990000,"2020-05-01",true]
+ - [2,"aa",2,21,31,1.2,2.2,1590738991000,"2020-05-02",true]
+ - [3,"aa",3,22,32,1.3,2.3,1590738992000,"2020-05-03",true]
+ - [4,"aa",4,23,33,1.4,2.4,1590738993000,"2020-05-04",true]
+ - [5,"aa",5,24,34,1.5,2.5,1590738994000,"2020-05-05",false]
+ dataProvider:
+ - ["min_where","max_where","sum_where","avg_where"]
+ sql: |
+ SELECT id, c1, d[0](c3,c2<4 and c2>1) OVER w1 as w1_where FROM {0} WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS_RANGE BETWEEN 2s PRECEDING AND CURRENT ROW);
+ expect:
+ success: false
+ msg: fail
+ -
+ id: 18
+      desc: long window xxx_where, second argument references two columns
+ longWindow: w1:2s
+ inputs:
+ -
+ columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"]
+ index: ["index1:c1:c7"]
+ storage: memory
+ rows:
+ - [1,"aa",1,20,30,1.1,2.1,1590738990000,"2020-05-01",true]
+ - [2,"aa",2,21,31,1.2,2.2,1590738991000,"2020-05-02",true]
+ - [3,"aa",3,22,32,1.3,2.3,1590738992000,"2020-05-03",true]
+ - [4,"aa",4,23,33,1.4,2.4,1590738993000,"2020-05-04",true]
+ - [5,"aa",5,24,34,1.5,2.5,1590738994000,"2020-05-05",false]
+ dataProvider:
+ - ["min_where","max_where","sum_where","avg_where"]
+ sql: |
+ SELECT id, c1, d[0](c3,c3>c2) OVER w1 as w1_where FROM {0} WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS_RANGE BETWEEN 2s PRECEDING AND CURRENT ROW);
+ expect:
+ success: false
+ msg: fail
+ -
+ id: 19
+      desc: long window xxx_where, second argument uses a nested expression
+ longWindow: w1:2s
+ inputs:
+ -
+ columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"]
+ index: ["index1:c1:c7"]
+ storage: memory
+ rows:
+ - [1,"aa",1,20,30,1.1,2.1,1590738990000,"2020-05-01",true]
+ - [2,"aa",2,21,31,1.2,2.2,1590738991000,"2020-05-02",true]
+ - [3,"aa",3,22,32,1.3,2.3,1590738992000,"2020-05-03",true]
+ - [4,"aa",4,23,33,1.4,2.4,1590738993000,"2020-05-04",true]
+ - [5,"aa",5,24,34,1.5,2.5,1590738994000,"2020-05-05",false]
+ sql: |
+ SELECT id, c1, d[0](c3,if_null(c2,0)>4) OVER w1 as w1_where FROM {0} WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS_RANGE BETWEEN 2s PRECEDING AND CURRENT ROW);
+ dataProvider:
+ - ["min_where","max_where","sum_where","avg_where"]
+ expect:
+ success: false
+ msg: fail
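+    # Note: cases 17-19 above all expect failure, which suggests the xxx_where
+    # filter argument must be a single comparison between one column and a
+    # constant; AND-combined predicates, column-to-column comparisons and
+    # nested expressions such as if_null(c2,0)>4 are rejected. (Inferred from
+    # these cases, not from engine documentation.)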
+ -
+ id: 20
+      desc: long window xxx_where, constant on the left side of the second argument
+ longWindow: w1:2s
+ inputs:
+ -
+ columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"]
+ index: ["index1:c1:c7"]
+ storage: memory
+ rows:
+ - [1,"aa",1,20,30,1.1,2.1,1590738990000,"2020-05-01",true]
+ - [2,"aa",2,21,31,1.2,2.2,1590738991000,"2020-05-02",true]
+ - [3,"aa",3,22,32,1.3,2.3,1590738992000,"2020-05-03",true]
+ - [4,"aa",4,23,33,1.4,2.4,1590738993000,"2020-05-04",true]
+ - [5,"aa",5,24,34,1.5,2.5,1590738994000,"2020-05-05",false]
+ sql: |
+ SELECT id, c1, d[0](c3,4>c2) OVER w1 as w1_where FROM {0} WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS_RANGE BETWEEN 2s PRECEDING AND CURRENT ROW);
+ dataProvider:
+ - ["min_where","max_where","sum_where","avg_where"]
+ expect:
+ order: id
+ columns: ["id int","c1 string","w1_where int"]
+ expectProvider:
+ 0:
+ rows:
+ - [1,"aa",20]
+ - [2,"aa",20]
+ - [3,"aa",20]
+ - [4,"aa",21]
+ - [5,"aa",22]
+ 1:
+ rows:
+ - [1,"aa",20]
+ - [2,"aa",21]
+ - [3,"aa",22]
+ - [4,"aa",22]
+ - [5,"aa",22]
+ 2:
+ rows:
+ - [1,"aa",20]
+ - [2,"aa",41]
+ - [3,"aa",63]
+ - [4,"aa",43]
+ - [5,"aa",22]
+ 3:
+ columns: ["id int","c1 string","w1_where double"]
+ rows:
+ - [1,"aa",20]
+ - [2,"aa",20.5]
+ - [3,"aa",21]
+ - [4,"aa",21.5]
+ - [5,"aa",22]
+ -
+ id: 21
+      desc: long window xxx_where, rows window
+ longWindow: w1:2s
+ inputs:
+ -
+ columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"]
+ index: ["index1:c1:c7:0:latest"]
+ storage: memory
+ rows:
+ - [1,"aa",1,20,30,1.1,2.1,1590738990000,"2020-05-01",true]
+ - [2,"aa",2,21,31,1.2,2.2,1590738991000,"2020-05-02",true]
+ - [3,"aa",3,22,32,1.3,2.3,1590738992000,"2020-05-03",true]
+ - [4,"aa",4,23,33,1.4,2.4,1590738993000,"2020-05-04",true]
+ - [5,"aa",5,24,34,1.5,2.5,1590738994000,"2020-05-05",false]
+ sql: |
+ SELECT id, c1, d[0](c3,c2<4) OVER w1 as w1_where FROM {0} WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS BETWEEN 2 PRECEDING AND CURRENT ROW);
+ dataProvider:
+ - ["min_where","max_where","sum_where","avg_where"]
+ expect:
+ order: id
+ columns: ["id int","c1 string","w1_where int"]
+ expectProvider:
+ 0:
+ rows:
+ - [1,"aa",20]
+ - [2,"aa",20]
+ - [3,"aa",20]
+ - [4,"aa",21]
+ - [5,"aa",22]
+ 1:
+ rows:
+ - [1,"aa",20]
+ - [2,"aa",21]
+ - [3,"aa",22]
+ - [4,"aa",22]
+ - [5,"aa",22]
+ 2:
+ rows:
+ - [1,"aa",20]
+ - [2,"aa",41]
+ - [3,"aa",63]
+ - [4,"aa",43]
+ - [5,"aa",22]
+ 3:
+ columns: ["id int","c1 string","w1_where double"]
+ rows:
+ - [1,"aa",20]
+ - [2,"aa",20.5]
+ - [3,"aa",21]
+ - [4,"aa",21.5]
+ - [5,"aa",22]
+ -
+ id: 22
+      desc: long window xxx_where, second argument type is int
+ longWindow: w1:2s
+ inputs:
+ -
+ columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"]
+ index: ["index1:c1:c7:0:latest"]
+ storage: memory
+ rows:
+ - [1,"aa",1,20,30,1.1,2.1,1590738990000,"2020-05-01",true]
+ - [2,"aa",2,21,31,1.2,2.2,1590738991000,"2020-05-02",true]
+ - [3,"aa",3,22,32,1.3,2.3,1590738992000,"2020-05-03",true]
+ - [4,"aa",4,23,33,1.4,2.4,1590738993000,"2020-05-04",true]
+ - [5,"aa",5,24,34,1.5,2.5,1590738994000,"2020-05-05",false]
+ sql: |
+ SELECT id, c1, d[0](c3,c3<23) OVER w1 as w1_where FROM {0} WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS BETWEEN 2 PRECEDING AND CURRENT ROW);
+ dataProvider:
+ - ["min_where","max_where","sum_where","avg_where"]
+ expect:
+ order: id
+ columns: ["id int","c1 string","w1_where int"]
+ expectProvider:
+ 0:
+ rows:
+ - [1,"aa",20]
+ - [2,"aa",20]
+ - [3,"aa",20]
+ - [4,"aa",21]
+ - [5,"aa",22]
+ 1:
+ rows:
+ - [1,"aa",20]
+ - [2,"aa",21]
+ - [3,"aa",22]
+ - [4,"aa",22]
+ - [5,"aa",22]
+ 2:
+ rows:
+ - [1,"aa",20]
+ - [2,"aa",41]
+ - [3,"aa",63]
+ - [4,"aa",43]
+ - [5,"aa",22]
+ 3:
+ columns: ["id int","c1 string","w1_where double"]
+ rows:
+ - [1,"aa",20]
+ - [2,"aa",20.5]
+ - [3,"aa",21]
+ - [4,"aa",21.5]
+ - [5,"aa",22]
+ -
+ id: 23
+      desc: long window xxx_where, second argument type is bigint
+ longWindow: w1:2s
+ inputs:
+ -
+ columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"]
+ index: ["index1:c1:c7:0:latest"]
+ storage: memory
+ rows:
+ - [1,"aa",1,20,30,1.1,2.1,1590738990000,"2020-05-01",true]
+ - [2,"aa",2,21,31,1.2,2.2,1590738991000,"2020-05-02",true]
+ - [3,"aa",3,22,32,1.3,2.3,1590738992000,"2020-05-03",true]
+ - [4,"aa",4,23,33,1.4,2.4,1590738993000,"2020-05-04",true]
+ - [5,"aa",5,24,34,1.5,2.5,1590738994000,"2020-05-05",false]
+ sql: |
+ SELECT id, c1, d[0](c3,c4<33) OVER w1 as w1_where FROM {0} WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS BETWEEN 2 PRECEDING AND CURRENT ROW);
+ dataProvider:
+ - ["min_where","max_where","sum_where","avg_where"]
+ expect:
+ order: id
+ columns: ["id int","c1 string","w1_where int"]
+ expectProvider:
+ 0:
+ rows:
+ - [1,"aa",20]
+ - [2,"aa",20]
+ - [3,"aa",20]
+ - [4,"aa",21]
+ - [5,"aa",22]
+ 1:
+ rows:
+ - [1,"aa",20]
+ - [2,"aa",21]
+ - [3,"aa",22]
+ - [4,"aa",22]
+ - [5,"aa",22]
+ 2:
+ rows:
+ - [1,"aa",20]
+ - [2,"aa",41]
+ - [3,"aa",63]
+ - [4,"aa",43]
+ - [5,"aa",22]
+ 3:
+ columns: ["id int","c1 string","w1_where double"]
+ rows:
+ - [1,"aa",20]
+ - [2,"aa",20.5]
+ - [3,"aa",21]
+ - [4,"aa",21.5]
+ - [5,"aa",22]
+ -
+ id: 24
+      desc: long window xxx_where, second argument type is float
+ longWindow: w1:2s
+ inputs:
+ -
+ columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"]
+ index: ["index1:c1:c7:0:latest"]
+ storage: memory
+ rows:
+ - [1,"aa",1,20,30,1.1,2.1,1590738990000,"2020-05-01",true]
+ - [2,"aa",2,21,31,1.2,2.2,1590738991000,"2020-05-02",true]
+ - [3,"aa",3,22,32,1.3,2.3,1590738992000,"2020-05-03",true]
+ - [4,"aa",4,23,33,1.4,2.4,1590738993000,"2020-05-04",true]
+ - [5,"aa",5,24,34,1.5,2.5,1590738994000,"2020-05-05",false]
+ sql: |
+ SELECT id, c1, d[0](c3,c5<1.35) OVER w1 as w1_where FROM {0} WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS BETWEEN 2 PRECEDING AND CURRENT ROW);
+ dataProvider:
+ - ["min_where","max_where","sum_where","avg_where"]
+ expect:
+ order: id
+ columns: ["id int","c1 string","w1_where int"]
+ expectProvider:
+ 0:
+ rows:
+ - [1,"aa",20]
+ - [2,"aa",20]
+ - [3,"aa",20]
+ - [4,"aa",21]
+ - [5,"aa",22]
+ 1:
+ rows:
+ - [1,"aa",20]
+ - [2,"aa",21]
+ - [3,"aa",22]
+ - [4,"aa",22]
+ - [5,"aa",22]
+ 2:
+ rows:
+ - [1,"aa",20]
+ - [2,"aa",41]
+ - [3,"aa",63]
+ - [4,"aa",43]
+ - [5,"aa",22]
+ 3:
+ columns: ["id int","c1 string","w1_where double"]
+ rows:
+ - [1,"aa",20]
+ - [2,"aa",20.5]
+ - [3,"aa",21]
+ - [4,"aa",21.5]
+ - [5,"aa",22]
+ -
+ id: 25
+      desc: long window xxx_where, second argument type is double
+ longWindow: w1:2s
+ inputs:
+ -
+ columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"]
+ index: ["index1:c1:c7:0:latest"]
+ storage: memory
+ rows:
+ - [1,"aa",1,20,30,1.1,2.1,1590738990000,"2020-05-01",true]
+ - [2,"aa",2,21,31,1.2,2.2,1590738991000,"2020-05-02",true]
+ - [3,"aa",3,22,32,1.3,2.3,1590738992000,"2020-05-03",true]
+ - [4,"aa",4,23,33,1.4,2.4,1590738993000,"2020-05-04",true]
+ - [5,"aa",5,24,34,1.5,2.5,1590738994000,"2020-05-05",false]
+ sql: |
+ SELECT id, c1, d[0](c3,c6<2.4) OVER w1 as w1_where FROM {0} WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS BETWEEN 2 PRECEDING AND CURRENT ROW);
+ dataProvider:
+ - ["min_where","max_where","sum_where","avg_where"]
+ expect:
+ order: id
+ columns: ["id int","c1 string","w1_where int"]
+ expectProvider:
+ 0:
+ rows:
+ - [1,"aa",20]
+ - [2,"aa",20]
+ - [3,"aa",20]
+ - [4,"aa",21]
+ - [5,"aa",22]
+ 1:
+ rows:
+ - [1,"aa",20]
+ - [2,"aa",21]
+ - [3,"aa",22]
+ - [4,"aa",22]
+ - [5,"aa",22]
+ 2:
+ rows:
+ - [1,"aa",20]
+ - [2,"aa",41]
+ - [3,"aa",63]
+ - [4,"aa",43]
+ - [5,"aa",22]
+ 3:
+ columns: ["id int","c1 string","w1_where double"]
+ rows:
+ - [1,"aa",20]
+ - [2,"aa",20.5]
+ - [3,"aa",21]
+ - [4,"aa",21.5]
+ - [5,"aa",22]
+ -
+ id: 26
+      desc: long window xxx_where, second argument type is timestamp
+ longWindow: w1:2s
+ inputs:
+ -
+ columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"]
+ index: ["index1:c1:c7:0:latest"]
+ storage: memory
+ rows:
+ - [1,"aa",1,20,30,1.1,2.1,1590738990000,"2020-05-01",true]
+ - [2,"aa",2,21,31,1.2,2.2,1590738991000,"2020-05-02",true]
+ - [3,"aa",3,22,32,1.3,2.3,1590738992000,"2020-05-03",true]
+ - [4,"aa",4,23,33,1.4,2.4,1590738993000,"2020-05-04",true]
+ - [5,"aa",5,24,34,1.5,2.5,1590738994000,"2020-05-05",false]
+ sql: |
+ SELECT id, c1, d[0](c3,c7<1590738993000) OVER w1 as w1_where FROM {0} WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS BETWEEN 2 PRECEDING AND CURRENT ROW);
+ dataProvider:
+ - ["min_where","max_where","sum_where","avg_where"]
+ expect:
+ success: false
+ msg: fail
+ -
+ id: 27
+      desc: long window xxx_where, second argument type is date
+ longWindow: w1:2s
+ inputs:
+ -
+ columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"]
+ index: ["index1:c1:c7:0:latest"]
+ storage: memory
+ rows:
+ - [1,"aa",1,20,30,1.1,2.1,1590738990000,"2020-05-01",true]
+ - [2,"aa",2,21,31,1.2,2.2,1590738991000,"2020-05-02",true]
+ - [3,"aa",3,22,32,1.3,2.3,1590738992000,"2020-05-03",true]
+ - [4,"aa",4,23,33,1.4,2.4,1590738993000,"2020-05-04",true]
+ - [5,"aa",5,24,34,1.5,2.5,1590738994000,"2020-05-05",false]
+ sql: |
+ SELECT id, c1, d[0](c3,c8<"2020-05-04") OVER w1 as w1_where FROM {0} WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS BETWEEN 2 PRECEDING AND CURRENT ROW);
+ dataProvider:
+ - ["min_where","max_where","sum_where","avg_where"]
+ expect:
+ success: false
+ msg: fail
+ -
+ id: 28
+      desc: long window xxx_where, second argument type is bool
+ longWindow: w1:2s
+ inputs:
+ -
+ columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"]
+ index: ["index1:c1:c7:0:latest"]
+ storage: memory
+ rows:
+ - [1,"aa",1,20,30,1.1,2.1,1590738990000,"2020-05-01",true]
+ - [2,"aa",2,21,31,1.2,2.2,1590738991000,"2020-05-02",true]
+ - [3,"aa",3,22,32,1.3,2.3,1590738992000,"2020-05-03",true]
+ - [4,"aa",4,23,33,1.4,2.4,1590738993000,"2020-05-04",false]
+ - [5,"aa",5,24,34,1.5,2.5,1590738994000,"2020-05-05",false]
+ sql: |
+ SELECT id, c1, d[0](c3,c9=true) OVER w1 as w1_where FROM {0} WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS BETWEEN 2 PRECEDING AND CURRENT ROW);
+ dataProvider:
+ - ["min_where","max_where","sum_where","avg_where"]
+ expect:
+ order: id
+ columns: ["id int","c1 string","w1_where int"]
+ expectProvider:
+ 0:
+ rows:
+ - [1,"aa",20]
+ - [2,"aa",20]
+ - [3,"aa",20]
+ - [4,"aa",21]
+ - [5,"aa",22]
+ 1:
+ rows:
+ - [1,"aa",20]
+ - [2,"aa",21]
+ - [3,"aa",22]
+ - [4,"aa",22]
+ - [5,"aa",22]
+ 2:
+ rows:
+ - [1,"aa",20]
+ - [2,"aa",41]
+ - [3,"aa",63]
+ - [4,"aa",43]
+ - [5,"aa",22]
+ 3:
+ columns: ["id int","c1 string","w1_where double"]
+ rows:
+ - [1,"aa",20]
+ - [2,"aa",20.5]
+ - [3,"aa",21]
+ - [4,"aa",21.5]
+ - [5,"aa",22]
+ -
+ id: 29
+      desc: long window xxx_where, w1:2
+ longWindow: w1:2
+ inputs:
+ -
+ columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"]
+ index: ["index1:c1:c7:0:latest"]
+ storage: memory
+ rows:
+ - [1,"aa",1,20,30,1.1,2.1,1590738990000,"2020-05-01",true]
+ - [2,"aa",2,21,31,1.2,2.2,1590738991000,"2020-05-02",true]
+ - [3,"aa",3,22,32,1.3,2.3,1590738992000,"2020-05-03",true]
+ - [4,"aa",4,23,33,1.4,2.4,1590738993000,"2020-05-04",true]
+ - [5,"aa",5,24,34,1.5,2.5,1590738994000,"2020-05-05",false]
+ sql: |
+ SELECT id, c1, d[0](c3,c2<4) OVER w1 as w1_where FROM {0} WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS BETWEEN 2 PRECEDING AND CURRENT ROW);
+ dataProvider:
+ - ["min_where","max_where","sum_where","avg_where"]
+ expect:
+ success: false
+ msg: fail
+ -
+ id: 30
+      desc: long window xxx_where, disk table
+ longWindow: w1:2s
+ inputs:
+ -
+ columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"]
+ index: ["index1:c1:c7"]
+ storage: SSD
+ rows:
+ - [1,"aa",1,20,30,1.1,2.1,1590738990000,"2020-05-01",true]
+ - [2,"aa",2,21,31,1.2,2.2,1590738991000,"2020-05-02",true]
+ - [3,"aa",3,22,32,1.3,2.3,1590738992000,"2020-05-03",true]
+ - [4,"aa",4,23,33,1.4,2.4,1590738993000,"2020-05-04",true]
+ - [5,"aa",5,24,34,1.5,2.5,1590738994000,"2020-05-05",false]
+ sql: |
+ SELECT id, c1, d[0](c3,c2<4) OVER w1 as w1_where FROM {0} WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS_RANGE BETWEEN 2s PRECEDING AND CURRENT ROW);
+ dataProvider:
+ - ["min_where","max_where","sum_where","avg_where"]
+ expect:
+ success: false
+ msg: fail
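+    # Note: case 30 expects failure on an SSD (disk) table, implying that the
+    # long-window pre-aggregation behind xxx_where presumably requires the base
+    # table to use memory storage.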
+ -
+ id: 31
+      desc: long window xxx_where, second argument type is string
+ longWindow: w1:2s
+ inputs:
+ -
+ columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 string"]
+ index: ["index1:c1:c7:0:latest"]
+ storage: memory
+ rows:
+ - [1,"aa",1,20,30,1.1,2.1,1590738990000,"2020-05-01","true"]
+ - [2,"aa",2,21,31,1.2,2.2,1590738991000,"2020-05-02","true"]
+ - [3,"aa",3,22,32,1.3,2.3,1590738992000,"2020-05-03","true"]
+ - [4,"aa",4,23,33,1.4,2.4,1590738993000,"2020-05-04","false"]
+ - [5,"aa",5,24,34,1.5,2.5,1590738994000,"2020-05-05","false"]
+ sql: |
+ SELECT id, c1, d[0](c3,c9="true") OVER w1 as w1_where FROM {0} WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS BETWEEN 2 PRECEDING AND CURRENT ROW);
+ dataProvider:
+ - ["min_where","max_where","sum_where","avg_where"]
+ expect:
+ order: id
+ columns: ["id int","c1 string","w1_where int"]
+ expectProvider:
+ 0:
+ rows:
+ - [1,"aa",20]
+ - [2,"aa",20]
+ - [3,"aa",20]
+ - [4,"aa",21]
+ - [5,"aa",22]
+ 1:
+ rows:
+ - [1,"aa",20]
+ - [2,"aa",21]
+ - [3,"aa",22]
+ - [4,"aa",22]
+ - [5,"aa",22]
+ 2:
+ rows:
+ - [1,"aa",20]
+ - [2,"aa",41]
+ - [3,"aa",63]
+ - [4,"aa",43]
+ - [5,"aa",22]
+ 3:
+ columns: ["id int","c1 string","w1_where double"]
+ rows:
+ - [1,"aa",20]
+ - [2,"aa",20.5]
+ - [3,"aa",21]
+ - [4,"aa",21.5]
+ - [5,"aa",22]
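+    # Note: the longWindow field (e.g. w1:2s) is assumed to map to the
+    # long-window deployment option, roughly:
+    #   DEPLOY demo OPTIONS(long_windows="w1:2s")
+    #     SELECT id, c1, sum_where(c3, c2 < 4) OVER w1 AS w1_where FROM t1
+    #     WINDOW w1 AS (PARTITION BY t1.c1 ORDER BY t1.c7
+    #                   ROWS_RANGE BETWEEN 2s PRECEDING AND CURRENT ROW);
+    # "demo" and "t1" are illustrative names, not part of the test data.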
+
+
diff --git a/cases/integration_test/multiple_databases/test_multiple_databases.yaml b/cases/integration_test/multiple_databases/test_multiple_databases.yaml
new file mode 100644
index 00000000000..208270b4ae5
--- /dev/null
+++ b/cases/integration_test/multiple_databases/test_multiple_databases.yaml
@@ -0,0 +1,383 @@
+# Copyright 2021 4Paradigm
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+debugs: []
+version: 0.5.0
+cases:
+ - id: 0
+ desc: Last Join tables from two databases 1 - default db is db1
+ db: db1
+ inputs:
+ - db: db1
+ columns: [ "c1 string","c2 int","c3 bigint","c4 timestamp" ]
+ indexs: [ "index1:c1:c4" ]
+ rows:
+ - [ "aa",2,3,1590738989000 ]
+ - [ "bb",21,31,1590738990000 ]
+ - [ "cc",41,51,1590738991000 ]
+ - db: db2
+ columns: [ "c1 string","c2 int","c3 bigint","c4 timestamp" ]
+ indexs: [ "index1:c1:c3" ]
+ rows:
+ - [ "aa",2,13,1590738989000 ]
+ - [ "bb",21,131,1590738990000 ]
+ - [ "cc",41,151,1590738992000 ]
+ sql: select db1.{0}.c1,db1.{0}.c2,db2.{1}.c3,db2.{1}.c4 from db1.{0} last join db2.{1} ORDER BY db2.{1}.c3 on db1.{0}.c1=db2.{1}.c1;
+ expect:
+ order: c1
+ columns: [ "c1 string","c2 int","c3 bigint","c4 timestamp" ]
+ rows:
+ - [ "aa",2,13,1590738989000 ]
+ - [ "bb",21,131,1590738990000 ]
+ - [ "cc",41,151,1590738992000 ]
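+  # Note: {0} and {1} stand for the two input tables defined above; with their
+  # db fields the sql of case 0 is assumed to expand roughly to (t0/t1 are
+  # illustrative table names):
+  #   SELECT db1.t0.c1, db1.t0.c2, db2.t1.c3, db2.t1.c4
+  #   FROM db1.t0 LAST JOIN db2.t1 ORDER BY db2.t1.c3
+  #   ON db1.t0.c1 = db2.t1.c1;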
+ - id: 1
+    desc: Last Join tables from two databases 2 - default db is db, db1 and db2 specified explicitly
+ db: db
+ inputs:
+ - db: db1
+ columns: [ "c1 string","c2 int","c3 bigint","c4 timestamp" ]
+ indexs: [ "index1:c1:c4" ]
+ rows:
+ - [ "aa",2,3,1590738989000 ]
+ - [ "bb",21,31,1590738990000 ]
+ - [ "cc",41,51,1590738991000 ]
+ - db: db2
+ columns: [ "c1 string","c2 int","c3 bigint","c4 timestamp" ]
+ indexs: [ "index1:c1:c3" ]
+
+ rows:
+ - [ "aa",2,13,1590738989000 ]
+ - [ "bb",21,131,1590738990000 ]
+ - [ "cc",41,151,1590738992000 ]
+ sql: select db1.{0}.c1, db1.{0}.c2,db2.{1}.c3,db2.{1}.c4 from db1.{0} last join db2.{1} ORDER BY db2.{1}.c3 on db1.{0}.c1=db2.{1}.c1;
+ expect:
+ order: c1
+ columns: [ "c1 string","c2 int","c3 bigint","c4 timestamp" ]
+ rows:
+ - [ "aa",2,13,1590738989000 ]
+ - [ "bb",21,131,1590738990000 ]
+ - [ "cc",41,151,1590738992000 ]
+ - id: 2
+    desc: Last join tables from 2 databases fail 1 - db2 does not exist
+ db: db1
+ inputs:
+ - db: db1
+ columns: [ "c1 string","c2 int","c3 bigint","c4 timestamp" ]
+ indexs: [ "index1:c1:c4" ]
+ rows:
+ - [ "aa",2,3,1590738989000 ]
+ - [ "bb",21,31,1590738990000 ]
+ - [ "cc",41,51,1590738991000 ]
+ - db: db3
+ columns: [ "c1 string","c2 int","c3 bigint","c4 timestamp" ]
+ indexs: [ "index1:c1:c3" ]
+
+ rows:
+ - [ "aa",2,13,1590738989000 ]
+ - [ "bb",21,131,1590738990000 ]
+ - [ "cc",41,151,1590738992000 ]
+ sql: select db1.{0}.c1, db1.{0}.c2,db2.{1}.c3,db2.{1}.c4 from {0} last join db2.{1} ORDER BY db2.{1}.c3 on db1.{0}.c1=db2.{1}.c1;
+ expect:
+ success: false
+ - id: 3
+    desc: Last join tables from 2 databases fail 2 - fails to resolve column {1}.c3 against the default db
+ db: db1
+ inputs:
+ - db: db1
+ columns: [ "c1 string","c2 int","c3 bigint","c4 timestamp" ]
+ indexs: [ "index1:c1:c4" ]
+ rows:
+ - [ "aa",2,3,1590738989000 ]
+ - [ "bb",21,31,1590738990000 ]
+ - [ "cc",41,51,1590738991000 ]
+ - db: db2
+ columns: [ "c1 string","c2 int","c3 bigint","c4 timestamp" ]
+ indexs: [ "index1:c1:c3" ]
+
+ rows:
+ - [ "aa",2,13,1590738989000 ]
+ - [ "bb",21,131,1590738990000 ]
+ - [ "cc",41,151,1590738992000 ]
+ sql: select db1.{0}.c1, db1.{0}.c2, {1}.c3, {1}.c4 from {0} last join db2.{1} ORDER BY db2.{1}.c3 on db1.{0}.c1=db2.{1}.c1;
+ expect:
+ success: false
+ - id: 4
+    desc: all tables use the default database
+ db: test_zw
+ inputs:
+ - columns: [ "c1 string","c2 int","c3 bigint","c4 timestamp" ]
+ indexs: [ "index1:c1:c4" ]
+ rows:
+ - [ "aa",2,3,1590738989000 ]
+ - [ "bb",21,31,1590738990000 ]
+ - [ "cc",41,51,1590738991000 ]
+ - columns: [ "c1 string","c2 int","c3 bigint","c4 timestamp" ]
+ indexs: [ "index1:c1:c3" ]
+ rows:
+ - [ "aa",2,13,1590738989000 ]
+ - [ "bb",21,131,1590738990000 ]
+ - [ "cc",41,151,1590738992000 ]
+ sql: select {0}.c1,{0}.c2,{1}.c3,{1}.c4 from {0} last join {1} ORDER BY {1}.c3 on {0}.c1={1}.c1;
+ expect:
+ order: c1
+ columns: [ "c1 string","c2 int","c3 bigint","c4 timestamp" ]
+ rows:
+ - [ "aa",2,13,1590738989000 ]
+ - [ "bb",21,131,1590738990000 ]
+ - [ "cc",41,151,1590738992000 ]
+ - id: 5
+    desc: query with the current database specified explicitly
+ db: test_zw
+ inputs:
+ - columns: [ "c1 string","c2 int","c3 bigint","c4 timestamp" ]
+ indexs: [ "index1:c1:c4" ]
+ rows:
+ - [ "aa",2,3,1590738989000 ]
+ - [ "bb",21,31,1590738990000 ]
+ - [ "cc",41,51,1590738991000 ]
+ - columns: [ "c1 string","c2 int","c3 bigint","c4 timestamp" ]
+ indexs: [ "index1:c1:c3" ]
+ rows:
+ - [ "aa",2,13,1590738989000 ]
+ - [ "bb",21,131,1590738990000 ]
+ - [ "cc",41,151,1590738992000 ]
+ sql: select {0}.c1,{0}.c2,{1}.c3,{1}.c4 from {0} last join {1} ORDER BY {1}.c3 on {0}.c1={1}.c1;
+ expect:
+ order: c1
+ columns: [ "c1 string","c2 int","c3 bigint","c4 timestamp" ]
+ rows:
+ - [ "aa",2,13,1590738989000 ]
+ - [ "bb",21,131,1590738990000 ]
+ - [ "cc",41,151,1590738992000 ]
+ - id: 6
+    desc: query a table from another database
+ db: test_zw
+ inputs:
+ -
+ columns : ["c1 string","c2 int","c3 bigint","c4 timestamp"]
+ db: db1
+ indexs: ["index1:c1:c4"]
+ rows:
+ - ["aa",2,3,1590738989000]
+ - ["bb",21,31,1590738990000]
+ - ["cc",41,51,1590738991000]
+ sql: select * from (select c1, c2+1 as v1,c3+1 as v2 from db1.{0}) as t1;
+ expect:
+ columns: ["c1 string", "v1 int", "v2 bigint"]
+ order: c1
+ rows:
+ - ["aa", 3,4]
+ - ["bb", 22,32]
+ - ["cc", 42,52]
+ - id: 7
+    desc: derived table from the subquery referenced via the default database
+ db: db
+ inputs:
+ -
+ columns : ["c1 string","c2 int","c3 bigint","c4 timestamp"]
+ db: db1
+ indexs: ["index1:c1:c4"]
+ rows:
+ - ["aa",2,3,1590738989000]
+ - ["bb",21,31,1590738990000]
+ - ["cc",41,51,1590738991000]
+ sql: select db.t1.c1 from (select c1, c2+1,c3+1 from db1.{0}) as t1;
+ expect:
+ columns: ["c1 string"]
+ order: c1
+ rows:
+ - ["aa"]
+ - ["bb"]
+ - ["cc"]
+ - id: 8
+    desc: derived table from the subquery referenced via another database
+ db: db
+ inputs:
+ -
+ columns : ["c1 string","c2 int","c3 bigint","c4 timestamp"]
+ db: db1
+ indexs: ["index1:c1:c4"]
+ rows:
+ - ["aa",2,3,1590738989000]
+ - ["bb",21,31,1590738990000]
+ - ["cc",41,51,1590738991000]
+ sql: select db1.t1.c1 from (select c1, c2+1,c3+1 from db1.{0}) as t1;
+ expect:
+ success: false
+ - id: 9
+    desc: use subqueries to read data from different databases, then last join
+    tags: ["request mode has issues, @chenjing"]
+ db: db
+ inputs:
+ - columns: [ "c1 string","c2 int","c3 bigint","c4 timestamp" ]
+ db: db1
+ indexs: [ "index1:c1:c4" ]
+ rows:
+ - [ "aa",2,3,1590738989000 ]
+ - [ "bb",21,31,1590738990000 ]
+ - [ "cc",41,51,1590738991000 ]
+ - columns: [ "c1 string","c2 int","c3 bigint","c4 timestamp" ]
+ db: db2
+ indexs: [ "index1:c1:c3" ]
+ rows:
+ - [ "aa",2,13,1590738989000 ]
+ - [ "bb",21,131,1590738990000 ]
+ - [ "cc",41,151,1590738992000 ]
+ sql: select db.t1.c1,db.t1.c2,db.t2.c3,db.t2.c4 from (select * from db1.{0}) as t1 last join (select * from db2.{1}) as t2 ORDER BY db.t2.c3 on db.t1.c1=db.t2.c1;
+ expect:
+ order: c1
+ columns: [ "c1 string","c2 int","c3 bigint","c4 timestamp" ]
+ rows:
+ - [ "aa",2,13,1590738989000 ]
+ - [ "bb",21,131,1590738990000 ]
+ - [ "cc",41,151,1590738992000 ]
+ - id: 10
+    desc: join three tables across three databases
+ db: db
+ inputs:
+ - columns: [ "c1 string","c2 int","c3 bigint","c4 timestamp" ]
+ db: db1
+ indexs: [ "index1:c1:c4" ]
+ rows:
+ - [ "aa",2,3,1590738989000 ]
+ - [ "bb",21,31,1590738990000 ]
+ - columns: [ "c1 string","c2 int","c3 bigint","c4 timestamp" ]
+ db: db2
+ indexs: [ "index1:c1:c4" ]
+ rows:
+ - [ "aa",2,13,1590738989000 ]
+ - [ "bb",21,131,1590738990000 ]
+ - [ "bb",41,121,1590738992000 ]
+ - [ "bb",41,121,1590738991000 ]
+ - columns: [ "c1 string","c2 int","c3 bigint","c4 timestamp" ]
+ db: db3
+ indexs: [ "index1:c1:c4" ]
+ rows:
+ - [ "aa",2,13,1590738989000 ]
+ - [ "aa",21,131,1590738992000 ]
+ - [ "aa",41,121,1590738991000 ]
+ - [ "bb",41,121,1590738991000 ]
+ sql: select db1.{0}.c1,db1.{0}.c2,db2.{1}.c4,db3.{2}.c4 from db1.{0} last join db2.{1} ORDER BY db2.{1}.c4 on db1.{0}.c1=db2.{1}.c1 last join db3.{2} order by db3.{2}.c4 on db1.{0}.c1=db3.{2}.c1;
+ expect:
+ columns: [ "c1 string","c2 int","c4 timestamp","c4 timestamp" ]
+ order: c1
+ rows:
+ - [ "aa",2,1590738989000,1590738992000 ]
+ - [ "bb",21,1590738992000,1590738991000 ]
+ - id: 11
+    desc: non-equality join condition
+ db: db
+ inputs:
+ - columns: [ "c1 string","c2 int","c3 bigint","c4 timestamp" ]
+ db: db1
+ indexs: [ "index1:c2:c4" ]
+ rows:
+ - [ "aa",2,3,1590738989000 ]
+ - [ "bb",21,31,1590738990000 ]
+ - columns: [ "c1 string","c2 int","c3 bigint","c4 timestamp" ]
+ db: db2
+ indexs: [ "index1:c2:c4" ]
+ rows:
+ - [ "aa",2,3,1590738989000 ]
+ - [ "bb",21,31,1590738990000 ]
+ - [ "bb",21,32,1590738993000 ]
+ - [ "bb",21,31,1590738992000 ]
+ - [ "bb",21,31,1590738991000 ]
+ sql: select db1.{0}.c1,db1.{0}.c2,db2.{1}.c3,db2.{1}.c4 from db1.{0} last join db2.{1} ORDER BY db2.{1}.c4 on db1.{0}.c2 = db2.{1}.c2 and db1.{0}.c3 <= db2.{1}.c3;
+ expect:
+ columns: [ "c1 string","c2 int","c3 bigint","c4 timestamp" ]
+ order: c1
+ rows:
+ - [ "aa",2,3,1590738989000 ]
+ - [ "bb",21,32,1590738993000 ]
+ - id: 12
+    desc: last join tables with the same name in different databases
+ db: db
+ inputs:
+ - db: db1
+ columns: [ "c1 string","c2 int","c3 bigint","c4 timestamp" ]
+ name: t1
+ indexs: [ "index1:c1:c4" ]
+ rows:
+ - [ "aa",2,3,1590738989000 ]
+ - [ "bb",21,31,1590738990000 ]
+ - [ "cc",41,51,1590738991000 ]
+ - db: db2
+ columns: [ "c1 string","c2 int","c3 bigint","c4 timestamp" ]
+ name: t1
+ indexs: [ "index1:c1:c3" ]
+ rows:
+ - [ "aa",2,13,1590738989000 ]
+ - [ "bb",21,131,1590738990000 ]
+ - [ "cc",41,151,1590738992000 ]
+ sql: select db1.t1.c1,db1.t1.c2,db2.t1.c3,db2.t1.c4 from db1.t1 last join db2.t1 ORDER BY db2.t1.c3 on db1.t1.c1=db2.t1.c1;
+ expect:
+ order: c1
+ columns: [ "c1 string","c2 int","c3 bigint","c4 timestamp" ]
+ rows:
+ - [ "aa",2,13,1590738989000 ]
+ - [ "bb",21,131,1590738990000 ]
+ - [ "cc",41,151,1590738992000 ]
+ -
+ id: 13
+    desc: window rows over a table in another database
+ db: db
+ inputs:
+ -
+ columns: ["c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"]
+ db: db1
+ indexs: ["index1:c1:c7"]
+ rows:
+ - ["aa",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - ["aa",21,31,1.2,2.2,1590738991000,"2020-05-02"]
+ - ["aa",22,32,1.3,2.3,1590738992000,"2020-05-03"]
+ - ["aa",23,33,1.4,2.4,1590738993000,"2020-05-04"]
+ - ["bb",24,34,1.5,2.5,1590738994000,"2020-05-05"]
+ sql: |
+ SELECT c1, c3, sum(c4) OVER w1 as w1_c4_sum FROM db1.{0} WINDOW w1 AS (PARTITION BY db1.{0}.c1 ORDER BY db1.{0}.c7 ROWS BETWEEN 2 PRECEDING AND CURRENT ROW);
+ expect:
+ order: c3
+ columns: ["c1 string","c3 int","w1_c4_sum bigint"]
+ rows:
+ - ["aa",20,30]
+ - ["aa",21,61]
+ - ["aa",22,93]
+ - ["aa",23,96]
+ - ["bb",24,34]
+ - id: 14
+    desc: window ROWS_RANGE over a table in another database
+ db: db
+ inputs:
+ - columns: [ "c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date" ]
+ db: db1
+ indexs: [ "index1:c1:c7" ]
+ rows:
+ - [ "aa",20,30,1.1,2.1,1590738990000,"2020-05-01" ]
+ - [ "aa",21,31,1.2,2.2,1590738991000,"2020-05-02" ]
+ - [ "aa",22,32,1.3,2.3,1590738992000,"2020-05-03" ]
+ - [ "aa",23,33,1.4,2.4,1590738993000,"2020-05-04" ]
+ - [ "bb",24,34,1.5,2.5,1590738994000,"2020-05-05" ]
+ sql: |
+ SELECT c1, c3, sum(c4) OVER w1 as w1_c4_sum FROM db1.{0} WINDOW w1 AS (PARTITION BY db1.{0}.c1 ORDER BY db1.{0}.c7 ROWS_RANGE BETWEEN 2s PRECEDING AND CURRENT ROW);
+ expect:
+ order: c3
+ columns: [ "c1 string","c3 int","w1_c4_sum bigint" ]
+ rows:
+ - [ "aa",20,30 ]
+ - [ "aa",21,61 ]
+ - [ "aa",22,93 ]
+ - [ "aa",23,96 ]
+ - [ "bb",24,34 ]
+
+
diff --git a/cases/integration_test/out_in/test_out_in.yaml b/cases/integration_test/out_in/test_out_in.yaml
new file mode 100644
index 00000000000..e8fdefc9dc7
--- /dev/null
+++ b/cases/integration_test/out_in/test_out_in.yaml
@@ -0,0 +1,893 @@
+# Copyright 2021 4Paradigm
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+db: test_zw
+debugs: ['data contains null, empty string and special characters']
+cases:
+ -
+ id: 0
+    desc: data contains null, empty string and special characters
+ inputs:
+ -
+ columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"]
+ indexs: ["index1:c1:c7"]
+ rows:
+ - [1,"aa",1,2,3,1.1,2.1,1590738989000,"2020-05-01",true]
+ - [2,"null",2,21,31,1.2,2.2,1590738990000,"2020-05-02",false]
+ - [3,"",3,22,32,1.3,2.3,1590738991000,"2020-05-03",true]
+ - [4,null,3,22,32,1.3,2.3,1590738991000,"2020-05-03",true]
+ - [5,null,3,22,32,1.3,2.3,1590738991000,"2020-05-03",null]
+ - [6,"~!@#$%^&*()_+<",3,22,32,1.3,2.3,1590738991000,"2020-05-03",null]
+ -
+ columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"]
+ indexs: ["index1:c1:c7"]
+ sqls:
+ - select * from {0} into outfile '{0}.csv';
+ - load data infile '{0}.csv' into table {1};
+ - select * from {1};
+ expect:
+ count: 6
+ -
+ id: 1
+    desc: test all data types
+ inputs:
+ -
+ columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"]
+ indexs: ["index1:c1:c7"]
+ rows:
+ - [1,"aa",1,2,3,1.1,2.1,1590738989000,"2020-05-01",true]
+ - [2,"bb",2,21,31,1.2,2.2,1590738990000,"2020-05-02",false]
+ - [3,"cc",3,22,32,1.3,2.3,1590738991000,"2020-05-03",true]
+ -
+ columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"]
+ indexs: ["index1:c1:c7"]
+ sqls:
+ - select * from {0} into outfile '{0}.csv';
+ - load data infile '{0}.csv' into table {1};
+ - select * from {1};
+ expect:
+ columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"]
+ order: id
+ rows:
+ - [1,"aa",1,2,3,1.1,2.1,1590738989000,"2020-05-01",true]
+ - [2,"bb",2,21,31,1.2,2.2,1590738990000,"2020-05-02",false]
+ - [3,"cc",3,22,32,1.3,2.3,1590738991000,"2020-05-03",true]
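+  # Note: the round-trip pattern exercised throughout this file is, roughly:
+  #   select * from src into outfile '/tmp/src.csv'
+  #     options(mode='overwrite', header=true, delimiter=',', null_value='null');
+  #   load data infile '/tmp/src.csv' into table dst
+  #     options(header=true, delimiter=',', null_value='null');
+  # src, dst, the path and the option values shown are illustrative only; the
+  # later cases vary each option (mode, header, delimiter, null_value, format)
+  # in turn.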
+ -
+ id: 2
+    desc: export the result of a complex sql
+ inputs:
+ -
+ columns : ["id int", "card_no string","merchant_id int", "trx_time timestamp", "trx_amt float"]
+ indexs: ["index1:card_no:trx_time"]
+ rows:
+ - [1, "aaaaaaaaaa",1, 1590738989000, 1.1]
+ - [2, "aaaaaaaaaa",1, 1590738990000, 2.2]
+ - [3, "bb",10, 1590738990000, 3.3]
+ -
+ columns : ["crd_lst_isu_dte timestamp", "crd_nbr string"]
+ indexs: ["index2:crd_nbr:crd_lst_isu_dte"]
+ rows:
+ - [1590738988000, "aaaaaaaaaa"]
+ - [1590738990000, "aaaaaaaaaa"]
+ - [1590738989000, "cc"]
+ - [1590738992000, "cc"]
+ -
+ columns: ["id int", "card_no string", "trx_time timestamp", "card_no_prefix string","sum_trx_amt float", "count_merchant_id int64", "crd_lst_isu_dte timestamp","crd_nbr string"]
+ sqls:
+ - select * from
+ (select
+ id,
+ card_no,
+ trx_time,
+ substr(card_no, 1, 6) as card_no_prefix,
+ sum(trx_amt) over w30d as sum_trx_amt,
+ count(merchant_id) over w10d as count_merchant_id
+ from {0}
+ window w30d as (PARTITION BY {0}.card_no ORDER BY {0}.trx_time ROWS_RANGE BETWEEN 30d PRECEDING AND CURRENT ROW),
+ w10d as (PARTITION BY {0}.card_no ORDER BY {0}.trx_time ROWS_RANGE BETWEEN 10d PRECEDING AND CURRENT ROW)) as trx_fe
+ last join {1} order by {1}.crd_lst_isu_dte on trx_fe.card_no = {1}.crd_nbr and trx_fe.trx_time >= {1}.crd_lst_isu_dte
+ into outfile '{0}.csv';
+ - load data infile '{0}.csv' into table {2};
+ - select * from {2};
+ expect:
+ columns: ["id int", "card_no string", "trx_time timestamp", "card_no_prefix string","sum_trx_amt float", "count_merchant_id int64", "crd_lst_isu_dte timestamp","crd_nbr string"]
+ order: id
+ rows:
+ - [1, "aaaaaaaaaa", 1590738989000, "aaaaaa", 1.1, 1, 1590738988000, "aaaaaaaaaa"]
+ - [2, "aaaaaaaaaa", 1590738990000, "aaaaaa", 3.3, 2, 1590738990000, "aaaaaaaaaa"]
+ - [3, "bb", 1590738990000, "bb", 3.3, 1, null, null]
+ -
+ id: 3
+    desc: test all data types
+ inputs:
+ -
+ columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"]
+ indexs: ["index1:c1:c7"]
+ rows:
+ - [1,"aa",1,2,3,1.1,2.1,1590738989000,"2020-05-01",true]
+ - [2,"bb",2,21,31,1.2,2.2,1590738990000,"2020-05-02",false]
+ - [3,"cc",3,22,32,1.3,2.3,1590738991000,"2020-05-03",true]
+ -
+ columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"]
+ indexs: ["index1:c1:c7"]
+ sqls:
+ - select * from {0} into outfile '{0}.csv';
+ - load data infile '{0}.csv' into table {1};
+ - select * from {1};
+ expect:
+ columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"]
+ order: id
+ rows:
+ - [1,"aa",1,2,3,1.1,2.1,1590738989000,"2020-05-01",true]
+ - [2,"bb",2,21,31,1.2,2.2,1590738990000,"2020-05-02",false]
+ - [3,"cc",3,22,32,1.3,2.3,1590738991000,"2020-05-03",true]
+ -
+ id: 4
+    desc: export from a query on another database
+ inputs:
+ -
+ db: db1
+ columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"]
+ indexs: ["index1:c1:c7"]
+ rows:
+ - [1,"aa",1,2,3,1.1,2.1,1590738989000,"2020-05-01",true]
+ - [2,"bb",2,21,31,1.2,2.2,1590738990000,"2020-05-02",false]
+ - [3,"cc",3,22,32,1.3,2.3,1590738991000,"2020-05-03",true]
+ -
+ columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"]
+ indexs: ["index1:c1:c7"]
+ sqls:
+ - select * from db1.{0} into outfile '{0}.csv';
+ - load data infile '{0}.csv' into table {1};
+ - select * from {1};
+ expect:
+ columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"]
+ order: id
+ rows:
+ - [1,"aa",1,2,3,1.1,2.1,1590738989000,"2020-05-01",true]
+ - [2,"bb",2,21,31,1.2,2.2,1590738990000,"2020-05-02",false]
+ - [3,"cc",3,22,32,1.3,2.3,1590738991000,"2020-05-03",true]
+ -
+ id: 5
+    desc: export the result of an insert
+ inputs:
+ -
+ columns : ["id int","c1 string","c7 timestamp"]
+ indexs: ["index1:c1:c7"]
+ sqls:
+ - insert into {0} values (1,"aa",1590738989000) outfile '{0}.csv';
+ expect:
+ success: false
+ -
+ id: 6
+    desc: sql execution error
+ inputs:
+ -
+ columns : ["id int","c1 string","c7 timestamp"]
+ indexs: ["index1:c1:c7"]
+ rows:
+ - [1,"aa",1590738989000]
+ sqls:
+ - select * from db1.{0} into outfile '{0}.csv';
+ expect:
+ success: false
+ -
+ id: 7
+    desc: default mode, file already exists
+ inputs:
+ -
+ columns : ["id int","c1 string","c7 timestamp"]
+ indexs: ["index1:c1:c7"]
+ rows:
+ - [1,"aa",1590738989000]
+ sqls:
+ - select * from {0} into outfile '{0}.csv';
+ - select * from {0} into outfile '{0}.csv';
+ expect:
+ success: false
+ -
+ id: 8
+    desc: mode=overwrite, export a larger dataset first, then a smaller one
+ inputs:
+ -
+ columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"]
+ indexs: ["index1:c1:c7"]
+ rows:
+ - [1,"aa",1,2,3,1.1,2.1,1590738989000,"2020-05-01",true]
+ - [2,"bb",2,21,31,1.2,2.2,1590738990000,"2020-05-02",false]
+ - [3,"cc",3,22,32,1.3,2.3,1590738991000,"2020-05-03",true]
+ -
+ columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"]
+ indexs: ["index1:c1:c7"]
+ rows:
+ - [1,"aa",1,2,3,1.1,2.1,1590738989000,"2020-05-01",true]
+ -
+ columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"]
+ indexs: ["index1:c1:c7"]
+ sqls:
+ - select * from {0} into outfile '{0}.csv';
+ - select * from {1} into outfile '{0}.csv' options(mode='overwrite');
+ - load data infile '{0}.csv' into table {2};
+ - select * from {2};
+ expect:
+ columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"]
+ order: id
+ rows:
+ - [1,"aa",1,2,3,1.1,2.1,1590738989000,"2020-05-01",true]
+ -
+ id: 9
+    desc: mode=append, export the same table twice
+ inputs:
+ -
+ columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"]
+ indexs: ["index1:c1:c7"]
+ rows:
+ - [1,"aa",1,2,3,1.1,2.1,1590738989000,"2020-05-01",true]
+ - [2,"bb",2,21,31,1.2,2.2,1590738990000,"2020-05-02",false]
+ - [3,"cc",3,22,32,1.3,2.3,1590738991000,"2020-05-03",true]
+ -
+ columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"]
+ indexs: ["index1:c1:c7"]
+ sqls:
+ - select * from {0} into outfile '{0}.csv';
+ - select * from {0} into outfile '{0}.csv' options(mode='append',header=false);
+ - load data infile '{0}.csv' into table {1};
+ - select * from {1};
+ expect:
+ columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"]
+ order: id
+ rows:
+ - [1,"aa",1,2,3,1.1,2.1,1590738989000,"2020-05-01",true]
+ - [2,"bb",2,21,31,1.2,2.2,1590738990000,"2020-05-02",false]
+ - [3,"cc",3,22,32,1.3,2.3,1590738991000,"2020-05-03",true]
+ - [1,"aa",1,2,3,1.1,2.1,1590738989000,"2020-05-01",true]
+ - [2,"bb",2,21,31,1.2,2.2,1590738990000,"2020-05-02",false]
+ - [3,"cc",3,22,32,1.3,2.3,1590738991000,"2020-05-03",true]
+ -
+ id: 10
+    desc: mode=append, export different tables, header=false on the second export
+ inputs:
+ -
+ columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"]
+ indexs: ["index1:c1:c7"]
+ rows:
+ - [1,"aa",1,2,3,1.1,2.1,1590738989000,"2020-05-01",true]
+ - [2,"bb",2,21,31,1.2,2.2,1590738990000,"2020-05-02",false]
+ -
+ columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"]
+ indexs: ["index1:c1:c7"]
+ rows:
+ - [3,"cc",3,22,32,1.3,2.3,1590738991000,"2020-05-03",true]
+ -
+ columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"]
+ indexs: ["index1:c1:c7"]
+ sqls:
+ - select * from {0} into outfile '{0}.csv';
+ - select * from {1} into outfile '{0}.csv' options(mode='append',header=false);
+ - load data infile '{0}.csv' into table {2};
+ - select * from {2};
+ expect:
+ columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"]
+ order: id
+ rows:
+ - [1,"aa",1,2,3,1.1,2.1,1590738989000,"2020-05-01",true]
+ - [2,"bb",2,21,31,1.2,2.2,1590738990000,"2020-05-02",false]
+ - [3,"cc",3,22,32,1.3,2.3,1590738991000,"2020-05-03",true]
+ -
+ id: 11
+    desc: mode=append, export different tables, header=true on the second export
+ inputs:
+ -
+ columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"]
+ indexs: ["index1:c1:c7"]
+ rows:
+ - [1,"aa",1,2,3,1.1,2.1,1590738989000,"2020-05-01",true]
+ - [2,"bb",2,21,31,1.2,2.2,1590738990000,"2020-05-02",false]
+ -
+ columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"]
+ indexs: ["index1:c1:c7"]
+ rows:
+ - [3,"cc",3,22,32,1.3,2.3,1590738991000,"2020-05-03",true]
+ -
+ columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"]
+ indexs: ["index1:c1:c7"]
+ sqls:
+ - select * from {0} into outfile '{0}.csv';
+ - select * from {1} into outfile '{0}.csv' options(mode='append',header=true);
+ expect:
+ cat:
+ path: "{0}.csv"
+ lines:
+ - id,c1,c2,c3,c4,c5,c6,c7,c8,c9
+ - 1,aa,1,2,3,1.100000,2.100000,1590738989000,2020-05-01,true
+ - 2,bb,2,21,31,1.200000,2.200000,1590738990000,2020-05-02,false
+ - id,c1,c2,c3,c4,c5,c6,c7,c8,c9
+ - 3,cc,3,22,32,1.300000,2.300000,1590738991000,2020-05-03,true
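+  # Note: an expect.cat block is assumed to make the runner read the exported
+  # file at "path" and compare its contents against the listed "lines", so the
+  # header/append/null_value cases can verify the raw csv output directly.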
+ -
+ id: 12
+    desc: invalid option key
+ inputs:
+ -
+ columns : ["id int","c1 string","c7 timestamp"]
+ indexs: ["index1:c1:c7"]
+ rows:
+ - [1,"aa",1590738989000]
+ sqls:
+ - select * from {0} into outfile '{0}.csv' options(head=true);
+ expect:
+ success: false
+ -
+ id: 13
+    desc: invalid value for the header option
+ inputs:
+ -
+ columns : ["id int","c1 string","c7 timestamp"]
+ indexs: ["index1:c1:c7"]
+ rows:
+ - [1,"aa",1590738989000]
+ sqls:
+ - select * from {0} into outfile '{0}.csv' options(header='true');
+ expect:
+ success: false
+ -
+ id: 14
+    desc: export with format set to an unsupported value
+ inputs:
+ -
+ columns : ["id int","c1 string","c7 timestamp"]
+ indexs: ["index1:c1:c7"]
+ rows:
+ - [1,"aa",1590738989000]
+ sqls:
+ - select * from {0} into outfile '{0}.csv' options(format='txt');
+ expect:
+ success: false
+ -
+ id: 15
+    desc: delimiter set to a special character
+ inputs:
+ -
+ columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"]
+ indexs: ["index1:c1:c7"]
+ rows:
+ - [1,"aa",1,2,3,1.1,2.1,1590738989000,"2020-05-01",true]
+ - [2,"bb",2,21,31,1.2,2.2,1590738990000,"2020-05-02",false]
+ - [3,"cc",3,22,32,1.3,2.3,1590738991000,"2020-05-03",true]
+ -
+ columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"]
+ indexs: ["index1:c1:c7"]
+ sqls:
+ - select * from {0} into outfile '{0}.csv' options(delimiter='@');
+ - load data infile '{0}.csv' into table {1} options(delimiter='@');
+ - select * from {1};
+ expect:
+ columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"]
+ order: id
+ rows:
+ - [1,"aa",1,2,3,1.1,2.1,1590738989000,"2020-05-01",true]
+ - [2,"bb",2,21,31,1.2,2.2,1590738990000,"2020-05-02",false]
+ - [3,"cc",3,22,32,1.3,2.3,1590738991000,"2020-05-03",true]
+ -
+ id: 16
+    desc: null_value set to special characters
+ inputs:
+ -
+ columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"]
+ indexs: ["index1:c1:c7"]
+ rows:
+ - [1,"aa",1,2,3,1.1,2.1,1590738989000,"2020-05-01",true]
+ - [2,"null",2,21,31,1.2,2.2,1590738990000,"2020-05-02",false]
+ - [3,null,3,22,32,1.3,2.3,1590738991000,"2020-05-03",null]
+ -
+ columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"]
+ indexs: ["index1:c1:c7"]
+ sqls:
+ - select * from {0} into outfile '{0}.csv' options(null_value='~!@#$%^&*()_+');
+ - load data infile '{0}.csv' into table {1} options(null_value='~!@#$%^&*()_+');
+ - select * from {1};
+ expect:
+ count: 3
+ -
+ id: 17
+    desc: string column contains null, empty string and "null"; null_value is ""
+ inputs:
+ -
+ columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"]
+ indexs: ["index1:c1:c7"]
+ rows:
+ - [1,"aa",1,2,3,1.1,2.1,1590738989000,"2020-05-01",true]
+ - [2,"null",2,21,31,1.2,2.2,1590738990000,"2020-05-02",false]
+ - [3,"",3,22,32,1.3,2.3,1590738991000,"2020-05-03",true]
+ - [4,null,3,22,32,1.3,2.3,1590738991000,"2020-05-03",true]
+ - [5,null,3,22,32,1.3,2.3,1590738991000,"2020-05-03",null]
+ - [6,"~!@#$%^&*()_+<",3,22,32,1.3,2.3,1590738991000,"2020-05-03",null]
+ sqls:
+ - select * from {0} into outfile '{0}.csv' options(null_value='');
+ expect:
+ cat:
+ path: "{0}.csv"
+ lines:
+ - id,c1,c2,c3,c4,c5,c6,c7,c8,c9
+ - 3,,3,22,32,1.300000,2.300000,1590738991000,2020-05-03,true
+ - 5,,3,22,32,1.300000,2.300000,1590738991000,2020-05-03,
+ - 4,,3,22,32,1.300000,2.300000,1590738991000,2020-05-03,true
+ - 1,aa,1,2,3,1.100000,2.100000,1590738989000,2020-05-01,true
+ - 6,~!@#$%^&*()_+<,3,22,32,1.300000,2.300000,1590738991000,2020-05-03,
+ - 2,null,2,21,31,1.200000,2.200000,1590738990000,2020-05-02,false
+ -
+ id: 18
+    desc: string column contains null, empty string and "null"; null_value is "null"
+ inputs:
+ -
+ columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"]
+ indexs: ["index1:c1:c7"]
+ rows:
+ - [1,"aa",1,2,3,1.1,2.1,1590738989000,"2020-05-01",true]
+ - [2,"null",2,21,31,1.2,2.2,1590738990000,"2020-05-02",false]
+ - [3,"",3,22,32,1.3,2.3,1590738991000,"2020-05-03",true]
+ - [4,null,3,22,32,1.3,2.3,1590738991000,"2020-05-03",true]
+ - [5,null,3,22,32,1.3,2.3,1590738991000,"2020-05-03",null]
+ - [6,"~!@#$%^&*()_+<",3,22,32,1.3,2.3,1590738991000,"2020-05-03",null]
+ -
+ columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"]
+ indexs: ["index1:c1:c7"]
+ sqls:
+ - select * from {0} into outfile '{0}.csv' options(null_value='null');
+ expect:
+ cat:
+ path: "{0}.csv"
+ lines:
+ - id,c1,c2,c3,c4,c5,c6,c7,c8,c9
+ - 3,,3,22,32,1.300000,2.300000,1590738991000,2020-05-03,true
+ - 5,null,3,22,32,1.300000,2.300000,1590738991000,2020-05-03,null
+ - 4,null,3,22,32,1.300000,2.300000,1590738991000,2020-05-03,true
+ - 1,aa,1,2,3,1.100000,2.100000,1590738989000,2020-05-01,true
+ - 6,~!@#$%^&*()_+<,3,22,32,1.300000,2.300000,1590738991000,2020-05-03,null
+ - 2,null,2,21,31,1.200000,2.200000,1590738990000,2020-05-02,false
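+  # Note: cases 17 and 18 show the trade-off when picking null_value: with
+  # null_value='' a NULL and an empty string both serialize to an empty field,
+  # while with null_value='null' a NULL and the literal string "null" become
+  # indistinguishable in the exported csv.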
+ -
+ id: 19
+    desc: export data with header=false
+ inputs:
+ -
+ columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"]
+ indexs: ["index1:c1:c7"]
+ rows:
+ - [1,"aa",1,2,3,1.1,2.1,1590738989000,"2020-05-01",true]
+ - [2,"bb",2,21,31,1.2,2.2,1590738990000,"2020-05-02",false]
+ - [3,"cc",3,22,32,1.3,2.3,1590738991000,"2020-05-03",true]
+ -
+ columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"]
+ indexs: ["index1:c1:c7"]
+ sqls:
+ - select * from {0} into outfile '{0}.csv' options(header=false);
+ - load data infile '{0}.csv' into table {1} options(header=false);
+ - select * from {1};
+ expect:
+ columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"]
+ order: id
+ rows:
+ - [1,"aa",1,2,3,1.1,2.1,1590738989000,"2020-05-01",true]
+ - [2,"bb",2,21,31,1.2,2.2,1590738990000,"2020-05-02",false]
+ - [3,"cc",3,22,32,1.3,2.3,1590738991000,"2020-05-03",true]
+ -
+ id: 20
+    desc: export data with format=csv
+ inputs:
+ -
+ columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"]
+ indexs: ["index1:c1:c7"]
+ rows:
+ - [1,"aa",1,2,3,1.1,2.1,1590738989000,"2020-05-01",true]
+ - [2,"bb",2,21,31,1.2,2.2,1590738990000,"2020-05-02",false]
+ - [3,"cc",3,22,32,1.3,2.3,1590738991000,"2020-05-03",true]
+ -
+ columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"]
+ indexs: ["index1:c1:c7"]
+ sqls:
+ - select * from {0} into outfile '{0}.csv' options(format='csv');
+ - load data infile '{0}.csv' into table {1} options(format='csv');
+ - select * from {1};
+ expect:
+ columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"]
+ order: id
+ rows:
+ - [1,"aa",1,2,3,1.1,2.1,1590738989000,"2020-05-01",true]
+ - [2,"bb",2,21,31,1.2,2.2,1590738990000,"2020-05-02",false]
+ - [3,"cc",3,22,32,1.3,2.3,1590738991000,"2020-05-03",true]
+ -
+ id: 21
+    desc: output directory does not exist
+ inputs:
+ -
+ columns : ["id int","c1 string","c7 timestamp"]
+ indexs: ["index1:c1:c7"]
+ rows:
+ - [1,"aa",1590738989000]
+ sqls:
+ - select * from {0} into outfile '/{0}/{0}.csv';
+ expect:
+ success: false
+ -
+ id: 22
+    desc: data types do not match
+ inputs:
+ -
+ columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"]
+ indexs: ["index1:c1:c7"]
+ rows:
+ - [1,"aa",1,2,3,1.1,2.1,1590738989000,"2020-05-01",true]
+ - [2,"bb",2,21,31,1.2,2.2,1590738990000,"2020-05-02",false]
+ - [3,"cc",3,22,32,1.3,2.3,1590738991000,"2020-05-03",true]
+ -
+ columns : ["id int","c1 int","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"]
+ indexs: ["index1:c1:c7"]
+ sqls:
+ - select * from {0} into outfile '{0}.csv';
+ - load data infile '{0}.csv' into table {1};
+ expect:
+ success: false
+ -
+ id: 23
+    desc: export data with header=true
+ inputs:
+ -
+ columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"]
+ indexs: ["index1:c1:c7"]
+ rows:
+ - [1,"aa",1,2,3,1.1,2.1,1590738989000,"2020-05-01",true]
+ - [2,"bb",2,21,31,1.2,2.2,1590738990000,"2020-05-02",false]
+ - [3,"cc",3,22,32,1.3,2.3,1590738991000,"2020-05-03",true]
+ -
+ columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"]
+ indexs: ["index1:c1:c7"]
+ sqls:
+ - select * from {0} into outfile '{0}.csv' options(header=true);
+ - load data infile '{0}.csv' into table {1} options(header=true);
+ - select * from {1};
+ expect:
+ columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"]
+ order: id
+ rows:
+ - [1,"aa",1,2,3,1.1,2.1,1590738989000,"2020-05-01",true]
+ - [2,"bb",2,21,31,1.2,2.2,1590738990000,"2020-05-02",false]
+ - [3,"cc",3,22,32,1.3,2.3,1590738991000,"2020-05-03",true]
+ -
+ id: 24
+    desc: header=true but the csv has no header
+ inputs:
+ -
+ columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"]
+ indexs: ["index1:c1:c7"]
+ rows:
+ - [1,"aa",1,2,3,1.1,2.1,1590738989000,"2020-05-01",true]
+ - [2,"bb",2,21,31,1.2,2.2,1590738990000,"2020-05-02",false]
+ - [3,"cc",3,22,32,1.3,2.3,1590738991000,"2020-05-03",true]
+ -
+ columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"]
+ indexs: ["index1:c1:c7"]
+ sqls:
+ - select * from {0} into outfile '{0}.csv' options(header=false);
+ - load data infile '{0}.csv' into table {1} options(header=true);
+ expect:
+ success: false
+ -
+ id: 25
+    desc: header=false but the csv has a header
+ inputs:
+ -
+ columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"]
+ indexs: ["index1:c1:c7"]
+ rows:
+ - [1,"aa",1,2,3,1.1,2.1,1590738989000,"2020-05-01",true]
+ - [2,"bb",2,21,31,1.2,2.2,1590738990000,"2020-05-02",false]
+ - [3,"cc",3,22,32,1.3,2.3,1590738991000,"2020-05-03",true]
+ -
+ columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"]
+ indexs: ["index1:c1:c7"]
+ sqls:
+ - select * from {0} into outfile '{0}.csv' options(header=true);
+ - load data infile '{0}.csv' into table {1} options(header=false);
+ expect:
+ success: false
+ -
+ id: 26
+    desc: table does not exist
+ inputs:
+ -
+ columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"]
+ indexs: ["index1:c1:c7"]
+ rows:
+ - [1,"aa",1,2,3,1.1,2.1,1590738989000,"2020-05-01",true]
+ - [2,"bb",2,21,31,1.2,2.2,1590738990000,"2020-05-02",false]
+ - [3,"cc",3,22,32,1.3,2.3,1590738991000,"2020-05-03",true]
+ -
+ columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"]
+ indexs: ["index1:c1:c7"]
+ sqls:
+ - select * from {0} into outfile '{0}.csv' options(header=true);
+ - load data infile '{0}.csv' into table {1}11 options(header=true);
+ expect:
+ success: false
+ -
+ id: 27
+      desc: format=csv with a csv-formatted file whose name does not end in .csv
+ inputs:
+ -
+ columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"]
+ indexs: ["index1:c1:c7"]
+ rows:
+ - [1,"aa",1,2,3,1.1,2.1,1590738989000,"2020-05-01",true]
+ - [2,"bb",2,21,31,1.2,2.2,1590738990000,"2020-05-02",false]
+ - [3,"cc",3,22,32,1.3,2.3,1590738991000,"2020-05-03",true]
+ -
+ columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"]
+ indexs: ["index1:c1:c7"]
+ sqls:
+ - select * from {0} into outfile '{0}.txt' ;
+ - load data infile '{0}.txt' into table {1} options(format='csv');
+ - select * from {1};
+ expect:
+ columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"]
+ order: id
+ rows:
+ - [1,"aa",1,2,3,1.1,2.1,1590738989000,"2020-05-01",true]
+ - [2,"bb",2,21,31,1.2,2.2,1590738990000,"2020-05-02",false]
+ - [3,"cc",3,22,32,1.3,2.3,1590738991000,"2020-05-03",true]
+ -
+ id: 28
+      desc: format set to an unsupported value
+ inputs:
+ -
+ columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"]
+ indexs: ["index1:c1:c7"]
+ rows:
+ - [1,"aa",1,2,3,1.1,2.1,1590738989000,"2020-05-01",true]
+ - [2,"bb",2,21,31,1.2,2.2,1590738990000,"2020-05-02",false]
+ - [3,"cc",3,22,32,1.3,2.3,1590738991000,"2020-05-03",true]
+ -
+ columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"]
+ indexs: ["index1:c1:c7"]
+ sqls:
+ - select * from {0} into outfile '{0}.csv';
+ - load data infile '{0}.csv' into table {1} options(format='txt');
+ expect:
+ success: false
+ -
+ id: 29
+      desc: wrong file path
+ inputs:
+ -
+ columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"]
+ indexs: ["index1:c1:c7"]
+ rows:
+ - [1,"aa",1,2,3,1.1,2.1,1590738989000,"2020-05-01",true]
+ - [2,"bb",2,21,31,1.2,2.2,1590738990000,"2020-05-02",false]
+ - [3,"cc",3,22,32,1.3,2.3,1590738991000,"2020-05-03",true]
+ -
+ columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"]
+ indexs: ["index1:c1:c7"]
+ sqls:
+ - select * from {0} into outfile '{0}.csv';
+ - load data infile '{0}1.csv' into table {1};
+ expect:
+ success: false
+ -
+ id: 30
+      desc: load into a table in another database
+ inputs:
+ -
+ columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"]
+ indexs: ["index1:c1:c7"]
+ rows:
+ - [1,"aa",1,2,3,1.1,2.1,1590738989000,"2020-05-01",true]
+ - [2,"bb",2,21,31,1.2,2.2,1590738990000,"2020-05-02",false]
+ - [3,"cc",3,22,32,1.3,2.3,1590738991000,"2020-05-03",true]
+ -
+ db: db1
+ columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"]
+ indexs: ["index1:c1:c7"]
+ sqls:
+ - select * from {0} into outfile '{0}.csv';
+ - load data infile '{0}.csv' into table db1.{1};
+ - select * from db1.{1};
+ expect:
+ columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"]
+ order: id
+ rows:
+ - [1,"aa",1,2,3,1.1,2.1,1590738989000,"2020-05-01",true]
+ - [2,"bb",2,21,31,1.2,2.2,1590738990000,"2020-05-02",false]
+ - [3,"cc",3,22,32,1.3,2.3,1590738991000,"2020-05-03",true]
+ -
+ id: 31
+      desc: export and then import back
+ inputs:
+ -
+ columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"]
+ indexs: ["index1:c1:c7"]
+ rows:
+ - [1,"aa",1,2,3,1.1,2.1,1590738989000,"2020-05-01",true]
+ - [2,"bb",2,21,31,1.2,2.2,1590738990000,"2020-05-02",false]
+ - [3,"cc",3,22,32,1.3,2.3,1590738991000,"2020-05-03",true]
+ sqls:
+ - select * from {0} into outfile '{0}.csv';
+ - load data infile '{0}.csv' into table {0};
+ - select * from {0};
+ expect:
+ columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"]
+ order: id
+ rows:
+ - [1,"aa",1,2,3,1.1,2.1,1590738989000,"2020-05-01",true]
+ - [2,"bb",2,21,31,1.2,2.2,1590738990000,"2020-05-02",false]
+ - [3,"cc",3,22,32,1.3,2.3,1590738991000,"2020-05-03",true]
+ - [1,"aa",1,2,3,1.1,2.1,1590738989000,"2020-05-01",true]
+ - [2,"bb",2,21,31,1.2,2.2,1590738990000,"2020-05-02",false]
+ - [3,"cc",3,22,32,1.3,2.3,1590738991000,"2020-05-03",true]
+ -
+ id: 32
+      desc: table columns do not match the csv
+ inputs:
+ -
+ columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"]
+ indexs: ["index1:c1:c7"]
+ rows:
+ - [1,"aa",1,2,3,1.1,2.1,1590738989000,"2020-05-01",true]
+ - [2,"bb",2,21,31,1.2,2.2,1590738990000,"2020-05-02",false]
+ - [3,"cc",3,22,32,1.3,2.3,1590738991000,"2020-05-03",true]
+ -
+ columns : ["id int","c1 string","cc smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"]
+ indexs: ["index1:c1:c7"]
+ sqls:
+ - select * from {0} into outfile '{0}.csv';
+        - load data infile '{0}.csv' into table {1};
+ expect:
+ success: false
+ -
+ id: 33
+      desc: load into a table that already has data
+ inputs:
+ -
+ columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"]
+ indexs: ["index1:c1:c7"]
+ rows:
+ - [1,"aa",1,2,3,1.1,2.1,1590738989000,"2020-05-01",true]
+ - [2,"bb",2,21,31,1.2,2.2,1590738990000,"2020-05-02",false]
+ - [3,"cc",3,22,32,1.3,2.3,1590738991000,"2020-05-03",true]
+ -
+ columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"]
+ indexs: ["index1:c1:c7"]
+ rows:
+ - [1,"aa",1,2,3,1.1,2.1,1590738989000,"2020-05-01",true]
+ - [2,"bb",2,21,31,1.2,2.2,1590738990000,"2020-05-02",false]
+ - [3,"cc",3,22,32,1.3,2.3,1590738991000,"2020-05-03",true]
+ sqls:
+ - select * from {0} into outfile '{0}.csv';
+ - load data infile '{0}.csv' into table {1};
+ - select * from {1};
+ expect:
+ columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"]
+ order: id
+ rows:
+ - [1,"aa",1,2,3,1.1,2.1,1590738989000,"2020-05-01",true]
+ - [2,"bb",2,21,31,1.2,2.2,1590738990000,"2020-05-02",false]
+ - [3,"cc",3,22,32,1.3,2.3,1590738991000,"2020-05-03",true]
+ - [1,"aa",1,2,3,1.1,2.1,1590738989000,"2020-05-01",true]
+ - [2,"bb",2,21,31,1.2,2.2,1590738990000,"2020-05-02",false]
+ - [3,"cc",3,22,32,1.3,2.3,1590738991000,"2020-05-03",true]
+ -
+ id: 34
+      desc: delimiter is ',' and the data contains ','
+ inputs:
+ -
+ columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"]
+ indexs: ["index1:c1:c7"]
+ rows:
+ - [1,"aa",1,2,3,1.1,2.1,1590738989000,"2020-05-01",true]
+ - [2,"b,b",2,21,31,1.2,2.2,1590738990000,"2020-05-02",false]
+ - [3,"cc",3,22,32,1.3,2.3,1590738991000,"2020-05-03",true]
+ -
+ columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"]
+ indexs: ["index1:c1:c7"]
+ sqls:
+ - select * from {0} into outfile '{0}.csv';
+ - load data infile '{0}.csv' into table {1} options(delimiter=',');
+ expect:
+ success: false
+ -
+ id: 35
+      desc: load with null_value='null'
+ inputs:
+ -
+ columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"]
+ indexs: ["index1:c1:c7"]
+ rows:
+ - [1,"aa",1,2,3,1.1,2.1,1590738989000,"2020-05-01",true]
+ - [2,"null",2,21,31,1.2,2.2,1590738990000,"2020-05-02",false]
+ - [3,null,3,22,32,1.3,2.3,1590738991000,"2020-05-03",null]
+ -
+ columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"]
+ indexs: ["index1:c1:c7"]
+ sqls:
+ - select * from {0} into outfile '{0}.csv' options(null_value='null');
+ - load data infile '{0}.csv' into table {1} options(null_value='null');
+ - select * from {1};
+ expect:
+ count: 3
+ -
+ id: 36
+      desc: load with null_value set to the empty string
+ inputs:
+ -
+ columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"]
+ indexs: ["index1:c1:c7"]
+ rows:
+ - [1,"aa",1,2,3,1.1,2.1,1590738989000,"2020-05-01",true]
+ - [2,"null",2,21,31,1.2,2.2,1590738990000,"2020-05-02",false]
+ - [3,null,3,22,32,1.3,2.3,1590738991000,"2020-05-03",null]
+ -
+ columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"]
+ indexs: ["index1:c1:c7"]
+ sqls:
+ - select * from {0} into outfile '{0}.csv' options(null_value='');
+ - load data infile '{0}.csv' into table {1} options(null_value='');
+ - select * from {1};
+ expect:
+ count: 3
+ -
+ id: 37
+      desc: load again after the table is dropped and recreated
+ inputs:
+ -
+ columns : ["id int","c1 string","c7 timestamp"]
+ indexs: ["index1:c1:c7"]
+ rows:
+ - [1,"aa",1590738989000]
+ - [2,"bb",1590738990000]
+ - [3,"cc",1590738991000]
+ -
+ columns : ["id int","c1 string","c7 timestamp"]
+ indexs: ["index1:c1:c7"]
+ sqls:
+ - select * from {0} into outfile '{0}.csv';
+ - load data infile '{0}.csv' into table {1};
+ - drop table {1};
+ - create table {1}(
+ id int,
+ c1 string,
+ c7 timestamp,
+ index(key=(c1),ts=c7))options(partitionnum=1,replicanum=1);
+ - load data infile '{0}.csv' into table {1};
+ - select * from {1};
+ expect:
+ columns : ["id int","c1 string","c7 timestamp"]
+ order: id
+ rows:
+ - [1,"aa",1590738989000]
+ - [2,"bb",1590738990000]
+ - [3,"cc",1590738991000]
+ -
+ id: 38
+      desc: invalid mode value
+ inputs:
+ -
+ columns : ["id int","c1 string","c7 timestamp"]
+ indexs: ["index1:c1:c7"]
+ rows:
+ - [1,"aa",1590738989000]
+ sqls:
+ - select * from {0} into outfile '{0}.csv' options(mode='true');
+ expect:
+ success: false
+
+
+
diff --git a/cases/integration_test/select/test_select_sample.yaml b/cases/integration_test/select/test_select_sample.yaml
new file mode 100644
index 00000000000..3a2e0e164f3
--- /dev/null
+++ b/cases/integration_test/select/test_select_sample.yaml
@@ -0,0 +1,313 @@
+# Copyright 2021 4Paradigm
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+db: test_zw
+debugs: ["limit 0","with limit","limit 1","limit larger than the row count"]
+version: 0.5.0
+cases:
+ - id: 0
+    desc: select all columns
+ inputs:
+ - columns: ["c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"]
+ indexs: ["index1:c1:c7"]
+ rows:
+ - ["aa",2,3,1.1,2.1,1590738989000,"2020-05-01"]
+ sql: select c1,c3,c4,c5,c6,c7,c8 from {0};
+ expect:
+ columns: ["c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"]
+ rows:
+ - ["aa",2,3,1.1,2.1,1590738989000,"2020-05-01"]
+ - id: 1
+    desc: select a subset of columns
+ inputs:
+ - columns: ["c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"]
+ indexs: ["index1:c1:c7"]
+ rows:
+ - ["aa",2,3,1.1,2.1,1590738989000,"2020-05-01"]
+ sql: select c1,c3,c4 from {0};
+ expect:
+ columns: ["c1 string","c3 int","c4 bigint"]
+ rows:
+ - ["aa",2,3]
+ - id: 2
+    desc: select *
+ inputs:
+ - columns: ["c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"]
+ indexs: ["index1:c1:c7"]
+ rows:
+ - ["aa",2,3,1.1,2.1,1590738989000,"2020-05-01"]
+ sql: select * from {0};
+ expect:
+ columns: ["c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"]
+ rows:
+ - ["aa",2,3,1.1,2.1,1590738989000,"2020-05-01"]
+ - id: 3
+    desc: rename some of the selected columns
+ inputs:
+ - columns: ["c1 string","c2 int","c3 bigint","c4 timestamp"]
+ indexs: ["index1:c1:c4"]
+ rows:
+ - ["aa",2,3,1590738989000]
+ sql: select c1 as name,c2,c3,c4 from {0};
+ expect:
+ columns: ["name string","c2 int","c3 bigint","c4 timestamp"]
+ rows:
+ - ["aa",2,3,1590738989000]
+ - id: 4
+    desc: rename all of the selected columns
+ inputs:
+ - columns: ["c1 string","c2 int","c3 bigint","c4 timestamp"]
+ indexs: ["index1:c1:c4"]
+ rows:
+ - ["aa",2,3,1590738989000]
+ sql: select c1 as name,c2 as v2,c3 as v3 ,c4 as v4 from {0};
+ expect:
+ columns: ["name string","v2 int","v3 bigint","v4 timestamp"]
+ rows:
+ - ["aa",2,3,1590738989000]
+ - id: 5
+    desc: some selected columns qualified with the table name
+ inputs:
+ - columns: ["c1 string","c2 int","c3 bigint","c4 timestamp"]
+ indexs: ["index1:c1:c4"]
+ rows:
+ - ["aa",2,3,1590738989000]
+ sql: select c1 as name,{0}.c2,c3,c4 from {0};
+ expect:
+ columns: ["name string","c2 int","c3 bigint","c4 timestamp"]
+ rows:
+ - ["aa",2,3,1590738989000]
+ - id: 6
+    desc: queried table does not exist
+ inputs:
+ - columns: ["c1 string","c2 int","c3 bigint","c4 timestamp"]
+ indexs: ["index1:c1:c4"]
+ rows:
+ - ["aa",2,3,1590738989000]
+ sql: select c1,c2,c3,c4 from {0}1;
+ expect:
+ success: false
+ - id: 7
+    desc: queried column does not exist
+ inputs:
+ - columns: ["c1 string","c2 int","c3 bigint","c4 timestamp"]
+ indexs: ["index1:c1:c4"]
+ rows:
+ - ["aa",2,3,1590738989000]
+ sql: select c1,c2,c3,c5 from {0};
+ expect:
+ success: false
+ - id: 8
+    desc: queried data contains empty strings
+ mode: cli-unsupport
+ inputs:
+ - columns: ["c1 string","c2 int","c3 string","c4 timestamp"]
+ indexs: ["index1:c1:c4"]
+ rows:
+ - ["",2,"",1590738989000]
+ sql: select c1,c2,c3,c4 from {0};
+ expect:
+ columns: ["c1 string","c2 int","c3 string","c4 timestamp"]
+ rows:
+ - ["",2,"",1590738989000]
+ - id: 9
+    desc: queried data contains null
+ inputs:
+ - columns: ["c1 string","c2 int","c3 string","c4 timestamp"]
+ indexs: ["index1:c1:c4"]
+ rows:
+ - [NULL,2,NULL,1590738989000]
+ sql: select c1,c2,c3,c4 from {0};
+ expect:
+ columns: ["c1 string","c2 int","c3 string","c4 timestamp"]
+ rows:
+ - [NULL,2,NULL,1590738989000]
+ - id: 10
+    desc: with limit
+ mode: request-unsupport
+ tags: ["TODO", "@zhaowei"]
+ inputs:
+ - columns: ["c1 string","c2 int","c3 bigint","c4 timestamp"]
+ indexs: ["index1:c1:c4"]
+ rows:
+ - ["aa",2,3,1590738989000]
+ - ["bb",21,31,1590738990000]
+ - ["cc",41,51,1590738991000]
+ sql: select c1,c2,c3,c4 from {0} limit 2;
+ expect:
+ columns: ["c1 string","c2 int","c3 bigint","c4 timestamp"]
+ rows:
+ - ["aa",2,3,1590738989000]
+ - ["cc",41,51,1590738991000]
+ - id: 11
+ desc: limit 1
+ mode: request-unsupport
+ tags: ["TODO", "@zhaowei"]
+ inputs:
+ - columns: ["c1 string","c2 int","c3 bigint","c4 timestamp"]
+ indexs: ["index1:c1:c4"]
+ rows:
+ - ["aa",2,3,1590738989000]
+ - ["bb",21,31,1590738990000]
+ - ["cc",41,51,1590738991000]
+ sql: select c1,c2,c3,c4 from {0} limit 1;
+ expect:
+ columns: ["c1 string","c2 int","c3 bigint","c4 timestamp"]
+ rows:
+ - ["aa",2,3,1590738989000]
+ - id: 12
+ mode: request-unsupport
+ desc: limit 0
+ tags: ["TODO", "@zhaowei"]
+ inputs:
+ - columns: ["c1 string","c2 int","c3 bigint","c4 timestamp"]
+ indexs: ["index1:c1:c4"]
+ rows:
+ - ["aa",2,3,1590738989000]
+ - ["bb",21,31,1590738990000]
+ - ["cc",41,51,1590738991000]
+ sql: select c1,c2,c3,c4 from {0} limit 0;
+ expect:
+ columns: ["c1 string","c2 int","c3 bigint","c4 timestamp"]
+ order: c1
+ rows:
+ - ["aa",2,3,1590738989000]
+ - ["bb",21,31,1590738990000]
+ - ["cc",41,51,1590738991000]
+ - id: 13
+    desc: limit larger than the row count
+ mode: request-unsupport
+    tags: ["TODO","@zhaowei LIMIT is tested separately, skip it for now"]
+ inputs:
+ - columns: ["c1 string","c2 int","c3 bigint","c4 timestamp"]
+ indexs: ["index1:c1:c4"]
+ rows:
+ - ["aa",2,3,1590738989000]
+ - ["bb",21,31,1590738990000]
+ - ["cc",41,51,1590738991000]
+ sql: select c1,c2,c3,c4 from {0} limit 4;
+ expect:
+ columns: ["c1 string","c2 int","c3 bigint","c4 timestamp"]
+ order: c1
+ rows:
+ - ["aa",2,3,1590738989000]
+ - ["bb",21,31,1590738990000]
+ - ["cc",41,51,1590738991000]
+ - id: 14
+    desc: select a constant
+ sqlDialect: ["HybridSQL","SQLITE3"]
+    tags: ["constant types differ between fesql and mysql"]
+ inputs:
+ - columns: ["c1 string","c2 int","c3 bigint","c4 timestamp"]
+ indexs: ["index1:c1:c4"]
+ rows:
+ - ["aa",2,3,1590738989000]
+ sql: select c1 as name,c2,c3,c4,1 from {0};
+ expect:
+ columns: ["name string","c2 int","c3 bigint","c4 timestamp","1 int"]
+ rows:
+ - ["aa",2,3,1590738989000,1]
+ - id: 15
+    desc: selected columns with table name and aliases
+ inputs:
+ - columns: ["c1 string","c2 int","c3 bigint","c4 timestamp"]
+ indexs: ["index1:c1:c4"]
+ rows:
+ - ["aa",2,3,1590738989000]
+ sql: select {0}.c1 as name,{0}.c2 as t_c2,{0}.c3 as t_c3,{0}.c4 as t_c4 from {0};
+ expect:
+ columns: ["name string","t_c2 int","t_c3 bigint","t_c4 timestamp"]
+ rows:
+ - ["aa",2,3,1590738989000]
+ - id: 16
+    desc: expression in select list uses the table name
+ sqlDialect: ["HybridSQL","SQLITE3"]
+    tags: ["expression result types differ between fesql and mysql"]
+ inputs:
+ - columns: ["c1 string","c2 int","c3 bigint","c4 timestamp"]
+ indexs: ["index1:c1:c4"]
+ rows:
+ - ["aa",2,3,1590738989000]
+ sql: select c1 as name,{0}.c2+1 as t_c2,c3,c4 from {0};
+ expect:
+ columns: ["name string","t_c2 int","c3 bigint","c4 timestamp"]
+ rows:
+ - ["aa",3,3,1590738989000]
+ - id: 17
+    desc: function expression in select list uses the table name
+ sqlDialect: ["HybridSQL","SQLITE3"]
+ inputs:
+ - columns: ["c1 string","c2 int","c3 bigint","c4 timestamp"]
+ indexs: ["index1:c1:c4"]
+ rows:
+ - ["123456789",2,3,1590738989000]
+ sql: select substr({0}.c1, 3, 6) as name,{0}.c2+1 as t_c2,c3,c4 from {0};
+ expect:
+ columns: ["name string","t_c2 int","c3 bigint","c4 timestamp"]
+ rows:
+ - ["345678",3,3,1590738989000]
+ - id: 18
+ desc: column name prefix with _
+ mode: offline-unsupport
+ sqlDialect: ["HybridSQL"]
+ tags: ["@chendihao, @baoxinqi, support simple project node with column cast"]
+ inputs:
+ - columns: ["_c1 int", "_c2 string", "_c5 bigint"]
+ indexs: ["index1:_c1:_c5"]
+ rows:
+ - [1, "2020-05-22 10:43:40", 1]
+ sql: |
+ select _c1, bigint(_c2) DIV 1000 as _c2_sec from (select _c1, timestamp(_c2) as _c2 from {0});
+ expect:
+ columns: ["_c1 int", "_c2_sec bigint"]
+ rows:
+ - [1, 1590115420]
+ - id: 19
+    desc: full-table aggregation
+ mode: rtidb-unsupport,offline-unsupport,cli-unsupport
+ db: db1
+ sqlDialect: ["HybridSQL", "MYSQL"]
+ sql: |
+ SELECT SUM(col1) as sum_col1, COUNT(col1) as cnt_col1, MAX(col1) as max_col1,
+ MIN(col1) as min_col1, AVG(col1) as avg_col1 FROM {0};
+ inputs:
+ - columns: ["col0 string", "col1 int32", "col2 int16", "col3 float", "col4 double", "col5 int64", "col6 string"]
+ indexs: ["index1:col2:col5"]
+ rows:
+ - [0, 1, 5, 1.1, 11.1, 1, 1]
+ - [0, 2, 5, 2.2, 22.2, 2, 22]
+ - [1, 3, 55, 3.3, 33.3, 1, 333]
+ - [1, 4, 55, 4.4, 44.4, 2, 4444]
+ - [2, 5, 55, 5.5, 55.5, 3, aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa]
+ batch_plan: |
+ PROJECT(type=Aggregation)
+ DATA_PROVIDER(table=auto_t0)
+ expect:
+ columns: ["sum_col1 int32", "cnt_col1 int64", "max_col1 int32", "min_col1 int32", "avg_col1 double"]
+ order: sum_col1
+ rows:
+ - [15, 5, 5, 1, 3]
+ -
+    id: 20
+    desc: insert data without specifying an index and query it
+ inputs:
+ - columns: [ "id int not null","c1 int not null","c2 smallint not null","c3 float not null","c4 double not null","c5 bigint not null","c6 string not null","c7 timestamp not null","c8 date not null","c9 bool not null" ]
+ rows:
+ - [ 1, 1, 2, 3.3, 4.4, 5, "aa", 12345678, "2020-05-21", true ]
+ sql: select * from {0};
+ expect:
+ columns : ["id int","c1 int","c2 smallint","c3 float","c4 double","c5 bigint","c6 string","c7 timestamp","c8 date","c9 bool"]
+ order: id
+ rows:
+ - [1,1,2,3.3,4.4,5,"aa",12345678,"2020-05-21",true]
\ No newline at end of file
diff --git a/cases/integration_test/select/test_sub_select.yaml b/cases/integration_test/select/test_sub_select.yaml
new file mode 100644
index 00000000000..f7b89154011
--- /dev/null
+++ b/cases/integration_test/select/test_sub_select.yaml
@@ -0,0 +1,358 @@
+# Copyright 2021 4Paradigm
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+db: test_zw
+debugs: []
+version: 0.5.0
+cases:
+ -
+ id: 0
+    desc: basic subquery
+ sqlDialect: ["HybridSQL","SQLITE3"]
+ mode: cli-unsupport
+    tags: ["mysql requires derived tables to have an alias"]
+ inputs:
+ -
+ columns : ["c1 string","c2 int","c3 bigint","c4 timestamp"]
+ indexs: ["index1:c1:c4"]
+ rows:
+ - ["aa",2,3,1590738989000]
+ - ["bb",21,31,1590738990000]
+ - ["cc",41,51,1590738991000]
+ sql: select * from (select c1, c2+1,c3+1 from {0});
+ expect:
+ columns: ["c1 string", "c2 + 1 int", "c3 + 1 bigint"]
+ order: c1
+ rows:
+ - ["aa", 3,4]
+ - ["bb", 22,32]
+ - ["cc", 42,52]
+ -
+ id: 1
+    desc: column aliases in a subquery
+ sqlDialect: ["HybridSQL","SQLITE3"]
+ inputs:
+ -
+ columns : ["c1 string","c2 int","c3 bigint","c4 timestamp"]
+ indexs: ["index1:c1:c4"]
+ rows:
+ - ["aa",2,3,1590738989000]
+ - ["bb",21,31,1590738990000]
+ - ["cc",41,51,1590738991000]
+ sql: select v2,v3 from (select c2+1 as v2,c3+1 as v3 from {0}) as t;
+ expect:
+ columns: ["v2 int","v3 bigint"]
+ order: v2
+ rows:
+ - [3,4]
+ - [22,32]
+ - [42,52]
+ -
+ id: 2
+    desc: constant in a subquery
+ sqlDialect: ["HybridSQL","SQLITE3"]
+ inputs:
+ -
+ columns : ["c1 string","c2 int","c3 bigint","c4 timestamp"]
+ indexs: ["index1:c1:c4"]
+ rows:
+ - ["aa",2,3,1590738989000]
+ - ["bb",21,31,1590738990000]
+ - ["cc",41,51,1590738991000]
+ sql: select * from (select c2+1 as v2,c3+1 as v3,1 as v4 from {0}) as t;
+ expect:
+ columns: ["v2 int","v3 bigint","v4 int"]
+ order: v2
+ rows:
+ - [3,4,1]
+ - [22,32,1]
+ - [42,52,1]
+ -
+ id: 3
+    desc: subquery contains empty strings
+ mode: cli-unsupport
+ sqlDialect: ["HybridSQL","SQLITE3"]
+ inputs:
+ -
+ columns : ["c1 string","c2 int","c3 bigint","c4 timestamp"]
+ indexs: ["index1:c1:c4"]
+ rows:
+ - ["aa",2,3,1590738989000]
+ - ["",21,31,1590738990000]
+ - ["cc",41,51,1590738991000]
+ sql: select * from (select c1,c2+1 as v2,c3+1 as v3,1 as v4 from {0}) as t;
+ expect:
+ columns: ["c1 string","v2 int","v3 bigint","v4 int"]
+ order: c1
+ rows:
+ - ["",22,32,1]
+ - ["aa",3,4,1]
+ - ["cc",42,52,1]
+ -
+ id: 4
+    desc: subquery contains null
+ inputs:
+ -
+ columns : ["id int","c1 string","c2 int","c3 bigint","c4 timestamp"]
+ indexs: ["index1:c1:c4"]
+ rows:
+ - [1,"aa",2,3,1590738989000]
+ - [2,NULL,21,31,1590738990000]
+ - [3,"cc",41,51,1590738991000]
+ sql: select * from (select id,c1,c3+1 as v3 from {0}) as t;
+ expect:
+ columns: ["id int","c1 string","v3 bigint"]
+ order: id
+ rows:
+ - [1,"aa",4]
+ - [2,null,32]
+ - [3,"cc",52]
+ -
+ id: 5
+    desc: selected column is not in the subquery
+ sqlDialect: ["HybridSQL"]
+ inputs:
+ -
+ columns : ["c1 string","c2 int","c3 bigint","c4 timestamp"]
+ indexs: ["index1:c1:c4"]
+ rows:
+ - ["aa",2,3,1590738989000]
+ - ["bb",21,31,1590738990000]
+ - ["cc",41,51,1590738991000]
+ sql: select v5 from (select c1,c2+1 as v2,c3+1 as v3,1 as v4 from {0});
+ expect:
+ success: false
+ -
+ id: 6
+    desc: last join a subquery with the secondary table, the subquery contains a window
+ sqlDialect: ["HybridSQL"]
+ inputs:
+ -
+ columns : ["id int", "card_no string","merchant_id int", "trx_time timestamp", "trx_amt float"]
+ indexs: ["index1:card_no:trx_time"]
+ rows:
+ - [1, "aaaaaaaaaa",1, 1590738989000, 1.1]
+ - [2, "aaaaaaaaaa",1, 1590738990000, 2.2]
+ - [3, "bb",10, 1590738990000, 3.3]
+ -
+ columns : ["crd_lst_isu_dte timestamp", "crd_nbr string"]
+ indexs: ["index2:crd_nbr:crd_lst_isu_dte"]
+ rows:
+ - [1590738988000, "aaaaaaaaaa"]
+ - [1590738990000, "aaaaaaaaaa"]
+ - [1590738989000, "cc"]
+ - [1590738992000, "cc"]
+ sql: select * from
+ (select
+ id,
+ card_no,
+ trx_time,
+ substr(card_no, 1, 6) as card_no_prefix,
+ sum(trx_amt) over w30d as sum_trx_amt,
+ count(merchant_id) over w10d as count_merchant_id
+ from {0}
+ window w30d as (PARTITION BY {0}.card_no ORDER BY {0}.trx_time ROWS_RANGE BETWEEN 30d PRECEDING AND CURRENT ROW),
+ w10d as (PARTITION BY {0}.card_no ORDER BY {0}.trx_time ROWS_RANGE BETWEEN 10d PRECEDING AND CURRENT ROW)) as trx_fe
+ last join {1} order by {1}.crd_lst_isu_dte on trx_fe.card_no = {1}.crd_nbr and trx_fe.trx_time >= {1}.crd_lst_isu_dte;
+ expect:
+ columns: ["id int", "card_no string", "trx_time timestamp", "card_no_prefix string",
+ "sum_trx_amt float", "count_merchant_id int64", "crd_lst_isu_dte timestamp",
+ "crd_nbr string"]
+ order: id
+ rows:
+ - [1, "aaaaaaaaaa", 1590738989000, "aaaaaa", 1.1, 1, 1590738988000, "aaaaaaaaaa"]
+ - [2, "aaaaaaaaaa", 1590738990000, "aaaaaa", 3.3, 2, 1590738990000, "aaaaaaaaaa"]
+ - [3, "bb", 1590738990000, "bb", 3.3, 1, null, null]
+ -
+ id: 7
+    desc: both the window primary table and the secondary table are subqueries
+ sqlDialect: ["HybridSQL"]
+ mode: python-unsupport, cluster-unsupport,cli-unsupport
+ inputs:
+ -
+ columns : ["id int", "card_no string","merchant_id int", "trx_time timestamp", "trx_amt float"]
+
+ indexs: ["index1:card_no:trx_time"]
+ rows:
+ - [1, "aaaaaaaaaa",1, 1590738989000, 1.1]
+ - [2, "aaaaaaaaaa",1, 1590738991000, 2.2]
+ - [3, "bb",10, 1590738990000, 3.3]
+ -
+ columns : ["crd_lst_isu_dte timestamp", "crd_nbr string"]
+ indexs: ["index2:crd_nbr:crd_lst_isu_dte"]
+ rows:
+ - [1590738988000, "aaaaaaaaaa"]
+ - [1590738990000, "aaaaaaaaaa"]
+ - [1590738989000, "cc"]
+ - [1590738992000, "cc"]
+ sql: |
+ select
+ id,
+ card_no,
+ trx_time,
+ substr(card_no, 1, 6) as card_no_prefix,
+ sum(trx_amt) over w30d as w30d_amt_sum,
+ count(id) over w10d as w10d_id_cnt
+ from (select id, card_no, trx_time, trx_amt from {0}) as t_instance
+ window w30d as (PARTITION BY card_no ORDER BY trx_time ROWS_RANGE BETWEEN 30d PRECEDING AND CURRENT ROW),
+ w10d as (UNION (select 0 as id, crd_nbr as card_no, crd_lst_isu_dte as trx_time, 0.0f as trx_amt from {1}) PARTITION BY card_no ORDER BY trx_time ROWS_RANGE BETWEEN 10d PRECEDING AND CURRENT ROW);
+ expect:
+ columns: ["id int", "card_no string", "trx_time timestamp", "card_no_prefix string",
+ "w30d_amt_sum float", "w10d_id_cnt int64"]
+ order: id
+ rows:
+ - [1, "aaaaaaaaaa", 1590738989000, "aaaaaa", 1.1, 2]
+ - [2, "aaaaaaaaaa", 1590738991000, "aaaaaa", 3.3, 4]
+ - [3, "bb", 1590738990000, "bb", 3.3, 1]
+ -
+ id: 8
+    desc: both the window primary table and the secondary table are subqueries, with INSTANCE_NOT_IN_WINDOW
+ sqlDialect: ["HybridSQL"]
+ mode: python-unsupport
+ inputs:
+ -
+ columns : ["id int", "user_id string", "card_no string","merchant_id int", "trx_time timestamp", "trx_amt float"]
+
+ indexs: ["index1:user_id:trx_time"]
+ rows:
+ - [1, "aaaaaaaaaa", "xxx", 1, 1590738989000, 1.1]
+ - [2, "aaaaaaaaaa", "xxx", 1, 1590738991000, 2.2]
+ - [3, "bb", "000", 10, 1590738990000, 3.3]
+ - [4, "cc", "zzz", 20, 1590738993000, 4.4]
+ -
+ columns : ["crd_lst_isu_dte timestamp", "crd_nbr string", "account_amt double"]
+ indexs: ["index2:crd_nbr:crd_lst_isu_dte"]
+ rows:
+ - [1590738988000, "xxx", 100.0]
+ - [1590738990000, "xxx", 200.0]
+ - [1590738990000, "yyy", 300.0]
+ - [1590738989000, "zzz", 400.0]
+ - [1590738992000, "zzz", 500.0]
+ sql: |
+ select id as out2_id,
+ crd_nbr,
+ count(id) over w10d as w10d_id_cnt,
+ sum(account_amt) over w10d as w10d_total_account_amt
+ from (select id as id, trx_time as crd_lst_isu_dte, card_no as crd_nbr, 0.0 as account_amt from {0}) as t_instance
+ window w10d as (UNION (select 0 as id, crd_lst_isu_dte, crd_nbr, account_amt from {1})
+ PARTITION BY crd_nbr ORDER BY crd_lst_isu_dte ROWS_RANGE BETWEEN 10d PRECEDING AND CURRENT ROW INSTANCE_NOT_IN_WINDOW);
+
+ expect:
+ columns: ["out2_id int", "crd_nbr string", "w10d_id_cnt int64", "w10d_total_account_amt double"]
+ order: out2_id
+ rows:
+ - [1, "xxx", 2, 100.0]
+ - [2, "xxx", 3, 300.0]
+ - [3, "000", 1, 0.0]
+ - [4, "zzz", 3, 900.0]
+ -
+ id: 9
+    desc: feature concatenation
+ mode: offline-unsupport, python-unsupport,cli-unsupport
+ sqlDialect: ["HybridSQL"]
+ inputs:
+ -
+ columns : ["id int", "user_id string", "card_no string","merchant_id int", "trx_time timestamp", "trx_amt float"]
+
+ indexs: ["index1:user_id:trx_time"]
+ rows:
+ - [1, "aaaaaaaaaa", "xxx", 1, 1590738989000, 1.1]
+ - [2, "aaaaaaaaaa","xxx", 1, 1590738991000, 2.2]
+ - [3, "bb", "000", 10, 1590738990000, 3.3]
+ - [4, "cc", "zzz", 20, 1590738993000, 4.4]
+ -
+ columns : ["crd_lst_isu_dte timestamp", "crd_nbr string", "account_amt double"]
+ indexs: ["index2:crd_nbr:crd_lst_isu_dte"]
+ rows:
+ - [1590738988000, "xxx", 100.0]
+ - [1590738990000, "xxx", 200.0]
+ - [1590738990000, "yyy", 300.0]
+ - [1590738989000, "zzz", 400.0]
+ - [1590738992000, "zzz", 500.0]
+ sql: |
+ select * from
+ ( select
+ id as out1_id,
+ user_id,
+ trx_time,
+ sum(trx_amt) over w30d as w30d_amt_sum
+ from (select id, user_id, trx_time, trx_amt from {0}) as t_instance
+ window w30d as (PARTITION BY user_id ORDER BY trx_time ROWS_RANGE BETWEEN 30d PRECEDING AND CURRENT ROW)
+ ) as out1 last join
+ ( select id as out2_id,
+ crd_nbr,
+ count(id) over w10d as w10d_id_cnt,
+ sum(account_amt) over w10d as w10d_total_account_amt
+ from (select id as id, trx_time as crd_lst_isu_dte, card_no as crd_nbr, 0.0 as account_amt from {0}) as t_instance
+ window w10d as (UNION (select 0 as id, crd_lst_isu_dte, crd_nbr, account_amt from {1})
+ PARTITION BY crd_nbr ORDER BY crd_lst_isu_dte ROWS_RANGE BETWEEN 10d PRECEDING AND CURRENT ROW INSTANCE_NOT_IN_WINDOW)) as out2
+ on out1.out1_id=out2.out2_id;
+
+ expect:
+ columns: ["out1_id int", "user_id string", "trx_time timestamp",
+ "w30d_amt_sum float", "out2_id int", "crd_nbr string", "w10d_id_cnt int64", "w10d_total_account_amt double"]
+ order: out1_id
+ rows:
+ - [1, "aaaaaaaaaa", 1590738989000, 1.1, 1, "xxx", 2, 100.0]
+ - [2, "aaaaaaaaaa", 1590738991000, 3.3, 2, "xxx", 3, 300.0]
+ - [3, "bb", 1590738990000, 3.3, 3, "000", 1, 0.0]
+ - [4, "cc", 1590738993000, 4.4, 4, "zzz", 3, 900.0]
+ -
+ id: 10
+    desc: duplicate column aliases in a subquery
+ sqlDialect: ["HybridSQL"]
+    tags: ["mysql reports an error"]
+ inputs:
+ -
+ columns : ["id int","c1 string","c2 int","c3 bigint","c4 timestamp"]
+ indexs: ["index1:c1:c4"]
+ rows:
+ - [1,"aa",2,3,1590738989000]
+ - [2,"bb",21,31,1590738990000]
+ - [3,"cc",41,51,1590738991000]
+ sql: select * from (select id,c2+1 as v2,c3+1 as v2 from {0}) as t;
+ expect:
+ columns: ["id int","v2 int","v2 bigint"]
+ order: id
+ rows:
+ - [1,3,4]
+ - [2,22,32]
+ - [3,42,52]
+ -
+ id: 11
+    desc: duplicate column aliases in a subquery, selected together
+ inputs:
+ -
+ columns : ["id int","c1 string","c2 int","c3 bigint","c4 timestamp"]
+ indexs: ["index1:c1:c4"]
+ rows:
+ - [1,"aa",2,3,1590738989000]
+ - [2,"bb",21,31,1590738990000]
+ - [3,"cc",41,51,1590738991000]
+ sql: select id,v2,v2 from (select id,c2+1 as v2,c3+1 as v2 from {0});
+ expect:
+ success: false
+ -
+ id: 15
+    desc: subquery on a table without a specified index
+ inputs:
+ - columns: [ "id int not null","c1 int not null","c2 smallint not null","c3 float not null","c4 double not null","c5 bigint not null","c6 string not null","c7 timestamp not null","c8 date not null","c9 bool not null" ]
+ rows:
+ - [ 1, 1, 2, 3.3, 4.4, 5, "aa", 12345678, "2020-05-21", true ]
+ sql: select c1,c2 from (select id as c1,c1 as c2,c7 as c3 from {0});
+ expect:
+ columns : ["c1 int","c2 int"]
+      order: c1
+ rows:
+ - [1,1]
diff --git a/cases/integration_test/select/test_where.yaml b/cases/integration_test/select/test_where.yaml
new file mode 100644
index 00000000000..bab58665998
--- /dev/null
+++ b/cases/integration_test/select/test_where.yaml
@@ -0,0 +1,252 @@
+# Copyright 2021 4Paradigm
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+sqlDialect: ["HybridSQL"]
+debugs: []
+version: 0.5.0
+cases:
+ - id: 0
+    desc: WHERE condition hits the index
+ mode: request-unsupport
+ db: db1
+ sql: |
+ SELECT col0, col1, col2, col3, col4, col5, col6 FROM {0} where col2=5;
+ inputs:
+ - columns: ["col0 string", "col1 int32", "col2 int16", "col3 float", "col4 double", "col5 int64", "col6 string"]
+ indexs: ["index1:col2:col5"]
+ data: |
+ 0, 1, 5, 1.1, 11.1, 1, 1
+ 0, 2, 5, 2.2, 22.2, 2, 22
+ 1, 3, 55, 3.3, 33.3, 1, 333
+ 1, 4, 55, 4.4, 44.4, 2, 4444
+ 2, 5, 55, 5.5, 55.5, 3, aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa
+ batch_plan: |
+ SIMPLE_PROJECT(sources=(col0, col1, col2, col3, col4, col5, col6))
+ FILTER_BY(condition=, left_keys=(), right_keys=(), index_keys=(5))
+ DATA_PROVIDER(type=Partition, table=auto_t0, index=index1)
+ expect:
+ schema: col0:string, col1:int32, col2:int16, col3:float, col4:double, col5:int64, col6:string
+ order: col1
+ data: |
+ 0, 1, 5, 1.1, 11.1, 1, 1
+ 0, 2, 5, 2.2, 22.2, 2, 22
+ - id: 1-1
+    desc: part of the WHERE condition hits the index, col1 > 3 does not
+ mode: request-unsupport, offline-unsupport
+ db: db1
+ sql: |
+ SELECT col0, col1, col2, col3, col4, col5, col6 FROM {0} where col2=55 and col1 > 3;
+ inputs:
+ - schema: col0:string, col1:int32, col2:int16, col3:float, col4:double, col5:int64, col6:string
+ index: index1:col2:col5
+ data: |
+ 0, 1, 5, 1.1, 11.1, 1, 1
+ 0, 2, 5, 2.2, 22.2, 2, 22
+ 1, 3, 55, 3.3, 33.3, 1, 333
+ 1, 4, 55, 4.4, 44.4, 2, 4444
+ 2, 5, 55, 5.5, 55.5, 3, aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa
+ batch_plan: |
+ SIMPLE_PROJECT(sources=(col0, col1, col2, col3, col4, col5, col6))
+ FILTER_BY(condition=col1 > 3, left_keys=(), right_keys=(), index_keys=(55))
+ DATA_PROVIDER(type=Partition, table=auto_t0, index=index1)
+ expect:
+ schema: col0:string, col1:int32, col2:int16, col3:float, col4:double, col5:int64, col6:string
+ order: col1
+ data: |
+ 1, 4, 55, 4.4, 44.4, 2, 4444
+ 2, 5, 55, 5.5, 55.5, 3, aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa
+  - id: 1-2
+    desc: part of the WHERE condition hits the index, col1 = 3 does not
+ mode: request-unsupport, offline-unsupport
+ db: db1
+ sql: |
+ SELECT col0, col1, col2, col3, col4, col5, col6 FROM {0} where col2=55 and col1 = 3;
+ inputs:
+ - schema: col0:string, col1:int32, col2:int16, col3:float, col4:double, col5:int64, col6:string
+ index: index1:col2:col5
+ data: |
+ 0, 1, 5, 1.1, 11.1, 1, 1
+ 0, 2, 5, 2.2, 22.2, 2, 22
+ 1, 3, 55, 3.3, 33.3, 1, 333
+ 1, 4, 55, 4.4, 44.4, 2, 4444
+ 2, 5, 55, 5.5, 55.5, 3, aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa
+ batch_plan: |
+ SIMPLE_PROJECT(sources=(col0, col1, col2, col3, col4, col5, col6))
+ FILTER_BY(condition=3 = col1, left_keys=(), right_keys=(), index_keys=(55))
+ DATA_PROVIDER(type=Partition, table=auto_t0, index=index1)
+ expect:
+ schema: col0:string, col1:int32, col2:int16, col3:float, col4:double, col5:int64, col6:string
+ order: col1
+ data: |
+ 1, 3, 55, 3.3, 33.3, 1, 333
+ - id: 2-1
+    desc: WHERE condition does not hit the index
+ mode: request-unsupport
+    tags: ["not supported in OnlineServing, supported in Training"]
+ db: db1
+ sql: |
+ SELECT col0, col1, col2, col3, col4, col5, col6 FROM {0} where col2=55 and col1 > 1;
+ inputs:
+ - schema: col0:string, col1:int32, col2:int16, col3:float, col4:double, col5:int64, col6:string
+ index: index1:col6:col5
+ data: |
+ 0, 1, 5, 1.1, 11.1, 1, 1
+ 0, 2, 5, 2.2, 22.2, 2, 22
+ 1, 3, 55, 3.3, 33.3, 1, 333
+ 1, 4, 55, 4.4, 44.4, 2, 4444
+ 2, 5, 55, 5.5, 55.5, 3, aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa
+ expect:
+ success: true
+ columns: [ "col0 string", "col1 int", "col2 smallint", "col3 float", "col4 double", "col5 bigint", "col6 string" ]
+ order: col1
+ rows:
+ - [ 1, 3, 55, 3.300000, 33.300000, 1, 333 ]
+ - [ 1, 4, 55, 4.400000, 44.400000, 2, 4444 ]
+ - [ 2, 5, 55, 5.500000, 55.500000, 3, aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa ]
+
+ - id: 2-2
+    desc: WHERE condition does not hit the index - supported offline
+ mode: rtidb-unsupport,cli-unsupport
+ db: db1
+ sql: |
+ SELECT col0, col1, col2, col3, col4, col5, col6 FROM {0} where col2=5 and col1 < 2;
+ inputs:
+ - schema: col0:string, col1:int32, col2:int16, col3:float, col4:double, col5:int64, col6:string
+ index: index1:col6:col5
+ data: |
+ 0, 1, 5, 1.1, 11.1, 1, 1
+ 0, 2, 5, 2.2, 22.2, 2, 22
+ 1, 3, 55, 3.3, 33.3, 1, 333
+ 1, 4, 55, 4.4, 44.4, 2, 4444
+ 2, 5, 55, 5.5, 55.5, 3, aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa
+ expect:
+ schema: col0:string, col1:int32, col2:int16, col3:float, col4:double, col5:int64, col6:string
+ order: col1
+ data: |
+ 0, 1, 5, 1.1, 11.1, 1, 1
+ - id: 3-1
+    desc: WHERE condition does not hit the index, example 2
+ mode: request-unsupport
+ db: db1
+ sql: |
+ SELECT col0, col1, col2, col3, col4, col5, col6 FROM {0} where col2=col3 and col1 < 2;
+ inputs:
+ - columns: ["col0 string", "col1 int32", "col2 int16", "col3 float", "col4 double", "col5 int64", "col6 string"]
+ indexs: ["index1:col2:col5"]
+ data: |
+ 0, 1, 5, 1.1, 11.1, 1, 1
+ 0, 2, 5, 2.2, 22.2, 2, 22
+ 1, 3, 55, 3.3, 33.3, 1, 333
+ 1, 4, 55, 4.4, 44.4, 2, 4444
+ 2, 5, 55, 5.5, 55.5, 3, aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa
+ expect:
+ success: true
+ columns: [ "col0 string", "col1 int", "col2 smallint", "col3 float", "col4 double", "col5 bigint", "col6 string" ]
+ rows:
+
+ - id: 3-2
+    desc: WHERE condition does not hit the index, example 2
+ mode: rtidb-unsupport,cli-unsupport
+ db: db1
+ sql: |
+ SELECT col0, col1, col2, col3, col4, col5, col6 FROM {0} where col1=col5 and col1 > 1;
+ inputs:
+ - schema: col0:string, col1:int32, col2:int16, col3:float, col4:double, col5:int64, col6:string
+ index: index1:col2:col5
+ data: |
+ 0, 1, 5, 1.1, 11.1, 1, 1
+ 0, 2, 5, 2.2, 22.2, 2, 22
+ 1, 3, 55, 3.3, 33.3, 1, 333
+ 1, 4, 55, 4.4, 44.4, 2, 4444
+ 2, 5, 55, 5.5, 55.5, 3, aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa
+ expect:
+ schema: col0:string, col1:int32, col2:int16, col3:float, col4:double, col5:int64, col6:string
+ order: col1
+ data: |
+ 0, 2, 5, 2.2, 22.2, 2, 22
+ - id: 4
+    desc: WHERE condition hits the index, the index is pushed through a simple subquery
+ mode: request-unsupport
+ db: db1
+ sql: |
+ SELECT c0, c1, c2, c3, c4, c5, c6, c1+c4 as c14 FROM
+ (select col0 as c0, col1 as c1, col2 as c2, 0.0f as c3, col4 as c4, col5 as c5, "empty_str" as c6 from {0}) as t1 where t1.c2=5;
+ inputs:
+ - schema: col0:string, col1:int32, col2:int16, col3:float, col4:double, col5:int64, col6:string
+ index: index1:col2:col5
+ data: |
+ 0, 1, 5, 1.1, 11.1, 1, 1
+ 0, 2, 5, 2.2, 22.2, 2, 22
+ 1, 3, 55, 3.3, 33.3, 1, 333
+ 1, 4, 55, 4.4, 44.4, 2, 4444
+ 2, 5, 55, 5.5, 55.5, 3, aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa
+ batch_plan: |
+ PROJECT(type=TableProject)
+ FILTER_BY(condition=, left_keys=(), right_keys=(), index_keys=(5))
+ RENAME(name=t1)
+ SIMPLE_PROJECT(sources=(col0 -> c0, col1 -> c1, col2 -> c2, 0.000000 -> c3, col4 -> c4, col5 -> c5, empty_str -> c6))
+ DATA_PROVIDER(type=Partition, table=auto_t0, index=index1)
+ expect:
+ schema: c0:string, c1:int32, c2:int16, c3:float, c4:double, c5:int64, c6:string, c14:double
+ order: c1
+ data: |
+ 0, 1, 5, 0.0, 11.1, 1, empty_str, 12.1
+ 0, 2, 5, 0.0, 22.2, 2, empty_str, 24.2
+ - id: 5
+    desc: last join + WHERE with duplicate column names
+ mode: request-unsupport, rtidb-unsupport
+# tags: ["TODO", "@chenjing", "0.3.0", "fail to compute where condition bug"]
+ db: db1
+ inputs:
+ - columns: ["c1 string","c2 int","c3 bigint","c4 timestamp", "c5 int"]
+ indexs: ["index1:c5:c4"]
+ rows:
+ - ["aa",2,3,1590738989000, 100]
+ - ["bb",21,31,1590738990000, 200]
+ - columns: ["c1 string","c2 int","c3 bigint","c4 timestamp"]
+ indexs: ["index1:c3:c4"]
+ rows:
+ - ["aa",2,3,1590738989000]
+ - ["bb",21,31,1590738990000]
+ sql: select {0}.c1,{1}.c1,{0}.c2,{1}.c3,{1}.c4,{0}.c5 from {0} last join {1} on {0}.c3={1}.c3 where c5 = 100;
+ expect:
+ columns: ["c1 string","c1 string", "c2 int","c3 bigint", "c4 timestamp", "c5 int"]
+ rows:
+ - ["aa","aa",2,3,1590738989000, 100]
+ - id: 6-1
+    desc: full-table aggregation after a WHERE condition
+# tags: ["TODO","batch exec failed"]
+ mode: request-unsupport
+ db: db1
+ sql: |
+ SELECT SUM(col1) as sum_col1, COUNT(col1) as cnt_col1, MAX(col1) as max_col1,
+ MIN(col1) as min_col1, AVG(col1) as avg_col1 FROM {0} where col2=5;
+ inputs:
+ - columns: ["col0 string", "col1 int32", "col2 int16", "col3 float", "col4 double", "col5 int64", "col6 string"]
+ indexs: ["index1:col2:col5"]
+ rows:
+ - [0, 1, 5, 1.1, 11.1, 1, 1]
+ - [0, 2, 5, 2.2, 22.2, 2, 22]
+ - [1, 3, 55, 3.3, 33.3, 1, 333]
+ - [1, 4, 55, 4.4, 44.4, 2, 4444]
+ - [2, 5, 55, 5.5, 55.5, 3, aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa]
+ batch_plan: |
+ PROJECT(type=Aggregation)
+ FILTER_BY(condition=, left_keys=(), right_keys=(), index_keys=(5))
+ DATA_PROVIDER(type=Partition, table=auto_t0, index=index1)
+
+ expect:
+ columns: ["sum_col1 int32", "cnt_col1 int64", "max_col1 int32", "min_col1 int32", "avg_col1 double"]
+ order: sum_col1
+ rows:
+ - [3, 2, 2, 1, 1.5]
diff --git a/cases/integration_test/spark/generate_yaml_case.py b/cases/integration_test/spark/generate_yaml_case.py
new file mode 100755
index 00000000000..de8551cc70c
--- /dev/null
+++ b/cases/integration_test/spark/generate_yaml_case.py
@@ -0,0 +1,191 @@
+#!/usr/bin/env python3
+# -*- coding: utf-8 -*-
+# Copyright 2021 4Paradigm
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# pip3 install -U ruamel.yaml pyspark first
+import argparse
+from datetime import date
+import random
+import string
+import time
+import sys
+
+import pyspark
+import pyspark.sql
+from pyspark.sql.types import *
+import ruamel.yaml as yaml
+from ruamel.yaml import RoundTripDumper, RoundTripLoader
+
+from ruamel.yaml.scalarstring import LiteralScalarString, DoubleQuotedScalarString
+
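+# skeleton yaml case; the generator fills in the sql and inputs fields from the command-line arguments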
+YAML_TEST_TEMPLATE = """
+db: test_db
+cases:
+ - id: 1
+    desc: yaml test case template
+ inputs: []
+ sql: |
+ select * from t1
+ expect:
+ success: true
+"""
+
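+# skeleton for one table in a case's inputs list; the generator fills name, columns and rows, and leaves indexs empty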
+INPUT_TEMPLATE = """
+ columns: []
+ indexs: []
+ rows: []
+"""
+
+
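+# build a random identifier of the form "<prefix>_<n random letters/digits>", used for string-typed columns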
+def random_string(prefix, n):
+ return "{}_{}".format(prefix, ''.join(random.choices(string.ascii_letters + string.digits, k=n)))
+
+# random date in current year
+def random_date():
+ start_dt = date.today().replace(day=1, month=1).toordinal()
+ end_dt = date.today().toordinal()
+ random_day = date.fromordinal(random.randint(start_dt, end_dt))
+ return random_day
+
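+# map a Spark SQL field to the "<column name> <type>" string used in the case yaml columns list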
+def to_column_str(field):
+ tp = '{unknown_type}'
+ if isinstance(field.dataType, BooleanType):
+ tp = 'bool'
+ elif isinstance(field.dataType, ShortType):
+ tp = 'int16'
+ elif isinstance(field.dataType, IntegerType):
+ tp = 'int32'
+ elif isinstance(field.dataType, LongType):
+ tp = 'int64'
+ elif isinstance(field.dataType, StringType):
+ tp = 'string'
+ elif isinstance(field.dataType, TimestampType):
+ tp = 'timestamp'
+ elif isinstance(field.dataType, DateType):
+ tp = 'date'
+ elif isinstance(field.dataType, DoubleType):
+ tp = 'double'
+ elif isinstance(field.dataType, FloatType):
+ tp = 'float'
+
+ return "%s %s" % (field.name, tp)
+
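+# generate one row of random values matching the given Spark schema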
+def random_row(schema):
+ row = []
+ for field_schema in schema.fields:
+ field_type = field_schema.dataType
+ if isinstance(field_type, BooleanType):
+ row.append(random.choice([True, False]))
+ elif isinstance(field_type, ShortType):
+            row.append(random.randint(-(1 << 15), (1 << 15) - 1))
+        elif isinstance(field_type, IntegerType):
+            row.append(random.randint(-(1 << 31), (1 << 31) - 1))
+        elif isinstance(field_type, LongType):
+            row.append(random.randint(-(1 << 63), (1 << 63) - 1))
+ elif isinstance(field_type, StringType):
+ row.append(random_string(field_schema.name, 10))
+ elif isinstance(field_type, TimestampType):
+ # in milliseconds
+ row.append(int(time.time()) * 1000)
+ elif isinstance(field_type, DateType):
+ row.append(random_date())
+ elif isinstance(field_type, DoubleType):
+ row.append(random.uniform(-128.0, 128.0))
+ elif isinstance(field_type, FloatType):
+ row.append(random.uniform(-128.0, 128.0))
+ else:
+ row.append('{unknown}')
+
+ return row
+
+
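+# normalize a value for yaml output: double-quote dates and strings, keep floats to two decimals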
+def to_string(value):
+ if isinstance(value, date):
+ return DoubleQuotedScalarString(value.strftime("%Y-%m-%d"))
+ if isinstance(value, float):
+ return float("%.2f" % value)
+ if isinstance(value, str):
+ return DoubleQuotedScalarString(value)
+ return value
+
+
+sess = None
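+# read the parquet file with Spark to get its schema, then build one inputs entry with typed columns and 1-10 random rows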
+def gen_inputs_column_and_rows(parquet_file, table_name=''):
+ global sess
+ if sess is None:
+ sess = pyspark.sql.SparkSession(pyspark.SparkContext())
+ dataframe = sess.read.parquet(parquet_file)
+ hdfs_schema = dataframe.schema
+ schema = [DoubleQuotedScalarString(to_column_str(f)) for f in hdfs_schema.fields]
+
+ table = yaml.load(INPUT_TEMPLATE, Loader=RoundTripLoader)
+
+ if table_name:
+ table['name'] = table_name
+
+ table['columns'] = schema
+
+ data_set = []
+ row_cnt = random.randint(1, 10)
+ for _ in range(row_cnt):
+ data_set.append(random_row(hdfs_schema))
+
+ table['rows'] = [list(map(to_string, row)) for row in data_set]
+ return table
+
+
+if __name__ == '__main__':
+ parser = argparse.ArgumentParser()
+ parser.add_argument("--sql", required=True, help="sql text path")
+ group = parser.add_mutually_exclusive_group()
+ group.add_argument("--schema-file", help="path to hdfs content(in parquet format), used to detect table schema")
+    group.add_argument("--schema-list-file", help="list file containing a list of hdfs files, \"table_name: file path\" per line")
+ parser.add_argument("--output", required=True, help="path to output yaml file")
+ args = parser.parse_args()
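+    # example invocation (file names here are only illustrative):
+    #   ./generate_yaml_case.py --sql query.sql --schema-list-file tables.txt --output case.yaml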
+
+ sql = args.sql
+ schema_file = args.schema_file
+ schema_list_file = args.schema_list_file
+ output = args.output
+
+ yaml_test = yaml.load(YAML_TEST_TEMPLATE, Loader=RoundTripLoader, preserve_quotes=True)
+
+ if schema_file:
+ tb = gen_inputs_column_and_rows(schema_file)
+ yaml_test['cases'][0]['inputs'].append(tb)
+ elif schema_list_file:
+ with open(schema_list_file, 'r') as l:
+ for schema_file in l:
+ sf = schema_file.strip()
+ if not sf:
+ continue
+                table_name, parquet_file = sf.split(':', 1)
+
+ parquet_file = parquet_file.strip()
+ if parquet_file:
+ tb = gen_inputs_column_and_rows(parquet_file, table_name)
+ yaml_test['cases'][0]['inputs'].append(tb)
+ else:
+        print("error: one of --schema-file or --schema-list-file is required", file=sys.stderr)
+ sys.exit(1)
+
+
+ with open(sql, 'r') as f:
+ yaml_test['cases'][0]['sql'] = LiteralScalarString(f.read().strip())
+
+ with open(output, 'w') as f:
+ f.write(yaml.dump(yaml_test, Dumper=RoundTripDumper, allow_unicode=True))
+
diff --git a/cases/integration_test/spark/requirements.txt b/cases/integration_test/spark/requirements.txt
new file mode 100644
index 00000000000..257735c8ec6
--- /dev/null
+++ b/cases/integration_test/spark/requirements.txt
@@ -0,0 +1,3 @@
+py4j==0.10.9
+pyspark==3.1.3
+ruamel.yaml==0.16.12
diff --git a/cases/integration_test/spark/test_ads.yaml b/cases/integration_test/spark/test_ads.yaml
new file mode 100644
index 00000000000..43d889969ff
--- /dev/null
+++ b/cases/integration_test/spark/test_ads.yaml
@@ -0,0 +1,176 @@
+# Copyright 2021 4Paradigm
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+db: template_name
+cases:
+- id: 1
+  desc: single table - advertising scenario
+ inputs:
+ - columns:
+ - "id string"
+ - "time timestamp"
+ - "C1 string"
+ - "banner_pos int32"
+ - "site_id string"
+ - "site_domain string"
+ - "site_category string"
+ - "app_id string"
+ - "app_domain string"
+ - "app_category string"
+ - "device_id string"
+ - "device_ip string"
+ - "device_model string"
+ - "device_type string"
+ - "device_conn_type string"
+ - "C14 string"
+ - "C15 string"
+ - "C16 string"
+ - "C17 string"
+ - "C18 string"
+ - "C19 string"
+ - "C20 string"
+ - "C21 string"
+ - "click int32"
+ indexs: ["index1:device_ip:time"]
+ rows:
+ - - "id_XfRHH4kXfh"
+ - 1609398202000
+ - "C1_AXkRcXx3Kw"
+ - -2136663223
+ - "site_id_eDHW3HhKq1"
+ - "site_domain_BiGZfMhPi4"
+ - "site_category_fRuxhKkzG7"
+ - "app_id_qU7KTLhbfd"
+ - "app_domain_89LBfwJOX6"
+ - "app_category_6ZYuZwBFU8"
+ - "device_id_wblCHgZ5XS"
+ - "device_ip_QghSozyTkL"
+ - "device_model_npId0EBZlF"
+ - "device_type_FC6ZCotmB0"
+ - "device_conn_type_ZDYT1Ax9Ms"
+ - "C14_fp4R2g2zVQ"
+ - "C15_uMIOpZgomo"
+ - "C16_mdReYZ82da"
+ - "C17_BHAroEq4Oa"
+ - "C18_tg1duoMypp"
+ - "C19_Bk6GldZeSl"
+ - "C20_LHuXYsBnVj"
+ - "C21_JasNjK98O3"
+ - 13560844
+ - - "id_CcZoKjZdWh"
+ - 1609398202000
+ - "C1_xu9l18vaoM"
+ - -2064473435
+ - "site_id_JTwfcebGpx"
+ - "site_domain_DrGpN7fHxB"
+ - "site_category_VnKBVLPjCN"
+ - "app_id_fFOUOMIZb2"
+ - "app_domain_WEH14cif3z"
+ - "app_category_5SDJL3MMbz"
+ - "device_id_BYRnezWSFI"
+ - "device_ip_UzE2rMHw3i"
+ - "device_model_eEvfxxZu2H"
+ - "device_type_WSyKKMDHzw"
+ - "device_conn_type_ImtQtq1M0h"
+ - "C14_N6KNpoRxB7"
+ - "C15_NoqO6r3LI0"
+ - "C16_5SkwZizokc"
+ - "C17_Ubxmmk7l7D"
+ - "C18_mhmpWVGnvx"
+ - "C19_MEZPm43rbw"
+ - "C20_20PAS4g6pi"
+ - "C21_jBaglxDzWN"
+ - -1234570441
+ sql: |-
+ select
+ id as id_1,
+ id as t1_id_original_0,
+ `time` as t1_time_original_1,
+ C1 as t1_C1_original_2,
+ banner_pos as t1_banner_pos_original_3,
+ site_id as t1_site_id_original_4,
+ site_domain as t1_site_domain_original_5,
+ site_category as t1_site_category_original_6,
+ app_id as t1_app_id_original_7,
+ app_domain as t1_app_domain_original_8,
+ app_category as t1_app_category_original_9,
+ device_id as t1_device_id_original_10,
+ device_ip as t1_device_ip_original_11,
+ device_model as t1_device_model_original_12,
+ device_type as t1_device_type_original_13,
+ device_conn_type as t1_device_conn_type_original_14,
+ C14 as t1_C14_original_15,
+ C15 as t1_C15_original_16,
+ C16 as t1_C16_original_17,
+ C17 as t1_C17_original_18,
+ C18 as t1_C18_original_19,
+ C19 as t1_C19_original_20,
+ C20 as t1_C20_original_21,
+ C21 as t1_C21_original_22,
+ click as t1_click_original_23,
+ device_ip as t1_device_ip_combine_24,
+ device_model as t1_device_model_combine_24,
+ C17 as t1_C17_combine_24,
+ device_ip as t1_device_ip_combine_25,
+ device_model as t1_device_model_combine_25,
+ C19 as t1_C19_combine_25,
+ device_ip as t1_device_ip_combine_26,
+ device_model as t1_device_model_combine_26,
+ C21 as t1_C21_combine_26,
+ banner_pos as t1_banner_pos_combine_27,
+ device_ip as t1_device_ip_combine_27,
+ device_model as t1_device_model_combine_27,
+ C1 as t1_C1_combine_28,
+ banner_pos as t1_banner_pos_combine_28,
+ site_domain as t1_site_domain_combine_29,
+ device_ip as t1_device_ip_combine_29,
+ device_model as t1_device_model_combine_29,
+ site_id as t1_site_id_combine_30,
+ device_ip as t1_device_ip_combine_30,
+ device_model as t1_device_model_combine_30,
+ app_domain as t1_app_domain_combine_31,
+ device_ip as t1_device_ip_combine_31,
+ device_model as t1_device_model_combine_31,
+ site_category as t1_site_category_combine_32,
+ device_ip as t1_device_ip_combine_32,
+ device_model as t1_device_model_combine_32,
+ device_ip as t1_device_ip_combine_33,
+ device_model as t1_device_model_combine_33,
+ C18 as t1_C18_combine_33,
+ fz_top1_ratio(id) over t1_device_ip_time_0s_7200s as t1_id_window_top1_ratio_34,
+ fz_top1_ratio(id) over t1_device_ip_time_0s_36000s as t1_id_window_top1_ratio_35,
+ case when !isnull(lag(app_domain, 0)) over t1_device_ip_time_0s_7200s then count(app_domain) over t1_device_ip_time_0s_7200s else null end as t1_app_domain_window_count_36,
+ case when !isnull(lag(app_category, 0)) over t1_device_ip_time_0s_7200s then count(app_category) over t1_device_ip_time_0s_7200s else null end as t1_app_category_window_count_37,
+ case when !isnull(lag(device_model, 0)) over t1_device_ip_time_0s_36000s then count(device_model) over t1_device_ip_time_0s_36000s else null end as t1_device_model_window_count_38,
+ case when !isnull(lag(app_id, 0)) over t1_device_ip_time_0s_7200s then count(app_id) over t1_device_ip_time_0s_7200s else null end as t1_app_id_window_count_39,
+ case when !isnull(lag(C17, 0)) over t1_device_ip_time_0s_7200s then count(C17) over t1_device_ip_time_0s_7200s else null end as t1_C17_window_count_40,
+ case when !isnull(lag(C19, 0)) over t1_device_ip_time_0s_7200s then count(C19) over t1_device_ip_time_0s_7200s else null end as t1_C19_window_count_41,
+ case when !isnull(lag(banner_pos, 0)) over t1_device_ip_time_0s_7200s then count(banner_pos) over t1_device_ip_time_0s_7200s else null end as t1_banner_pos_window_count_42,
+ fz_top1_ratio(C14) over t1_device_ip_time_0s_7200s as t1_C14_window_top1_ratio_43,
+ case when !isnull(lag(app_id, 0)) over t1_device_ip_time_0s_36000s then count(app_id) over t1_device_ip_time_0s_36000s else null end as t1_app_id_window_count_44,
+ case when !isnull(lag(site_id, 0)) over t1_device_ip_time_0s_36000s then count(site_id) over t1_device_ip_time_0s_36000s else null end as t1_site_id_window_count_45,
+ case when !isnull(lag(site_domain, 0)) over t1_device_ip_time_0s_36000s then count(site_domain) over t1_device_ip_time_0s_36000s else null end as t1_site_domain_window_count_46,
+ case when !isnull(lag(site_category, 0)) over t1_device_ip_time_0s_36000s then count(site_category) over t1_device_ip_time_0s_36000s else null end as t1_site_category_window_count_47,
+ case when !isnull(lag(app_domain, 0)) over t1_device_ip_time_0s_36000s then count(app_domain) over t1_device_ip_time_0s_36000s else null end as t1_app_domain_window_count_48,
+ case when !isnull(lag(app_category, 0)) over t1_device_ip_time_0s_36000s then count(app_category) over t1_device_ip_time_0s_36000s else null end as t1_app_category_window_count_49,
+ case when !isnull(lag(device_id, 0)) over t1_device_ip_time_0s_36000s then count(device_id) over t1_device_ip_time_0s_36000s else null end as t1_device_id_window_count_50,
+ case when !isnull(lag(C18, 0)) over t1_device_ip_time_0s_36000s then count(C18) over t1_device_ip_time_0s_36000s else null end as t1_C18_window_count_51,
+ case when !isnull(lag(device_conn_type, 0)) over t1_device_ip_time_0s_36000s then count(device_conn_type) over t1_device_ip_time_0s_36000s else null end as t1_device_conn_type_window_count_52,
+ case when !isnull(lag(C19, 0)) over t1_device_ip_time_0s_36000s then count(C19) over t1_device_ip_time_0s_36000s else null end as t1_C19_window_count_53
+ from
+ {0}
+ window t1_device_ip_time_0s_7200s as ( partition by device_ip order by `time` rows_range between 7200s preceding and 0s preceding INSTANCE_NOT_IN_WINDOW),
+ t1_device_ip_time_0s_36000s as ( partition by device_ip order by `time` rows_range between 36000s preceding and 0s preceding INSTANCE_NOT_IN_WINDOW);
+ expect:
+ success: true
diff --git a/cases/integration_test/spark/test_credit.yaml b/cases/integration_test/spark/test_credit.yaml
new file mode 100644
index 00000000000..4e466ad44d0
--- /dev/null
+++ b/cases/integration_test/spark/test_credit.yaml
@@ -0,0 +1,1012 @@
+# Copyright 2021 4Paradigm
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+db: test_db
+cases:
+- id: 1
+  desc: multiple tables - credit-card-to-debit-card user prediction scenario
+ inputs:
+ - columns:
+ - "id int32"
+ - "cust_id_an int32"
+ - "ins_date timestamp"
+ - "Label int32"
+ indexs: ["index1:id:ins_date"]
+ rows:
+ - - -1985437192
+ - -1317908971
+ - 1611144215000
+ - -506221216
+ - - -906012118
+ - 122153399
+ - 1611144215000
+ - -2073586764
+ - - 192540564
+ - -1868891884
+ - 1611144215000
+ - -1291765609
+ - - -12339370
+ - -1068593442
+ - 1611144215000
+ - -1218544053
+ - - -1346508105
+ - 389329057
+ - 1611144215000
+ - 72100927
+ - - -1563970013
+ - 64743832
+ - 1611144215000
+ - -1456087176
+ - - -420456303
+ - 83758185
+ - 1611144215000
+ - 593328038
+ name: t1
+ - columns:
+ - "cust_id_an int32"
+ - "agmt_id_an int32"
+ - "atta_acct_ind string"
+ - "stmt_dt date"
+ - "open_acct_dt timestamp"
+ - "clos_acct_dt timestamp"
+ indexs: ["indext8:cust_id_an:open_acct_dt"]
+ rows:
+ - - -103578926
+ - -2129345374
+ - "atta_acct_ind_QSKoYcBykS"
+ - "2021-01-19"
+ - 1611144215000
+ - 1611144215000
+ - - -1738231442
+ - -1827648982
+ - "atta_acct_ind_YCzSZpWh36"
+ - "2021-01-01"
+ - 1611144215000
+ - 1611144215000
+ - - -313899349
+ - -620524833
+ - "atta_acct_ind_b06NdQiRiE"
+ - "2021-01-14"
+ - 1611144215000
+ - 1611144215000
+ - - -412596205
+ - -1082468256
+ - "atta_acct_ind_4rFa5IVSF4"
+ - "2021-01-02"
+ - 1611144215000
+ - 1611144215000
+ - - -48236232
+ - -170343294
+ - "atta_acct_ind_NU8FhCMOiL"
+ - "2021-01-11"
+ - 1611144215000
+ - 1611144215000
+ - - -1455816949
+ - 403926185
+ - "atta_acct_ind_yIDKZcJSaj"
+ - "2021-01-04"
+ - 1611144215000
+ - 1611144215000
+ - - 225487286
+ - 834608659
+ - "atta_acct_ind_xOG219V8NP"
+ - "2021-01-01"
+ - 1611144215000
+ - 1611144215000
+ name: t8
+ - columns:
+ - "cust_id_an int32"
+ - "agmt_id_an int32"
+ - "curr_ovrd_stat_cd string"
+ - "curr_yr_ovrd_cnt int32"
+ - "curr_yr_crdt_card_point double"
+ - "crdt_card_point double"
+ - "acct_stat_cd string"
+ - "consm_od_bal double"
+ - "cash_od_bal double"
+ - "amtbl_od_bal double"
+ - "spl_pay_bal double"
+ - "ovrd_bal double"
+ - "last_mth_stmt_amt double"
+ - "last_mth_consm_cnt int32"
+ - "m_consm_amt_accm double"
+ - "m_cash_amt_accm double"
+ - "m_amtbl_amt_accm double"
+ - "m_spl_pay_amt_accm double"
+ - "m_ovrd_bal_accm double"
+ - "data_date timestamp"
+ indexs: ["indext9:cust_id_an:data_date"]
+ rows:
+ - - -1965865733
+ - 181943904
+ - "curr_ovrd_stat_cd_pSWF7Z7UVZ"
+ - 288301759
+ - 10.03
+ - 57.38
+ - "acct_stat_cd_cTTBtj3JnQ"
+ - 30.94
+ - -53.93
+ - -81.51
+ - -111.3
+ - -101.78
+ - 68.7
+ - -1929310650
+ - 121.96
+ - -35.3
+ - -1.68
+ - 109.97
+ - 89.78
+ - 1611144215000
+ - - 305578483
+ - 594627092
+ - "curr_ovrd_stat_cd_KH7JIGfFuM"
+ - -583313456
+ - -109.77
+ - 22.53
+ - "acct_stat_cd_nrBFWkaCSO"
+ - -14.29
+ - 126.7
+ - 40.33
+ - 120.44
+ - -73.54
+ - 17.18
+ - -337679856
+ - -81.93
+ - -19.57
+ - -11.83
+ - 80.59
+ - 75.35
+ - 1611144215000
+ - - -501231072
+ - 22230390
+ - "curr_ovrd_stat_cd_Mwu1mCxGqn"
+ - 1039709568
+ - -113.24
+ - -108.36
+ - "acct_stat_cd_co20Q23EM8"
+ - -58.61
+ - -73.54
+ - -98.85
+ - -43.24
+ - 33.71
+ - -11.95
+ - -1818947456
+ - -59.67
+ - -62.73
+ - -51.21
+ - 50.64
+ - 90.51
+ - 1611144215000
+ - - -1832175587
+ - -991415524
+ - "curr_ovrd_stat_cd_H1NkAqnwqe"
+ - -1908516905
+ - -27.17
+ - 102.83
+ - "acct_stat_cd_pq3jTUtjF0"
+ - 91.15
+ - -83.81
+ - -69.61
+ - 127.86
+ - -86.14
+ - 56.68
+ - -1995257141
+ - 6.71
+ - 83.5
+ - -32.51
+ - -94.43
+ - 8.3
+ - 1611144215000
+ - - 611330902
+ - 679194351
+ - "curr_ovrd_stat_cd_HIlzlZymnH"
+ - -254111972
+ - 3.04
+ - 9.18
+ - "acct_stat_cd_PhHHTvGLTL"
+ - -75.39
+ - 15.09
+ - -18.1
+ - -104.29
+ - -49.22
+ - -100.48
+ - 730288655
+ - 58.18
+ - 8.3
+ - 11.78
+ - -91.13
+ - 6.87
+ - 1611144215000
+ - - 826069039
+ - 470439749
+ - "curr_ovrd_stat_cd_8JQvcEi7yJ"
+ - 811087014
+ - 85.17
+ - -97.16
+ - "acct_stat_cd_AFju4WMCgx"
+ - -108.14
+ - 117.13
+ - -93.99
+ - 70.68
+ - 107.57
+ - 98.27
+ - -891433275
+ - 35.0
+ - -33.36
+ - 127.18
+ - 25.36
+ - -64.98
+ - 1611144215000
+ - - -784663900
+ - -1192305947
+ - "curr_ovrd_stat_cd_U4Ophb2kIQ"
+ - 515010670
+ - 105.76
+ - 3.51
+ - "acct_stat_cd_Z1Kyb1mz7y"
+ - 9.64
+ - -28.33
+ - 60.18
+ - 117.39
+ - -24.18
+ - -0.82
+ - -1458522076
+ - 105.11
+ - -68.3
+ - -16.45
+ - -29.62
+ - 47.34
+ - 1611144215000
+ - - 808471893
+ - -2029597450
+ - "curr_ovrd_stat_cd_NMy2UGhIrf"
+ - -551211114
+ - -29.29
+ - -92.54
+ - "acct_stat_cd_HMcl6pIDg4"
+ - 6.99
+ - -111.57
+ - -124.1
+ - 85.09
+ - 113.05
+ - -25.19
+ - -928477688
+ - 110.96
+ - 14.01
+ - 95.6
+ - 4.15
+ - -56.27
+ - 1611144215000
+ - - -414811981
+ - -106781549
+ - "curr_ovrd_stat_cd_z5gVcFFs0m"
+ - -1846401879
+ - 11.12
+ - -56.57
+ - "acct_stat_cd_pbubmnmn1M"
+ - -63.85
+ - -47.45
+ - 124.76
+ - -120.79
+ - -70.46
+ - -42.95
+ - -1432475728
+ - -123.98
+ - 25.41
+ - -95.39
+ - -76.1
+ - 50.44
+ - 1611144215000
+ - - 352609173
+ - 748553820
+ - "curr_ovrd_stat_cd_qgOUkDJ1rQ"
+ - -932519461
+ - -80.07
+ - 75.8
+ - "acct_stat_cd_9AdRp2Spps"
+ - -102.28
+ - 88.3
+ - -15.75
+ - 108.03
+ - -127.15
+ - 94.95
+ - -1288349027
+ - 100.95
+ - 2.77
+ - 81.25
+ - -26.63
+ - 70.67
+ - 1611144215000
+ name: t9
+ - columns:
+ - "cust_id_an int32"
+ - "card_agmt_id_an int32"
+ - "pri_acct_id_an int32"
+ - "atta_card_ind string"
+ - "camp_org_id string"
+ - "prod_id string"
+ - "snp_gage_cd string"
+ - "crdt_card_lvl_cd string"
+ - "pin_card_dt date"
+ - "card_matr_yr_mth string"
+ - "sell_chnl_cd string"
+ - "card_org_cd string"
+ - "actv_chnl_cd string"
+ - "free_annl_fee_ind string"
+ - "annl_fee double"
+ - "bus_card_ind string"
+ - "matr_contn_card_ind string"
+ - "issu_card_dt timestamp"
+ - "actv_dt timestamp"
+ indexs: ["indext6:cust_id_an:actv_dt"]
+ rows:
+ - - 756930160
+ - -1362270267
+ - 820739577
+ - "atta_card_ind_4oS8b63mVd"
+ - "camp_org_id_BFbsLHpdSR"
+ - "prod_id_3m2TZ0si7Z"
+ - "snp_gage_cd_onOB021pP1"
+ - "crdt_card_lvl_cd_vQuD1gTTwe"
+ - "2021-01-12"
+ - "card_matr_yr_mth_tDIUWOk5ia"
+ - "sell_chnl_cd_FLfurUmdfR"
+ - "card_org_cd_piAFoPGMLH"
+ - "actv_chnl_cd_mTHr98b5Es"
+ - "free_annl_fee_ind_Lq3eblqZFw"
+ - 68.08
+ - "bus_card_ind_5KK6nTjOxr"
+ - "matr_contn_card_ind_S4hHwHdJNH"
+ - 1611144215000
+ - 1611144215000
+ - - 394465803
+ - -1469812793
+ - 46768555
+ - "atta_card_ind_MEbCAC4sCs"
+ - "camp_org_id_gV8Zs3vkri"
+ - "prod_id_Pk1B3xv6JA"
+ - "snp_gage_cd_ZgHDu3hZbx"
+ - "crdt_card_lvl_cd_Etc9TpL5u7"
+ - "2021-01-02"
+ - "card_matr_yr_mth_AMweyZaygN"
+ - "sell_chnl_cd_dV661JROf4"
+ - "card_org_cd_8nvfaf471b"
+ - "actv_chnl_cd_nmjPCpzA37"
+ - "free_annl_fee_ind_0yvInU4aXe"
+ - -4.02
+ - "bus_card_ind_gDjvmuKOo9"
+ - "matr_contn_card_ind_MgCwGwHYy4"
+ - 1611144215000
+ - 1611144215000
+ - - -1915196249
+ - 715245555
+ - -1037414536
+ - "atta_card_ind_NBFRDWsXul"
+ - "camp_org_id_LUgZQkavDC"
+ - "prod_id_5HHVvMevjR"
+ - "snp_gage_cd_TLVPPbmIqP"
+ - "crdt_card_lvl_cd_f1khBG0oFM"
+ - "2021-01-08"
+ - "card_matr_yr_mth_0AoPAu7blU"
+ - "sell_chnl_cd_gmGs4O8BsG"
+ - "card_org_cd_fCbMNmDc7W"
+ - "actv_chnl_cd_SkuX9MfN7Z"
+ - "free_annl_fee_ind_oEUcJ2azyx"
+ - 108.44
+ - "bus_card_ind_NWfBj4nd18"
+ - "matr_contn_card_ind_6ieA1VpR6O"
+ - 1611144215000
+ - 1611144215000
+ - - -1937671087
+ - -1386163364
+ - 936709843
+ - "atta_card_ind_SchOBM3ADn"
+ - "camp_org_id_iIcs5gi51w"
+ - "prod_id_pNeYvSsCK9"
+ - "snp_gage_cd_qs3ZQWlyfm"
+ - "crdt_card_lvl_cd_Nzbp7Cy4v2"
+ - "2021-01-02"
+ - "card_matr_yr_mth_24GI4NhCum"
+ - "sell_chnl_cd_e6sZGx0UIr"
+ - "card_org_cd_mEaWKOr2eK"
+ - "actv_chnl_cd_5jHnIHbODx"
+ - "free_annl_fee_ind_mNxB0OUuqB"
+ - -94.58
+ - "bus_card_ind_9twM1Sm8N6"
+ - "matr_contn_card_ind_Ze6N7bLuqc"
+ - 1611144215000
+ - 1611144215000
+ - - -1897243199
+ - -1931817796
+ - 390672335
+ - "atta_card_ind_mN5Mw55PCb"
+ - "camp_org_id_Zn4STXeUD6"
+ - "prod_id_4uoNNgMc0p"
+ - "snp_gage_cd_fNOXthNs7J"
+ - "crdt_card_lvl_cd_ynL4AtIJa3"
+ - "2021-01-11"
+ - "card_matr_yr_mth_XROF2DVFVq"
+ - "sell_chnl_cd_0QLdMs0ENq"
+ - "card_org_cd_odnosB8A0R"
+ - "actv_chnl_cd_AjThMogiEt"
+ - "free_annl_fee_ind_Eem4dzghME"
+ - -72.53
+ - "bus_card_ind_7AD96Q3i6Z"
+ - "matr_contn_card_ind_35MrxB5cXA"
+ - 1611144215000
+ - 1611144215000
+ - - -1853796531
+ - -1258445777
+ - -1547814111
+ - "atta_card_ind_oeDA6We5EC"
+ - "camp_org_id_S7pZ2RJ4HP"
+ - "prod_id_DHeuN53pSv"
+ - "snp_gage_cd_aW92GS2DMu"
+ - "crdt_card_lvl_cd_tzSehkdxa8"
+ - "2021-01-17"
+ - "card_matr_yr_mth_bIlFSqWgT9"
+ - "sell_chnl_cd_SQE3eVhOwn"
+ - "card_org_cd_GiXhH8Ilw1"
+ - "actv_chnl_cd_BBCwH068cK"
+ - "free_annl_fee_ind_t5sz5QGjAq"
+ - 59.91
+ - "bus_card_ind_2HCWPtpDe5"
+ - "matr_contn_card_ind_vAViU3mnTF"
+ - 1611144215000
+ - 1611144215000
+ - - 599351765
+ - -2026344167
+ - 406435567
+ - "atta_card_ind_0Dc8HKmpeg"
+ - "camp_org_id_jY2qjsi2yM"
+ - "prod_id_nn1lrj5ZFX"
+ - "snp_gage_cd_SDeBM6a51B"
+ - "crdt_card_lvl_cd_LfX4N7yXil"
+ - "2021-01-12"
+ - "card_matr_yr_mth_ORvNy6K6TO"
+ - "sell_chnl_cd_sUJHlnXZS4"
+ - "card_org_cd_SzZoSXxmYR"
+ - "actv_chnl_cd_FuTmvFJMGv"
+ - "free_annl_fee_ind_00i8JxFXcx"
+ - 68.1
+ - "bus_card_ind_pWrx4XVAKK"
+ - "matr_contn_card_ind_MgjGK92EfE"
+ - 1611144215000
+ - 1611144215000
+ - - 129929182
+ - -812735353
+ - -776403184
+ - "atta_card_ind_caXhPAUCSn"
+ - "camp_org_id_wvDdQBr0bh"
+ - "prod_id_6OrANg0pDT"
+ - "snp_gage_cd_qZhYdtg1EX"
+ - "crdt_card_lvl_cd_WLmc0oczDJ"
+ - "2021-01-13"
+ - "card_matr_yr_mth_fJ7zh8PWuu"
+ - "sell_chnl_cd_vA3H163pUi"
+ - "card_org_cd_se7PxoQEWW"
+ - "actv_chnl_cd_IuJot5ylAH"
+ - "free_annl_fee_ind_PlRcZHiwDg"
+ - 40.89
+ - "bus_card_ind_Q6vTzxFs7N"
+ - "matr_contn_card_ind_M8fvOjy5B0"
+ - 1611144215000
+ - 1611144215000
+ - - -1696305996
+ - -178589482
+ - 788546600
+ - "atta_card_ind_MkfeU6kAPv"
+ - "camp_org_id_4Bn9Zgg4eM"
+ - "prod_id_1ah3kydsh7"
+ - "snp_gage_cd_ySl8kkcGst"
+ - "crdt_card_lvl_cd_L8aZAMygq2"
+ - "2021-01-07"
+ - "card_matr_yr_mth_ZXmdyVXukr"
+ - "sell_chnl_cd_UXPdm0d9B6"
+ - "card_org_cd_3QYp5QfEG6"
+ - "actv_chnl_cd_uRXCNeSnzt"
+ - "free_annl_fee_ind_WyScZ3hmyM"
+ - 5.45
+ - "bus_card_ind_taVaX634Mh"
+ - "matr_contn_card_ind_ppVD5sqBfA"
+ - 1611144215000
+ - 1611144215000
+ name: t6
+ - columns:
+ - "cust_id_an int32"
+ - "card_agmt_id_an int32"
+ - "fst_use_card_dt date"
+ - "ltst_use_card_dt date"
+ - "card_stat_cd string"
+ - "data_date timestamp"
+ indexs: ["indext7:cust_id_an:data_date"]
+ rows:
+ - - -1416323258
+ - 1062068004
+ - "2021-01-15"
+ - "2021-01-10"
+ - "card_stat_cd_I5RUbf7xEL"
+ - 1611144216000
+ - - 433240030
+ - 729717634
+ - "2021-01-19"
+ - "2021-01-17"
+ - "card_stat_cd_wFB0gUWKQI"
+ - 1611144216000
+ - - -1880955883
+ - -1807838612
+ - "2021-01-03"
+ - "2021-01-19"
+ - "card_stat_cd_rG5nhnzcV5"
+ - 1611144216000
+ name: t7
+ - columns:
+ - "cust_id_an int32"
+ - "crdt_card_net_incom_amt double"
+ - "int_incom_amt double"
+ - "annl_fee_incom_amt double"
+ - "cash_incom_amt double"
+ - "commsn_incom_amt double"
+ - "late_chrg_incom_amt double"
+ - "extras_fee_incom_amt double"
+ - "oth_incom_amt double"
+ - "amtbl_comm_fee double"
+ - "cap_cost_amt double"
+ - "provs_cost_amt double"
+ - "data_date timestamp"
+ indexs: ["indext5:cust_id_an:data_date"]
+ rows:
+ - - -586341746
+ - -91.38
+ - -103.8
+ - -91.79
+ - 77.09
+ - -39.25
+ - -104.55
+ - -25.37
+ - -42.69
+ - 20.24
+ - 121.05
+ - 40.71
+ - 1611144216000
+ - - -903799431
+ - 82.69
+ - 56.49
+ - -105.1
+ - -126.73
+ - 91.97
+ - -113.83
+ - -119.99
+ - 126.4
+ - 107.63
+ - -1.88
+ - 54.72
+ - 1611144216000
+ - - -2006396570
+ - 101.8
+ - -63.94
+ - 7.75
+ - 41.46
+ - -42.03
+ - 52.33
+ - 39.98
+ - 10.07
+ - -29.53
+ - 126.03
+ - -63.56
+ - 1611144216000
+ - - -2035678095
+ - -99.5
+ - 83.92
+ - -63.44
+ - -45.01
+ - -16.37
+ - 105.96
+ - -82.37
+ - -76.09
+ - -120.12
+ - -116.56
+ - 22.47
+ - 1611144216000
+ - - 634869109
+ - -38.91
+ - -0.08
+ - 25.59
+ - -80.43
+ - -23.8
+ - 127.24
+ - 72.18
+ - -84.52
+ - -91.3
+ - -64.03
+ - -117.28
+ - 1611144216000
+ name: t5
+ - columns:
+ - "cust_id_an int32"
+ - "crdt_lmt_cust double"
+ - "aval_lmt_cust double"
+ - "crdt_lmt_cash double"
+ - "aval_lmt_cash double"
+ - "data_date timestamp"
+ indexs: ["indext3:cust_id_an:data_date"]
+ rows:
+ - - -2001222170
+ - -4.23
+ - -101.67
+ - 76.28
+ - -83.94
+ - 1611144216000
+ - - -1514280701
+ - -32.77
+ - -73.6
+ - -17.73
+ - 118.89
+ - 1611144216000
+ - - 5866653
+ - 25.81
+ - 109.68
+ - 62.1
+ - -121.53
+ - 1611144216000
+ - - 10968234
+ - 94.03
+ - -27.92
+ - 37.07
+ - -42.7
+ - 1611144216000
+ - - -537371887
+ - -120.6
+ - 3.15
+ - -22.5
+ - -115.86
+ - 1611144216000
+ - - -904433195
+ - 116.03
+ - -44.09
+ - 65.5
+ - 100.47
+ - 1611144216000
+ - - -358019130
+ - -74.14
+ - 127.09
+ - 30.8
+ - 100.9
+ - 1611144216000
+ name: t3
+ - columns:
+ - "cust_id_an int32"
+ - "cert_typ_cd string"
+ - "cert_area_cd string"
+ - "birth_dt date"
+ - "gender_typ_cd string"
+ - "nation_cd string"
+ - "marrrg_situ_cd string"
+ - "rsdnt_ind string"
+ - "citic_grp_emp_typ_cd string"
+ - "cust_stat_cd string"
+ - "open_cust_dt date"
+ - "open_cust_org_id string"
+ - "open_cust_chnl_typ_cd string"
+ - "cust_belg_bank_cd string"
+ indexs: ["indext2:cust_id_an"]
+ rows:
+ - - -164930359
+ - "cert_typ_cd_cGpwz0DGMQ"
+ - "cert_area_cd_HecqmfKfQ7"
+ - "2021-01-09"
+ - "gender_typ_cd_HlbTDsKxLx"
+ - "nation_cd_IcAmK6iCHk"
+ - "marrrg_situ_cd_JzdSTSvnI2"
+ - "rsdnt_ind_qV6EO9H2E4"
+ - "citic_grp_emp_typ_cd_mZjOs6AvEm"
+ - "cust_stat_cd_pL86avtzOm"
+ - "2021-01-12"
+ - "open_cust_org_id_TgCKG40Joz"
+ - "open_cust_chnl_typ_cd_cBUBu2Wm6D"
+ - "cust_belg_bank_cd_UBZAxmSLUW"
+ - - -43274786
+ - "cert_typ_cd_QetmS9wxcU"
+ - "cert_area_cd_rrltclnYQU"
+ - "2021-01-05"
+ - "gender_typ_cd_DzQCyg6Ui2"
+ - "nation_cd_tasmOg7NAe"
+ - "marrrg_situ_cd_t43rdVAhR5"
+ - "rsdnt_ind_qZOBkBtacn"
+ - "citic_grp_emp_typ_cd_Xp6gvlxr7o"
+ - "cust_stat_cd_R9lp6oM2x8"
+ - "2021-01-03"
+ - "open_cust_org_id_7rnyNbu4Yu"
+ - "open_cust_chnl_typ_cd_mu1leQa1Gx"
+ - "cust_belg_bank_cd_XLIXJnEtRf"
+ name: t2
+ - columns:
+ - "cust_id_an int32"
+ - "tx_time timestamp"
+ - "crdt_card_tx_cd string"
+ - "tx_amt_to_rmb double"
+ - "mercht_typ_cd string"
+ - "cross_bord_ind string"
+ - "tx_desc_an int32"
+ indexs: ["indext4:cust_id_an:tx_time"]
+ rows:
+ - - 951632459
+ - 1611144216000
+ - "crdt_card_tx_cd_6j6bjhDy9o"
+ - 110.73
+ - "mercht_typ_cd_feZu3kqy1P"
+ - "cross_bord_ind_j5RBoKax1g"
+ - -1752891717
+ - - 1033871191
+ - 1611144216000
+ - "crdt_card_tx_cd_bDs5fzy7vx"
+ - -20.85
+ - "mercht_typ_cd_Ponis59I95"
+ - "cross_bord_ind_3ErQHlOtLq"
+ - 24112845
+ - - 19144738
+ - 1611144216000
+ - "crdt_card_tx_cd_G2CZyldEgg"
+ - -94.15
+ - "mercht_typ_cd_xM8BN1jxf5"
+ - "cross_bord_ind_MuFWwfgxqi"
+ - -1625982017
+ - - -709159498
+ - 1611144216000
+ - "crdt_card_tx_cd_SWmMk5bGbe"
+ - -104.9
+ - "mercht_typ_cd_F8SmujshlU"
+ - "cross_bord_ind_Cja6dv7mJt"
+ - 734595537
+ - - 407401011
+ - 1611144216000
+ - "crdt_card_tx_cd_Q2bYofa0LV"
+ - 118.56
+ - "mercht_typ_cd_raO5rr5AZW"
+ - "cross_bord_ind_FtZc0Pd2e8"
+ - -347783598
+ - - -274181216
+ - 1611144216000
+ - "crdt_card_tx_cd_SrvekEh3VO"
+ - -36.7
+ - "mercht_typ_cd_wkQggxQwfB"
+ - "cross_bord_ind_lIkIIKdrmU"
+ - -1929744820
+ - - -1693120077
+ - 1611144216000
+ - "crdt_card_tx_cd_crzOFQUvEV"
+ - -63.78
+ - "mercht_typ_cd_gyHnXWDCcr"
+ - "cross_bord_ind_lSjZJSUzjz"
+ - -1367456280
+ - - -1441604939
+ - 1611144216000
+ - "crdt_card_tx_cd_gLqQvmRyub"
+ - 58.01
+ - "mercht_typ_cd_ltgNcE28wj"
+ - "cross_bord_ind_ruileQrE9G"
+ - -26181260
+ name: t4
+ sql: |-
+ select * from
+ (
+ select
+ id as id_1,
+ `id` as t1_id_original_0,
+ `cust_id_an` as t1_cust_id_an_original_1,
+ `ins_date` as t1_ins_date_original_2,
+ `Label` as t1_Label_original_3,
+ dayofweek(timestamp(`ins_date`)) as t1_ins_date_dayofweek_138
+ from
+ `t1`
+ )
+ as out0
+ last join
+ (
+ select
+ t1.id as id_5,
+ `t2_cust_id_an`.`birth_dt` as t2_birth_dt_multi_direct_4,
+ `t2_cust_id_an`.`cert_area_cd` as t2_cert_area_cd_multi_direct_5,
+ `t2_cust_id_an`.`cert_typ_cd` as t2_cert_typ_cd_multi_direct_6,
+ `t2_cust_id_an`.`citic_grp_emp_typ_cd` as t2_citic_grp_emp_typ_cd_multi_direct_7,
+ `t2_cust_id_an`.`cust_belg_bank_cd` as t2_cust_belg_bank_cd_multi_direct_8,
+ `t2_cust_id_an`.`cust_stat_cd` as t2_cust_stat_cd_multi_direct_9,
+ `t2_cust_id_an`.`gender_typ_cd` as t2_gender_typ_cd_multi_direct_10,
+ `t2_cust_id_an`.`marrrg_situ_cd` as t2_marrrg_situ_cd_multi_direct_11,
+ `t2_cust_id_an`.`nation_cd` as t2_nation_cd_multi_direct_12,
+ `t2_cust_id_an`.`open_cust_chnl_typ_cd` as t2_open_cust_chnl_typ_cd_multi_direct_13,
+ `t2_cust_id_an`.`open_cust_dt` as t2_open_cust_dt_multi_direct_14,
+ `t2_cust_id_an`.`open_cust_org_id` as t2_open_cust_org_id_multi_direct_15,
+ `t2_cust_id_an`.`rsdnt_ind` as t2_rsdnt_ind_multi_direct_16,
+ `t3_cust_id_an__ins_date_0_10`.`aval_lmt_cash` as t3_aval_lmt_cash_multi_last_value_17,
+ `t3_cust_id_an__ins_date_0_10`.`aval_lmt_cust` as t3_aval_lmt_cust_multi_last_value_18,
+ `t3_cust_id_an__ins_date_0_10`.`crdt_lmt_cash` as t3_crdt_lmt_cash_multi_last_value_19,
+ `t3_cust_id_an__ins_date_0_10`.`crdt_lmt_cust` as t3_crdt_lmt_cust_multi_last_value_20,
+ `t3_cust_id_an__ins_date_0_10`.`data_date` as t3_data_date_multi_last_value_21,
+ `t5_cust_id_an__ins_date_0_10`.`amtbl_comm_fee` as t5_amtbl_comm_fee_multi_last_value_22,
+ `t5_cust_id_an__ins_date_0_10`.`annl_fee_incom_amt` as t5_annl_fee_incom_amt_multi_last_value_23,
+ `t5_cust_id_an__ins_date_0_10`.`cap_cost_amt` as t5_cap_cost_amt_multi_last_value_24,
+ `t5_cust_id_an__ins_date_0_10`.`cash_incom_amt` as t5_cash_incom_amt_multi_last_value_25,
+ `t5_cust_id_an__ins_date_0_10`.`commsn_incom_amt` as t5_commsn_incom_amt_multi_last_value_26,
+ `t5_cust_id_an__ins_date_0_10`.`crdt_card_net_incom_amt` as t5_crdt_card_net_incom_amt_multi_last_value_27,
+ `t5_cust_id_an__ins_date_0_10`.`data_date` as t5_data_date_multi_last_value_28,
+ `t5_cust_id_an__ins_date_0_10`.`extras_fee_incom_amt` as t5_extras_fee_incom_amt_multi_last_value_29,
+ `t5_cust_id_an__ins_date_0_10`.`int_incom_amt` as t5_int_incom_amt_multi_last_value_30,
+ `t5_cust_id_an__ins_date_0_10`.`late_chrg_incom_amt` as t5_late_chrg_incom_amt_multi_last_value_31,
+ `t5_cust_id_an__ins_date_0_10`.`oth_incom_amt` as t5_oth_incom_amt_multi_last_value_32,
+ `t5_cust_id_an__ins_date_0_10`.`provs_cost_amt` as t5_provs_cost_amt_multi_last_value_33
+ from
+ `t1`
+ last join `t2` as `t2_cust_id_an` on `t1`.`cust_id_an` = `t2_cust_id_an`.`cust_id_an`
+ last join `t3` as `t3_cust_id_an__ins_date_0_10` order by t3_cust_id_an__ins_date_0_10.`data_date` on `t1`.`cust_id_an` = `t3_cust_id_an__ins_date_0_10`.`cust_id_an`
+ last join `t5` as `t5_cust_id_an__ins_date_0_10` order by t5_cust_id_an__ins_date_0_10.`data_date` on `t1`.`cust_id_an` = `t5_cust_id_an__ins_date_0_10`.`cust_id_an`)
+ as out1
+ on out0.id_1 = out1.id_5
+ last join
+ (
+ select
+ id as id_35,
+ min(`tx_amt_to_rmb`) over t4_cust_id_an_tx_time_0s_1d as t4_tx_amt_to_rmb_multi_min_34,
+ avg(`tx_amt_to_rmb`) over t4_cust_id_an_tx_time_0s_1d as t4_tx_amt_to_rmb_multi_avg_35,
+ fz_topn_frequency(`crdt_card_tx_cd`, 3) over t4_cust_id_an_tx_time_0_100 as t4_crdt_card_tx_cd_multi_top3frequency_36,
+ distinct_count(`crdt_card_tx_cd`) over t4_cust_id_an_tx_time_0_100 as t4_crdt_card_tx_cd_multi_unique_count_37,
+ distinct_count(`cross_bord_ind`) over t4_cust_id_an_tx_time_0_100 as t4_cross_bord_ind_multi_unique_count_38,
+ fz_topn_frequency(`cross_bord_ind`, 3) over t4_cust_id_an_tx_time_0_100 as t4_cross_bord_ind_multi_top3frequency_39,
+ distinct_count(`mercht_typ_cd`) over t4_cust_id_an_tx_time_0_10 as t4_mercht_typ_cd_multi_unique_count_40,
+ distinct_count(`mercht_typ_cd`) over t4_cust_id_an_tx_time_0_100 as t4_mercht_typ_cd_multi_unique_count_41,
+ distinct_count(`tx_desc_an`) over t4_cust_id_an_tx_time_0_10 as t4_tx_desc_an_multi_unique_count_42,
+ distinct_count(`tx_desc_an`) over t4_cust_id_an_tx_time_0_100 as t4_tx_desc_an_multi_unique_count_43
+ from
+ (select `cust_id_an` as `cust_id_an`, `ins_date` as `tx_time`, '' as `crdt_card_tx_cd`, double(0) as `tx_amt_to_rmb`, '' as `mercht_typ_cd`, '' as `cross_bord_ind`, int(0) as `tx_desc_an`, id from `t1`)
+ window t4_cust_id_an_tx_time_0s_1d as (
+ UNION (select `cust_id_an`, `tx_time`, `crdt_card_tx_cd`, `tx_amt_to_rmb`, `mercht_typ_cd`, `cross_bord_ind`, `tx_desc_an`, int(0) as id from `t4`) partition by `cust_id_an` order by `tx_time` rows_range between 1d preceding and 0s preceding INSTANCE_NOT_IN_WINDOW),
+ t4_cust_id_an_tx_time_0_100 as (
+ UNION (select `cust_id_an`, `tx_time`, `crdt_card_tx_cd`, `tx_amt_to_rmb`, `mercht_typ_cd`, `cross_bord_ind`, `tx_desc_an`, int(0) as id from `t4`) partition by `cust_id_an` order by `tx_time` rows between 100 preceding and 0 preceding INSTANCE_NOT_IN_WINDOW),
+ t4_cust_id_an_tx_time_0_10 as (
+ UNION (select `cust_id_an`, `tx_time`, `crdt_card_tx_cd`, `tx_amt_to_rmb`, `mercht_typ_cd`, `cross_bord_ind`, `tx_desc_an`, int(0) as id from `t4`) partition by `cust_id_an` order by `tx_time` rows between 10 preceding and 0 preceding INSTANCE_NOT_IN_WINDOW))
+ as out2
+ on out0.id_1 = out2.id_35
+ last join
+ (
+ select
+ id as id_45,
+ min(`annl_fee`) over t6_cust_id_an_actv_dt_0_100 as t6_annl_fee_multi_min_44,
+ min(`annl_fee`) over t6_cust_id_an_actv_dt_0_10 as t6_annl_fee_multi_min_45,
+ min(`card_agmt_id_an`) over t6_cust_id_an_actv_dt_0_10 as t6_card_agmt_id_an_multi_min_46,
+ avg(`card_agmt_id_an`) over t6_cust_id_an_actv_dt_0_10 as t6_card_agmt_id_an_multi_avg_47,
+ min(`pri_acct_id_an`) over t6_cust_id_an_actv_dt_0_100 as t6_pri_acct_id_an_multi_min_48,
+ min(`pri_acct_id_an`) over t6_cust_id_an_actv_dt_0_10 as t6_pri_acct_id_an_multi_min_49,
+ fz_topn_frequency(`actv_chnl_cd`, 3) over t6_cust_id_an_actv_dt_0_100 as t6_actv_chnl_cd_multi_top3frequency_50,
+ fz_topn_frequency(`actv_chnl_cd`, 3) over t6_cust_id_an_actv_dt_0_10 as t6_actv_chnl_cd_multi_top3frequency_51,
+ distinct_count(`atta_card_ind`) over t6_cust_id_an_actv_dt_0s_32d as t6_atta_card_ind_multi_unique_count_52,
+ fz_topn_frequency(`atta_card_ind`, 3) over t6_cust_id_an_actv_dt_0s_32d as t6_atta_card_ind_multi_top3frequency_53,
+ fz_topn_frequency(`bus_card_ind`, 3) over t6_cust_id_an_actv_dt_0s_32d as t6_bus_card_ind_multi_top3frequency_54,
+ distinct_count(`bus_card_ind`) over t6_cust_id_an_actv_dt_0s_32d as t6_bus_card_ind_multi_unique_count_55,
+ distinct_count(`camp_org_id`) over t6_cust_id_an_actv_dt_0_100 as t6_camp_org_id_multi_unique_count_56,
+ distinct_count(`camp_org_id`) over t6_cust_id_an_actv_dt_0_10 as t6_camp_org_id_multi_unique_count_57,
+ fz_topn_frequency(`card_matr_yr_mth`, 3) over t6_cust_id_an_actv_dt_0_100 as t6_card_matr_yr_mth_multi_top3frequency_58,
+ fz_topn_frequency(`card_matr_yr_mth`, 3) over t6_cust_id_an_actv_dt_0_10 as t6_card_matr_yr_mth_multi_top3frequency_59,
+ fz_topn_frequency(`card_org_cd`, 3) over t6_cust_id_an_actv_dt_0s_32d as t6_card_org_cd_multi_top3frequency_60,
+ distinct_count(`card_org_cd`) over t6_cust_id_an_actv_dt_0s_32d as t6_card_org_cd_multi_unique_count_61,
+ distinct_count(`crdt_card_lvl_cd`) over t6_cust_id_an_actv_dt_0_100 as t6_crdt_card_lvl_cd_multi_unique_count_62,
+ distinct_count(`crdt_card_lvl_cd`) over t6_cust_id_an_actv_dt_0_10 as t6_crdt_card_lvl_cd_multi_unique_count_63,
+ fz_topn_frequency(`free_annl_fee_ind`, 3) over t6_cust_id_an_actv_dt_0s_32d as t6_free_annl_fee_ind_multi_top3frequency_64,
+ distinct_count(`free_annl_fee_ind`) over t6_cust_id_an_actv_dt_0s_32d as t6_free_annl_fee_ind_multi_unique_count_65,
+ fz_topn_frequency(`matr_contn_card_ind`, 3) over t6_cust_id_an_actv_dt_0s_32d as t6_matr_contn_card_ind_multi_top3frequency_66,
+ distinct_count(`matr_contn_card_ind`) over t6_cust_id_an_actv_dt_0s_32d as t6_matr_contn_card_ind_multi_unique_count_67,
+ distinct_count(`prod_id`) over t6_cust_id_an_actv_dt_0_100 as t6_prod_id_multi_unique_count_68,
+ fz_topn_frequency(`prod_id`, 3) over t6_cust_id_an_actv_dt_0_10 as t6_prod_id_multi_top3frequency_69,
+ fz_topn_frequency(`sell_chnl_cd`, 3) over t6_cust_id_an_actv_dt_0_10 as t6_sell_chnl_cd_multi_top3frequency_70,
+ fz_topn_frequency(`sell_chnl_cd`, 3) over t6_cust_id_an_actv_dt_0_100 as t6_sell_chnl_cd_multi_top3frequency_71,
+ fz_topn_frequency(`snp_gage_cd`, 3) over t6_cust_id_an_actv_dt_0_10 as t6_snp_gage_cd_multi_top3frequency_72,
+ fz_topn_frequency(`snp_gage_cd`, 3) over t6_cust_id_an_actv_dt_0_100 as t6_snp_gage_cd_multi_top3frequency_73
+ from
+ (select `cust_id_an` as `cust_id_an`, int(0) as `card_agmt_id_an`, int(0) as `pri_acct_id_an`, '' as `atta_card_ind`, '' as `camp_org_id`, '' as `prod_id`, '' as `snp_gage_cd`, '' as `crdt_card_lvl_cd`, date('2019-07-18') as `pin_card_dt`, '' as `card_matr_yr_mth`, '' as `sell_chnl_cd`, '' as `card_org_cd`, '' as `actv_chnl_cd`, '' as `free_annl_fee_ind`, double(0) as `annl_fee`, '' as `bus_card_ind`, '' as `matr_contn_card_ind`, timestamp('2019-07-18 09:20:20') as `issu_card_dt`, `ins_date` as `actv_dt`, id from `t1`)
+ window t6_cust_id_an_actv_dt_0_100 as (
+ UNION (select `cust_id_an`, `card_agmt_id_an`, `pri_acct_id_an`, `atta_card_ind`, `camp_org_id`, `prod_id`, `snp_gage_cd`, `crdt_card_lvl_cd`, `pin_card_dt`, `card_matr_yr_mth`, `sell_chnl_cd`, `card_org_cd`, `actv_chnl_cd`, `free_annl_fee_ind`, `annl_fee`, `bus_card_ind`, `matr_contn_card_ind`, `issu_card_dt`, `actv_dt`, int(0) as id from `t6`) partition by `cust_id_an` order by `actv_dt` rows between 100 preceding and 0 preceding INSTANCE_NOT_IN_WINDOW),
+ t6_cust_id_an_actv_dt_0_10 as (
+ UNION (select `cust_id_an`, `card_agmt_id_an`, `pri_acct_id_an`, `atta_card_ind`, `camp_org_id`, `prod_id`, `snp_gage_cd`, `crdt_card_lvl_cd`, `pin_card_dt`, `card_matr_yr_mth`, `sell_chnl_cd`, `card_org_cd`, `actv_chnl_cd`, `free_annl_fee_ind`, `annl_fee`, `bus_card_ind`, `matr_contn_card_ind`, `issu_card_dt`, `actv_dt`, int(0) as id from `t6`) partition by `cust_id_an` order by `actv_dt` rows between 10 preceding and 0 preceding INSTANCE_NOT_IN_WINDOW),
+ t6_cust_id_an_actv_dt_0s_32d as (
+ UNION (select `cust_id_an`, `card_agmt_id_an`, `pri_acct_id_an`, `atta_card_ind`, `camp_org_id`, `prod_id`, `snp_gage_cd`, `crdt_card_lvl_cd`, `pin_card_dt`, `card_matr_yr_mth`, `sell_chnl_cd`, `card_org_cd`, `actv_chnl_cd`, `free_annl_fee_ind`, `annl_fee`, `bus_card_ind`, `matr_contn_card_ind`, `issu_card_dt`, `actv_dt`, int(0) as id from `t6`) partition by `cust_id_an` order by `actv_dt` rows_range between 32d preceding and 0s preceding INSTANCE_NOT_IN_WINDOW))
+ as out3
+ on out0.id_1 = out3.id_45
+ last join
+ (
+ select
+ id as id_75,
+ fz_topn_frequency(`card_agmt_id_an`, 3) over t7_cust_id_an_data_date_0s_1d as t7_card_agmt_id_an_multi_top3frequency_74,
+ fz_topn_frequency(`card_agmt_id_an`, 3) over t7_cust_id_an_data_date_0s_32d as t7_card_agmt_id_an_multi_top3frequency_75,
+ fz_topn_frequency(`card_stat_cd`, 3) over t7_cust_id_an_data_date_0_100 as t7_card_stat_cd_multi_top3frequency_76,
+ distinct_count(`card_stat_cd`) over t7_cust_id_an_data_date_0_100 as t7_card_stat_cd_multi_unique_count_77
+ from
+ (select `cust_id_an` as `cust_id_an`, int(0) as `card_agmt_id_an`, date('2019-07-18') as `fst_use_card_dt`, date('2019-07-18') as `ltst_use_card_dt`, '' as `card_stat_cd`, `ins_date` as `data_date`, id from `t1`)
+ window t7_cust_id_an_data_date_0s_1d as (
+ UNION (select `cust_id_an`, `card_agmt_id_an`, `fst_use_card_dt`, `ltst_use_card_dt`, `card_stat_cd`, `data_date`, int(0) as id from `t7`) partition by `cust_id_an` order by `data_date` rows_range between 1d preceding and 0s preceding INSTANCE_NOT_IN_WINDOW),
+ t7_cust_id_an_data_date_0s_32d as (
+ UNION (select `cust_id_an`, `card_agmt_id_an`, `fst_use_card_dt`, `ltst_use_card_dt`, `card_stat_cd`, `data_date`, int(0) as id from `t7`) partition by `cust_id_an` order by `data_date` rows_range between 32d preceding and 0s preceding INSTANCE_NOT_IN_WINDOW),
+ t7_cust_id_an_data_date_0_100 as (
+ UNION (select `cust_id_an`, `card_agmt_id_an`, `fst_use_card_dt`, `ltst_use_card_dt`, `card_stat_cd`, `data_date`, int(0) as id from `t7`) partition by `cust_id_an` order by `data_date` rows between 100 preceding and 0 preceding INSTANCE_NOT_IN_WINDOW))
+ as out4
+ on out0.id_1 = out4.id_75
+ last join
+ (
+ select
+ id as id_79,
+ min(`agmt_id_an`) over t8_cust_id_an_open_acct_dt_0_10 as t8_agmt_id_an_multi_min_78,
+ max(`agmt_id_an`) over t8_cust_id_an_open_acct_dt_0_10 as t8_agmt_id_an_multi_max_79,
+ fz_topn_frequency(`atta_acct_ind`, 3) over t8_cust_id_an_open_acct_dt_0s_32d as t8_atta_acct_ind_multi_top3frequency_80,
+ distinct_count(`atta_acct_ind`) over t8_cust_id_an_open_acct_dt_0s_32d as t8_atta_acct_ind_multi_unique_count_81
+ from
+ (select `cust_id_an` as `cust_id_an`, int(0) as `agmt_id_an`, '' as `atta_acct_ind`, date('2019-07-18') as `stmt_dt`, `ins_date` as `open_acct_dt`, timestamp('2019-07-18 09:20:20') as `clos_acct_dt`, id from `t1`)
+ window t8_cust_id_an_open_acct_dt_0_10 as (
+ UNION (select `cust_id_an`, `agmt_id_an`, `atta_acct_ind`, `stmt_dt`, `open_acct_dt`, `clos_acct_dt`, int(0) as id from `t8`) partition by `cust_id_an` order by `open_acct_dt` rows between 10 preceding and 0 preceding INSTANCE_NOT_IN_WINDOW),
+ t8_cust_id_an_open_acct_dt_0s_32d as (
+ UNION (select `cust_id_an`, `agmt_id_an`, `atta_acct_ind`, `stmt_dt`, `open_acct_dt`, `clos_acct_dt`, int(0) as id from `t8`) partition by `cust_id_an` order by `open_acct_dt` rows_range between 32d preceding and 0s preceding INSTANCE_NOT_IN_WINDOW))
+ as out5
+ on out0.id_1 = out5.id_79
+ last join
+ (
+ select
+ id as id_83,
+ min(`amtbl_od_bal`) over t9_cust_id_an_data_date_0s_32d as t9_amtbl_od_bal_multi_min_82,
+ avg(`amtbl_od_bal`) over t9_cust_id_an_data_date_0_10 as t9_amtbl_od_bal_multi_avg_83,
+ min(`cash_od_bal`) over t9_cust_id_an_data_date_0_100 as t9_cash_od_bal_multi_min_84,
+ min(`cash_od_bal`) over t9_cust_id_an_data_date_0_10 as t9_cash_od_bal_multi_min_85,
+ min(`consm_od_bal`) over t9_cust_id_an_data_date_0_100 as t9_consm_od_bal_multi_min_86,
+ min(`consm_od_bal`) over t9_cust_id_an_data_date_0_10 as t9_consm_od_bal_multi_min_87,
+ max(`crdt_card_point`) over t9_cust_id_an_data_date_0_100 as t9_crdt_card_point_multi_max_88,
+ max(`crdt_card_point`) over t9_cust_id_an_data_date_0_10 as t9_crdt_card_point_multi_max_89,
+ min(`curr_yr_crdt_card_point`) over t9_cust_id_an_data_date_0_100 as t9_curr_yr_crdt_card_point_multi_min_90,
+ min(`curr_yr_crdt_card_point`) over t9_cust_id_an_data_date_0_10 as t9_curr_yr_crdt_card_point_multi_min_91,
+ max(`last_mth_consm_cnt`) over t9_cust_id_an_data_date_0_100 as t9_last_mth_consm_cnt_multi_max_92,
+ max(`last_mth_consm_cnt`) over t9_cust_id_an_data_date_0_10 as t9_last_mth_consm_cnt_multi_max_93,
+ min(`last_mth_stmt_amt`) over t9_cust_id_an_data_date_0_100 as t9_last_mth_stmt_amt_multi_min_94,
+ min(`last_mth_stmt_amt`) over t9_cust_id_an_data_date_0_10 as t9_last_mth_stmt_amt_multi_min_95,
+ min(`m_amtbl_amt_accm`) over t9_cust_id_an_data_date_0s_32d as t9_m_amtbl_amt_accm_multi_min_96,
+ avg(`m_amtbl_amt_accm`) over t9_cust_id_an_data_date_0_10 as t9_m_amtbl_amt_accm_multi_avg_97,
+ min(`m_cash_amt_accm`) over t9_cust_id_an_data_date_0_100 as t9_m_cash_amt_accm_multi_min_98,
+ min(`m_cash_amt_accm`) over t9_cust_id_an_data_date_0_10 as t9_m_cash_amt_accm_multi_min_99,
+ min(`m_consm_amt_accm`) over t9_cust_id_an_data_date_0_100 as t9_m_consm_amt_accm_multi_min_100,
+ min(`m_consm_amt_accm`) over t9_cust_id_an_data_date_0_10 as t9_m_consm_amt_accm_multi_min_101,
+ avg(`m_ovrd_bal_accm`) over t9_cust_id_an_data_date_0_100 as t9_m_ovrd_bal_accm_multi_avg_102,
+ avg(`m_ovrd_bal_accm`) over t9_cust_id_an_data_date_0_10 as t9_m_ovrd_bal_accm_multi_avg_103,
+ max(`m_spl_pay_amt_accm`) over t9_cust_id_an_data_date_0_100 as t9_m_spl_pay_amt_accm_multi_max_104,
+ max(`m_spl_pay_amt_accm`) over t9_cust_id_an_data_date_0_10 as t9_m_spl_pay_amt_accm_multi_max_105,
+ avg(`ovrd_bal`) over t9_cust_id_an_data_date_0_100 as t9_ovrd_bal_multi_avg_106,
+ avg(`ovrd_bal`) over t9_cust_id_an_data_date_0_10 as t9_ovrd_bal_multi_avg_107,
+ max(`spl_pay_bal`) over t9_cust_id_an_data_date_0_100 as t9_spl_pay_bal_multi_max_108,
+ max(`spl_pay_bal`) over t9_cust_id_an_data_date_0_10 as t9_spl_pay_bal_multi_max_109,
+ fz_topn_frequency(`acct_stat_cd`, 3) over t9_cust_id_an_data_date_0_100 as t9_acct_stat_cd_multi_top3frequency_110,
+ distinct_count(`acct_stat_cd`) over t9_cust_id_an_data_date_0_100 as t9_acct_stat_cd_multi_unique_count_111,
+ fz_topn_frequency(`agmt_id_an`, 3) over t9_cust_id_an_data_date_0s_1d as t9_agmt_id_an_multi_top3frequency_112,
+ fz_topn_frequency(`agmt_id_an`, 3) over t9_cust_id_an_data_date_0s_32d as t9_agmt_id_an_multi_top3frequency_113,
+ fz_topn_frequency(`curr_ovrd_stat_cd`, 3) over t9_cust_id_an_data_date_0_100 as t9_curr_ovrd_stat_cd_multi_top3frequency_114,
+ fz_topn_frequency(`curr_ovrd_stat_cd`, 3) over t9_cust_id_an_data_date_0_10 as t9_curr_ovrd_stat_cd_multi_top3frequency_115,
+ fz_topn_frequency(`curr_yr_ovrd_cnt`, 3) over t9_cust_id_an_data_date_0_100 as t9_curr_yr_ovrd_cnt_multi_top3frequency_116,
+ fz_topn_frequency(`curr_yr_ovrd_cnt`, 3) over t9_cust_id_an_data_date_0_10 as t9_curr_yr_ovrd_cnt_multi_top3frequency_117
+ from
+ (select `cust_id_an` as `cust_id_an`, int(0) as `agmt_id_an`, '' as `curr_ovrd_stat_cd`, int(0) as `curr_yr_ovrd_cnt`, double(0) as `curr_yr_crdt_card_point`, double(0) as `crdt_card_point`, '' as `acct_stat_cd`, double(0) as `consm_od_bal`, double(0) as `cash_od_bal`, double(0) as `amtbl_od_bal`, double(0) as `spl_pay_bal`, double(0) as `ovrd_bal`, double(0) as `last_mth_stmt_amt`, int(0) as `last_mth_consm_cnt`, double(0) as `m_consm_amt_accm`, double(0) as `m_cash_amt_accm`, double(0) as `m_amtbl_amt_accm`, double(0) as `m_spl_pay_amt_accm`, double(0) as `m_ovrd_bal_accm`, `ins_date` as `data_date`, id from `t1`)
+ window t9_cust_id_an_data_date_0s_32d as (
+ UNION (select `cust_id_an`, `agmt_id_an`, `curr_ovrd_stat_cd`, `curr_yr_ovrd_cnt`, `curr_yr_crdt_card_point`, `crdt_card_point`, `acct_stat_cd`, `consm_od_bal`, `cash_od_bal`, `amtbl_od_bal`, `spl_pay_bal`, `ovrd_bal`, `last_mth_stmt_amt`, `last_mth_consm_cnt`, `m_consm_amt_accm`, `m_cash_amt_accm`, `m_amtbl_amt_accm`, `m_spl_pay_amt_accm`, `m_ovrd_bal_accm`, `data_date`, int(0) as id from `t9`) partition by `cust_id_an` order by `data_date` rows_range between 32d preceding and 0s preceding INSTANCE_NOT_IN_WINDOW),
+ t9_cust_id_an_data_date_0_10 as (
+ UNION (select `cust_id_an`, `agmt_id_an`, `curr_ovrd_stat_cd`, `curr_yr_ovrd_cnt`, `curr_yr_crdt_card_point`, `crdt_card_point`, `acct_stat_cd`, `consm_od_bal`, `cash_od_bal`, `amtbl_od_bal`, `spl_pay_bal`, `ovrd_bal`, `last_mth_stmt_amt`, `last_mth_consm_cnt`, `m_consm_amt_accm`, `m_cash_amt_accm`, `m_amtbl_amt_accm`, `m_spl_pay_amt_accm`, `m_ovrd_bal_accm`, `data_date`, int(0) as id from `t9`) partition by `cust_id_an` order by `data_date` rows between 10 preceding and 0 preceding INSTANCE_NOT_IN_WINDOW),
+ t9_cust_id_an_data_date_0_100 as (
+ UNION (select `cust_id_an`, `agmt_id_an`, `curr_ovrd_stat_cd`, `curr_yr_ovrd_cnt`, `curr_yr_crdt_card_point`, `crdt_card_point`, `acct_stat_cd`, `consm_od_bal`, `cash_od_bal`, `amtbl_od_bal`, `spl_pay_bal`, `ovrd_bal`, `last_mth_stmt_amt`, `last_mth_consm_cnt`, `m_consm_amt_accm`, `m_cash_amt_accm`, `m_amtbl_amt_accm`, `m_spl_pay_amt_accm`, `m_ovrd_bal_accm`, `data_date`, int(0) as id from `t9`) partition by `cust_id_an` order by `data_date` rows between 100 preceding and 0 preceding INSTANCE_NOT_IN_WINDOW),
+ t9_cust_id_an_data_date_0s_1d as (
+ UNION (select `cust_id_an`, `agmt_id_an`, `curr_ovrd_stat_cd`, `curr_yr_ovrd_cnt`, `curr_yr_crdt_card_point`, `crdt_card_point`, `acct_stat_cd`, `consm_od_bal`, `cash_od_bal`, `amtbl_od_bal`, `spl_pay_bal`, `ovrd_bal`, `last_mth_stmt_amt`, `last_mth_consm_cnt`, `m_consm_amt_accm`, `m_cash_amt_accm`, `m_amtbl_amt_accm`, `m_spl_pay_amt_accm`, `m_ovrd_bal_accm`, `data_date`, int(0) as id from `t9`) partition by `cust_id_an` order by `data_date` rows_range between 1d preceding and 0s preceding INSTANCE_NOT_IN_WINDOW))
+ as out6
+ on out0.id_1 = out6.id_83
+ ;
+ expect:
+ success: true
diff --git a/cases/integration_test/spark/test_fqz_studio.yaml b/cases/integration_test/spark/test_fqz_studio.yaml
new file mode 100644
index 00000000000..cbbbaf5a5ec
--- /dev/null
+++ b/cases/integration_test/spark/test_fqz_studio.yaml
@@ -0,0 +1,363 @@
+# Copyright 2021 4Paradigm
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# Single-table anti-fraud scenario
+db: test_fqz
+cases:
+ - id: 1
+ desc: 单表-反欺诈场景
+ inputs:
+ - columns: [ "id int32", "bilabel int32", "D_TXN_TIME_std timestamp", "C_SK_SEQ string", "D_RHX_DATE_KEY string", "N_CMX_TRAN_ID int32", "D_TXN_DATE string", "D_TXN_TIME string", "C_ACCT_TYPE string" , "C_ACCT_CARD_NO string", "C_ACCT_CARD_FLAG string", "C_ACCT_ZONE string", "N_ISSUE_AMT double", "N_TXN_AMT_RMB double", "C_ISSUE_CURR string", "C_CUSTOMER_ID string", "N_TRXCODE string", "C_DAILY_OPENBUY double", "C_INDI_OPENBUY double", "N_PHONE_NO string", "N_BOUND_PHONE_NO string", "C_MAC_ADDR string", "C_TXN_IP string", "C_MERCH_ID string", "C_MAC_STAT string", "C_IP_STAT string", "C_GREYLIST_FLAG string", "C_RULE_ACT string", "RRF_RULE_DATA string", "RRF_BUILD_NUM int32", "C_PAY_NAME string", "C_TXN_TYPE string", "C_PAYEE_ACCT string", "C_PAYEE_NAME string", "C_PAYEE_BANK_NAME string", "C_TXN_CHANNEL string", "C_SERIAL_NO string", "D_REGISTER_DATETIME double", "C_PAYEE_ACCT_ZONE string", "C_COMMONLY_PAYEE_FLAG string", "C_TRUST_PAYEE_FLAG string", "C_MEDIUM_NO string", "C_TRUST_CLIENT_FLAG string", "C_VERIFY_TYPE string", "C_PAYEE_CUSTOMER_ID string", "C_CPU_ID string", "C_MEMORY_CAPACITY double", "C_SYSTEM_VERSION string", "C_BROWSER_VERSION string", "C_BROWSER_LANG string", "C_SCREEN_RESOLUTION double", "C_APP_VERSION string", "C_FACTORY_INFO string", "C_WHITE_CARD_FLAG string", "C_ACCOUNT_BALANCE double", "C_MOBILE_LOCATION double", "C_DEAL_RESULT string", "C_FINAL_DEAL_TYPE string", "N_MODEL_SCORE double", "C_TXN_TYPE_TMP string", "N_UNIT_PRICE double", "N_TXN_COUNT double", "PROV string", "CITY string", "MER_OPTIMESP string", "MER_GROUPID int32", "MER_ZONENO string", "MER_BRNO string", "MER_SHOP_BASE string", "MER_PACCTYPE int32", "MER_FRATEPC string", "MER_SPECACCT int32", "MER_SWITCH int32", "MER_AMTLIMI1 int32", "MER_RATLIMI1 int32", "MER_BUSIAREA string", "CUS_Gender_Cd int32", "CUS_Ethnic_Cd int32", "CUS_Birth_Cty_Cd string", "CUS_Edu_Degree_Cd int32", "CUS_Marriage_Status_Cd int32", "CUS_Vip_Cust_Ind int32", "CUS_Icbc_Emply_Ind int32", "CUS_Dom_Resdnt_Ind int32", "CUS_Belong_Corp_Type_Cd int32", "CUS_Proper_Career_Cd string", "CUS_Proper_Industry_Cd int32", "CUS_Integrality_Ind_Cd int32", "CUS_Integrality_Check_Result int32", "CUS_Identity_Actl_Result_Type_Cd int32", "CUS_Cert_Provi_Situ_Type_Cd int32", "CUS_Invalid_Acct_Cert_Actl_Result int32", "CUS_Start_Dt string", "CUS_Birth_Dt string", "CUS_Career_Cd string", "CARDSTAT int32", "CARDKIND int32", "SYNFLAG int32", "GOLDFLAG int32", "OPENDATE date", "CDSQUOTA int64", "CDTQUOTA int64", "BT_CARDSTAT int32", "BT_ACTCHANEL int32", "BT_ACTDATE date", "BT_SALECODE string" ]
+ indexs: ["index1:C_ACCT_CARD_NO:D_TXN_TIME_std", "index2:N_BOUND_PHONE_NO:D_TXN_TIME_std", "index3:N_PHONE_NO:D_TXN_TIME_std", "index4:C_CUSTOMER_ID:D_TXN_TIME_std"]
+ rows:
+ - [33, 250, 1609236827000, "c_sk_seq", "d_rhx_date_key", 11, "d_txn_date", "d_txn_time", "c_acct_type" , "c_acct_card_no", "c_acct_card_flag", "c_acct_zone", 12.00, 13.14, "c_issue_curr", "c_customer_id", "n_trxcode", 14.12, 128.99, "n_phone_no", "n_bound_phone_no", "c_mac_addr", "c_txn_ip", "c_merch_id", "c_mac_stat", "c_ip_stat", "c_greylist_flag", "c_rule_act", "rrf_rule_data", 19, "c_pay_name", "c_txn_type", "c_payee_acct", "c_payee_name", "c_payee_bank_name", "c_txn_channel", "c_serial_no", 88.88, "c_payee_acct_zone", "c_commonly_payee_flag", "c_trust_payee_flag", "c_medium_no", "c_trust_client_flag", "c_verify_type", "c_payee_customer_id", "c_cpu_id", 77.07, "c_system_version", "c_browser_version", "c_browser_lang", 100.00, "c_app_version", "c_factory_info", "c_white_card_flag", 99.19, 67.81, "c_deal_result", "c_final_deal_type", 34.43, "c_txn_type_tmp", 88.08, 128.12, "prov", "city", "mer_optimesp", 939, "mer_zoneno", "mer_brno", "mer_shop_base", 477, "mer_fratepc", 122, 355, 223, 211, "mer_busiarea", 334, 444, "cus_birth_cty_cd", 555, 566, 577, 588, 42020, 314, "cus_proper_career_cd", 333, 41212, 666, 677, 688, 699, "cus_start_dt", "cus_birth_dt", "cus_career_cd", 61010, 777, 711, 733, "2020-12-22", 122, 999, 977, 432, "2021-01-02", "bt_salecode" ]
+ sql: |
+ select
+ id as id_1,
+ id as t1_id_original_0,
+ bilabel as t1_bilabel_original_1,
+ D_TXN_TIME_std as t1_D_TXN_TIME_std_original_2,
+ C_SK_SEQ as t1_C_SK_SEQ_original_3,
+ D_RHX_DATE_KEY as t1_D_RHX_DATE_KEY_original_4,
+ N_CMX_TRAN_ID as t1_N_CMX_TRAN_ID_original_5,
+ D_TXN_DATE as t1_D_TXN_DATE_original_6,
+ D_TXN_TIME as t1_D_TXN_TIME_original_7,
+ C_ACCT_TYPE as t1_C_ACCT_TYPE_original_8,
+ C_ACCT_CARD_NO as t1_C_ACCT_CARD_NO_original_9,
+ C_ACCT_CARD_FLAG as t1_C_ACCT_CARD_FLAG_original_10,
+ C_ACCT_ZONE as t1_C_ACCT_ZONE_original_11,
+ N_ISSUE_AMT as t1_N_ISSUE_AMT_original_12,
+ N_TXN_AMT_RMB as t1_N_TXN_AMT_RMB_original_13,
+ C_ISSUE_CURR as t1_C_ISSUE_CURR_original_14,
+ C_CUSTOMER_ID as t1_C_CUSTOMER_ID_original_15,
+ N_TRXCODE as t1_N_TRXCODE_original_16,
+ C_DAILY_OPENBUY as t1_C_DAILY_OPENBUY_original_17,
+ C_INDI_OPENBUY as t1_C_INDI_OPENBUY_original_18,
+ N_PHONE_NO as t1_N_PHONE_NO_original_19,
+ N_BOUND_PHONE_NO as t1_N_BOUND_PHONE_NO_original_20,
+ C_MAC_ADDR as t1_C_MAC_ADDR_original_21,
+ C_TXN_IP as t1_C_TXN_IP_original_22,
+ C_MERCH_ID as t1_C_MERCH_ID_original_23,
+ C_MAC_STAT as t1_C_MAC_STAT_original_24,
+ C_IP_STAT as t1_C_IP_STAT_original_25,
+ C_GREYLIST_FLAG as t1_C_GREYLIST_FLAG_original_26,
+ C_RULE_ACT as t1_C_RULE_ACT_original_27,
+ RRF_RULE_DATA as t1_RRF_RULE_DATA_original_28,
+ RRF_BUILD_NUM as t1_RRF_BUILD_NUM_original_29,
+ C_PAY_NAME as t1_C_PAY_NAME_original_30,
+ C_TXN_TYPE as t1_C_TXN_TYPE_original_31,
+ C_PAYEE_ACCT as t1_C_PAYEE_ACCT_original_32,
+ C_PAYEE_NAME as t1_C_PAYEE_NAME_original_33,
+ C_PAYEE_BANK_NAME as t1_C_PAYEE_BANK_NAME_original_34,
+ C_TXN_CHANNEL as t1_C_TXN_CHANNEL_original_35,
+ C_SERIAL_NO as t1_C_SERIAL_NO_original_36,
+ D_REGISTER_DATETIME as t1_D_REGISTER_DATETIME_original_37,
+ C_PAYEE_ACCT_ZONE as t1_C_PAYEE_ACCT_ZONE_original_38,
+ C_COMMONLY_PAYEE_FLAG as t1_C_COMMONLY_PAYEE_FLAG_original_39,
+ C_TRUST_PAYEE_FLAG as t1_C_TRUST_PAYEE_FLAG_original_40,
+ C_MEDIUM_NO as t1_C_MEDIUM_NO_original_41,
+ C_TRUST_CLIENT_FLAG as t1_C_TRUST_CLIENT_FLAG_original_42,
+ C_VERIFY_TYPE as t1_C_VERIFY_TYPE_original_43,
+ C_PAYEE_CUSTOMER_ID as t1_C_PAYEE_CUSTOMER_ID_original_44,
+ C_CPU_ID as t1_C_CPU_ID_original_45,
+ C_MEMORY_CAPACITY as t1_C_MEMORY_CAPACITY_original_46,
+ C_SYSTEM_VERSION as t1_C_SYSTEM_VERSION_original_47,
+ C_BROWSER_VERSION as t1_C_BROWSER_VERSION_original_48,
+ C_BROWSER_LANG as t1_C_BROWSER_LANG_original_49,
+ C_SCREEN_RESOLUTION as t1_C_SCREEN_RESOLUTION_original_50,
+ C_APP_VERSION as t1_C_APP_VERSION_original_51,
+ C_FACTORY_INFO as t1_C_FACTORY_INFO_original_52,
+ C_WHITE_CARD_FLAG as t1_C_WHITE_CARD_FLAG_original_53,
+ C_ACCOUNT_BALANCE as t1_C_ACCOUNT_BALANCE_original_54,
+ C_MOBILE_LOCATION as t1_C_MOBILE_LOCATION_original_55,
+ C_DEAL_RESULT as t1_C_DEAL_RESULT_original_56,
+ C_FINAL_DEAL_TYPE as t1_C_FINAL_DEAL_TYPE_original_57,
+ N_MODEL_SCORE as t1_N_MODEL_SCORE_original_58,
+ C_TXN_TYPE_TMP as t1_C_TXN_TYPE_TMP_original_59,
+ N_UNIT_PRICE as t1_N_UNIT_PRICE_original_60,
+ N_TXN_COUNT as t1_N_TXN_COUNT_original_61,
+ PROV as t1_PROV_original_62,
+ CITY as t1_CITY_original_63,
+ MER_OPTIMESP as t1_MER_OPTIMESP_original_64,
+ MER_GROUPID as t1_MER_GROUPID_original_65,
+ MER_ZONENO as t1_MER_ZONENO_original_66,
+ MER_BRNO as t1_MER_BRNO_original_67,
+ MER_SHOP_BASE as t1_MER_SHOP_BASE_original_68,
+ MER_PACCTYPE as t1_MER_PACCTYPE_original_69,
+ MER_FRATEPC as t1_MER_FRATEPC_original_70,
+ MER_SPECACCT as t1_MER_SPECACCT_original_71,
+ MER_SWITCH as t1_MER_SWITCH_original_72,
+ MER_AMTLIMI1 as t1_MER_AMTLIMI1_original_73,
+ MER_RATLIMI1 as t1_MER_RATLIMI1_original_74,
+ MER_BUSIAREA as t1_MER_BUSIAREA_original_75,
+ CUS_Gender_Cd as t1_CUS_Gender_Cd_original_76,
+ CUS_Ethnic_Cd as t1_CUS_Ethnic_Cd_original_77,
+ CUS_Birth_Cty_Cd as t1_CUS_Birth_Cty_Cd_original_78,
+ CUS_Edu_Degree_Cd as t1_CUS_Edu_Degree_Cd_original_79,
+ CUS_Marriage_Status_Cd as t1_CUS_Marriage_Status_Cd_original_80,
+ CUS_Vip_Cust_Ind as t1_CUS_Vip_Cust_Ind_original_81,
+ CUS_Icbc_Emply_Ind as t1_CUS_Icbc_Emply_Ind_original_82,
+ CUS_Dom_Resdnt_Ind as t1_CUS_Dom_Resdnt_Ind_original_83,
+ CUS_Belong_Corp_Type_Cd as t1_CUS_Belong_Corp_Type_Cd_original_84,
+ CUS_Proper_Career_Cd as t1_CUS_Proper_Career_Cd_original_85,
+ CUS_Proper_Industry_Cd as t1_CUS_Proper_Industry_Cd_original_86,
+ CUS_Integrality_Ind_Cd as t1_CUS_Integrality_Ind_Cd_original_87,
+ CUS_Integrality_Check_Result as t1_CUS_Integrality_Check_Result_original_88,
+ CUS_Identity_Actl_Result_Type_Cd as t1_CUS_Identity_Actl_Result_Type_Cd_original_89,
+ CUS_Cert_Provi_Situ_Type_Cd as t1_CUS_Cert_Provi_Situ_Type_Cd_original_90,
+ CUS_Invalid_Acct_Cert_Actl_Result as t1_CUS_Invalid_Acct_Cert_Actl_Result_original_91,
+ CUS_Start_Dt as t1_CUS_Start_Dt_original_92,
+ CUS_Birth_Dt as t1_CUS_Birth_Dt_original_93,
+ CUS_Career_Cd as t1_CUS_Career_Cd_original_94,
+ CARDSTAT as t1_CARDSTAT_original_95,
+ CARDKIND as t1_CARDKIND_original_96,
+ SYNFLAG as t1_SYNFLAG_original_97,
+ GOLDFLAG as t1_GOLDFLAG_original_98,
+ OPENDATE as t1_OPENDATE_original_99,
+ CDSQUOTA as t1_CDSQUOTA_original_100,
+ CDTQUOTA as t1_CDTQUOTA_original_101,
+ BT_CARDSTAT as t1_BT_CARDSTAT_original_102,
+ BT_ACTCHANEL as t1_BT_ACTCHANEL_original_103,
+ BT_ACTDATE as t1_BT_ACTDATE_original_104,
+ BT_SALECODE as t1_BT_SALECODE_original_105,
+ distinct_count(C_SERIAL_NO) over t1_C_ACCT_CARD_NO_D_TXN_TIME_std_0s_2764800s as t1_C_SERIAL_NO_window_unique_count_106,
+ distinct_count(C_FACTORY_INFO) over t1_C_ACCT_CARD_NO_D_TXN_TIME_std_0s_1209600s as t1_C_FACTORY_INFO_window_unique_count_107,
+ distinct_count(C_FACTORY_INFO) over t1_N_BOUND_PHONE_NO_D_TXN_TIME_std_0s_2764800s as t1_C_FACTORY_INFO_window_unique_count_108,
+ distinct_count(C_FACTORY_INFO) over t1_N_PHONE_NO_D_TXN_TIME_std_0s_2764800s as t1_C_FACTORY_INFO_window_unique_count_109,
+ fz_top1_ratio(C_SERIAL_NO) over t1_C_ACCT_CARD_NO_D_TXN_TIME_std_0s_1209600s as t1_C_SERIAL_NO_window_top1_ratio_110,
+ fz_top1_ratio(C_FACTORY_INFO) over t1_C_ACCT_CARD_NO_D_TXN_TIME_std_0s_1209600s as t1_C_FACTORY_INFO_window_top1_ratio_111,
+ distinct_count(C_APP_VERSION) over t1_C_ACCT_CARD_NO_D_TXN_TIME_std_0s_1209600s as t1_C_APP_VERSION_window_unique_count_112,
+ distinct_count(C_SERIAL_NO) over t1_N_BOUND_PHONE_NO_D_TXN_TIME_std_0s_1209600s as t1_C_SERIAL_NO_window_unique_count_113,
+ distinct_count(C_FACTORY_INFO) over t1_C_ACCT_CARD_NO_D_TXN_TIME_std_0s_2764800s as t1_C_FACTORY_INFO_window_unique_count_114,
+ distinct_count(C_SERIAL_NO) over t1_N_PHONE_NO_D_TXN_TIME_std_0s_1209600s as t1_C_SERIAL_NO_window_unique_count_115,
+ distinct_count(C_FACTORY_INFO) over t1_N_BOUND_PHONE_NO_D_TXN_TIME_std_0s_1209600s as t1_C_FACTORY_INFO_window_unique_count_116,
+ max(C_SCREEN_RESOLUTION) over t1_N_BOUND_PHONE_NO_D_TXN_TIME_std_0s_1209600s as t1_C_SCREEN_RESOLUTION_window_max_117,
+ max(C_SCREEN_RESOLUTION) over t1_N_BOUND_PHONE_NO_D_TXN_TIME_std_0s_2764800s as t1_C_SCREEN_RESOLUTION_window_max_118,
+ distinct_count(C_FACTORY_INFO) over t1_C_CUSTOMER_ID_D_TXN_TIME_std_0s_1209600s as t1_C_FACTORY_INFO_window_unique_count_119,
+ distinct_count(C_FACTORY_INFO) over t1_N_PHONE_NO_D_TXN_TIME_std_0s_1209600s as t1_C_FACTORY_INFO_window_unique_count_120,
+ distinct_count(C_FACTORY_INFO) over t1_C_CUSTOMER_ID_D_TXN_TIME_std_0s_2764800s as t1_C_FACTORY_INFO_window_unique_count_121,
+ distinct_count(C_SERIAL_NO) over t1_C_ACCT_CARD_NO_D_TXN_TIME_std_0s_1209600s as t1_C_SERIAL_NO_window_unique_count_122,
+ max(C_SCREEN_RESOLUTION) over t1_C_ACCT_CARD_NO_D_TXN_TIME_std_0s_1209600s as t1_C_SCREEN_RESOLUTION_window_max_123,
+ C_IP_STAT as t1_C_IP_STAT_combine_124,
+ C_RULE_ACT as t1_C_RULE_ACT_combine_124,
+ CITY as t1_CITY_combine_124,
+ C_RULE_ACT as t1_C_RULE_ACT_combine_125,
+ C_FINAL_DEAL_TYPE as t1_C_FINAL_DEAL_TYPE_combine_125,
+ CITY as t1_CITY_combine_125,
+ C_IP_STAT as t1_C_IP_STAT_combine_126,
+ RRF_RULE_DATA as t1_RRF_RULE_DATA_combine_126,
+ C_APP_VERSION as t1_C_APP_VERSION_combine_126,
+ C_RULE_ACT as t1_C_RULE_ACT_combine_127,
+ RRF_RULE_DATA as t1_RRF_RULE_DATA_combine_127,
+ C_DEAL_RESULT as t1_C_DEAL_RESULT_combine_127,
+ C_IP_STAT as t1_C_IP_STAT_combine_128,
+ RRF_RULE_DATA as t1_RRF_RULE_DATA_combine_128,
+ C_DEAL_RESULT as t1_C_DEAL_RESULT_combine_128,
+ C_RULE_ACT as t1_C_RULE_ACT_combine_129,
+ RRF_RULE_DATA as t1_RRF_RULE_DATA_combine_129,
+ PROV as t1_PROV_combine_129,
+ C_MAC_STAT as t1_C_MAC_STAT_combine_130,
+ RRF_RULE_DATA as t1_RRF_RULE_DATA_combine_130,
+ C_DEAL_RESULT as t1_C_DEAL_RESULT_combine_130,
+ C_RULE_ACT as t1_C_RULE_ACT_combine_131,
+ RRF_RULE_DATA as t1_RRF_RULE_DATA_combine_131,
+ C_FINAL_DEAL_TYPE as t1_C_FINAL_DEAL_TYPE_combine_131,
+ RRF_RULE_DATA as t1_RRF_RULE_DATA_combine_132,
+ C_VERIFY_TYPE as t1_C_VERIFY_TYPE_combine_132,
+ C_DEAL_RESULT as t1_C_DEAL_RESULT_combine_132,
+ RRF_RULE_DATA as t1_RRF_RULE_DATA_combine_133,
+ C_MEDIUM_NO as t1_C_MEDIUM_NO_combine_133,
+ C_DEAL_RESULT as t1_C_DEAL_RESULT_combine_133,
+ C_MAC_STAT as t1_C_MAC_STAT_combine_134,
+ RRF_RULE_DATA as t1_RRF_RULE_DATA_combine_134,
+ PROV as t1_PROV_combine_134,
+ C_RULE_ACT as t1_C_RULE_ACT_combine_135,
+ RRF_RULE_DATA as t1_RRF_RULE_DATA_combine_135,
+ C_VERIFY_TYPE as t1_C_VERIFY_TYPE_combine_135
+ from
+ {0}
+ window t1_C_ACCT_CARD_NO_D_TXN_TIME_std_0s_2764800s as ( partition by C_ACCT_CARD_NO order by D_TXN_TIME_std rows_range between 2764800s preceding and 0s preceding INSTANCE_NOT_IN_WINDOW),
+ t1_C_ACCT_CARD_NO_D_TXN_TIME_std_0s_1209600s as ( partition by C_ACCT_CARD_NO order by D_TXN_TIME_std rows_range between 1209600s preceding and 0s preceding INSTANCE_NOT_IN_WINDOW),
+ t1_N_BOUND_PHONE_NO_D_TXN_TIME_std_0s_2764800s as ( partition by N_BOUND_PHONE_NO order by D_TXN_TIME_std rows_range between 2764800s preceding and 0s preceding INSTANCE_NOT_IN_WINDOW),
+ t1_N_PHONE_NO_D_TXN_TIME_std_0s_2764800s as ( partition by N_PHONE_NO order by D_TXN_TIME_std rows_range between 2764800s preceding and 0s preceding INSTANCE_NOT_IN_WINDOW),
+ t1_N_BOUND_PHONE_NO_D_TXN_TIME_std_0s_1209600s as ( partition by N_BOUND_PHONE_NO order by D_TXN_TIME_std rows_range between 1209600s preceding and 0s preceding INSTANCE_NOT_IN_WINDOW),
+ t1_N_PHONE_NO_D_TXN_TIME_std_0s_1209600s as ( partition by N_PHONE_NO order by D_TXN_TIME_std rows_range between 1209600s preceding and 0s preceding INSTANCE_NOT_IN_WINDOW),
+ t1_C_CUSTOMER_ID_D_TXN_TIME_std_0s_1209600s as ( partition by C_CUSTOMER_ID order by D_TXN_TIME_std rows_range between 1209600s preceding and 0s preceding INSTANCE_NOT_IN_WINDOW),
+ t1_C_CUSTOMER_ID_D_TXN_TIME_std_0s_2764800s as ( partition by C_CUSTOMER_ID order by D_TXN_TIME_std rows_range between 2764800s preceding and 0s preceding INSTANCE_NOT_IN_WINDOW);
+ expect:
+ success: true
+ # columns: [
+ # "id_1 int32",
+ # "t1_id_original_0 int32",
+ # "t1_bilabel_original_1 int32",
+ # "t1_D_TXN_TIME_std_original_2 timestamp",
+ # "t1_C_SK_SEQ_original_3 string",
+ # "t1_D_RHX_DATE_KEY_original_4 string",
+ # "t1_N_CMX_TRAN_ID_original_5 int32",
+ # "t1_D_TXN_DATE_original_6 string",
+ # "t1_D_TXN_TIME_original_7 string",
+ # "t1_C_ACCT_TYPE_original_8 string",
+ # "t1_C_ACCT_CARD_NO_original_9 string",
+ # "t1_C_ACCT_CARD_FLAG_original_10 string",
+ # "t1_C_ACCT_ZONE_original_11 string",
+ # "t1_N_ISSUE_AMT_original_12 double",
+ # "t1_N_TXN_AMT_RMB_original_13 double",
+ # "t1_C_ISSUE_CURR_original_14 string",
+ # "t1_C_CUSTOMER_ID_original_15 string",
+ # "t1_N_TRXCODE_original_16 string",
+ # "t1_C_DAILY_OPENBUY_original_17 double",
+ # "t1_C_INDI_OPENBUY_original_18 double",
+ # "t1_N_PHONE_NO_original_19 string",
+ # "t1_N_BOUND_PHONE_NO_original_20 string",
+ # "t1_C_MAC_ADDR_original_21 string",
+ # "t1_C_TXN_IP_original_22 string",
+ # "t1_C_MERCH_ID_original_23 string",
+ # "t1_C_MAC_STAT_original_24 string",
+ # "t1_C_IP_STAT_original_25 string",
+ # "t1_C_GREYLIST_FLAG_original_26 string",
+ # "t1_C_RULE_ACT_original_27 string",
+ # "t1_RRF_RULE_DATA_original_28 string",
+ # "t1_RRF_BUILD_NUM_original_29 int32",
+ # "t1_C_PAY_NAME_original_30 string",
+ # "t1_C_TXN_TYPE_original_31 string",
+ # "t1_C_PAYEE_ACCT_original_32 string",
+ # "t1_C_PAYEE_NAME_original_33 string",
+ # "t1_C_PAYEE_BANK_NAME_original_34 string",
+ # "t1_C_TXN_CHANNEL_original_35 string",
+ # "t1_C_SERIAL_NO_original_36 string",
+ # "t1_D_REGISTER_DATETIME_original_37 double",
+ # "t1_C_PAYEE_ACCT_ZONE_original_38 string",
+ # "t1_C_COMMONLY_PAYEE_FLAG_original_39 string",
+ # "t1_C_TRUST_PAYEE_FLAG_original_40 string",
+ # "t1_C_MEDIUM_NO_original_41 string",
+ # "t1_C_TRUST_CLIENT_FLAG_original_42 string",
+ # "t1_C_VERIFY_TYPE_original_43 string",
+ # "t1_C_PAYEE_CUSTOMER_ID_original_44 string",
+ # "t1_C_CPU_ID_original_45 string",
+ # "t1_C_MEMORY_CAPACITY_original_46 double",
+ # "t1_C_SYSTEM_VERSION_original_47 string",
+ # "t1_C_BROWSER_VERSION_original_48 string",
+ # "t1_C_BROWSER_LANG_original_49 string",
+ # "t1_C_SCREEN_RESOLUTION_original_50 double",
+ # "t1_C_APP_VERSION_original_51 string",
+ # "t1_C_FACTORY_INFO_original_52 string",
+ # "t1_C_WHITE_CARD_FLAG_original_53 string",
+ # "t1_C_ACCOUNT_BALANCE_original_54 double",
+ # "t1_C_MOBILE_LOCATION_original_55 double",
+ # "t1_C_DEAL_RESULT_original_56 string",
+ # "t1_C_FINAL_DEAL_TYPE_original_57 string",
+ # "t1_N_MODEL_SCORE_original_58 double",
+ # "t1_C_TXN_TYPE_TMP_original_59 string",
+ # "t1_N_UNIT_PRICE_original_60 double",
+ # "t1_N_TXN_COUNT_original_61 double",
+ # "t1_PROV_original_62 string",
+ # "t1_CITY_original_63 string",
+ # "t1_MER_OPTIMESP_original_64 string",
+ # "t1_MER_GROUPID_original_65 int32",
+ # "t1_MER_ZONENO_original_66 string",
+ # "t1_MER_BRNO_original_67 string",
+ # "t1_MER_SHOP_BASE_original_68 string",
+ # "t1_MER_PACCTYPE_original_69 int32",
+ # "t1_MER_FRATEPC_original_70 string",
+ # "t1_MER_SPECACCT_original_71 int32",
+ # "t1_MER_SWITCH_original_72 int32",
+ # "t1_MER_AMTLIMI1_original_73 int32",
+ # "t1_MER_RATLIMI1_original_74 int32",
+ # "t1_MER_BUSIAREA_original_75 string",
+ # "t1_CUS_Gender_Cd_original_76 int32",
+ # "t1_CUS_Ethnic_Cd_original_77 int32",
+ # "t1_CUS_Birth_Cty_Cd_original_78 string",
+ # "t1_CUS_Edu_Degree_Cd_original_79 int32",
+ # "t1_CUS_Marriage_Status_Cd_original_80 int32",
+ # "t1_CUS_Vip_Cust_Ind_original_81 int32",
+ # "t1_CUS_Icbc_Emply_Ind_original_82 int32",
+ # "t1_CUS_Dom_Resdnt_Ind_original_83 int32",
+ # "t1_CUS_Belong_Corp_Type_Cd_original_84 int32",
+ # "t1_CUS_Proper_Career_Cd_original_85 string",
+ # "t1_CUS_Proper_Industry_Cd_original_86 int32",
+ # "t1_CUS_Integrality_Ind_Cd_original_87 int32",
+ # "t1_CUS_Integrality_Check_Result_original_88 int32",
+ # "t1_CUS_Identity_Actl_Result_Type_Cd_original_89 int32",
+ # "t1_CUS_Cert_Provi_Situ_Type_Cd_original_90 int32",
+ # "t1_CUS_Invalid_Acct_Cert_Actl_Result_original_91 int32",
+ # "t1_CUS_Start_Dt_original_92 string",
+ # "t1_CUS_Birth_Dt_original_93 string",
+ # "t1_CUS_Career_Cd_original_94 string",
+ # "t1_CARDSTAT_original_95 int32",
+ # "t1_CARDKIND_original_96 int32",
+ # "t1_SYNFLAG_original_97 int32",
+ # "t1_GOLDFLAG_original_98 int32",
+ # "t1_OPENDATE_original_99 date",
+ # "t1_CDSQUOTA_original_100 int64",
+ # "t1_CDTQUOTA_original_101 int64",
+ # "t1_BT_CARDSTAT_original_102 int32",
+ # "t1_BT_ACTCHANEL_original_103 int32",
+ # "t1_BT_ACTDATE_original_104 date",
+ # "t1_BT_SALECODE_original_105 string",
+ # "t1_C_SERIAL_NO_window_unique_count_106 int",
+ # "t1_C_FACTORY_INFO_window_unique_count_107 int",
+ # "t1_C_FACTORY_INFO_window_unique_count_108 int",
+ # "t1_C_FACTORY_INFO_window_unique_count_109 int",
+ # "t1_C_SERIAL_NO_window_top1_ratio_110 double",
+ # "t1_C_FACTORY_INFO_window_top1_ratio_111 double",
+ # "t1_C_APP_VERSION_window_unique_count_112 int",
+ # "t1_C_SERIAL_NO_window_unique_count_113 int",
+ # "t1_C_FACTORY_INFO_window_unique_count_114 int",
+ # "t1_C_SERIAL_NO_window_unique_count_115 int",
+ # "t1_C_FACTORY_INFO_window_unique_count_116 int",
+ # "t1_C_SCREEN_RESOLUTION_window_max_117 double",
+ # "t1_C_SCREEN_RESOLUTION_window_max_118 double",
+ # "t1_C_FACTORY_INFO_window_unique_count_119 int",
+ # "t1_C_FACTORY_INFO_window_unique_count_120 int",
+ # "t1_C_FACTORY_INFO_window_unique_count_121 int",
+ # "t1_C_SERIAL_NO_window_unique_count_122 int",
+ # "t1_C_SCREEN_RESOLUTION_window_max_123 double",
+ # "t1_C_IP_STAT_combine_124 string",
+ # "t1_C_RULE_ACT_combine_124 string",
+ # "t1_CITY_combine_124 string",
+ # "t1_C_RULE_ACT_combine_125 string",
+ # "t1_C_FINAL_DEAL_TYPE_combine_125 string",
+ # "t1_CITY_combine_125 string",
+ # "t1_C_IP_STAT_combine_126 string",
+ # "t1_RRF_RULE_DATA_combine_126 string",
+ # "t1_C_APP_VERSION_combine_126 string",
+ # "t1_C_RULE_ACT_combine_127 string",
+ # "t1_RRF_RULE_DATA_combine_127 string",
+ # "t1_C_DEAL_RESULT_combine_127 string",
+ # "t1_C_IP_STAT_combine_128 string",
+ # "t1_RRF_RULE_DATA_combine_128 string",
+ # "t1_C_DEAL_RESULT_combine_128 string",
+ # "t1_C_RULE_ACT_combine_129 string",
+ # "t1_RRF_RULE_DATA_combine_129 string",
+ # "t1_PROV_combine_129 string",
+ # "t1_C_MAC_STAT_combine_130 string",
+ # "t1_RRF_RULE_DATA_combine_130 string",
+ # "t1_C_DEAL_RESULT_combine_130 string",
+ # "t1_C_RULE_ACT_combine_131 string",
+ # "t1_RRF_RULE_DATA_combine_131 string",
+ # "C_FINAL_DEt1_C_FINAL_DEAL_TYPE_combine_131 string",
+ # "t1_RRF_RULE_DATA_combine_132 string",
+ # "t1_C_VERIFY_TYPE_combine_132 string",
+ # "t1_C_DEAL_RESULT_combine_132 string",
+ # "t1_RRF_RULE_DATA_combine_133 string",
+ # "t1_C_MEDIUM_NO_combine_133 string",
+ # "t1_C_DEAL_RESULT_combine_133 string",
+ # "t1_C_MAC_STAT_combine_134 string",
+ # "t1_RRF_RULE_DATA_combine_134 string",
+ # "t1_PROV_combine_134 string",
+ # "t1_C_RULE_ACT_combine_135 string",
+ # "t1_RRF_RULE_DATA_combine_135 string",
+ # "t1_C_VERIFY_TYPE_combine_135 string"
+ # ]
+ #
diff --git a/cases/integration_test/spark/test_jd.yaml b/cases/integration_test/spark/test_jd.yaml
new file mode 100644
index 00000000000..02744f958f4
--- /dev/null
+++ b/cases/integration_test/spark/test_jd.yaml
@@ -0,0 +1,307 @@
+# Copyright 2021 4Paradigm
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
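+# Spark integration case for the JD multi-table scenario: the main table ("all")
+# is LAST JOINed against the user, product, action and comment tables, and
+# window features (fz_top1_ratio, distinct_count, fz_topn_frequency) are computed
+# over ROWS_RANGE/ROWS windows, including UNION windows with INSTANCE_NOT_IN_WINDOW.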
+db: test_db
+cases:
+- id: 1
+ desc: multi-table JD data scenario
+ inputs:
+ - columns:
+ - "id int32"
+ - "user_id int32"
+ - "sku_id int32"
+ - "date timestamp"
+ - "label int32"
+ indexs: ["index1:user_id:date"]
+ rows:
+ - - 459992740
+ - -1311478396
+ - 659918340
+ - 1611146000000
+ - -1939588571
+ - - -543207062
+ - 507763171
+ - 954458270
+ - 1611146000000
+ - -1603336561
+ - - -1304001546
+ - -769990921
+ - -2013336026
+ - 1611146000000
+ - -159697690
+ - - 158625020
+ - -945166892
+ - -74761189
+ - 1611146000000
+ - -93625855
+ - - 658374105
+ - -1246658137
+ - -1487653472
+ - 1611146000000
+ - -2042844456
+ - - -1036345552
+ - -1145428983
+ - -322971158
+ - 1611146000000
+ - -2141990920
+ - - -1454270183
+ - 653071136
+ - -1843758289
+ - 1611146000000
+ - -685391703
+ - - -27071105
+ - 630100915
+ - 314469207
+ - 1611146000000
+ - 993761881
+ - - 38809088
+ - -1539014266
+ - 295127280
+ - 1611146000000
+ - -1518440147
+ - - -1037180916
+ - -1318776756
+ - 244202015
+ - 1611146000000
+ - -2111130440
+ name: all
+ - columns:
+ - "user_id int32"
+ - "age string"
+ - "sex int32"
+ - "user_lv_cd int32"
+ - "user_reg_tm timestamp"
+ indexs: ["index_user:user_id:user_reg_tm"]
+ rows:
+ - - -1275547367
+ - "age_KGJgiSMgcx"
+ - -1321603784
+ - 679568701
+ - 1611146001000
+ - - 193784185
+ - "age_z7XwDlSdzE"
+ - -918521235
+ - -1839640562
+ - 1611146001000
+ - - -1500008039
+ - "age_UxLHj6n5iG"
+ - -490726213
+ - -2044459492
+ - 1611146001000
+ name: user
+ - columns:
+ - "sku_id int32"
+ - "a1 int32"
+ - "a2 int32"
+ - "a3 int32"
+ - "cate int32"
+ - "brand int32"
+ indexs: ["index_pdt:sku_id"]
+ rows:
+ - - 200135598
+ - 620202989
+ - -1819873162
+ - 944811254
+ - -1016957005
+ - -348886786
+ - - -1812792532
+ - -548438081
+ - 408684499
+ - -546175077
+ - 18157988
+ - -1619495426
+ - - 740971942
+ - -995983125
+ - -74505618
+ - 875561670
+ - -1701622561
+ - -2066012196
+ - - -1953481289
+ - 394506620
+ - -871334434
+ - -1883922132
+ - 337664649
+ - -678183716
+ - - 690079825
+ - -124658147
+ - -2013081012
+ - 514316543
+ - -1892105452
+ - -398640514
+ - - -1357806486
+ - -1866091467
+ - -848394605
+ - -1321197691
+ - 1037826917
+ - 576025216
+ name: product
+ - columns:
+ - "user_id int32"
+ - "sku_id int32"
+ - "time timestamp"
+ - "model_id int32"
+ - "type int32"
+ - "cate int32"
+ - "brand int32"
+ indexs: ["index:user_id:time"]
+ rows:
+ - - -946359508
+ - -784482204
+ - 1611146001000
+ - 831631177
+ - 50026040
+ - 125260267
+ - -1212429112
+ - - 674634423
+ - -608174802
+ - 1611146001000
+ - -1094861038
+ - -1421894956
+ - -3671335
+ - -1054215935
+ - - 548059146
+ - -271665164
+ - 1611146001000
+ - 81808312
+ - -1996872304
+ - 660746138
+ - 786421686
+ - - -1970341445
+ - -900311277
+ - 1611146001000
+ - -107428720
+ - 746853108
+ - -805673533
+ - -860397196
+ name: action
+ - columns:
+ - "sku_id int32"
+ - "comment_num int32"
+ - "has_bad_comment int32"
+ - "bad_comment_rate double"
+ - "dt timestamp"
+ indexs: ["index1:sku_id:dt"]
+ rows:
+ - - -2009402124
+ - -130694795
+ - -377940874
+ - -38.93
+ - 1611146001000
+ - - -284125685
+ - 216789062
+ - 520778695
+ - -73.75
+ - 1611146001000
+ - - -2059682888
+ - 865555637
+ - -370172128
+ - -62.3
+ - 1611146001000
+ - - -1747089957
+ - -720960620
+ - -113399911
+ - -109.97
+ - 1611146001000
+ - - -1446988855
+ - 964829781
+ - -796129056
+ - 43.56
+ - 1611146001000
+ - - -931224783
+ - 784179322
+ - -1570583655
+ - 7.31
+ - 1611146001000
+ - - -986441723
+ - -1938361365
+ - -986946742
+ - 98.82
+ - 1611146001000
+ name: comment
+ sql: |-
+ select * from
+ (
+ select
+ id as id_1,
+ `id` as all_id_original_0,
+ `user_id` as all_user_id_original_1,
+ `sku_id` as all_sku_id_original_2,
+ `date` as all_date_original_3,
+ `label` as all_label_original_4,
+ fz_top1_ratio(`id`) over all_user_id_date_0s_2764800s as all_id_window_top1_ratio_28,
+ fz_top1_ratio(`sku_id`) over all_user_id_date_0s_2764800s as all_sku_id_window_top1_ratio_29,
+ distinct_count(`sku_id`) over all_user_id_date_0s_2764800s as all_sku_id_window_unique_count_30,
+ fz_top1_ratio(`sku_id`) over all_user_id_date_0s_5529600s as all_sku_id_window_top1_ratio_31,
+ fz_top1_ratio(`id`) over all_user_id_date_0s_5529600s as all_id_window_top1_ratio_32,
+ `sku_id` as all_sku_id_combine_33,
+ `sku_id` as all_sku_id_combine_34,
+ `sku_id` as all_sku_id_combine_35,
+ `sku_id` as all_sku_id_combine_36,
+ `sku_id` as all_sku_id_combine_37,
+ `sku_id` as all_sku_id_combine_38,
+ `sku_id` as all_sku_id_combine_39
+ from
+ `all`
+ window all_user_id_date_0s_2764800s as (partition by `user_id` order by `date` rows_range between 2764800s preceding and 0s preceding),
+ all_user_id_date_0s_5529600s as (partition by `user_id` order by `date` rows_range between 5529600s preceding and 0s preceding))
+ as out0
+ last join
+ (
+ select
+ `all`.id as id_6,
+ `comment_sku_id__date_0s_1209600s`.`bad_comment_rate` as comment_bad_comment_rate_multi_last_value_5,
+ `comment_sku_id__date_0s_1209600s`.`comment_num` as comment_comment_num_multi_last_value_6,
+ `comment_sku_id__date_0s_1209600s`.`dt` as comment_dt_multi_last_value_7,
+ `comment_sku_id__date_0s_1209600s`.`has_bad_comment` as comment_has_bad_comment_multi_last_value_8,
+ `product_sku_id`.`a1` as product_a1_multi_direct_9,
+ `product_sku_id`.`a2` as product_a2_multi_direct_10,
+ `product_sku_id`.`a3` as product_a3_multi_direct_11,
+ `product_sku_id`.`brand` as product_brand_multi_direct_12,
+ `product_sku_id`.`cate` as product_cate_multi_direct_13,
+ `user_user_id`.`age` as user_age_multi_direct_14,
+ `user_user_id`.`sex` as user_sex_multi_direct_15,
+ `user_user_id`.`user_lv_cd` as user_user_lv_cd_multi_direct_16,
+ `user_user_id`.`user_reg_tm` as user_user_reg_tm_multi_direct_17
+ from
+ `all`
+ last join `comment` as `comment_sku_id__date_0s_1209600s` order by comment_sku_id__date_0s_1209600s.`dt` on `all`.`sku_id` = `comment_sku_id__date_0s_1209600s`.`sku_id` and comment_sku_id__date_0s_1209600s.`dt` < `all`.`date` - 0 and comment_sku_id__date_0s_1209600s.`dt` > `all`.`date` - 1209600000
+ last join `product` as `product_sku_id` on `all`.`sku_id` = `product_sku_id`.`sku_id`
+ last join `user` as `user_user_id` on `all`.`user_id` = `user_user_id`.`user_id`)
+ as out1
+ on out0.id_1 = out1.id_6
+ last join
+ (
+ select
+ id as id_19,
+ fz_topn_frequency(`brand`, 3) over action_user_id_time_0s_32d as action_brand_multi_top3frequency_18,
+ distinct_count(`brand`) over action_user_id_time_0_100 as action_brand_multi_unique_count_19,
+ distinct_count(`cate`) over action_user_id_time_0_100 as action_cate_multi_unique_count_20,
+ distinct_count(`cate`) over action_user_id_time_0s_32d as action_cate_multi_unique_count_21,
+ fz_topn_frequency(`model_id`, 3) over action_user_id_time_0s_32d as action_model_id_multi_top3frequency_22,
+ distinct_count(`model_id`) over action_user_id_time_0_100 as action_model_id_multi_unique_count_23,
+ distinct_count(`sku_id`) over action_user_id_time_0_100 as action_sku_id_multi_unique_count_24,
+ distinct_count(`sku_id`) over action_user_id_time_0s_32d as action_sku_id_multi_unique_count_25,
+ fz_topn_frequency(`type`, 3) over action_user_id_time_0s_32d as action_type_multi_top3frequency_26,
+ fz_topn_frequency(`type`, 3) over action_user_id_time_0_100 as action_type_multi_top3frequency_27
+ from
+ (select `user_id` as `user_id`, int(0) as `sku_id`, `date` as `time`, int(0) as `model_id`, int(0) as `type`, int(0) as `cate`, int(0) as `brand`, id from `all`)
+ window action_user_id_time_0s_32d as (
+ UNION (select `user_id`, `sku_id`, `time`, `model_id`, `type`, `cate`, `brand`, int(0) as id from `action`) partition by `user_id` order by `time` rows_range between 32d preceding and 0s preceding INSTANCE_NOT_IN_WINDOW),
+ action_user_id_time_0_100 as (
+ UNION (select `user_id`, `sku_id`, `time`, `model_id`, `type`, `cate`, `brand`, int(0) as id from `action`) partition by `user_id` order by `time` rows between 100 preceding and 0 preceding INSTANCE_NOT_IN_WINDOW))
+ as out2
+ on out0.id_1 = out2.id_19
+ ;
+ expect:
+ success: true
diff --git a/cases/integration_test/spark/test_news.yaml b/cases/integration_test/spark/test_news.yaml
new file mode 100644
index 00000000000..ff449b296c3
--- /dev/null
+++ b/cases/integration_test/spark/test_news.yaml
@@ -0,0 +1,439 @@
+# Copyright 2021 4Paradigm
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
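+# Spark integration case for the single-table news scenario: one wide table with
+# four indexes (UserTermScores, UserTagScores, UserId, UserIp, each ordered by
+# RequestDatetime); features are computed over 7200s ROWS_RANGE windows declared
+# with INSTANCE_NOT_IN_WINDOW.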
+db: template_db
+cases:
+- id: 1
+ desc: single-table news scenario
+ inputs:
+ - columns:
+ - "InstanceKey string"
+ - "RequestDatetime timestamp"
+ - "PageId string"
+ - "NewsId string"
+ - "CategoryId string"
+ - "TermScores string"
+ - "TitleTermScores string"
+ - "TagScores string"
+ - "UserTagScores string"
+ - "UserTermScores string"
+ - "MediaId string"
+ - "ContentWords int32"
+ - "TitleWords int32"
+ - "Tag string"
+ - "TotalLikes int32"
+ - "TotalDislikes int32"
+ - "TotalComments int32"
+ - "TotalImpressions int32"
+ - "TotalAdjustImpressions int32"
+ - "TotalClicks int32"
+ - "TotalShares int32"
+ - "UserId string"
+ - "RequestLatitude double"
+ - "RequestLongitude double"
+ - "DeviceId string"
+ - "UserIp string"
+ - "Clicked int32"
+ - "UserClickedMediaIdsIn1Times string"
+ - "UserClickedMediaIdsIn3Times string"
+ - "UserClickedMediaIdsIn10Times string"
+ - "UserClickedMediaIdsIn1Minutes string"
+ - "UserClickedMediaIdsIn5Minutes string"
+ - "UserClickedMediaIdsIn30Minutes string"
+ - "UserClickedMediaIdsIn360Minutes string"
+ - "UserClickedCatIdsIn1Times string"
+ - "UserClickedCatIdsIn3Times string"
+ - "UserClickedCatIdsIn10Times string"
+ - "UserClickedCatIdsIn1Minutes string"
+ - "UserClickedCatIdsIn5Minutes string"
+ - "UserClickedCatIdsIn30Minutes string"
+ - "UserClickedCatIdsIn360Minutes string"
+ - "UserClickedTagScoresIn1Times string"
+ - "UserClickedTagScoresIn3Times string"
+ - "UserClickedTagScoresIn10Times string"
+ - "UserClickedTagScoresIn1Minutes string"
+ - "UserClickedTagScoresIn5Minutes string"
+ - "UserClickedTagScoresIn30Minutes string"
+ - "UserClickedTagScoresIn360Minutes string"
+ - "UserClickedTermScoresIn1Times string"
+ - "UserClickedTermScoresIn3Times string"
+ - "UserClickedTermScoresIn10Times string"
+ - "UserClickedTermScoresIn1Minutes string"
+ - "UserClickedTermScoresIn5Minutes string"
+ - "UserClickedTermScoresIn30Minutes string"
+ - "UserClickedTermScoresIn360Minutes string"
+ - "UserClickedTitleTermScoresIn1Times string"
+ - "UserClickedTitleTermScoresIn3Times string"
+ - "UserClickedTitleTermScoresIn10Times string"
+ - "UserClickedTitleTermScoresIn1Minutes string"
+ - "UserClickedTitleTermScoresIn5Minutes string"
+ - "UserClickedTitleTermScoresIn30Minutes string"
+ - "UserClickedTitleTermScoresIn360Minutes string"
+ indexs:
+ - "index1:UserTermScores:RequestDatetime"
+ - "index2:UserTagScores:RequestDatetime"
+ - "index3:UserId:RequestDatetime"
+ - "index4:UserIp:RequestDatetime"
+ rows:
+ - - "InstanceKey_hdIp5qM957"
+ - 1609405780000
+ - "PageId_2qfcb9EBP4"
+ - "NewsId_ErcZw6WqZC"
+ - "CategoryId_gWDyj6FMC6"
+ - "TermScores_xppEG6AJ38"
+ - "TitleTermScores_kw3z2g2K98"
+ - "TagScores_c9zE9v08wj"
+ - "UserTagScores_84fOPfN56G"
+ - "UserTermScores_pJVZ7lPMeI"
+ - "MediaId_i0stuPP10g"
+ - 829372349
+ - 601942391
+ - "Tag_ciC6wk19PJ"
+ - -1820777477
+ - 883273961
+ - 266011166
+ - 625586443
+ - -684001291
+ - 902064193
+ - 124534625
+ - "UserId_N4VsmRmV5e"
+ - 3.7387905194494238
+ - 125.52669722380091
+ - "DeviceId_2zMD4oSYcI"
+ - "UserIp_HpEH1YJjRI"
+ - -1434651347
+ - "UserClickedMediaIdsIn1Times_3HDiJhw431"
+ - "UserClickedMediaIdsIn3Times_dMlPGtTIhR"
+ - "UserClickedMediaIdsIn10Times_av0JnzlZTG"
+ - "UserClickedMediaIdsIn1Minutes_mOktj5LJiD"
+ - "UserClickedMediaIdsIn5Minutes_9rypts8eWg"
+ - "UserClickedMediaIdsIn30Minutes_rgvXB0uxwH"
+ - "UserClickedMediaIdsIn360Minutes_c5UxGaYceL"
+ - "UserClickedCatIdsIn1Times_LeHDkid2pj"
+ - "UserClickedCatIdsIn3Times_q1NIvIEMP7"
+ - "UserClickedCatIdsIn10Times_6u8Xg7cS9F"
+ - "UserClickedCatIdsIn1Minutes_oRKjk9HTtA"
+ - "UserClickedCatIdsIn5Minutes_GdLcy4lnLO"
+ - "UserClickedCatIdsIn30Minutes_hJqHlZOlXf"
+ - "UserClickedCatIdsIn360Minutes_6E2LKw7j2O"
+ - "UserClickedTagScoresIn1Times_cAkeiQEbZi"
+ - "UserClickedTagScoresIn3Times_tFexwMHFw4"
+ - "UserClickedTagScoresIn10Times_M5J9oPpbqM"
+ - "UserClickedTagScoresIn1Minutes_F9Ba3faBRO"
+ - "UserClickedTagScoresIn5Minutes_wCSaqSRatG"
+ - "UserClickedTagScoresIn30Minutes_BzJfoCf21a"
+ - "UserClickedTagScoresIn360Minutes_30l7jaJ4gB"
+ - "UserClickedTermScoresIn1Times_LqLUppsBv0"
+ - "UserClickedTermScoresIn3Times_Lokr3ory2y"
+ - "UserClickedTermScoresIn10Times_xTZVbQqHw0"
+ - "UserClickedTermScoresIn1Minutes_pBFLuGB0p0"
+ - "UserClickedTermScoresIn5Minutes_giEJ7skHMs"
+ - "UserClickedTermScoresIn30Minutes_C8JaxDwypo"
+ - "UserClickedTermScoresIn360Minutes_Rm6L1ywrhl"
+ - "UserClickedTitleTermScoresIn1Times_JxKKWwPfnI"
+ - "UserClickedTitleTermScoresIn3Times_whxcLRU2Px"
+ - "UserClickedTitleTermScoresIn10Times_TwsUNK2E5q"
+ - "UserClickedTitleTermScoresIn1Minutes_nzkQNp1WVM"
+ - "UserClickedTitleTermScoresIn5Minutes_1YOFOVlbvh"
+ - "UserClickedTitleTermScoresIn30Minutes_IfSQLmvSqa"
+ - "UserClickedTitleTermScoresIn360Minutes_r5sD1XpY2c"
+ - - "InstanceKey_pve2h4oBmM"
+ - 1609405780000
+ - "PageId_KZoyi08pAP"
+ - "NewsId_TGIqBGEVHb"
+ - "CategoryId_Ie7ucdUYXe"
+ - "TermScores_1gZiIGPRQz"
+ - "TitleTermScores_yQyoGdHRNe"
+ - "TagScores_Y110SxqWpY"
+ - "UserTagScores_i0icat48DT"
+ - "UserTermScores_cL9G53KJhT"
+ - "MediaId_1tOAd6ZaZC"
+ - -1539942388
+ - -645368500
+ - "Tag_lstd2JNED7"
+ - -203531434
+ - -1137889304
+ - -1877229079
+ - -1849242659
+ - -1005223131
+ - 32773880
+ - -730536017
+ - "UserId_pjFJNfdPYs"
+ - 118.13343685266054
+ - -75.95372022179421
+ - "DeviceId_kmtzlnZRbc"
+ - "UserIp_pRTNUjNjpf"
+ - 186372981
+ - "UserClickedMediaIdsIn1Times_2EG9u6VG3z"
+ - "UserClickedMediaIdsIn3Times_U52gnngZpl"
+ - "UserClickedMediaIdsIn10Times_SZMJFndrWA"
+ - "UserClickedMediaIdsIn1Minutes_sUzsztqLo6"
+ - "UserClickedMediaIdsIn5Minutes_j8k1DEJ3K2"
+ - "UserClickedMediaIdsIn30Minutes_WQYr1ipJzJ"
+ - "UserClickedMediaIdsIn360Minutes_kNPuSmOLCh"
+ - "UserClickedCatIdsIn1Times_AWeuDDwzJX"
+ - "UserClickedCatIdsIn3Times_5oBau1ONjC"
+ - "UserClickedCatIdsIn10Times_nC04RrROot"
+ - "UserClickedCatIdsIn1Minutes_BCraczQzN8"
+ - "UserClickedCatIdsIn5Minutes_OYg6nwBjgB"
+ - "UserClickedCatIdsIn30Minutes_SR13pQy3Xn"
+ - "UserClickedCatIdsIn360Minutes_I8LR8qCAfD"
+ - "UserClickedTagScoresIn1Times_sLP8dEPBuF"
+ - "UserClickedTagScoresIn3Times_Z6wY8t1DdZ"
+ - "UserClickedTagScoresIn10Times_X9rXFAgUuH"
+ - "UserClickedTagScoresIn1Minutes_MazqtyoPcg"
+ - "UserClickedTagScoresIn5Minutes_16ltZzRQid"
+ - "UserClickedTagScoresIn30Minutes_pSlMAYSeYb"
+ - "UserClickedTagScoresIn360Minutes_0Zz8P4xjGH"
+ - "UserClickedTermScoresIn1Times_bvkzRyHAus"
+ - "UserClickedTermScoresIn3Times_0HMO3i4yns"
+ - "UserClickedTermScoresIn10Times_DT8xge6vdi"
+ - "UserClickedTermScoresIn1Minutes_2okBnnoBid"
+ - "UserClickedTermScoresIn5Minutes_lqNLfKvrh0"
+ - "UserClickedTermScoresIn30Minutes_ac2U74ym1H"
+ - "UserClickedTermScoresIn360Minutes_JSBVGmOT7m"
+ - "UserClickedTitleTermScoresIn1Times_xChAvlI0Hg"
+ - "UserClickedTitleTermScoresIn3Times_sASTrsDGA3"
+ - "UserClickedTitleTermScoresIn10Times_21cB10rAvK"
+ - "UserClickedTitleTermScoresIn1Minutes_SVXF4JVpJ5"
+ - "UserClickedTitleTermScoresIn5Minutes_LCLbuQVXs2"
+ - "UserClickedTitleTermScoresIn30Minutes_bwXZz631fl"
+ - "UserClickedTitleTermScoresIn360Minutes_sR95HAIcHx"
+ - - "InstanceKey_k4XtEfFsqT"
+ - 1609405780000
+ - "PageId_BZWLnCZmQ9"
+ - "NewsId_YdHfQBoErt"
+ - "CategoryId_oard5Cne0T"
+ - "TermScores_e8dAwnunlf"
+ - "TitleTermScores_8eghaLsTjR"
+ - "TagScores_Igz3roJMYt"
+ - "UserTagScores_D0noZJ4FzI"
+ - "UserTermScores_p2ZShNACkv"
+ - "MediaId_7BELEeQo8t"
+ - -1400976088
+ - -185610105
+ - "Tag_qDw3zDu0Kf"
+ - -1424703288
+ - 326020146
+ - -1788522406
+ - -894083919
+ - -614604127
+ - 836914113
+ - -514315335
+ - "UserId_cnDtbfUEMH"
+ - 77.52642088566631
+ - 61.52004136781969
+ - "DeviceId_88cLvltsp1"
+ - "UserIp_6QnBErDqMJ"
+ - -2147467600
+ - "UserClickedMediaIdsIn1Times_dfNUH5v0a6"
+ - "UserClickedMediaIdsIn3Times_7C9bV4aMUz"
+ - "UserClickedMediaIdsIn10Times_y7bSntxLJ9"
+ - "UserClickedMediaIdsIn1Minutes_PLy8SqEQ84"
+ - "UserClickedMediaIdsIn5Minutes_5BnsVlthDt"
+ - "UserClickedMediaIdsIn30Minutes_GMdEG1RRGL"
+ - "UserClickedMediaIdsIn360Minutes_Zb85hck0aF"
+ - "UserClickedCatIdsIn1Times_1WG4dLVfOH"
+ - "UserClickedCatIdsIn3Times_HuZi6EaTCV"
+ - "UserClickedCatIdsIn10Times_QPL2TWKSN3"
+ - "UserClickedCatIdsIn1Minutes_rzk3a4Klss"
+ - "UserClickedCatIdsIn5Minutes_0X05NkhD7o"
+ - "UserClickedCatIdsIn30Minutes_jYleKJf8IF"
+ - "UserClickedCatIdsIn360Minutes_ar6mj9US4t"
+ - "UserClickedTagScoresIn1Times_P2MmbiyS4I"
+ - "UserClickedTagScoresIn3Times_8StMrSWAeI"
+ - "UserClickedTagScoresIn10Times_Bl7yrclqG2"
+ - "UserClickedTagScoresIn1Minutes_DqBqyScA9d"
+ - "UserClickedTagScoresIn5Minutes_K6ZgXsqw0u"
+ - "UserClickedTagScoresIn30Minutes_6lv8OvRI7W"
+ - "UserClickedTagScoresIn360Minutes_Hs54K7u27l"
+ - "UserClickedTermScoresIn1Times_H6SHDMGtuy"
+ - "UserClickedTermScoresIn3Times_DVVW13LIcd"
+ - "UserClickedTermScoresIn10Times_dZdjYFHvpd"
+ - "UserClickedTermScoresIn1Minutes_ZTBWK0VaYf"
+ - "UserClickedTermScoresIn5Minutes_aIfxNFWfaz"
+ - "UserClickedTermScoresIn30Minutes_XkLhwMM16w"
+ - "UserClickedTermScoresIn360Minutes_VccLPVQ0kC"
+ - "UserClickedTitleTermScoresIn1Times_bM308gVgrl"
+ - "UserClickedTitleTermScoresIn3Times_4jqy1Aeiar"
+ - "UserClickedTitleTermScoresIn10Times_FQ79yzLr4K"
+ - "UserClickedTitleTermScoresIn1Minutes_enU5HDPII1"
+ - "UserClickedTitleTermScoresIn5Minutes_X0YzeMlxE1"
+ - "UserClickedTitleTermScoresIn30Minutes_WAWIp5zsTD"
+ - "UserClickedTitleTermScoresIn360Minutes_SYU1A5lgJy"
+ - - "InstanceKey_Ik6w1GJ3ak"
+ - 1609405780000
+ - "PageId_l8hTiHLe7c"
+ - "NewsId_U1l7n7Z1cz"
+ - "CategoryId_z93urYcLTz"
+ - "TermScores_05J4os5hvJ"
+ - "TitleTermScores_MGrW4hhUdP"
+ - "TagScores_1k3NEltzP4"
+ - "UserTagScores_1PHt2Sw8Z5"
+ - "UserTermScores_537uScy0i9"
+ - "MediaId_xc7NYROEZt"
+ - -1256228849
+ - -110570093
+ - "Tag_d4mRWCbrMO"
+ - 365243338
+ - 873343892
+ - 17923145
+ - -681865200
+ - -444619580
+ - -1894396283
+ - -1127215708
+ - "UserId_fgmdPtLt87"
+ - -61.396138086485564
+ - -87.37716465146411
+ - "DeviceId_CCtZyRqhvh"
+ - "UserIp_CxGseOdjSM"
+ - -76661935
+ - "UserClickedMediaIdsIn1Times_LEYaofr5Hl"
+ - "UserClickedMediaIdsIn3Times_3FSI83BEln"
+ - "UserClickedMediaIdsIn10Times_0uxy6hp2ql"
+ - "UserClickedMediaIdsIn1Minutes_iR7f3ML0Cy"
+ - "UserClickedMediaIdsIn5Minutes_5lifH8ACGz"
+ - "UserClickedMediaIdsIn30Minutes_veGUAV6ecL"
+ - "UserClickedMediaIdsIn360Minutes_4ZfwIYLjI0"
+ - "UserClickedCatIdsIn1Times_MsWvdpbriS"
+ - "UserClickedCatIdsIn3Times_OOQ3KsuFoC"
+ - "UserClickedCatIdsIn10Times_lSXIYryDz4"
+ - "UserClickedCatIdsIn1Minutes_lcgKRcqF1r"
+ - "UserClickedCatIdsIn5Minutes_APcl6yWNKU"
+ - "UserClickedCatIdsIn30Minutes_JA3aKMbLRU"
+ - "UserClickedCatIdsIn360Minutes_iRcC0hXYHY"
+ - "UserClickedTagScoresIn1Times_BsalAUhfaV"
+ - "UserClickedTagScoresIn3Times_4YgxkGeFO8"
+ - "UserClickedTagScoresIn10Times_JGEY6hnpRt"
+ - "UserClickedTagScoresIn1Minutes_qh78KhthQ9"
+ - "UserClickedTagScoresIn5Minutes_KwokIGT8ih"
+ - "UserClickedTagScoresIn30Minutes_esweRoZRlQ"
+ - "UserClickedTagScoresIn360Minutes_SEhVJL8Isv"
+ - "UserClickedTermScoresIn1Times_uiIHrsV6LB"
+ - "UserClickedTermScoresIn3Times_y3BznAylvB"
+ - "UserClickedTermScoresIn10Times_IU8v9wrb65"
+ - "UserClickedTermScoresIn1Minutes_YP8gIJCiEZ"
+ - "UserClickedTermScoresIn5Minutes_vDHmUEWZgj"
+ - "UserClickedTermScoresIn30Minutes_v3yee1Glcu"
+ - "UserClickedTermScoresIn360Minutes_7dWE2PTpRW"
+ - "UserClickedTitleTermScoresIn1Times_gnyIe4mq1F"
+ - "UserClickedTitleTermScoresIn3Times_UGzqsDJ5zr"
+ - "UserClickedTitleTermScoresIn10Times_498w6xB6Nc"
+ - "UserClickedTitleTermScoresIn1Minutes_jdo8wg4Qvj"
+ - "UserClickedTitleTermScoresIn5Minutes_u6pQFRC1AT"
+ - "UserClickedTitleTermScoresIn30Minutes_XyyNo9Vj1t"
+ - "UserClickedTitleTermScoresIn360Minutes_JlyEeiBHUZ"
+ sql: |-
+ select
+ InstanceKey as InstanceKey_1,
+ InstanceKey as t1_InstanceKey_0,
+ RequestDatetime as t1_RequestDatetime_1,
+ PageId as t1_PageId_2,
+ NewsId as t1_NewsId_3,
+ CategoryId as t1_CategoryId_4,
+ TermScores as t1_TermScores_5,
+ TitleTermScores as t1_TitleTermScores_6,
+ TagScores as t1_TagScores_7,
+ UserTagScores as t1_UserTagScores_8,
+ UserTermScores as t1_UserTermScores_9,
+ MediaId as t1_MediaId_10,
+ ContentWords as t1_ContentWords_11,
+ TitleWords as t1_TitleWords_12,
+ Tag as t1_Tag_13,
+ TotalLikes as t1_TotalLikes_14,
+ TotalDislikes as t1_TotalDislikes_15,
+ TotalComments as t1_TotalComments_16,
+ TotalImpressions as t1_TotalImpressions_17,
+ TotalAdjustImpressions as t1_TotalAdjustImpressions_18,
+ TotalClicks as t1_TotalClicks_19,
+ TotalShares as t1_TotalShares_20,
+ UserId as t1_UserId_21,
+ RequestLatitude as t1_RequestLatitude_22,
+ RequestLongitude as t1_RequestLongitude_23,
+ DeviceId as t1_DeviceId_24,
+ UserIp as t1_UserIp_25,
+ Clicked as t1_Clicked_26,
+ UserClickedMediaIdsIn1Times as t1_UserClickedMediaIdsIn1Times_27,
+ UserClickedMediaIdsIn3Times as t1_UserClickedMediaIdsIn3Times_28,
+ UserClickedMediaIdsIn10Times as t1_UserClickedMediaIdsIn10Times_29,
+ UserClickedMediaIdsIn1Minutes as t1_UserClickedMediaIdsIn1Minutes_30,
+ UserClickedMediaIdsIn5Minutes as t1_UserClickedMediaIdsIn5Minutes_31,
+ UserClickedMediaIdsIn30Minutes as t1_UserClickedMediaIdsIn30Minutes_32,
+ UserClickedMediaIdsIn360Minutes as t1_UserClickedMediaIdsIn360Minutes_33,
+ UserClickedCatIdsIn1Times as t1_UserClickedCatIdsIn1Times_34,
+ UserClickedCatIdsIn3Times as t1_UserClickedCatIdsIn3Times_35,
+ UserClickedCatIdsIn10Times as t1_UserClickedCatIdsIn10Times_36,
+ UserClickedCatIdsIn1Minutes as t1_UserClickedCatIdsIn1Minutes_37,
+ UserClickedCatIdsIn5Minutes as t1_UserClickedCatIdsIn5Minutes_38,
+ UserClickedCatIdsIn30Minutes as t1_UserClickedCatIdsIn30Minutes_39,
+ UserClickedCatIdsIn360Minutes as t1_UserClickedCatIdsIn360Minutes_40,
+ UserClickedTagScoresIn1Times as t1_UserClickedTagScoresIn1Times_41,
+ UserClickedTagScoresIn3Times as t1_UserClickedTagScoresIn3Times_42,
+ UserClickedTagScoresIn10Times as t1_UserClickedTagScoresIn10Times_43,
+ UserClickedTagScoresIn1Minutes as t1_UserClickedTagScoresIn1Minutes_44,
+ UserClickedTagScoresIn5Minutes as t1_UserClickedTagScoresIn5Minutes_45,
+ UserClickedTagScoresIn30Minutes as t1_UserClickedTagScoresIn30Minutes_46,
+ UserClickedTagScoresIn360Minutes as t1_UserClickedTagScoresIn360Minutes_47,
+ UserClickedTermScoresIn1Times as t1_UserClickedTermScoresIn1Times_48,
+ UserClickedTermScoresIn3Times as t1_UserClickedTermScoresIn3Times_49,
+ UserClickedTermScoresIn10Times as t1_UserClickedTermScoresIn10Times_50,
+ UserClickedTermScoresIn1Minutes as t1_UserClickedTermScoresIn1Minutes_51,
+ UserClickedTermScoresIn5Minutes as t1_UserClickedTermScoresIn5Minutes_52,
+ UserClickedTermScoresIn30Minutes as t1_UserClickedTermScoresIn30Minutes_53,
+ UserClickedTermScoresIn360Minutes as t1_UserClickedTermScoresIn360Minutes_54,
+ UserClickedTitleTermScoresIn1Times as t1_UserClickedTitleTermScoresIn1Times_55,
+ UserClickedTitleTermScoresIn3Times as t1_UserClickedTitleTermScoresIn3Times_56,
+ UserClickedTitleTermScoresIn10Times as t1_UserClickedTitleTermScoresIn10Times_57,
+ UserClickedTitleTermScoresIn1Minutes as t1_UserClickedTitleTermScoresIn1Minutes_58,
+ UserClickedTitleTermScoresIn5Minutes as t1_UserClickedTitleTermScoresIn5Minutes_59,
+ UserClickedTitleTermScoresIn30Minutes as t1_UserClickedTitleTermScoresIn30Minutes_60,
+ UserClickedTitleTermScoresIn360Minutes as t1_UserClickedTitleTermScoresIn360Minutes_61,
+ sum(TitleWords) over t1_UserTermScores_RequestDatetime_0s_7200s as t1_TitleWords_62,
+ fz_top1_ratio(NewsId) over t1_UserTermScores_RequestDatetime_0s_7200s as t1_NewsId_63,
+ sum(RequestLatitude) over t1_UserTermScores_RequestDatetime_0s_7200s as t1_RequestLatitude_64,
+ distinct_count(NewsId) over t1_UserTermScores_RequestDatetime_0s_7200s as t1_NewsId_65,
+ sum(ContentWords) over t1_UserTermScores_RequestDatetime_0s_7200s as t1_ContentWords_66,
+ case when !isnull(lag(UserClickedTagScoresIn1Times, 0)) over t1_UserTermScores_RequestDatetime_0s_7200s then count(UserClickedTagScoresIn1Times) over t1_UserTermScores_RequestDatetime_0s_7200s else null end as t1_UserClickedTagScoresIn1Times_67,
+ case when !isnull(lag(UserClickedTagScoresIn1Times, 0)) over t1_UserTagScores_RequestDatetime_0s_7200s then count(UserClickedTagScoresIn1Times) over t1_UserTagScores_RequestDatetime_0s_7200s else null end as t1_UserClickedTagScoresIn1Times_68,
+ case when !isnull(lag(UserClickedCatIdsIn1Times, 0)) over t1_UserTermScores_RequestDatetime_0s_7200s then count(UserClickedCatIdsIn1Times) over t1_UserTermScores_RequestDatetime_0s_7200s else null end as t1_UserClickedCatIdsIn1Times_69,
+ case when !isnull(lag(UserClickedMediaIdsIn1Times, 0)) over t1_UserTermScores_RequestDatetime_0s_7200s then count(UserClickedMediaIdsIn1Times) over t1_UserTermScores_RequestDatetime_0s_7200s else null end as t1_UserClickedMediaIdsIn1Times_70,
+ case when !isnull(lag(UserClickedTagScoresIn30Minutes, 0)) over t1_UserTagScores_RequestDatetime_0s_7200s then count(UserClickedTagScoresIn30Minutes) over t1_UserTagScores_RequestDatetime_0s_7200s else null end as t1_UserClickedTagScoresIn30Minutes_71,
+ fz_top1_ratio(NewsId) over t1_UserTagScores_RequestDatetime_0s_7200s as t1_NewsId_72,
+ case when !isnull(lag(UserClickedTagScoresIn3Times, 0)) over t1_UserTagScores_RequestDatetime_0s_7200s then count(UserClickedTagScoresIn3Times) over t1_UserTagScores_RequestDatetime_0s_7200s else null end as t1_UserClickedTagScoresIn3Times_73,
+ case when !isnull(lag(UserClickedTagScoresIn10Times, 0)) over t1_UserTagScores_RequestDatetime_0s_7200s then count(UserClickedTagScoresIn10Times) over t1_UserTagScores_RequestDatetime_0s_7200s else null end as t1_UserClickedTagScoresIn10Times_74,
+ case when !isnull(lag(UserClickedCatIdsIn1Times, 0)) over t1_UserTagScores_RequestDatetime_0s_7200s then count(UserClickedCatIdsIn1Times) over t1_UserTagScores_RequestDatetime_0s_7200s else null end as t1_UserClickedCatIdsIn1Times_75,
+ case when !isnull(lag(UserClickedTitleTermScoresIn1Times, 0)) over t1_UserTagScores_RequestDatetime_0s_7200s then count(UserClickedTitleTermScoresIn1Times) over t1_UserTagScores_RequestDatetime_0s_7200s else null end as t1_UserClickedTitleTermScoresIn1Times_76,
+ case when !isnull(lag(UserClickedTermScoresIn30Minutes, 0)) over t1_UserTagScores_RequestDatetime_0s_7200s then count(UserClickedTermScoresIn30Minutes) over t1_UserTagScores_RequestDatetime_0s_7200s else null end as t1_UserClickedTermScoresIn30Minutes_77,
+ case when !isnull(lag(UserClickedTermScoresIn1Times, 0)) over t1_UserTagScores_RequestDatetime_0s_7200s then count(UserClickedTermScoresIn1Times) over t1_UserTagScores_RequestDatetime_0s_7200s else null end as t1_UserClickedTermScoresIn1Times_78,
+ case when !isnull(lag(UserClickedMediaIdsIn10Times, 0)) over t1_UserId_RequestDatetime_0s_7200s then count(UserClickedMediaIdsIn10Times) over t1_UserId_RequestDatetime_0s_7200s else null end as t1_UserClickedMediaIdsIn10Times_79,
+ case when !isnull(lag(UserTermScores, 0)) over t1_UserIp_RequestDatetime_0s_7200s then count(UserTermScores) over t1_UserIp_RequestDatetime_0s_7200s else null end as t1_UserTermScores_80,
+ case when !isnull(lag(UserClickedMediaIdsIn10Times, 0)) over t1_UserIp_RequestDatetime_0s_7200s then count(UserClickedMediaIdsIn10Times) over t1_UserIp_RequestDatetime_0s_7200s else null end as t1_UserClickedMediaIdsIn10Times_81,
+ distinct_count(InstanceKey) over t1_UserTagScores_RequestDatetime_0s_7200s as t1_InstanceKey_82,
+ case when !isnull(lag(UserTagScores, 0)) over t1_UserIp_RequestDatetime_0s_7200s then count(UserTagScores) over t1_UserIp_RequestDatetime_0s_7200s else null end as t1_UserTagScores_83,
+ case when !isnull(lag(UserTagScores, 0)) over t1_UserId_RequestDatetime_0s_7200s then count(UserTagScores) over t1_UserId_RequestDatetime_0s_7200s else null end as t1_UserTagScores_84,
+ case when !isnull(lag(UserClickedTitleTermScoresIn3Times, 0)) over t1_UserId_RequestDatetime_0s_7200s then count(UserClickedTitleTermScoresIn3Times) over t1_UserId_RequestDatetime_0s_7200s else null end as t1_UserClickedTitleTermScoresIn3Times_85,
+ case when !isnull(lag(UserClickedMediaIdsIn360Minutes, 0)) over t1_UserId_RequestDatetime_0s_7200s then count(UserClickedMediaIdsIn360Minutes) over t1_UserId_RequestDatetime_0s_7200s else null end as t1_UserClickedMediaIdsIn360Minutes_86,
+ case when !isnull(lag(UserClickedTitleTermScoresIn10Times, 0)) over t1_UserId_RequestDatetime_0s_7200s then count(UserClickedTitleTermScoresIn10Times) over t1_UserId_RequestDatetime_0s_7200s else null end as t1_UserClickedTitleTermScoresIn10Times_87,
+ case when !isnull(lag(UserClickedTitleTermScoresIn3Times, 0)) over t1_UserIp_RequestDatetime_0s_7200s then count(UserClickedTitleTermScoresIn3Times) over t1_UserIp_RequestDatetime_0s_7200s else null end as t1_UserClickedTitleTermScoresIn3Times_88,
+ case when !isnull(lag(UserClickedTermScoresIn3Times, 0)) over t1_UserIp_RequestDatetime_0s_7200s then count(UserClickedTermScoresIn3Times) over t1_UserIp_RequestDatetime_0s_7200s else null end as t1_UserClickedTermScoresIn3Times_89,
+ case when !isnull(lag(UserClickedMediaIdsIn360Minutes, 0)) over t1_UserIp_RequestDatetime_0s_7200s then count(UserClickedMediaIdsIn360Minutes) over t1_UserIp_RequestDatetime_0s_7200s else null end as t1_UserClickedMediaIdsIn360Minutes_90,
+ case when !isnull(lag(UserClickedTermScoresIn10Times, 0)) over t1_UserId_RequestDatetime_0s_7200s then count(UserClickedTermScoresIn10Times) over t1_UserId_RequestDatetime_0s_7200s else null end as t1_UserClickedTermScoresIn10Times_91
+ from
+ {0}
+ window t1_UserTermScores_RequestDatetime_0s_7200s as ( partition by UserTermScores order by RequestDatetime rows_range between 7200s preceding and 0s preceding INSTANCE_NOT_IN_WINDOW),
+ t1_UserTagScores_RequestDatetime_0s_7200s as ( partition by UserTagScores order by RequestDatetime rows_range between 7200s preceding and 0s preceding INSTANCE_NOT_IN_WINDOW),
+ t1_UserId_RequestDatetime_0s_7200s as ( partition by UserId order by RequestDatetime rows_range between 7200s preceding and 0s preceding INSTANCE_NOT_IN_WINDOW),
+ t1_UserIp_RequestDatetime_0s_7200s as ( partition by UserIp order by RequestDatetime rows_range between 7200s preceding and 0s preceding INSTANCE_NOT_IN_WINDOW);
+ expect:
+ success: true
diff --git a/cases/integration_test/test_batch_request.yaml b/cases/integration_test/test_batch_request.yaml
new file mode 100644
index 00000000000..9f3134806e1
--- /dev/null
+++ b/cases/integration_test/test_batch_request.yaml
@@ -0,0 +1,358 @@
+# Copyright 2021 4Paradigm
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
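+# Batch-request cases. The common_column_indices lists are assumed to mark which
+# request (or expected output) columns hold the same value across every row of
+# the batch request, so the engine can treat them as shared; the cases below cover
+# no common columns, all-common columns, and mixed common/non-common windows.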
+db: test_zw
+debugs: []
+version: 0.5.0
+cases:
+ - id: 0
+ desc: batch request without common columns
+ inputs:
+ -
+ columns : ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 string"]
+ indexs: ["index1:c1:c7"]
+ rows:
+ - [1,"a",1,30,1.0,2.0,1590738990000,"2020-05-01","a"]
+ - [3,"a",3,32,1.2,2.2,1590738992000,"2020-05-03","c"]
+ - [5,"a",5,34,1.4,2.4,1590738994000,"2020-05-05","d"]
+ - [6,"a",6,35,1.5,2.5,1590738995000,"2020-05-06","e"]
+ batch_request:
+ columns : ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 string"]
+ indexs: ["index1:c1:c7"]
+ rows:
+ - [2,"a",2,31,1.1,2.1,1590738991000,"2020-05-02","b"]
+ - [4,"a",4,33,1.3,2.3,1590738993000,"2020-05-04","c"]
+ - [7,"a",6,36,1.6,2.6,1590738996000,"2020-05-07","f"]
+ sql: |
+ SELECT id, c1, sum(c3) OVER w1 as m3, sum(c4) OVER w1 as m4, sum(c5) OVER w1 as m5,
+ sum(c6) OVER w1 as m6, max(c7) OVER w1 as m7, max(c8) OVER w1 as m8, min(c9) OVER w1 as m9 FROM {0} WINDOW
+ w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS BETWEEN 2 PRECEDING AND CURRENT ROW);
+ expect:
+ order: id
+ columns: ["id int","c1 string","m3 int","m4 bigint","m5 float","m6 double","m7 timestamp","m8 date","m9 string"]
+ rows:
+ - [2,"a",3,61,2.1,4.1,1590738991000,"2020-05-02","a"]
+ - [4,"a",8,95,3.5,6.5,1590738993000,"2020-05-04","a"]
+ - [7,"a",17,105,4.5,7.5,1590738996000,"2020-05-07","d"]
+
+ - id: 1
+ desc: batch request with all common columns
+ inputs:
+ -
+ columns : ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 string"]
+ indexs: ["index1:c1:c7"]
+ rows:
+ - [1,"a",1,30,1.0,2.0,1590738990000,"2020-05-01","a"]
+ - [3,"a",3,32,1.2,2.2,1590738992000,"2020-05-03","c"]
+ - [5,"a",5,34,1.4,2.4,1590738994000,"2020-05-05","d"]
+ - [6,"a",6,35,1.5,2.5,1590738995000,"2020-05-06","e"]
+ batch_request:
+ columns : ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 string"]
+ indexs: ["index1:c1:c7"]
+ common_column_indices: [0,1,2,3,4,5,6,7,8]
+ rows:
+ - [2,"a",2,31,1.1,2.1,1590738991000,"2020-05-02","b"]
+ - [2,"a",2,31,1.1,2.1,1590738991000,"2020-05-02","b"]
+ - [2,"a",2,31,1.1,2.1,1590738991000,"2020-05-02","b"]
+ sql: |
+ SELECT id, c1, sum(c3) OVER w1 as m3, sum(c4) OVER w1 as m4, sum(c5) OVER w1 as m5,
+ sum(c6) OVER w1 as m6, max(c7) OVER w1 as m7, max(c8) OVER w1 as m8, min(c9) OVER w1 as m9 FROM {0} WINDOW
+ w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS BETWEEN 2 PRECEDING AND CURRENT ROW);
+ expect:
+ columns: ["id int","c1 string","m3 int","m4 bigint","m5 float","m6 double","m7 timestamp","m8 date","m9 string"]
+ rows:
+ - [2,"a",3,61,2.1,4.1,1590738991000,"2020-05-02","a"]
+ - [2,"a",3,61,2.1,4.1,1590738991000,"2020-05-02","a"]
+ - [2,"a",3,61,2.1,4.1,1590738991000,"2020-05-02","a"]
+
+ - id: 2
+ desc: batch request with non-trivial common columns
+ inputs:
+ -
+ columns : ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp"]
+ indexs: ["index1:c1:c7", "index2:id:c7"]
+ rows:
+ - [1,"a",1,30,1.0,2.0,1590738990000]
+ - [3,"a",3,32,1.2,2.2,1590738992000]
+ - [5,"a",5,34,1.4,2.4,1590738994000]
+ - [6,"a",6,35,1.5,2.5,1590738995000]
+ -
+ columns : ["id int","timecol timestamp","c8 date","c9 string"]
+ indexs: ["index2:id:timecol"]
+ rows:
+ - [1,1590738990000,"2020-05-01","a"]
+ - [2,1590738991000,"2020-05-02","b"]
+ - [3,1590738992000,"2020-05-03","c"]
+ - [4,1590738993000,"2020-05-04","d"]
+ - [5,1590738994000,"2020-05-05","e"]
+ - [6,1590738995000,"2020-05-06","f"]
+ - [7,1590738996000,"2020-05-07","g"]
+ batch_request:
+ indexs: ["index1:c1:c7"]
+ common_column_indices: [1,3,5]
+ columns : ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp"]
+ rows:
+ - [2,"a",2,31,1.1,2.1,1590738991000]
+ - [4,"a",3,31,1.2,2.1,1590738993000]
+ - [7,"a",4,31,1.3,2.1,1590738996000]
+ sql: |
+ SELECT {0}.id, c1, sum(c3) OVER w1 as m3, sum(c4) OVER w1 as m4, sum(c5) OVER w1 as m5,
+ sum(c6) OVER w1 as m6, max(c7) OVER w1 as m7, max(c8) OVER w1 as m8, min(c9) OVER w1 as m9
+ FROM {0} last join {1} order by {1}.timecol on {0}.id={1}.id and {0}.c7={1}.timecol
+ WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS BETWEEN 2 PRECEDING AND CURRENT ROW);
+ expect:
+ order: id
+ columns: ["id int","c1 string","m3 int","m4 bigint","m5 float","m6 double","m7 timestamp","m8 date","m9 string"]
+ rows:
+ - [2,"a",3,61,2.1,4.1,1590738991000,"2020-05-02","a"]
+ - [4,"a",7,93,3.4,6.3,1590738993000,"2020-05-04","a"]
+ - [7,"a",15,100,4.2,7.0,1590738996000,"2020-05-07","e"]
+ common_column_indices: []
+
+ - id: 3
+ desc: batch request with non-trivial output common columns, window is common
+ inputs:
+ -
+ columns : ["id int","c1 string","c2 int","c3 bigint","c4 float","c5 double","c6 timestamp"]
+ indexs: ["index1:c1:c6", "index2:id:c6"]
+ rows:
+ - [1,"a",1,30,1.0,2.0,1590738990000]
+ - [3,"a",3,32,1.2,2.2,1590738992000]
+ - [5,"a",5,34,1.4,2.4,1590738994000]
+ - [6,"a",6,35,1.5,2.5,1590738995000]
+ -
+ columns : ["id int","timecol timestamp","c7 date","c8 string"]
+ indexs: ["index2:id:timecol"]
+ rows:
+ - [1,1590738990000,"2020-05-01","a"]
+ - [2,1590738991000,"2020-05-02","b"]
+ - [3,1590738992000,"2020-05-03","c"]
+ - [4,1590738993000,"2020-05-04","d"]
+ - [5,1590738994000,"2020-05-05","e"]
+ - [6,1590738995000,"2020-05-06","f"]
+ - [7,1590738996000,"2020-05-07","g"]
+ batch_request:
+ indexs: ["index1:c1:c6"]
+ common_column_indices: [1,3,6]
+ columns : ["id int","c1 string","c2 int","c3 bigint","c4 float","c5 double","c6 timestamp"]
+ rows:
+ - [2,"a",2,31,1.1,2.1,1590738996000]
+ - [4,"a",3,31,1.2,2.2,1590738996000]
+ - [7,"a",4,31,1.3,2.3,1590738996000]
+ sql: |
+ SELECT {0}.id, c1 as m1, sum(c2) OVER w1 as m2, sum(c3) OVER w1 as m3, sum(c4) OVER w1 as m4,
+ sum(c5) OVER w1 as m5, max(c6) OVER w1 as m6, max(c7) OVER w1 as m7, min(c8) OVER w1 as m8
+ FROM {0} last join {1} order by {1}.timecol on {0}.id={1}.id and {0}.c6={1}.timecol
+ WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c6 ROWS BETWEEN 2 PRECEDING AND CURRENT ROW);
+ expect:
+ order: id
+ columns: ["id int","m1 string","m2 int","m3 bigint","m4 float","m5 double","m6 timestamp","m7 date","m8 string"]
+ common_column_indices: [1,3,6]
+ rows:
+ - [2,"a",13,100,4.0,7.0,1590738996000,"2020-05-06","e"]
+ - [4,"a",14,100,4.1,7.1,1590738996000,"2020-05-06","e"]
+ - [7,"a",15,100,4.2,7.2,1590738996000,"2020-05-07","e"]
+
+ - id: 4
+ desc: batch request with non-trivial output common columns, join is common and window non-common
+ inputs:
+ -
+ columns : ["id int","c1 string","c2 int","c3 bigint","c4 float","c5 double","c6 timestamp"]
+ indexs: ["index1:c1:c6", "index2:id:c6"]
+ rows:
+ - [1,"a",1,30,1.0,2.0,1590738990000]
+ - [3,"a",3,32,1.2,2.2,1590738992000]
+ - [5,"a",5,34,1.4,2.4,1590738994000]
+ - [6,"a",6,35,1.5,2.5,1590738995000]
+ -
+ columns : ["id int","timecol timestamp","c1 string", "c7 date","c8 string"]
+ indexs: ["index2:c1:timecol"]
+ rows:
+ - [1,1590738990000,"a","2020-05-01","a"]
+ - [2,1590738991000,"a","2020-05-02","b"]
+ - [3,1590738992000,"a","2020-05-03","c"]
+ - [4,1590738993000,"a","2020-05-04","d"]
+ - [5,1590738994000,"a","2020-05-05","e"]
+ - [6,1590738995000,"a","2020-05-06","f"]
+ - [7,1590738996000,"a","2020-05-07","g"]
+ batch_request:
+ indexs: ["index1:c1:c6"]
+ common_column_indices: [1,3]
+ columns : ["id int","c1 string","c2 int","c3 bigint","c4 float","c5 double","c6 timestamp"]
+ rows:
+ - [2,"a",2,31,1.1,2.1,1590738996000]
+ - [4,"a",3,31,1.2,2.2,1590738997000]
+ - [7,"a",4,31,1.3,2.3,1590738998000]
+ sql: |
+ SELECT {0}.id, {0}.c1 as m1, sum(c2) OVER w1 as m2, sum(c3) OVER w1 as m3, sum(c4) OVER w1 as m4,
+ sum(c5) OVER w1 as m5, max(c6) OVER w1 as m6, max(c7) OVER w1 as m7, min(c8) OVER w1 as m8
+ FROM {0} last join {1} order by {1}.timecol on {0}.c1={1}.c1
+ WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c6 ROWS BETWEEN 2 PRECEDING AND CURRENT ROW);
+ expect:
+ order: id
+ columns: ["id int","m1 string","m2 int","m3 bigint","m4 float","m5 double","m6 timestamp","m7 date","m8 string"]
+ common_column_indices: []
+ rows:
+ - [2,"a",13,100,4.0,7.0,1590738996000,"2020-05-07","g"]
+ - [4,"a",14,100,4.1,7.1,1590738997000,"2020-05-07","g"]
+ - [7,"a",15,100,4.2,7.2,1590738998000,"2020-05-07","g"]
+
+ - id: 5
+ desc: batch request with non-trivial output common columns, window and join are common
+ inputs:
+ -
+ columns : ["id int","c1 string","c2 int","c3 bigint","c4 float","c5 double","c6 timestamp"]
+ indexs: ["index1:c1:c6", "index2:id:c6"]
+ rows:
+ - [1,"a",1,30,1.0,2.0,1590738990000]
+ - [3,"a",3,32,1.2,2.2,1590738992000]
+ - [5,"a",5,34,1.4,2.4,1590738994000]
+ - [6,"a",6,35,1.5,2.5,1590738995000]
+ -
+ columns : ["id int","timecol timestamp","c1 string", "c7 date","c8 string"]
+ indexs: ["index2:c1:timecol"]
+ rows:
+ - [1,1590738990000,"a","2020-05-01","a"]
+ - [2,1590738991000,"a","2020-05-02","b"]
+ - [3,1590738992000,"a","2020-05-03","c"]
+ - [4,1590738993000,"a","2020-05-04","d"]
+ - [5,1590738994000,"a","2020-05-05","e"]
+ - [6,1590738995000,"a","2020-05-06","f"]
+ - [7,1590738996000,"a","2020-05-07","g"]
+ batch_request:
+ indexs: ["index1:c1:c6"]
+ common_column_indices: [1,3,6]
+ columns : ["id int","c1 string","c2 int","c3 bigint","c4 float","c5 double","c6 timestamp"]
+ rows:
+ - [2,"a",2,31,1.1,2.1,1590738996000]
+ - [4,"a",3,31,1.2,2.2,1590738996000]
+ - [7,"a",4,31,1.3,2.3,1590738996000]
+ sql: |
+ SELECT {0}.id, {0}.c1 as m1, sum(c2) OVER w1 as m2, sum(c3) OVER w1 as m3, sum(c4) OVER w1 as m4,
+ sum(c5) OVER w1 as m5, max(c6) OVER w1 as m6, max(c7) OVER w1 as m7, min(c8) OVER w1 as m8
+ FROM {0} last join {1} order by {1}.timecol on {0}.c1={1}.c1 and {0}.c6={1}.timecol
+ WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c6 ROWS BETWEEN 2 PRECEDING AND CURRENT ROW);
+ expect:
+ order: id
+ columns: ["id int","m1 string","m2 int","m3 bigint","m4 float","m5 double","m6 timestamp","m7 date","m8 string"]
+ common_column_indices: [1,3,6,7,8]
+ rows:
+ - [2,"a",13,100,4.0,7.0,1590738996000,"2020-05-07","e"]
+ - [4,"a",14,100,4.1,7.1,1590738996000,"2020-05-07","e"]
+ - [7,"a",15,100,4.2,7.2,1590738996000,"2020-05-07","e"]
+ - id: 6
+ desc: batch request with one common window and one non-common window
+ mode: disk-unsupport
+ inputs:
+ -
+ columns: ["id int","k1 bigint","k2 bigint","k3 timestamp", "k4 timestamp",
+ "c1 double","c2 double","c3 double","c4 double","c5 double","c6 double"]
+ indexs: ["index1:k1:k3", "index2:k2:k4"]
+ repeat: 10
+ rows:
+ - [1,1,2,1590738990000,1590738990000,1.0,1.0,1.0,1.0,1.0,1.0]
+ - [3,1,2,1590738990000,1590738990000,1.0,1.0,1.0,1.0,1.0,1.0]
+ - [5,1,2,1590738990000,1590738990000,1.0,1.0,1.0,1.0,1.0,1.0]
+ - [6,1,2,1590738990000,1590738990000,1.0,1.0,1.0,1.0,1.0,1.0]
+ batch_request:
+ common_column_indices: [1,3,5,6,7]
+ columns : ["id int","k1 bigint","k2 bigint","k3 timestamp", "k4 timestamp",
+ "c1 double","c2 double","c3 double","c4 double","c5 double","c6 double"]
+ rows:
+ - [2,1,2,1590738991000,1590738991000,1.0,1.0,1.0,1.0,1.0,1.0]
+ - [4,1,2,1590738991000,1590738991000,1.0,1.0,1.0,1.0,1.0,1.0]
+ - [7,1,2,1590738991000,1590738991000,1.0,1.0,1.0,1.0,1.0,1.0]
+ sql: |
+ SELECT {0}.id, sum(c1) over w1 as m1, sum(c2) over w1 as m2, sum(c3) over w1 as m3,
+ sum(c4) over w2 as m4, sum(c5) over w2 as m5, sum(c6) over w2 as m6
+ FROM {0}
+ WINDOW w1 AS (PARTITION BY {0}.k1 ORDER BY {0}.k3 ROWS_RANGE BETWEEN 20s PRECEDING AND CURRENT ROW),
+ w2 AS (PARTITION BY {0}.k2 ORDER BY {0}.k4 ROWS_RANGE BETWEEN 20s PRECEDING AND CURRENT ROW);
+ expect:
+ success: true
+ order: id
+ columns: [ "id int","m1 double","m2 double","m3 double","m4 double","m5 double","m6 double"]
+ common_column_indices: [1,2,3]
+ rows:
+ - [2, 41.0, 41.0, 41.0, 41.0, 41.0, 41.0]
+ - [4, 41.0, 41.0, 41.0, 41.0, 41.0, 41.0]
+ - [7, 41.0, 41.0, 41.0, 41.0, 41.0, 41.0]
+
+ - id: 7
+ desc: batch request with a common window and both common and non-common aggregations, small window
+ mode: disk-unsupport
+ inputs:
+ -
+ columns: ["id int","k1 bigint","k2 timestamp",
+ "c1 double","c2 double","c3 double",
+ "c4 double","c5 double","c6 double"]
+ indexs: ["index1:k1:k2",]
+ repeat: 10
+ rows:
+ - [1,1,1590738990000,1.0,1.0,1.0,1.0,1.0,1.0]
+ batch_request:
+ common_column_indices: [1,2,3,5,7]
+ columns : ["id int","k1 bigint","k2 timestamp",
+ "c1 double","c2 double","c3 double",
+ "c4 double","c5 double","c6 double"]
+ rows:
+ - [2,1,1590738991000,1.0,1.0,1.0,1.0,1.0,1.0]
+ - [4,1,1590738991000,1.0,1.0,1.0,1.0,1.0,1.0]
+ - [7,1,1590738991000,1.0,1.0,1.0,1.0,1.0,1.0]
+ sql: |
+ SELECT {0}.id, sum(c1) over w1 as m1, sum(c2) over w1 as m2, sum(c3) over w1 as m3,
+ sum(c4) over w1 as m4, sum(c5) over w1 as m5, sum(c6) over w1 as m6
+ FROM {0}
+ WINDOW w1 AS (PARTITION BY {0}.k1 ORDER BY {0}.k2 ROWS_RANGE BETWEEN 20s PRECEDING AND CURRENT ROW);
+ expect:
+ success: true
+ order: id
+ common_column_indices: [1,3,5]
+ columns: [ "id int","m1 double","m2 double","m3 double","m4 double","m5 double","m6 double"]
+ rows:
+ - [2, 11.0, 11.0, 11.0, 11.0, 11.0, 11.0]
+ - [4, 11.0, 11.0, 11.0, 11.0, 11.0, 11.0]
+ - [7, 11.0, 11.0, 11.0, 11.0, 11.0, 11.0]
+
+ - id: 8
+ desc: batch request with one common window and one non-common window, current time == history time
+ mode: disk-unsupport
+ inputs:
+ -
+ columns: ["id int","k1 bigint","k2 bigint","k3 timestamp", "k4 timestamp",
+ "c1 double","c2 double","c3 double","c4 double","c5 double","c6 double"]
+ indexs: ["index1:k1:k3", "index2:k2:k4"]
+ repeat: 10
+ rows:
+ - [1,1,2,1590738990000,1590738990000,1.0,1.0,1.0,1.0,1.0,1.0]
+ - [3,1,2,1590738990000,1590738990000,1.0,1.0,1.0,1.0,1.0,1.0]
+ - [5,1,2,1590738990000,1590738990000,1.0,1.0,1.0,1.0,1.0,1.0]
+ - [6,1,2,1590738990000,1590738990000,1.0,1.0,1.0,1.0,1.0,1.0]
+ batch_request:
+ columns : ["id int","k1 bigint","k2 bigint","k3 timestamp", "k4 timestamp",
+ "c1 double","c2 double","c3 double","c4 double","c5 double","c6 double"]
+ rows:
+ - [2,1,2,1590738990000,1590738990000,1.0,1.0,1.0,1.0,1.0,1.0]
+ sql: |
+ SELECT {0}.id, sum(c1) over w1 as m1, sum(c2) over w1 as m2, sum(c3) over w1 as m3,
+ sum(c4) over w2 as m4, sum(c5) over w2 as m5, sum(c6) over w2 as m6
+ FROM {0}
+ WINDOW w1 AS (PARTITION BY {0}.k1 ORDER BY {0}.k3 ROWS BETWEEN 10 PRECEDING AND CURRENT ROW),
+ w2 AS (PARTITION BY {0}.k2 ORDER BY {0}.k4 ROWS BETWEEN 20 PRECEDING AND CURRENT ROW);
+ expect:
+ success: true
+ order: id
+ columns: [ "id int","m1 double","m2 double","m3 double","m4 double","m5 double","m6 double"]
+ rows:
+ - [2, 11.0, 11.0, 11.0, 21.0, 21.0, 21.0]
diff --git a/cases/integration_test/test_feature_zero_function.yaml b/cases/integration_test/test_feature_zero_function.yaml
new file mode 100644
index 00000000000..24876d3ce97
--- /dev/null
+++ b/cases/integration_test/test_feature_zero_function.yaml
@@ -0,0 +1,176 @@
+# Copyright 2021 4Paradigm
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
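+# Cases for the feature-zero string helpers: fz_split / fz_split_by_key /
+# fz_split_by_value work on a single row, while the fz_window_split* variants
+# collect values over a window; results are checked through fz_join,
+# count/distinct_count, fz_top1_ratio and fz_topn_frequency, including the
+# empty-separator edge case.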
+db: test_fz
+debugs: []
+cases:
+ - id: 1
+ desc: feature zero split utility functions
+ inputs:
+ - columns: ["id int64", "pk int64", "c1 string"]
+ indexs: ["index1:pk:id"]
+ rows:
+ - [1, 0, "k1:v1,k2:v2"]
+ - [2, 0, "k3:v3"]
+ - [3, 0, "???,,k4:v4"]
+ - [4, 0, NULL]
+ - [5, 0, "k5:v5,k5:v3"]
+ sql: |
+ SELECT id,
+ identity(case when !isnull(lag(c1, 0)) then distinct_count(fz_window_split(c1, ",")) else null end) over w1 as table_2_kn_0,
+ identity(case when !isnull(lag(c1, 0)) then distinct_count(fz_window_split_by_key(c1, ",", ":")) else null end) over w1 as table_2_kn_1,
+ identity(case when !isnull(lag(c1, 0)) then distinct_count(fz_window_split_by_value(c1, ",", ":")) else null end) over w1 as table_2_kn_2,
+ fz_join(fz_window_split(c1, ","), " ") OVER w1 AS split_and_join,
+ fz_join(fz_window_split_by_key(c1, ",", ":"), " ") OVER w1 AS split_key_and_join,
+ fz_join(fz_window_split_by_value(c1, ",", ":"), " ") OVER w1 AS split_value_and_join,
+ count(fz_window_split_by_key(c1, ",", ":")) OVER w1 AS split_key_and_count,
+ distinct_count(fz_window_split_by_key(c1, ",", ":")) OVER w1 AS split_key_and_distinct_count
+ FROM {0}
+ WINDOW w1 AS (PARTITION BY {0}.pk ORDER BY {0}.id ROWS BETWEEN 10 PRECEDING AND CURRENT ROW);
+ expect:
+ order: id
+ columns: ["id int64", "table_2_kn_0 int64", "table_2_kn_1 int64", "table_2_kn_2 int64", "split_and_join string", "split_key_and_join string", "split_value_and_join string", "split_key_and_count int64", "split_key_and_distinct_count int64"]
+ rows:
+ - [1, 2, 2, 2, "k1:v1 k2:v2", "k1 k2", "v1 v2", 2, 2]
+ - [2, 3, 3, 3, "k3:v3 k1:v1 k2:v2", "k3 k1 k2", "v3 v1 v2", 3, 3]
+ - [3, 6, 4, 4, "??? k4:v4 k3:v3 k1:v1 k2:v2", "k4 k3 k1 k2", "v4 v3 v1 v2", 4, 4]
+ - [4, NULL, NULL, NULL, "??? k4:v4 k3:v3 k1:v1 k2:v2", "k4 k3 k1 k2", "v4 v3 v1 v2", 4, 4]
+ - [5, 8, 5, 5, "k5:v5 k5:v3 ??? k4:v4 k3:v3 k1:v1 k2:v2", "k5 k5 k4 k3 k1 k2", "v5 v3 v4 v3 v1 v2", 6, 5]
+
+ - id: 2
+ desc: feature zero split utility functions on single row
+ inputs:
+ - name: main
+ columns: ["id int64", "pk int64", "c1 string"]
+ indexs: ["index1:pk:id"]
+ rows:
+ - [1, 0, "k1:v1,k2:v2"]
+ - [2, 0, "k3:v3"]
+ - [3, 0, "???,,k4:v4"]
+ - [4, 0, NULL]
+ - [5, 0, "k5:v5,k3:v3"]
+ sql: |
+ SELECT id,
+ fz_join(fz_split(c1, ","), " ") AS split_and_join,
+ fz_join(fz_split_by_key(c1, ",", ":"), " ") AS split_key_and_join,
+ fz_join(fz_split_by_value(c1, ",", ":"), " ") AS split_value_and_join
+ FROM main;
+ expect:
+ order: id
+ columns: ["id int64", "split_and_join string", "split_key_and_join string", "split_value_and_join string"]
+ rows:
+ - [1, "k1:v1 k2:v2", "k1 k2", "v1 v2"]
+ - [2, "k3:v3", "k3", "v3"]
+ - [3, "??? k4:v4", "k4", "v4"]
+ - [4, "", "", ""]
+ - [5, "k5:v5 k3:v3", "k5 k3", "v5 v3"]
+
+ - id: 3
+ desc: window top1 ratio
+ inputs:
+ -
+ columns : ["id bigint","pk bigint","c1 smallint","c2 int","c3 bigint","c4 float",
+ "c5 double", "c6 string"]
+ indexs: ["index1:pk:id"]
+ rows:
+ - [1, 1, 1, 1, 1, 1.1, 2.1, "1:1 1:2"]
+ - [2, 1, 2, 2, 1, 1.4, 2.1, "1:1" ]
+ - [3, 1, NULL, 3, 1, 1.3, 2.3, "1:1 1:3"]
+ - [4, 2, NULL, 5, 1, NULL, NULL, "1:3"]
+ - [5, 2, 5, 4, 1, 1.5, 2.5, "1:2 1:3"]
+ sql: |
+ SELECT id,
+ fz_top1_ratio(c1) OVER w1 as r1,
+ fz_top1_ratio(c2) OVER w1 as r2,
+ fz_top1_ratio(c3) OVER w1 as r3,
+ fz_top1_ratio(c4) OVER w1 as r4,
+ fz_top1_ratio(c5) OVER w1 as r5,
+ fz_top1_ratio(fz_window_split_by_value(c6, " ", ":")) OVER w1 as r6,
+ fz_join(fz_window_split_by_value(c6, " ", ":")," ") OVER w1 as r7
+ FROM {0}
+ WINDOW w1 AS (PARTITION BY {0}.pk ORDER BY {0}.id ROWS BETWEEN 2 PRECEDING AND CURRENT ROW);
+ expect:
+ order: id
+ columns: ["id bigint","r1 double","r2 double","r3 double","r4 double","r5 double","r6 double","r7 string"]
+ rows:
+ - [1, 1.0, 1.0, 1, 1.0, 1.0, 0.5,"1 2"]
+ - [2, 0.5, 0.5, 1, 0.5, 1.0, 0.66666666666666663,"1 1 2"]
+ - [3, 0.5, 0.33333333333333331, 1, 0.33333333333333331, 0.66666666666666663, 0.6,"1 3 1 1 2"]
+ - [4, 0, 1, 1, 0, 0, 1,"3"]
+ - [5, 1, 0.5, 1, 1.0, 1, 0.66666666666666663,"2 3 3"]
+
+ - id: 4
+ desc: multi top-3 frequency
+ inputs:
+ -
+ columns : ["id bigint","pk bigint","c1 string","c2 int","c3 string","c4 float",
+ "c5 double", "c6 string"]
+ indexs: ["index1:pk:id"]
+ rows:
+ - [1, 1, "1:2 4:3", 1, "1:2 1:3", 1.1, 2.1, "1:1 1:2"]
+ - [2, 1, "4:2 8:3", NULL, "1:7 1:3", 1.4, 2.1, "1:1" ]
+ - [3, 1, NULL, 2, "1:2 1:3", 1.3, 2.3, "1:1 1:3"]
+ - [4, 2, NULL, NULL, "1:8 1:3", NULL, NULL, "1:3"]
+ - [5, 2, "1:2 1:3", 5, "1:8 1:3", 1, 1.5, "1:2 1:3"]
+ sql: |
+ SELECT id,
+ fz_topn_frequency(fz_window_split_by_key(c1, " ", ":"), 3) OVER w1 as r1,
+ fz_topn_frequency(c2, 3) OVER w1 as r2,
+ fz_topn_frequency(fz_window_split(c3, ","), 3) OVER w1 as r3,
+ fz_topn_frequency(c4, 3) OVER w1 as r4,
+ fz_topn_frequency(c5, 3) OVER w1 as r5,
+ fz_topn_frequency(fz_window_split_by_value(c6, " ", ":"), 3) OVER w1 as r6,
+ fz_join(fz_window_split_by_value(c6, " ", ":")," ") OVER w1 as r7
+ FROM {0}
+ WINDOW w1 AS (PARTITION BY {0}.pk ORDER BY {0}.id ROWS BETWEEN 2 PRECEDING AND CURRENT ROW);
+ expect:
+ order: id
+ columns: ["id bigint","r1 string","r2 string","r3 string","r4 string","r5 string","r6 string","r7 string"]
+ rows:
+ - [1, "1,4,NULL", "1,NULL,NULL", "1:2 1:3,NULL,NULL", "1.100000,NULL,NULL", "2.100000,NULL,NULL", "1,2,NULL", "1 2"]
+ - [2, "4,1,8", "1,NULL,NULL", "1:2 1:3,1:7 1:3,NULL", "1.100000,1.400000,NULL", "2.100000,NULL,NULL", "1,2,NULL","1 1 2"]
+ - [3, "4,1,8", "1,2,NULL","1:2 1:3,1:7 1:3,NULL", "1.100000,1.300000,1.400000", "2.100000,2.300000,NULL", "1,2,3","1 3 1 1 2"]
+ - [4, "", "NULL,NULL,NULL", "1:8 1:3,NULL,NULL", "NULL,NULL,NULL", "NULL,NULL,NULL", "3,NULL,NULL","3"]
+ - [5, "1,NULL,NULL", "5,NULL,NULL", "1:8 1:3,NULL,NULL", "1.000000,NULL,NULL", "1.500000,NULL,NULL", "3,2,NULL","2 3 3"]
+
+ - id: 5
+ desc: feature zero split utility functions on empty separator
+ inputs:
+ - columns: ["id int64", "pk int64", "c1 string"]
+ indexs: ["index1:pk:id"]
+ rows:
+ - [1, 0, "a"]
+ - [2, 0, "b"]
+ - [3, 0, "c"]
+ - [4, 0, NULL]
+ - [5, 0, "e"]
+ sql: |
+ SELECT id,
+ fz_join(fz_split(c1, ""), "") OVER w1 AS r1,
+ fz_join(fz_split_by_key(c1, "", ""), "") OVER w1 AS r2,
+ fz_join(fz_split_by_value(c1, "", ""), "") OVER w1 AS r3,
+ fz_join(fz_window_split(c1, ""), " ") OVER w1 AS r4,
+ fz_join(fz_window_split_by_key(c1, "", ""), " ") OVER w1 AS r5,
+ fz_join(fz_window_split_by_value(c1, "", ""), " ") OVER w1 AS r6
+ FROM {0}
+ WINDOW w1 AS (PARTITION BY {0}.pk ORDER BY {0}.id ROWS BETWEEN 10 PRECEDING AND CURRENT ROW);
+ expect:
+ order: id
+ columns: ["id int64", "r1 string", "r2 string", "r3 string", "r4 string", "r5 string", "r6 string"]
+ rows:
+ - [1, "", "", "", "", "", ""]
+ - [2, "", "", "", "", "", ""]
+ - [3, "", "", "", "", "", ""]
+ - [4, "", "", "", "", "", ""]
+ - [5, "", "", "", "", "", ""]
diff --git a/cases/integration_test/test_fz_sql.yaml b/cases/integration_test/test_fz_sql.yaml
new file mode 100644
index 00000000000..f79cecd1a27
--- /dev/null
+++ b/cases/integration_test/test_fz_sql.yaml
@@ -0,0 +1,156 @@
+# Copyright 2021 4Paradigm
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
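+# Feature-zero SQL cases: the first two cases mirror the split/join cases in
+# test_feature_zero_function.yaml, followed by a simplified debug version of a
+# larger feature-zero scenario (marked rtidb-batch-unsupport).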
+db: test_fz
+debugs: []
+cases:
+ - id: 0
+ desc: feature zero split utility functions
+ inputs:
+ - columns: ["id int64", "pk int64", "c1 string"]
+ indexs: ["index1:pk:id"]
+ rows:
+ - [1, 0, "k1:v1,k2:v2"]
+ - [2, 0, "k3:v3"]
+ - [3, 0, "???,,k4:v4"]
+ - [4, 0, NULL]
+ - [5, 0, "k5:v5,k5:v3"]
+ sql: |
+ SELECT id,
+ identity(case when !isnull(lag(c1, 0)) then distinct_count(fz_window_split(c1, ",")) else null end) over w1 as table_2_kn_0,
+ identity(case when !isnull(lag(c1, 0)) then distinct_count(fz_window_split_by_key(c1, ",", ":")) else null end) over w1 as table_2_kn_1,
+ identity(case when !isnull(lag(c1, 0)) then distinct_count(fz_window_split_by_value(c1, ",", ":")) else null end) over w1 as table_2_kn_2,
+ fz_join(fz_window_split(c1, ","), " ") OVER w1 AS split_and_join,
+ fz_join(fz_window_split_by_key(c1, ",", ":"), " ") OVER w1 AS split_key_and_join,
+ fz_join(fz_window_split_by_value(c1, ",", ":"), " ") OVER w1 AS split_value_and_join,
+ count(fz_window_split_by_key(c1, ",", ":")) OVER w1 AS split_key_and_count,
+ distinct_count(fz_window_split_by_key(c1, ",", ":")) OVER w1 AS split_key_and_distinct_count
+ FROM {0}
+ WINDOW w1 AS (PARTITION BY {0}.pk ORDER BY {0}.id ROWS BETWEEN 10 PRECEDING AND CURRENT ROW);
+ expect:
+ order: id
+ columns: ["id int64", "table_2_kn_0 int64", "table_2_kn_1 int64", "table_2_kn_2 int64", "split_and_join string", "split_key_and_join string", "split_value_and_join string", "split_key_and_count int64", "split_key_and_distinct_count int64"]
+ rows:
+ - [1, 2, 2, 2, "k1:v1 k2:v2", "k1 k2", "v1 v2", 2, 2]
+ - [2, 3, 3, 3, "k3:v3 k1:v1 k2:v2", "k3 k1 k2", "v3 v1 v2", 3, 3]
+ - [3, 6, 4, 4, "??? k4:v4 k3:v3 k1:v1 k2:v2", "k4 k3 k1 k2", "v4 v3 v1 v2", 4, 4]
+ - [4, NULL, NULL, NULL, "??? k4:v4 k3:v3 k1:v1 k2:v2", "k4 k3 k1 k2", "v4 v3 v1 v2", 4, 4]
+ - [5, 8, 5, 5, "k5:v5 k5:v3 ??? k4:v4 k3:v3 k1:v1 k2:v2", "k5 k5 k4 k3 k1 k2", "v5 v3 v4 v3 v1 v2", 6, 5]
+
+ - id: 1
+ desc: feature zero split utility functions on single row
+ inputs:
+ - name: main
+ columns: ["id int64", "pk int64", "c1 string"]
+ indexs: ["index1:pk:id"]
+ rows:
+ - [1, 0, "k1:v1,k2:v2"]
+ - [2, 0, "k3:v3"]
+ - [3, 0, "???,,k4:v4"]
+ - [4, 0, NULL]
+ - [5, 0, "k5:v5,k3:v3"]
+ sql: |
+ SELECT id,
+ fz_join(fz_split(c1, ","), " ") AS split_and_join,
+ fz_join(fz_split_by_key(c1, ",", ":"), " ") AS split_key_and_join,
+ fz_join(fz_split_by_value(c1, ",", ":"), " ") AS split_value_and_join
+ FROM main;
+ expect:
+ order: id
+ columns: ["id int64", "split_and_join string", "split_key_and_join string", "split_value_and_join string"]
+ rows:
+ - [1, "k1:v1 k2:v2", "k1 k2", "v1 v2"]
+ - [2, "k3:v3", "k3", "v3"]
+ - [3, "??? k4:v4", "k4", "v4"]
+ - [4, "", "", ""]
+ - [5, "k5:v5 k3:v3", "k5 k3", "v5 v3"]
+ - id: 2
+ desc: fz case 5 simple version debug
+ mode: rtidb-batch-unsupport
+ inputs:
+ - columns: ["id int64", "reqId string", "eventTime timestamp", "SK_ID_CURR string"]
+ indexs: ["index1:reqId:id"]
+ rows:
+ - [1, "col0", 1607473951299, "col3"]
+ - columns: [ "ingestionTime timestamp","eventTime timestamp",
+ "SK_ID_PREV string","SK_ID_CURR string",
+ "NAME_CONTRACT_TYPE string","AMT_ANNUITY double","AMT_APPLICATION double","AMT_CREDIT double","AMT_DOWN_PAYMENT double",
+ "AMT_GOODS_PRICE double","WEEKDAY_APPR_PROCESS_START string","HOUR_APPR_PROCESS_START int",
+ "FLAG_LAST_APPL_PER_CONTRACT string","NFLAG_LAST_APPL_IN_DAY int",
+ "RATE_DOWN_PAYMENT double","RATE_INTEREST_PRIMARY double","RATE_INTEREST_PRIVILEGED double",
+ "NAME_CASH_LOAN_PURPOSE string","NAME_CONTRACT_STATUS string","DAYS_DECISION int","NAME_PAYMENT_TYPE string",
+ "CODE_REJECT_REASON string","NAME_TYPE_SUITE string","NAME_CLIENT_TYPE string","NAME_GOODS_CATEGORY string",
+ "NAME_PORTFOLIO string","NAME_PRODUCT_TYPE string","CHANNEL_TYPE string","SELLERPLACE_AREA int",
+ "NAME_SELLER_INDUSTRY string","CNT_PAYMENT double","NAME_YIELD_GROUP string","PRODUCT_COMBINATION string",
+ "DAYS_FIRST_DRAWING double","DAYS_FIRST_DUE double","DAYS_LAST_DUE_1ST_VERSION double",
+ "DAYS_LAST_DUE double","DAYS_TERMINATION double",
+ "NFLAG_INSURED_ON_APPROVAL double"]
+ indexs: ["index1:SK_ID_CURR:ingestionTime"]
+ rows:
+ - [1607473951298, 1607473951298,
+ 'col2', 'col3', 'col4', 1.4, 1.4, 1.4, 1.4, 1.4, 'col10', 11, 'col12', 13, 1.4, 1.4, 1.4,
+ 'col17', 'col18', 19, 'col20', 'col21', 'col22', 'col23', 'col24', 'col25', 'col26',
+ 'col27', 28, 'col29', 1.4, 'col31', 'col32', 1.4, 1.4, 1.4, 1.4, 1.4, 1.4]
+ sql: |
+ select reqId_1, reqId_243 from ( select reqId as reqId_1 from {0} ) as out0 last join
+ ( select
+ reqId as reqId_243,
+ case when !isnull(lag(NAME_CLIENT_TYPE, 1)) over previous_application_SK_ID_CURR_ingestionTime_0s_32d then count(NAME_CLIENT_TYPE) over previous_application_SK_ID_CURR_ingestionTime_0s_32d else null end as f1,
+ 1 as f2,
+ fz_topn_frequency(NAME_CONTRACT_STATUS, 3) over previous_application_SK_ID_CURR_ingestionTime_0s_32d as f3,
+ distinct_count(NAME_CONTRACT_TYPE) over previous_application_SK_ID_CURR_ingestionTime_0s_32d as f4,
+ fz_topn_frequency(NAME_CONTRACT_TYPE, 3) over previous_application_SK_ID_CURR_ingestionTime_0s_32d as f5,
+ fz_topn_frequency(NAME_GOODS_CATEGORY, 3) over previous_application_SK_ID_CURR_ingestionTime_0s_32d as f6,
+ distinct_count(NAME_GOODS_CATEGORY) over previous_application_SK_ID_CURR_ingestionTime_0s_32d as f7,
+ fz_topn_frequency(NAME_PAYMENT_TYPE, 3) over previous_application_SK_ID_CURR_ingestionTime_0s_32d as f8,
+ case when !isnull(lag(NAME_PAYMENT_TYPE, 1)) over previous_application_SK_ID_CURR_ingestionTime_0s_32d then count(NAME_PAYMENT_TYPE) over previous_application_SK_ID_CURR_ingestionTime_0s_32d else null end as f9,
+ distinct_count(NAME_PORTFOLIO) over previous_application_SK_ID_CURR_ingestionTime_0s_32d as f10,
+ fz_topn_frequency(NAME_PORTFOLIO, 3) over previous_application_SK_ID_CURR_ingestionTime_0s_32d as previous_application_NAME_PORTFOLIO_multi_top3frequency_299,
+ distinct_count(NAME_PRODUCT_TYPE) over previous_application_SK_ID_CURR_ingestionTime_0s_32d as previous_application_NAME_PRODUCT_TYPE_multi_unique_count_300,
+ fz_topn_frequency(NAME_PRODUCT_TYPE, 3) over previous_application_SK_ID_CURR_ingestionTime_0s_32d as previous_application_NAME_PRODUCT_TYPE_multi_top3frequency_301,
+ fz_topn_frequency(NAME_SELLER_INDUSTRY, 3) over previous_application_SK_ID_CURR_ingestionTime_0s_32d as previous_application_NAME_SELLER_INDUSTRY_multi_top3frequency_302,
+ case when !isnull(lag(NAME_SELLER_INDUSTRY, 1)) over previous_application_SK_ID_CURR_ingestionTime_0s_32d then count(NAME_SELLER_INDUSTRY) over previous_application_SK_ID_CURR_ingestionTime_0s_32d else null end as previous_application_NAME_SELLER_INDUSTRY_multi_count_303,
+ fz_topn_frequency(NAME_TYPE_SUITE, 3) over previous_application_SK_ID_CURR_ingestionTime_0s_32d as previous_application_NAME_TYPE_SUITE_multi_top3frequency_304,
+ case when !isnull(lag(NAME_TYPE_SUITE, 1)) over previous_application_SK_ID_CURR_ingestionTime_0s_32d then count(NAME_TYPE_SUITE) over previous_application_SK_ID_CURR_ingestionTime_0s_32d else null end as previous_application_NAME_TYPE_SUITE_multi_count_305,
+ fz_topn_frequency(NAME_YIELD_GROUP, 3) over previous_application_SK_ID_CURR_ingestionTime_0s_32d as previous_application_NAME_YIELD_GROUP_multi_top3frequency_306,
+ case when !isnull(lag(NAME_YIELD_GROUP, 1)) over previous_application_SK_ID_CURR_ingestionTime_0s_32d then count(NAME_YIELD_GROUP) over previous_application_SK_ID_CURR_ingestionTime_0s_32d else null end as previous_application_NAME_YIELD_GROUP_multi_count_307,
+ fz_topn_frequency(PRODUCT_COMBINATION, 3) over previous_application_SK_ID_CURR_ingestionTime_0s_32d as previous_application_PRODUCT_COMBINATION_multi_top3frequency_308,
+ case when !isnull(lag(PRODUCT_COMBINATION, 1)) over previous_application_SK_ID_CURR_ingestionTime_0s_32d then count(PRODUCT_COMBINATION) over previous_application_SK_ID_CURR_ingestionTime_0s_32d else null end as previous_application_PRODUCT_COMBINATION_multi_count_309,
+ fz_topn_frequency(SK_ID_PREV, 3) over previous_application_SK_ID_CURR_ingestionTime_0s_32d as previous_application_SK_ID_PREV_multi_top3frequency_310,
+ distinct_count(SK_ID_PREV) over previous_application_SK_ID_CURR_ingestionTime_0s_32d as previous_application_SK_ID_PREV_multi_unique_count_311,
+ fz_topn_frequency(WEEKDAY_APPR_PROCESS_START, 3) over previous_application_SK_ID_CURR_ingestionTime_0s_32d as previous_application_WEEKDAY_APPR_PROCESS_START_multi_top3frequency_312,
+
+ case when !isnull(lag(WEEKDAY_APPR_PROCESS_START, 1)) over previous_application_SK_ID_CURR_ingestionTime_0s_32d then count(WEEKDAY_APPR_PROCESS_START) over previous_application_SK_ID_CURR_ingestionTime_0s_32d else null end as previous_application_WEEKDAY_APPR_PROCESS_START_multi_count_313
+ from
+ (select eventTime as ingestionTime, timestamp('2019-07-18 09:20:20') as eventTime, '' as SK_ID_PREV,
+ SK_ID_CURR as SK_ID_CURR, '' as NAME_CONTRACT_TYPE, double(0) as AMT_ANNUITY, double(0) as AMT_APPLICATION,
+ double(0) as AMT_CREDIT, double(0) as AMT_DOWN_PAYMENT, double(0) as AMT_GOODS_PRICE, '' as WEEKDAY_APPR_PROCESS_START,
+ int(0) as HOUR_APPR_PROCESS_START, '' as FLAG_LAST_APPL_PER_CONTRACT, int(0) as NFLAG_LAST_APPL_IN_DAY, double(0) as RATE_DOWN_PAYMENT,
+ double(0) as RATE_INTEREST_PRIMARY, double(0) as RATE_INTEREST_PRIVILEGED, '' as NAME_CASH_LOAN_PURPOSE, '' as NAME_CONTRACT_STATUS, int(0) as DAYS_DECISION,
+ '' as NAME_PAYMENT_TYPE, '' as CODE_REJECT_REASON, '' as NAME_TYPE_SUITE, '' as NAME_CLIENT_TYPE, '' as NAME_GOODS_CATEGORY, '' as NAME_PORTFOLIO, '' as NAME_PRODUCT_TYPE,
+ '' as CHANNEL_TYPE, int(0) as SELLERPLACE_AREA, '' as NAME_SELLER_INDUSTRY, double(0) as CNT_PAYMENT, '' as NAME_YIELD_GROUP, '' as PRODUCT_COMBINATION,
+ double(0) as DAYS_FIRST_DRAWING, double(0) as DAYS_FIRST_DUE, double(0) as DAYS_LAST_DUE_1ST_VERSION, double(0) as DAYS_LAST_DUE, double(0) as DAYS_TERMINATION,
+ double(0) as NFLAG_INSURED_ON_APPROVAL, reqId from {0})
+ window previous_application_SK_ID_CURR_ingestionTime_0s_32d as ( UNION (select ingestionTime,
+ eventTime, SK_ID_PREV, SK_ID_CURR, NAME_CONTRACT_TYPE, AMT_ANNUITY, AMT_APPLICATION, AMT_CREDIT, AMT_DOWN_PAYMENT, AMT_GOODS_PRICE, WEEKDAY_APPR_PROCESS_START, HOUR_APPR_PROCESS_START,
+ FLAG_LAST_APPL_PER_CONTRACT, NFLAG_LAST_APPL_IN_DAY, RATE_DOWN_PAYMENT, RATE_INTEREST_PRIMARY, RATE_INTEREST_PRIVILEGED, NAME_CASH_LOAN_PURPOSE, NAME_CONTRACT_STATUS,
+ DAYS_DECISION, NAME_PAYMENT_TYPE, CODE_REJECT_REASON, NAME_TYPE_SUITE, NAME_CLIENT_TYPE, NAME_GOODS_CATEGORY, NAME_PORTFOLIO, NAME_PRODUCT_TYPE, CHANNEL_TYPE, SELLERPLACE_AREA,
+ NAME_SELLER_INDUSTRY, CNT_PAYMENT, NAME_YIELD_GROUP, PRODUCT_COMBINATION, DAYS_FIRST_DRAWING, DAYS_FIRST_DUE, DAYS_LAST_DUE_1ST_VERSION, DAYS_LAST_DUE, DAYS_TERMINATION, NFLAG_INSURED_ON_APPROVAL,
+ '' as reqId from {1})
+ partition by SK_ID_CURR order by ingestionTime rows_range between 32d preceding and 0s preceding INSTANCE_NOT_IN_WINDOW)) as out7 on out0.reqId_1 = out7.reqId_243 ;
+ expect:
+ success: true
+ columns: ["reqId_1 string", "reqId_243 string"]
+ rows:
+ - ["col0", "col0"]
diff --git a/cases/integration_test/test_index_optimized.yaml b/cases/integration_test/test_index_optimized.yaml
new file mode 100644
index 00000000000..78e05a96131
--- /dev/null
+++ b/cases/integration_test/test_index_optimized.yaml
@@ -0,0 +1,184 @@
+# Copyright 2021 4Paradigm
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+db: test_zw
+debugs: []
+version: 0.5.0
+cases:
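+# Note: `request_plan` is the physical plan expected in request mode; the `index=`
+# attribute on each DATA_PROVIDER node pins down which index the optimizer is
+# expected to pick for the window or join in that case.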
+ - id: 0
+ desc: window optimized one key one ts
+ inputs:
+ - columns: [ "c1 string","c3 int","c6 timestamp","c7 timestamp" ]
+ indexs: ["index1:c1:c6" ]
+ rows:
+ - [ "aa",1, 1590738990000, 1590738990000 ]
+ - [ "aa",2, 1590738991000, 1590738991000 ]
+ - [ "aa",3, 1590738992000, 1590738992000 ]
+ - [ "aa",4, 1590738993000, 1590738993000 ]
+ - [ "aa",5, 1590739001000, 1590738994000 ]
+ - [ "aa",6, 1590739002000, 1590738995000 ]
+ sql: |
+ SELECT c1, c3, c6, c7,
+ count(c1) OVER w1 as w1_cnt
+ FROM {0} WINDOW
+ w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c6 ROWS_RANGE BETWEEN 3s PRECEDING AND CURRENT ROW);
+ request_plan: |
+ PROJECT(type=Aggregation)
+ REQUEST_UNION(partition_keys=(), orders=(ASC), range=(auto_t0.c6, 3000 PRECEDING, 0 CURRENT), index_keys=(auto_t0.c1))
+ DATA_PROVIDER(request=auto_t0)
+ DATA_PROVIDER(type=Partition, table=auto_t0, index=index1)
+ expect:
+ order: c3
+ columns: [ "c1 string", "c3 int", "c6 timestamp", "c7 timestamp", "w1_cnt bigint"]
+ rows:
+ - [ "aa", 1, 1590738990000, 1590738990000, 1]
+ - [ "aa", 2, 1590738991000, 1590738991000, 2]
+ - [ "aa", 3, 1590738992000, 1590738992000, 3]
+ - [ "aa", 4, 1590738993000, 1590738993000, 4]
+ - [ "aa", 5, 1590739001000, 1590738994000, 1]
+ - [ "aa", 6, 1590739002000, 1590738995000, 2]
+ - id: 1
+ desc: window optimized different key same ts
+ inputs:
+ - columns: [ "c1 string","c3 int","c6 timestamp","c7 timestamp" ]
+ indexs: ["index0:c3:c6", "index1:c1:c6" ]
+ rows:
+ - [ "aa",1, 1590738990000, 1590738990000 ]
+ - [ "aa",2, 1590738991000, 1590738991000 ]
+ - [ "aa",3, 1590738992000, 1590738992000 ]
+ - [ "aa",4, 1590738993000, 1590738993000 ]
+ - [ "aa",5, 1590739001000, 1590738994000 ]
+ - [ "aa",6, 1590739002000, 1590738995000 ]
+ sql: |
+ SELECT c1, c3, c6, c7,
+ count(c1) OVER w1 as w1_cnt
+ FROM {0} WINDOW
+ w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c6 ROWS_RANGE BETWEEN 3s PRECEDING AND CURRENT ROW);
+ request_plan: |
+ PROJECT(type=Aggregation)
+ REQUEST_UNION(partition_keys=(), orders=(ASC), range=(auto_t0.c6, 3000 PRECEDING, 0 CURRENT), index_keys=(auto_t0.c1))
+ DATA_PROVIDER(request=auto_t0)
+ DATA_PROVIDER(type=Partition, table=auto_t0, index=index1)
+ expect:
+ order: c3
+ columns: [ "c1 string", "c3 int", "c6 timestamp", "c7 timestamp", "w1_cnt bigint"]
+ rows:
+ - [ "aa", 1, 1590738990000, 1590738990000, 1]
+ - [ "aa", 2, 1590738991000, 1590738991000, 2]
+ - [ "aa", 3, 1590738992000, 1590738992000, 3]
+ - [ "aa", 4, 1590738993000, 1590738993000, 4]
+ - [ "aa", 5, 1590739001000, 1590738994000, 1]
+ - [ "aa", 6, 1590739002000, 1590738995000, 2]
+ - id: 2
+ desc: window optimized same key different ts
+ inputs:
+ - columns: [ "c1 string","c3 int","c6 timestamp","c7 timestamp" ]
+ indexs: [ "index0:c3:c7", "index1:c3:c6", "index2:c1:c7", "index3:c1:c6" ]
+ rows:
+ - [ "aa",1, 1590738990000, 1590738990000 ]
+ - [ "aa",2, 1590738991000, 1590738991000 ]
+ - [ "aa",3, 1590738992000, 1590738992000 ]
+ - [ "aa",4, 1590738993000, 1590738993000 ]
+ - [ "aa",5, 1590739001000, 1590738994000 ]
+ - [ "aa",6, 1590739002000, 1590738995000 ]
+ sql: |
+ SELECT c1, c3, c6, c7,
+ count(c1) OVER w1 as w1_cnt,
+ count(c1) OVER w2 as w2_cnt
+ FROM {0} WINDOW
+ w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c6 ROWS_RANGE BETWEEN 3s PRECEDING AND CURRENT ROW),
+ w2 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS_RANGE BETWEEN 3s PRECEDING AND CURRENT ROW);
+ request_plan: |
+ SIMPLE_PROJECT(sources=(c1, c3, c6, c7, w1_cnt, w2_cnt))
+ REQUEST_JOIN(type=kJoinTypeConcat)
+ PROJECT(type=Aggregation)
+ REQUEST_UNION(partition_keys=(), orders=(ASC), range=(auto_t0.c6, 3000 PRECEDING, 0 CURRENT), index_keys=(auto_t0.c1))
+ DATA_PROVIDER(request=auto_t0)
+ DATA_PROVIDER(type=Partition, table=auto_t0, index=index3)
+ PROJECT(type=Aggregation)
+ REQUEST_UNION(partition_keys=(), orders=(ASC), range=(auto_t0.c7, 3000 PRECEDING, 0 CURRENT), index_keys=(auto_t0.c1))
+ DATA_PROVIDER(request=auto_t0)
+ DATA_PROVIDER(type=Partition, table=auto_t0, index=index2)
+ expect:
+ order: c3
+ columns: [ "c1 string", "c3 int", "c6 timestamp", "c7 timestamp", "w1_cnt bigint", "w2_cnt bigint" ]
+ rows:
+ - [ "aa", 1, 1590738990000, 1590738990000, 1, 1 ]
+ - [ "aa", 2, 1590738991000, 1590738991000, 2, 2 ]
+ - [ "aa", 3, 1590738992000, 1590738992000, 3, 3 ]
+ - [ "aa", 4, 1590738993000, 1590738993000, 4, 4 ]
+ - [ "aa", 5, 1590739001000, 1590738994000, 1, 4 ]
+ - [ "aa", 6, 1590739002000, 1590738995000, 2, 4 ]
+ - id: 3
+ desc: LastJoin optimized one key one ts
+ inputs:
+ - columns: [ "c1 string","c2 int","c3 bigint","c4 timestamp"]
+ indexs: [ "index1:c1:c4" ]
+ rows:
+ - [ "aa",2,3,1590738989000 ]
+ - [ "bb",21,31,1590738990000 ]
+ - columns: [ "c1 string","c2 int","c3 bigint","c4 timestamp"]
+ indexs: [ "index1:c1:c3", "index2:c1:c4" ]
+ rows:
+ - [ "aa",2,13,1590738990000 ]
+ - [ "aa",21,131,1590738989000 ]
+ - [ "bb",41,151,1590738988000 ]
+ sql: |
+ select {0}.c1,{0}.c2, t1.c3 as t1_c3, t1.c4 as t1_c4 from {0}
+ last join {1} as t1 ORDER BY t1.c3 on {0}.c1 = t1.c1;
+ request_plan: |
+ SIMPLE_PROJECT(sources=(auto_t0.c1, auto_t0.c2, t1.c3 -> t1_c3, t1.c4 -> t1_c4))
+ REQUEST_JOIN(type=LastJoin, right_sort=(ASC), condition=, left_keys=(), right_keys=(), index_keys=(auto_t0.c1))
+ DATA_PROVIDER(request=auto_t0)
+ RENAME(name=t1)
+ DATA_PROVIDER(type=Partition, table=auto_t1, index=index1)
+ expect:
+ order: c1
+ columns: [ "c1 string","c2 int","t1_c3 bigint","t1_c4 timestamp" ]
+ rows:
+ - [ "aa",2, 131, 1590738989000]
+ - [ "bb",21,151, 1590738988000]
+ - id: 4
+ desc: LastJoin optimized one key two ts
+ inputs:
+ - columns: [ "c1 string","c2 int","c3 bigint","c4 timestamp"]
+ indexs: [ "index1:c1:c4" ]
+ rows:
+ - [ "aa",2,3,1590738989000 ]
+ - [ "bb",21,31,1590738990000 ]
+ - columns: [ "c1 string","c2 int","c3 bigint","c4 timestamp"]
+ indexs: [ "index0:c2:c3", "index1:c1:c3", "index2:c1:c4" ]
+ rows:
+ - [ "aa",2,13,1590738990000 ]
+ - [ "aa",21,131,1590738989000 ]
+ - [ "bb",41,151,1590738988000 ]
+ sql: |
+ select {0}.c1,{0}.c2, t1.c3 as t1_c3, t1.c4 as t1_c4, t2.c3 as t2_c3, t2.c4 as t2_c4 from {0}
+ last join {1} as t1 ORDER BY t1.c3 on {0}.c1 = t1.c1
+ last join {1} as t2 ORDER BY t2.c4 on {0}.c1 = t2.c1;
+ request_plan: |
+ SIMPLE_PROJECT(sources=(auto_t0.c1, auto_t0.c2, t1.c3 -> t1_c3, t1.c4 -> t1_c4, t2.c3 -> t2_c3, t2.c4 -> t2_c4))
+ REQUEST_JOIN(type=LastJoin, right_sort=(ASC), condition=, left_keys=(), right_keys=(), index_keys=(auto_t0.c1))
+ REQUEST_JOIN(type=LastJoin, right_sort=(ASC), condition=, left_keys=(), right_keys=(), index_keys=(auto_t0.c1))
+ DATA_PROVIDER(request=auto_t0)
+ RENAME(name=t1)
+ DATA_PROVIDER(type=Partition, table=auto_t1, index=index1)
+ RENAME(name=t2)
+ DATA_PROVIDER(type=Partition, table=auto_t1, index=index2)
+ expect:
+ order: c1
+ columns: [ "c1 string","c2 int","t1_c3 bigint","t1_c4 timestamp", "t2_c3 bigint","t2_c4 timestamp" ]
+ rows:
+ - [ "aa",2, 131, 1590738989000, 13, 1590738990000 ]
+ - [ "bb",21,151, 1590738988000, 151,1590738988000 ]
diff --git a/cases/integration_test/test_performance_insensitive/test_performance_insensitive.yaml b/cases/integration_test/test_performance_insensitive/test_performance_insensitive.yaml
new file mode 100644
index 00000000000..f03b0d0235a
--- /dev/null
+++ b/cases/integration_test/test_performance_insensitive/test_performance_insensitive.yaml
@@ -0,0 +1,401 @@
+# Copyright 2021 4Paradigm
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+db: test_zw
+debugs: []
+cases:
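+# Note: these cases cover predicates, joins and windows whose keys do not (or only
+# partially) match an index; they check that such queries still return correct
+# results, or fail cleanly where unsupported (expect success: false).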
+ - id: 0
+ desc: where does not hit the index, using =
+ inputs:
+ - columns: [ "c1 string","c2 int","c3 bigint","c4 timestamp" ]
+ indexs: [ "index1:c1:c4" ]
+ rows:
+ - [ "aa",2,3,1590738989000 ]
+ - [ "aa",20,30,1590738991000 ]
+ - [ "bb",21,31,1590738990000 ]
+ - [ "dd",41,51,1590738990000 ]
+ sql: select * from {0} where c2=20;
+ expect:
+ columns: [ "c1 string", "c2 int", "c3 bigint", "c4 timestamp" ]
+ rows:
+ - [ "aa", 20, 30, 1590738991000 ]
+ - id: 1
+ desc: where does not hit the index, using ==
+ inputs:
+ - columns: [ "c1 string","c2 int","c3 bigint","c4 timestamp" ]
+ indexs: [ "index1:c1:c4" ]
+ rows:
+ - [ "aa",2,3,1590738989000 ]
+ - [ "aa",20,30,1590738991000 ]
+ - [ "bb",21,31,1590738990000 ]
+ - [ "dd",41,51,1590738990000 ]
+ sql: select * from {0} where c2==20;
+ expect:
+ columns: [ "c1 string", "c2 int", "c3 bigint", "c4 timestamp" ]
+ rows:
+ - [ "aa", 20, 30, 1590738991000 ]
+ - id: 2
+ desc: where does not hit the index, non-equality query
+ inputs:
+ - columns: [ "c1 string","c2 int","c3 bigint","c4 timestamp" ]
+ indexs: [ "index1:c1:c4" ]
+ rows:
+ - [ "aa",2,3,1590738989000 ]
+ - [ "aa",20,30,1590738991000 ]
+ - [ "bb",21,31,1590738990000 ]
+ - [ "dd",41,51,1590738990000 ]
+ sql: select * from {0} where c2>20;
+ expect:
+ columns: [ "c1 string", "c2 int", "c3 bigint", "c4 timestamp" ]
+ rows:
+ - [ "bb", 21, 31, 1590738990000 ]
+ - [ "dd", 41, 51, 1590738990000 ]
+ - id: 3
+ desc: where with two conditions, the first hits the index
+ inputs:
+ - columns: [ "c1 string","c2 int","c3 bigint","c4 timestamp" ]
+ indexs: [ "index1:c1:c4" ]
+ rows:
+ - [ "aa",2,3,1590738989000 ]
+ - [ "aa",20,30,1590738991000 ]
+ - [ "bb",21,31,1590738990000 ]
+ - [ "dd",41,51,1590738990000 ]
+ sql: select * from {0} where c1='aa' and c2>2;
+ expect:
+ columns: [ "c1 string", "c2 int", "c3 bigint", "c4 timestamp" ]
+ rows:
+ - [ "aa", 20, 30, 1590738991000 ]
+ - id: 4
+ desc: where hits the index
+ inputs:
+ - columns: [ "c1 string","c2 int","c3 bigint","c4 timestamp" ]
+ indexs: [ "index1:c1:c4" ]
+ rows:
+ - [ "aa",2,3,1590738989000 ]
+ - [ "aa",20,30,1590738991000 ]
+ - [ "bb",21,31,1590738990000 ]
+ - [ "dd",41,51,1590738990000 ]
+ sql: select * from {0} where c1='bb';
+ expect:
+ columns: [ "c1 string", "c2 int", "c3 bigint", "c4 timestamp" ]
+ rows:
+ - [ "bb", 21, 31, 1590738990000 ]
+ - id: 5
+ desc: where with two conditions, the second hits the index
+ inputs:
+ - columns: [ "c1 string","c2 int","c3 bigint","c4 timestamp" ]
+ indexs: [ "index1:c2:c4" ]
+ rows:
+ - [ "aa",2,3,1590738989000 ]
+ - [ "aa",20,30,1590738991000 ]
+ - [ "bb",21,31,1590738990000 ]
+ - [ "dd",41,51,1590738990000 ]
+ sql: select * from {0} where c1='aa' and c2>2;
+ expect:
+ columns: [ "c1 string", "c2 int", "c3 bigint", "c4 timestamp" ]
+ rows:
+ - [ "aa", 20, 30, 1590738991000 ]
+ - id: 6
+ desc: where with two conditions, both hit the index
+ inputs:
+ - columns: [ "c1 string","c2 int","c3 bigint","c4 timestamp" ]
+ indexs: [ "index1:c1:c4","index2:c2:c4" ]
+ rows:
+ - [ "aa",2,3,1590738989000 ]
+ - [ "aa",20,30,1590738991000 ]
+ - [ "bb",21,31,1590738990000 ]
+ - [ "dd",41,51,1590738990000 ]
+ sql: select * from {0} where c1='aa' and c2>2;
+ expect:
+ columns: [ "c1 string", "c2 int", "c3 bigint", "c4 timestamp" ]
+ rows:
+ - [ "aa", 20, 30, 1590738991000 ]
+ - id: 7
+ desc: where with two conditions, neither hits the index
+ inputs:
+ - columns: [ "c1 string","c2 int","c3 bigint","c4 timestamp" ]
+ indexs: [ "index1:c3:c4" ]
+ rows:
+ - [ "aa",2,3,1590738989000 ]
+ - [ "aa",20,30,1590738991000 ]
+ - [ "bb",21,31,1590738990000 ]
+ - [ "dd",41,51,1590738990000 ]
+ sql: select * from {0} where c1='aa' and c2>2;
+ expect:
+ columns: [ "c1 string", "c2 int", "c3 bigint", "c4 timestamp" ]
+ rows:
+ - [ "aa", 20, 30, 1590738991000 ]
+ - id: 8
+ desc: lastjoin - join condition does not hit the index
+ inputs:
+ - columns: [ "c1 string","c2 int","c3 bigint","c4 timestamp" ]
+ indexs: [ "index1:c1:c4" ]
+ rows:
+ - [ "aa",2,3,1590738989000 ]
+ - [ "bb",21,31,1590738990000 ]
+ - columns: [ "c1 string","c2 int","c3 bigint","c4 timestamp" ]
+ indexs: [ "index1:c2:c4" ]
+ rows:
+ - [ "aa",2,13,1590738989000 ]
+ - [ "bb",21,131,1590738990000 ]
+ - [ "cc",41,121,1590738991000 ]
+ sql: select {0}.c1,{0}.c2,{1}.c3,{1}.c4 from {0} last join {1} order by {1}.c4 on {0}.c1={1}.c1;
+ expect:
+ columns: [ "c1 string", "c2 int", "c3 bigint", "c4 timestamp" ]
+ rows:
+ - [ "aa", 2, 13, 1590738989000 ]
+ - [ "bb", 21, 131, 1590738990000 ]
+ - id: 9
+ desc: Last Join without order by, join condition hits part of a composite index (prefix)
+ mode: offline-unsupport
+ inputs:
+ - columns: ["id int", "c1 string","c2 int","c3 bigint","c4 timestamp" ]
+ indexs: [ "index1:c1:c4" ]
+ rows:
+ - [ 1,"aa",2,3,1590738989000 ]
+ - [ 2,"aa",20,30,1590738991000 ]
+ - [ 3,"bb",21,31,1590738990000 ]
+ - [ 4,"dd",41,51,1590738990000 ]
+ - columns: [ "c1 string","c2 int","c3 bigint","c4 timestamp" ]
+ indexs: [ "index1:c1|c2:c4" ]
+ rows:
+ - [ "aa",2,13,1590738989000 ]
+ - [ "aa",3,14,1590738990000 ]
+ - [ "aa",4,15,1590738991000 ]
+ - [ "bb",21,131,1590738990000 ]
+ - [ "cc",41,121,1590738991000 ]
+ sql: select {0}.id,{0}.c1,{0}.c2,{1}.c3,{1}.c4 from {0} last join {1} on {0}.c1={1}.c1 and {0}.c4={1}.c4;
+ expect:
+ order: id
+ columns: [ "id int","c1 string", "c2 int", "c3 bigint", "c4 timestamp" ]
+ rows:
+ - [1, "aa", 2, 13, 1590738989000 ]
+ - [2, "aa", 20, 15, 1590738991000 ]
+ - [3, "bb", 21, 131, 1590738990000 ]
+ - [4, "dd", 41, null, null ]
+ - id: 10
+ desc: Last Join without order by, join condition hits part of a composite index (suffix)
+ mode: offline-unsupport
+ inputs:
+ - columns: [ "id int","c1 string","c2 int","c3 bigint","c4 timestamp" ]
+ indexs: [ "index1:c1:c4" ]
+ rows:
+ - [ 1,"aa",2,3,1590738989000 ]
+ - [ 2,"aa",20,30,1590738991000 ]
+ - [ 3,"bb",21,31,1590738990000 ]
+ - [ 4,"dd",41,51,1590738990000 ]
+ - columns: [ "c1 string","c2 int","c3 bigint","c4 timestamp" ]
+ indexs: [ "index1:c2|c1:c4" ]
+ rows:
+ - [ "aa",2,13,1590738989000 ]
+ - [ "aa",3,14,1590738990000 ]
+ - [ "aa",4,15,1590738991000 ]
+ - [ "bb",21,131,1590738990000 ]
+ - [ "cc",41,121,1590738991000 ]
+ sql: select {0}.id,{0}.c1,{0}.c2,{1}.c3,{1}.c4 from {0} last join {1} on {0}.c1={1}.c1 and {0}.c4={1}.c4;
+ expect:
+ order: id
+ columns: [ "id int","c1 string", "c2 int", "c3 bigint", "c4 timestamp" ]
+ rows:
+ - [1, "aa", 2, 13, 1590738989000 ]
+ - [2, "aa", 20, 15, 1590738991000 ]
+ - [3, "bb", 21, 131, 1590738990000 ]
+ - [4, "dd", 41, null, null ]
+ - id: 11
+ desc: non-equality join - index not hit
+ inputs:
+ - columns: [ "c1 string","c2 int","c3 bigint","c4 timestamp" ]
+ indexs: [ "index1:c2:c4" ]
+ rows:
+ - [ "aa",2,3,1590738989000 ]
+ - [ "bb",21,31,1590738990000 ]
+ - columns: [ "c1 string","c2 int","c3 bigint","c4 timestamp" ]
+ indexs: [ "index1:c2:c4" ]
+ rows:
+ - [ "aa",2,3,1590738989000 ]
+ - [ "bb",21,31,1590738990000 ]
+ - [ "bb",21,32,1590738993000 ]
+ - [ "bb",21,31,1590738992000 ]
+ - [ "bb",21,31,1590738991000 ]
+ sql: select {0}.c1,{0}.c2,{1}.c3,{1}.c4 from {0} last join {1} ORDER BY {1}.c4 on {0}.c3<{1}.c3;
+ expect:
+ columns: [ "c1 string","c2 int","c3 bigint","c4 timestamp" ]
+ order: c1
+ rows:
+ - [ "aa",2,32,1590738993000 ]
+ - [ "bb",21,32,1590738993000 ]
+ - id: 12
+ desc: lastjoin of two sub-queries - sub-queries contain windows - no index used - no order by
+ mode: offline-unsupport
+ tags: ["offline-unsupport, @chendihao", "offline result is incorrect"]
+ inputs:
+ -
+ columns : ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"]
+ indexs: ["index1:c1:c7"]
+ rows:
+ - [1,"aa",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"aa",21,31,1.2,2.2,1590738990001,"2020-05-01"]
+ - [3,"aa",22,32,1.3,2.3,1590738990002,"2020-05-01"]
+ - [4,"bb",23,33,1.4,2.4,1590738990003,"2020-05-02"]
+ - [5,"bb",24,34,1.5,2.5,1590738990004,"2020-05-02"]
+ sql: |
+ select id,t2.c1,t2.c3,t1.c4, t2.w2_c3_sum, t1.w3_c4_sum from
+ (select id,c1,c3,c4,c7,c8,sum({0}.c3) OVER w2 as w2_c3_sum from {0} WINDOW w2 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS BETWEEN 2 PRECEDING AND CURRENT ROW)) as t2
+ last join (select c1,c4,c7,c8,sum({0}.c4) OVER w3 as w3_c4_sum from {0} WINDOW w3 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS_RANGE BETWEEN 1 PRECEDING AND CURRENT ROW)) as t1
+ on t2.c8=t1.c8
+ ;
+ expect:
+ order: id
+ columns: ["id int","c1 string","c3 int","c4 bigint", "w2_c3_sum int", "w3_c4_sum bigint"]
+ rows:
+ - [1,"aa",20,30, 20, 30]
+ - [2,"aa",21,31, 41, 30]
+ - [3,"aa",22,32, 63, 33]
+ - [4,"bb",23,33, 23, 33]
+ - [5,"bb",24,34, 47, 33]
+ -
+ id: 14
+ desc: rows - partition by a float column - index not hit
+ inputs:
+ -
+ columns : ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"]
+ indexs: ["index1:c1:c7"]
+ rows:
+ - [1,"aa",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,31,1.1,2.2,1590738991000,"2020-05-02"]
+ - [3,"cc",20,32,1.1,2.3,1590738992000,"2020-05-03"]
+ - [4,"dd",20,33,1.1,2.4,1590738993000,"2020-05-04"]
+ - [5,"ee",21,34,1.2,2.5,1590738994000,"2020-05-05"]
+ sql: |
+ SELECT id, c1, c5, sum(c4) OVER w1 as w1_c4_sum FROM {0} WINDOW w1 AS (PARTITION BY {0}.c5 ORDER BY {0}.c7 ROWS BETWEEN 2 PRECEDING AND CURRENT ROW);
+ expect:
+ success: false
+ -
+ id: 15
+ desc: rows - partition by a double column - index not hit
+ inputs:
+ -
+ columns : ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"]
+ indexs: ["index1:c1:c7"]
+ rows:
+ - [1,"aa",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,31,1.1,2.1,1590738991000,"2020-05-02"]
+ - [3,"cc",20,32,1.1,2.1,1590738992000,"2020-05-03"]
+ - [4,"dd",20,33,1.1,2.1,1590738993000,"2020-05-04"]
+ - [5,"ee",21,34,1.2,2.2,1590738994000,"2020-05-05"]
+ sql: |
+ SELECT id, c1, c6, sum(c4) OVER w1 as w1_c4_sum FROM {0} WINDOW w1 AS (PARTITION BY {0}.c6 ORDER BY {0}.c7 ROWS BETWEEN 2 PRECEDING AND CURRENT ROW);
+ expect:
+ success: false
+ -
+ id: 16
+ desc: rows - partition by an int column - index not hit
+ inputs:
+ -
+ columns : ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"]
+ indexs: ["index1:c1:c7"]
+ rows:
+ - [1,"aa",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,31,1.1,2.1,1590738991000,"2020-05-02"]
+ - [3,"cc",20,32,1.1,2.1,1590738992000,"2020-05-03"]
+ - [4,"dd",20,33,1.1,2.1,1590738993000,"2020-05-04"]
+ - [5,"ee",21,34,1.2,2.2,1590738994000,"2020-05-05"]
+ sql: |
+ SELECT id, c1, c3, sum(c4) OVER w1 as w1_c4_sum FROM {0} WINDOW w1 AS (PARTITION BY {0}.c3 ORDER BY {0}.c7 ROWS BETWEEN 2 PRECEDING AND CURRENT ROW);
+ expect:
+ order: id
+ columns: ["id int","c1 string","c3 int","w1_c4_sum bigint"]
+ rows:
+ - [1,"aa",20,30]
+ - [2,"bb",20,61]
+ - [3,"cc",20,93]
+ - [4,"dd",20,96]
+ - [5,"ee",21,34]
+ - id: 17
+ desc: rows_range - partition by a float column - index not hit
+ inputs:
+ - columns: [ "id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date" ]
+ indexs: [ "index1:c1:c7" ]
+ rows:
+ - [ 1,"aa",20,30,1.1,2.1,1590738990000,"2020-05-01" ]
+ - [ 2,"bb",20,31,1.1,2.2,1590738991000,"2020-05-02" ]
+ - [ 3,"cc",20,32,1.1,2.3,1590738992000,"2020-05-03" ]
+ - [ 4,"dd",20,33,1.1,2.4,1590738993000,"2020-05-04" ]
+ - [ 5,"ee",21,34,1.2,2.5,1590738994000,"2020-05-05" ]
+ sql: |
+ SELECT id, c1, c5, sum(c4) OVER w1 as w1_c4_sum FROM {0} WINDOW w1 AS (PARTITION BY {0}.c5 ORDER BY {0}.c7 ROWS_RANGE BETWEEN 2s PRECEDING AND CURRENT ROW);
+ expect:
+ success: false
+ - id: 18
+ desc: rows_range - partition by a double column - index not hit
+ inputs:
+ - columns: [ "id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date" ]
+ indexs: [ "index1:c1:c7" ]
+ rows:
+ - [ 1,"aa",20,30,1.1,2.1,1590738990000,"2020-05-01" ]
+ - [ 2,"bb",20,31,1.1,2.1,1590738991000,"2020-05-02" ]
+ - [ 3,"cc",20,32,1.1,2.1,1590738992000,"2020-05-03" ]
+ - [ 4,"dd",20,33,1.1,2.1,1590738993000,"2020-05-04" ]
+ - [ 5,"ee",21,34,1.2,2.2,1590738994000,"2020-05-05" ]
+ sql: |
+ SELECT id, c1, c6, sum(c4) OVER w1 as w1_c4_sum FROM {0} WINDOW w1 AS (PARTITION BY {0}.c6 ORDER BY {0}.c7 ROWS_RANGE BETWEEN 2s PRECEDING AND CURRENT ROW);
+ expect:
+ success: false
+ -
+ id: 19
+ desc: rows_range - partition by an int column - index not hit
+ inputs:
+ -
+ columns : ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"]
+ indexs: ["index1:c1:c7"]
+ rows:
+ - [1,"aa",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,31,1.1,2.1,1590738991000,"2020-05-02"]
+ - [3,"cc",20,32,1.1,2.1,1590738992000,"2020-05-03"]
+ - [4,"dd",20,33,1.1,2.1,1590738993000,"2020-05-04"]
+ - [5,"ee",21,34,1.2,2.2,1590738994000,"2020-05-05"]
+ sql: |
+ SELECT id, c1, c3, sum(c4) OVER w1 as w1_c4_sum FROM {0} WINDOW w1 AS (PARTITION BY {0}.c3 ORDER BY {0}.c7 ROWS_RANGE BETWEEN 2s PRECEDING AND CURRENT ROW);
+ expect:
+ order: id
+ columns: ["id int","c1 string","c3 int","w1_c4_sum bigint"]
+ rows:
+ - [1,"aa",20,30]
+ - [2,"bb",20,61]
+ - [3,"cc",20,93]
+ - [4,"dd",20,96]
+ - [5,"ee",21,34]
+ - id: 20
+ desc: main table uses an index, the UNION table does not hit an index
+ inputs:
+ - columns: ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"]
+ indexs: ["index1:c3:c7"]
+ rows:
+ - [1,"aa",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [4,"dd",20,33,1.4,2.4,1590738993000,"2020-05-04"]
+ - [5,"ee",21,34,1.5,2.5,1590738994000,"2020-05-05"]
+ - columns: ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"]
+ indexs: ["index1:c1:c7"]
+ rows:
+ - [2,"bb",20,31,1.2,2.2,1590738991000,"2020-05-02"]
+ - [3,"cc",20,32,1.3,2.3,1590738992000,"2020-05-03"]
+ sql: |
+ SELECT id, c1, c3, sum(c4) OVER w1 as w1_c4_sum FROM {0} WINDOW w1 AS (UNION {1} PARTITION BY {0}.c3 ORDER BY {0}.c7 ROWS BETWEEN 2 PRECEDING AND CURRENT ROW);
+ expect:
+ order: id
+ columns: ["id int","c1 string","c3 int","w1_c4_sum bigint"]
+ rows:
+ - [1,"aa",20,30]
+ - [4,"dd",20,96]
+ - [5,"ee",21,34]
\ No newline at end of file
diff --git a/cases/integration_test/tmp/test_current_time.yaml b/cases/integration_test/tmp/test_current_time.yaml
new file mode 100644
index 00000000000..528113cf3e5
--- /dev/null
+++ b/cases/integration_test/tmp/test_current_time.yaml
@@ -0,0 +1,106 @@
+# Copyright 2021 4Paradigm
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+db: test_zw
+debugs: []
+cases:
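+# Note: EXCLUDE CURRENT_TIME keeps the current row but is expected to drop other
+# rows that share its order-by timestamp, which is why the ts=0 rows below only
+# ever see their own value in the aggregate.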
+ - id: 0
+ desc: ts column value is 0
+ inputs:
+ - columns: [ "c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date" ]
+ indexs: [ "index1:c1:c7" ]
+ rows:
+ - [ "aa",20,30,1.1,2.1,0,"2020-05-01" ]
+ - [ "aa",21,31,1.2,2.2,0,"2020-05-02" ]
+ - [ "aa",22,32,1.3,2.3,0,"2020-05-03" ]
+ - [ "aa",23,33,1.4,2.4,1590738993000,"2020-05-04" ]
+ - [ "bb",24,34,1.5,2.5,1590738994000,"2020-05-05" ]
+ sql: |
+ SELECT c1, c3, sum(c4) OVER w1 as w1_c4_sum FROM {0} WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS_RANGE BETWEEN 2s PRECEDING AND CURRENT ROW EXCLUDE CURRENT_TIME);
+ expect:
+ order: c3
+ columns: [ "c1 string","c3 int","w1_c4_sum bigint" ]
+ rows:
+ - [ "aa",20,30 ]
+ - [ "aa",21,31 ]
+ - [ "aa",22,32 ]
+ - [ "aa",23,33 ]
+ - [ "bb",24,34 ]
+ - id: 1
+ desc: ts column value is 0
+ inputs:
+ - columns: [ "c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date" ]
+ indexs: [ "index1:c1:c7" ]
+ rows:
+ - [ "aa",20,30,1.1,2.1,0,"2020-05-01" ]
+ - [ "aa",21,31,1.2,2.2,0,"2020-05-02" ]
+ - [ "aa",22,32,1.3,2.3,1590738992000,"2020-05-03" ]
+ - [ "aa",23,33,1.4,2.4,1590738993000,"2020-05-04" ]
+ - [ "bb",24,34,1.5,2.5,1590738994000,"2020-05-05" ]
+ sql: |
+ SELECT c1, c3, sum(c4) OVER w1 as w1_c4_sum FROM {0} WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS_RANGE BETWEEN 2s PRECEDING AND 0s OPEN PRECEDING EXCLUDE CURRENT_TIME);
+ expect:
+ order: c3
+ columns: [ "c1 string","c3 int","w1_c4_sum bigint" ]
+ rows:
+ - [ "aa",20,null ]
+ - [ "aa",21,null ]
+ - [ "aa",22,null ]
+ - [ "aa",23,32 ]
+ - [ "bb",24,null ]
+ - id: 2
+ desc: ts column value is -1
+ tags: ["TODO","negative ts values are problematic; verify again once supported"]
+ inputs:
+ - columns: [ "c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 bigint","c8 date" ]
+ indexs: [ "index1:c1:c7" ]
+ rows:
+ - [ "aa",20,30,1.1,2.1,-1,"2020-05-01" ]
+ - [ "aa",21,31,1.2,2.2,-1,"2020-05-02" ]
+ - [ "aa",22,32,1.3,2.3,-1,"2020-05-03" ]
+ - [ "aa",23,33,1.4,2.4,1590738993000,"2020-05-04" ]
+ - [ "bb",24,34,1.5,2.5,1590738994000,"2020-05-05" ]
+ sql: |
+ SELECT c1, c3, sum(c4) OVER w1 as w1_c4_sum FROM {0} WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS_RANGE BETWEEN 2s PRECEDING AND CURRENT ROW EXCLUDE CURRENT_TIME);
+ expect:
+ order: c3
+ columns: [ "c1 string","c3 int","w1_c4_sum bigint" ]
+ rows:
+ - [ "aa",20,30 ]
+ - [ "aa",21,31 ]
+ - [ "aa",22,32 ]
+ - [ "aa",23,33 ]
+ - [ "bb",24,34 ]
+# - id: 2
+# desc: ts column value is 1
+# inputs:
+# - columns: [ "c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 bigint","c8 date" ]
+# indexs: [ "index1:c1:c7" ]
+# rows:
+# - [ "aa",20,30,1.1,2.1,1,"2020-05-01" ]
+# - [ "aa",21,31,1.2,2.2,1,"2020-05-02" ]
+# - [ "aa",22,32,1.3,2.3,1,"2020-05-03" ]
+# - [ "aa",23,33,1.4,2.4,1590738993000,"2020-05-04" ]
+# - [ "bb",24,34,1.5,2.5,1590738994000,"2020-05-05" ]
+# sql: |
+# SELECT c1, c3, sum(c4) OVER w1 as w1_c4_sum FROM {0} WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS_RANGE BETWEEN 2s PRECEDING AND CURRENT ROW EXCLUDE CURRENT_TIME);
+# expect:
+# order: c3
+# columns: [ "c1 string","c3 int","w1_c4_sum bigint" ]
+# rows:
+# - [ "aa",20,30 ]
+# - [ "aa",21,31 ]
+# - [ "aa",22,32 ]
+# - [ "aa",23,33 ]
+# - [ "bb",24,34 ]
diff --git a/cases/integration_test/ut_case/test_unique_expect.yaml b/cases/integration_test/ut_case/test_unique_expect.yaml
new file mode 100644
index 00000000000..61865e1a2f0
--- /dev/null
+++ b/cases/integration_test/ut_case/test_unique_expect.yaml
@@ -0,0 +1,56 @@
+# Copyright 2021 4Paradigm
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+db: test_zw
+debugs: []
+cases:
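+# Note: `unequalExpect` carries separate batch_expect / request_expect blocks
+# because the MAXSIZE-bounded window over out-of-order rows below yields
+# different results in batch and request execution.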
+ -
+ id: 0
+ desc: ts out of order
+ inputs:
+ -
+ columns : ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"]
+ indexs: ["index1:c1:c7"]
+ rows:
+ - [4,"aa",23,33,1.4,2.4,1590738990003,"2020-05-04"]
+ - [3,"aa",22,32,1.3,2.3,1590738990002,"2020-05-03"]
+ - [1,"aa",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"aa",21,31,1.2,2.2,1590738990001,"2020-05-02"]
+ - [5,"aa",24,34,1.5,2.5,1590738990004,"2020-05-05"]
+ sql: |
+ SELECT id, c1,
+ sum(c4) OVER w1 as w1_c4_sum
+ FROM {0}
+ WINDOW
+ w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS_RANGE BETWEEN 6 PRECEDING AND CURRENT ROW MAXSIZE 3)
+ ;
+ unequalExpect:
+ batch_expect:
+ order: id
+ columns: ["id int","c1 string","w1_c4_sum bigint"]
+ rows:
+ - [1,"aa",30]
+ - [2,"aa",61]
+ - [3,"aa",93]
+ - [4,"aa",96]
+ - [5,"aa",99]
+ request_expect:
+ order: id
+ columns: ["id int","c1 string","w1_c4_sum bigint"]
+ rows:
+ - [1,"aa",30]
+ - [2,"aa",61]
+ - [3,"aa",32]
+ - [4,"aa",33]
+ - [5,"aa",99]
diff --git a/cases/integration_test/v040/test_execute_mode.yaml b/cases/integration_test/v040/test_execute_mode.yaml
new file mode 100644
index 00000000000..dabae313d0d
--- /dev/null
+++ b/cases/integration_test/v040/test_execute_mode.yaml
@@ -0,0 +1,81 @@
+# Copyright 2021 4Paradigm
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+db: test_zw
+debugs: []
+cases:
+ -
+ id: 0
+ desc: test EXECUTE_MODE=online
+ sqls:
+ - set @@SESSION.execute_mode="online";
+ - show variables;
+ expect:
+ columns: ["Variable_name string","Value string"]
+ rows:
+ - ["execute_mode","online"]
+ -
+ id: 1
+ desc: EXECUTE_MODE=offline
+ sqls:
+ - set @@SESSION.execute_mode="offline";
+ - show variables;
+ expect:
+ columns: ["Variable_name string","Value string"]
+ rows:
+ - ["execute_mode","offline"]
+ -
+ id: 2
+ desc: EXECUTE_MODE set with an invalid variable name
+ sqls:
+ - set @@SESSION.execute_olol = "offline";
+ - show variables;
+ expect:
+ success: false
+ -
+ id: 3
+ desc: EXECUTE_MODE in lowercase
+ sqls:
+ - set @@SESSION.execute_mode = "online";
+ - show variables;
+ expect:
+ success: false
+ -
+ id: 4
+ desc: EXECUTE_MODE=online, create table, insert data, then query
+ inputs:
+ -
+ columns: ["c1 string","c2 smallint","c3 int","c4 timestamp"]
+ sqls:
+ - set @@SESSION.execute_mode = "online";
+ - insert into {0} value ("aa",1,2,1590738989000);
+ - select * from {0};
+ expect:
+ columns: ["c1 string","c2 smallint","c3 int","c4 timestamp"]
+ rows:
+ - ["aa",1,2,1590738989000]
+ -
+ id: 5
+ desc: EXECUTE_MODE=offline, create table, insert data, then query
+ inputs:
+ -
+ columns: ["c1 string","c2 smallint","c3 int","c4 timestamp"]
+ sqls:
+ - set @@SESSION.execute_mode = "offline";
+ - insert into {0} values ("bb",2,3,1590738989000);
+ - select * from {0};
+ expect:
+ columns: ["c1 string","c2 smallint","c3 int","c4 timestamp"]
+ rows:
+ - ["bb",2,3,1590738989000]
\ No newline at end of file
diff --git a/cases/integration_test/v040/test_groupby.yaml b/cases/integration_test/v040/test_groupby.yaml
new file mode 100644
index 00000000000..7150588bedd
--- /dev/null
+++ b/cases/integration_test/v040/test_groupby.yaml
@@ -0,0 +1,560 @@
+# Copyright 2021 4Paradigm
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+db: test_zw
+debugs: []
+version: 0.5.0
+sqlDialect: ["HybridSQL"]
+cases:
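+# Note: `mode: request-unsupport` appears to mark cases that the runner skips
+# when executing in request mode; such cases are only verified in batch mode.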
+ - id: 0
+ desc: "group by on one indexed column"
+ inputs:
+ -
+ columns : ["id bigint","c1 string","c7 timestamp"]
+ indexs: ["index1:c1:c7"]
+ rows:
+ - [1,"aa",1590738990000]
+ - [2,"bb",1590738991000]
+ - [3,"aa",1590738992000]
+ - [4,"a%",1590738993000]
+ - [5,"bb",1590738994000]
+ sql: select c1,count(*) as v1 from {0} group by c1;
+ expect:
+ order: c1
+ columns: ["c1 string","v1 bigint"]
+ rows:
+ - ["aa",2]
+ - ["bb",2]
+ - ["a%",1]
+ - id: 1
+ desc: "group by on one non-indexed column"
+ mode: request-unsupport
+ inputs:
+ -
+ columns : ["id bigint","c1 string","c7 timestamp"]
+ indexs: ["index1:id:c7"]
+ rows:
+ - [1,"aa",1590738990000]
+ - [2,"bb",1590738991000]
+ - [3,"aa",1590738992000]
+ - [4,"a%",1590738993000]
+ - [5,"bb",1590738994000]
+ sql: select c1,count(*) as v1 from {0} group by c1;
+ expect:
+ order: c1
+ columns: ["c1 string","v1 bigint"]
+ rows:
+ - ["aa",2]
+ - ["bb",2]
+ - ["a%",1]
+ - id: 2
+ desc: "group by on two columns with a composite index"
+ inputs:
+ -
+ columns : ["id bigint","c1 string","c2 int","c7 timestamp"]
+ indexs: ["index1:c1,c2:c7"]
+ rows:
+ - [1,"aa",11,1590738990000]
+ - [2,"bb",11,1590738991000]
+ - [3,"aa",12,1590738992000]
+ - [4,"a%",11,1590738993000]
+ - [5,"bb",11,1590738994000]
+ - [6,"aa",11,1590738995000]
+ sql: select c1,c2,count(*) as v1 from {0} group by c1,c2;
+ expect:
+ order: c1
+ columns: ["c1 string","c2 int","v1 bigint"]
+ rows:
+ - ["aa",11,2]
+ - ["bb",11,2]
+ - ["a%",11,1]
+ - ["aa",12,1]
+ - id: 3
+ desc: "group by on an int column"
+ inputs:
+ -
+ columns : ["id bigint","c1 int","c7 timestamp"]
+ indexs: ["index1:c1:c7"]
+ rows:
+ - [1,11,1590738990000]
+ - [2,22,1590738991000]
+ - [3,11,1590738992000]
+ - [4,33,1590738993000]
+ - [5,22,1590738994000]
+ sql: select c1,count(*) as v1 from {0} group by c1;
+ expect:
+ order: c1
+ columns: ["c1 int","v1 bigint"]
+ rows:
+ - [11,2]
+ - [22,2]
+ - [33,1]
+ - id: 4
+ desc: "group by on a bigint column"
+ inputs:
+ -
+ columns : ["id bigint","c1 bigint","c7 timestamp"]
+ indexs: ["index1:c1:c7"]
+ rows:
+ - [1,11,1590738990000]
+ - [2,22,1590738991000]
+ - [3,11,1590738992000]
+ - [4,33,1590738993000]
+ - [5,22,1590738994000]
+ sql: select c1,count(*) as v1 from {0} group by c1;
+ expect:
+ order: c1
+ columns: ["c1 bigint","v1 bigint"]
+ rows:
+ - [11,2]
+ - [22,2]
+ - [33,1]
+ - id: 5
+ desc: "group by on a smallint column"
+ inputs:
+ -
+ columns : ["id bigint","c1 smallint","c7 timestamp"]
+ indexs: ["index1:c1:c7"]
+ rows:
+ - [1,11,1590738990000]
+ - [2,22,1590738991000]
+ - [3,11,1590738992000]
+ - [4,33,1590738993000]
+ - [5,22,1590738994000]
+ sql: select c1,count(*) as v1 from {0} group by c1;
+ expect:
+ order: c1
+ columns: ["c1 smallint","v1 bigint"]
+ rows:
+ - [11,2]
+ - [22,2]
+ - [33,1]
+ - id: 6
+ desc: "group by on a float column"
+ mode: request-unsupport
+ inputs:
+ -
+ columns: ["id bigint","c1 float","c7 timestamp"]
+ indexs: ["index1:id:c7"]
+ rows:
+ - [1,11.1,1590738990000]
+ - [2,22.1,1590738991000]
+ - [3,11.1,1590738992000]
+ - [4,33.1,1590738993000]
+ - [5,22.1,1590738994000]
+ sql: select c1,count(*) as v1 from {0} group by c1;
+ expect:
+ success: false
+ - id: 7
+ desc: "group by on a double column"
+ mode: request-unsupport
+ inputs:
+ -
+ columns : ["id bigint","c1 double","c7 timestamp"]
+ indexs: ["index1:id:c7"]
+ rows:
+ - [1,11.1,1590738990000]
+ - [2,22.1,1590738991000]
+ - [3,11.1,1590738992000]
+ - [4,33.1,1590738993000]
+ - [5,22.1,1590738994000]
+ sql: select c1,count(*) as v1 from {0} group by c1;
+ expect:
+ success: false
+ - id: 8
+ desc: "group by on a date column"
+ inputs:
+ -
+ columns : ["id bigint","c1 date","c7 timestamp"]
+ indexs: ["index1:c1:c7"]
+ rows:
+ - [1,"2020-05-01",1590738990000]
+ - [2,"2020-05-02",1590738991000]
+ - [3,"2020-05-01",1590738992000]
+ - [4,"2020-05-03",1590738993000]
+ - [5,"2020-05-02",1590738994000]
+ sql: select c1,count(*) as v1 from {0} group by c1;
+ expect:
+ order: c1
+ columns: ["c1 date","v1 bigint"]
+ rows:
+ - ["2020-05-01",2]
+ - ["2020-05-02",2]
+ - ["2020-05-03",1]
+ - id: 9
+ desc: "group by on a timestamp column"
+ inputs:
+ -
+ columns : ["id bigint","c1 timestamp","c7 timestamp"]
+ indexs: ["index1:c1:c7"]
+ rows:
+ - [1,11,1590738990000]
+ - [2,22,1590738991000]
+ - [3,11,1590738992000]
+ - [4,33,1590738993000]
+ - [5,22,1590738994000]
+ sql: select c1,count(*) as v1 from {0} group by c1;
+ expect:
+ order: c1
+ columns: ["c1 timestamp","v1 bigint"]
+ rows:
+ - [11,2]
+ - [22,2]
+ - [33,1]
+ - id: 10
+ desc: "group by on a bool column"
+ inputs:
+ -
+ columns : ["id bigint","c1 bool","c7 timestamp"]
+ indexs: ["index1:c1:c7"]
+ rows:
+ - [1,true,1590738990000]
+ - [2,false,1590738991000]
+ - [3,false,1590738992000]
+ - [4,true,1590738993000]
+ - [5,true,1590738994000]
+ sql: select c1,count(*) as v1 from {0} group by c1;
+ expect:
+ order: c1
+ columns: ["c1 bool","v1 bigint"]
+ rows:
+ - [true,3]
+ - [false,2]
+ - id: 11
+ desc: "column contains empty strings and nulls"
+ mode: cli-unsupport
+ inputs:
+ -
+ columns : ["id bigint","c1 string","c7 timestamp"]
+ indexs: ["index1:c1:c7"]
+ rows:
+ - [1,"",1590738990000]
+ - [2,null,1590738991000]
+ - [3,"",1590738992000]
+ - [4,"a%",1590738993000]
+ - [5,null,1590738994000]
+ sql: select c1,count(*) as v1 from {0} group by c1;
+ expect:
+ order: c1
+ columns: ["c1 string","v1 bigint"]
+ rows:
+ - ["",2]
+ - [null,2]
+ - ["a%",1]
+ - id: 12
+ desc: "group by on two columns, one of them indexed"
+ inputs:
+ -
+ columns : ["id bigint","c1 string","c2 int","c7 timestamp"]
+ indexs: ["index1:c1,c2:c7"]
+ rows:
+ - [1,"aa",11,1590738990000]
+ - [2,"bb",11,1590738991000]
+ - [3,"aa",12,1590738992000]
+ - [4,"a%",11,1590738993000]
+ - [5,"bb",11,1590738994000]
+ - [6,"aa",11,1590738995000]
+ sql: select c1,c2,count(*) as v1 from {0} group by c1,c2;
+ expect:
+ order: c1
+ columns: ["c1 string","c2 int","v1 bigint"]
+ rows:
+ - ["aa",11,2]
+ - ["bb",11,2]
+ - ["a%",11,1]
+ - ["aa",12,1]
+ - id: 13
+ desc: "group by on two columns with two separate indexes"
+ inputs:
+ -
+ columns : ["id bigint","c1 string","c2 int","c7 timestamp"]
+ indexs: ["index1:c1:c7","index2:c2:c7"]
+ rows:
+ - [1,"aa",11,1590738990000]
+ - [2,"bb",11,1590738991000]
+ - [3,"aa",12,1590738992000]
+ - [4,"a%",11,1590738993000]
+ - [5,"bb",11,1590738994000]
+ - [6,"aa",11,1590738995000]
+ sql: select c1,c2,count(*) as v1 from {0} group by c1,c2;
+ expect:
+ columns: ["c1 string","c2 int","v1 bigint"]
+ rows:
+ - ["aa",12,1]
+ - ["bb",11,2]
+ - ["aa",11,2]
+ - ["a%",11,1]
+
+ - id: 14
+ desc: "selected column is not in the group by clause"
+ inputs:
+ -
+ columns : ["id bigint","c1 string","c7 timestamp"]
+ indexs: ["index1:c1:c7"]
+ rows:
+ - [1,"aa",1590738990000]
+ - [2,"bb",1590738991000]
+ - [3,"aa",1590738992000]
+ - [4,"a%",1590738993000]
+ - [5,"bb",1590738994000]
+ sql: select id,c1,count(*) as v1 from {0} group by c1;
+ expect:
+ success: false
+ - id: 15
+ desc: "group by combined with count/sum/max/min/avg"
+ inputs:
+ -
+ columns : ["id bigint","c1 string","c2 int","c7 timestamp"]
+ indexs: ["index1:c1:c7"]
+ rows:
+ - [1,"aa",1,1590738990000]
+ - [2,"bb",2,1590738991000]
+ - [3,"aa",3,1590738992000]
+ - [4,"cc",4,1590738993000]
+ - [5,"bb",5,1590738994000]
+ - [6,"aa",6,1590738995000]
+ sql: select c1,count(c2) as v1,max(c2) as v2,min(c2) as v3,avg(c2) as v4,sum(c2) as v5 from {0} group by c1;
+ expect:
+ order: c1
+ columns: ["c1 string","v1 bigint","v2 int","v3 int","v4 double","v5 int"]
+ rows:
+ - ["aa",3,6,1,3.333333,10]
+ - ["bb",2,5,2,3.5,7]
+ - ["cc",1,4,4,4,4]
+ - id: 16
+ desc: "group by on a column that does not exist"
+ inputs:
+ -
+ columns : ["id bigint","c1 string","c7 timestamp"]
+ indexs: ["index1:c1:c7"]
+ rows:
+ - [1,"aa",1590738990000]
+ - [2,"bb",1590738991000]
+ - [3,"aa",1590738992000]
+ - [4,"a%",1590738993000]
+ - [5,"bb",1590738994000]
+ sql: select c2,count(*) as v1 from {0} group by c2;
+ expect:
+ success: false
+ - id: 17
+ desc: "group by combined with having"
+ inputs:
+ -
+ columns : ["id bigint","c1 string","c2 int","c7 timestamp"]
+ indexs: ["index1:c1:c7"]
+ rows:
+ - [1,"aa",1,1590738990000]
+ - [2,"bb",2,1590738991000]
+ - [3,"aa",3,1590738992000]
+ - [4,"cc",4,1590738993000]
+ - [5,"bb",5,1590738994000]
+ - [6,"aa",6,1590738995000]
+ sql: select c1,count(c2) as v1 from {0} group by c1 having count(c2)>1;
+ expect:
+ order: c1
+ columns: ["c1 string","v1 bigint"]
+ rows:
+ - ["aa",3]
+ - ["bb",2]
+ - id: 18
+ desc: "group by combined with having, using an alias"
+ inputs:
+ -
+ columns : ["id bigint","c1 string","c2 int","c7 timestamp"]
+ indexs: ["index1:c1:c7"]
+ rows:
+ - [1,"aa",1,1590738990000]
+ - [2,"bb",2,1590738991000]
+ - [3,"aa",3,1590738992000]
+ - [4,"cc",4,1590738993000]
+ - [5,"bb",5,1590738994000]
+ - [6,"aa",6,1590738995000]
+ sql: select c1,count(c2) as v1 from {0} group by c1 having v1>1;
+ expect:
+ success: false
+ - id: 19
+ desc: "group by using where to filter on an aggregate function"
+ inputs:
+ -
+ columns : ["id bigint","c1 string","c2 int","c7 timestamp"]
+ indexs: ["index1:c1:c7"]
+ rows:
+ - [1,"aa",1,1590738990000]
+ - [2,"bb",2,1590738991000]
+ - [3,"aa",3,1590738992000]
+ - [4,"cc",4,1590738993000]
+ - [5,"bb",5,1590738994000]
+ - [6,"aa",6,1590738995000]
+ sql: select c1,count(c2) as v1 from {0} group by c1 where count(c2)>1;
+ expect:
+ success: false
+ - id: 20
+ desc: "group by combined with where"
+ inputs:
+ -
+ columns : ["id bigint","c1 string","c2 int","c7 timestamp"]
+ indexs: ["index1:c1:c7"]
+ rows:
+ - [1,"aa",1,1590738990000]
+ - [2,"bb",2,1590738991000]
+ - [3,"aa",3,1590738992000]
+ - [4,"cc",4,1590738993000]
+ - [5,"bb",5,1590738994000]
+ - [6,"aa",6,1590738995000]
+ sql: select c1,count(c2) as v1 from {0} group by c1 where c1='aa';
+ expect:
+ success: false
+ - id: 21
+ desc: group by after lastjoin
+ inputs:
+ - columns: [ "c1 string","c2 int","c3 bigint","c4 timestamp" ]
+ indexs: [ "index1:c1:c4" ]
+ rows:
+ - [ "aa",2,3,1590738989000 ]
+ - [ "aa",21,31,1590738990000 ]
+ - [ "cc",41,51,1590738991000 ]
+ - columns: [ "c1 string","c2 int","c3 bigint","c4 timestamp" ]
+ indexs: [ "index1:c1:c3" ]
+ rows:
+ - [ "aa",2,13,1590738989000 ]
+ - [ "bb",21,131,1590738990000 ]
+ - [ "cc",41,151,1590738992000 ]
+ sql: select {0}.c1,sum({1}.c3) as v1 from {0} last join {1} ORDER BY {1}.c3 on {0}.c1={1}.c1 group by {0}.c1;
+ expect:
+ order: c1
+ columns: [ "c1 string","v1 bigint"]
+ rows:
+ - [ "aa",26 ]
+ - [ "cc",151 ]
+ - id: 22
+ desc: group by before lastjoin (in sub-queries)
+ mode: request-unsupport
+ inputs:
+ - columns: [ "c1 string","c2 int","c3 bigint","c4 timestamp" ]
+ indexs: [ "index1:c1:c4" ]
+ rows:
+ - [ "aa",2,3,1590738989000 ]
+ - [ "aa",21,31,1590738990000 ]
+ - [ "cc",41,51,1590738991000 ]
+ - columns: [ "c1 string","c2 int","c3 bigint","c4 timestamp" ]
+ indexs: [ "index1:c1:c3" ]
+ rows:
+ - [ "aa",2,13,1590738989000 ]
+ - [ "cc",21,131,1590738990000 ]
+ - [ "cc",41,151,1590738992000 ]
+ sql: select t1.c1,t1.v1,t2.v1 from (select c1,sum(c2) as v1 from {0} group by c1) as t1 last join (select c1,sum(c2) as v1 from {1} group by c1) as t2 on t1.c1=t2.c1;
+ expect:
+ order: c1
+ columns: [ "c1 string","v1 int","v1 int"]
+ rows:
+ - [ "aa",23,2 ]
+ - [ "cc",41,62 ]
+ -
+ id: 23
+ desc: group by after window
+ inputs:
+ -
+ columns: ["c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"]
+ indexs: ["index1:c1:c7"]
+ rows:
+ - ["aa",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - ["aa",21,31,1.2,2.2,1590738991000,"2020-05-02"]
+ - ["aa",22,32,1.3,2.3,1590738992000,"2020-05-03"]
+ - ["aa",23,33,1.4,2.4,1590738993000,"2020-05-04"]
+ - ["bb",24,34,1.5,2.5,1590738994000,"2020-05-05"]
+ sql: |
+ SELECT c1, max(sum(c4) OVER w1) as w1_c4_sum FROM {0} WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS BETWEEN 2 PRECEDING AND CURRENT ROW) group by c1;
+ expect:
+ success: false
+ - id: 24
+ desc: "group by after a sub-query"
+ inputs:
+ -
+ columns : ["id bigint","c1 string","c7 timestamp"]
+ indexs: ["index1:c1:c7"]
+ rows:
+ - [1,"aa",1590738990000]
+ - [2,"bb",1590738991000]
+ - [3,"aa",1590738992000]
+ - [4,"a%",1590738993000]
+ - [5,"bb",1590738994000]
+ sql: select c1,count(*) as v1 from (select * from {0}) as t group by c1;
+ expect:
+ order: c1
+ columns: ["c1 string","v1 bigint"]
+ rows:
+ - ["aa",2]
+ - ["bb",2]
+ - ["a%",1]
+ - id: 25
+ desc: "group by wrapped in a sub-query"
+ inputs:
+ -
+ columns : ["id bigint","c1 string","c7 timestamp"]
+ indexs: ["index1:c1:c7"]
+ rows:
+ - [1,"aa",1590738990000]
+ - [2,"bb",1590738991000]
+ - [3,"aa",1590738992000]
+ - [4,"a%",1590738993000]
+ - [5,"bb",1590738994000]
+ sql: select * from (select c1,count(*) as v1 from {0} group by c1);
+ expect:
+ order: c1
+ columns: ["c1 string","v1 bigint"]
+ rows:
+ - ["aa",2]
+ - ["bb",2]
+ - ["a%",1]
+ - id: 26
+ desc: "group by, where clause uses an aggregate function alias"
+ inputs:
+ -
+ columns : ["id bigint","c1 string","c2 int","c7 timestamp"]
+ indexs: ["index1:c1:c7"]
+ rows:
+ - [1,"aa",1,1590738990000]
+ - [2,"bb",2,1590738991000]
+ - [3,"aa",3,1590738992000]
+ - [4,"cc",4,1590738993000]
+ - [5,"bb",5,1590738994000]
+ - [6,"aa",6,1590738995000]
+ sql: select c1,count(c2) as v1 from {0} group by c1 where v1>1;
+ expect:
+ success: false
+ - id: 27
+ desc: "group by wrapped in a sub-query, with where"
+ mode: request-unsupport
+ inputs:
+ -
+ columns : ["id bigint","c1 string","c7 timestamp"]
+ indexs: ["index1:c1:c7"]
+ rows:
+ - [1,"aa",1590738990000]
+ - [2,"bb",1590738991000]
+ - [3,"aa",1590738992000]
+ - [4,"a%",1590738993000]
+ - [5,"bb",1590738994000]
+ sql: select * from (select c1,count(*) as v1 from {0} group by c1) where v1=2;
+ expect:
+ order: c1
+ columns: ["c1 string","v1 bigint"]
+ rows:
+ - ["aa",2]
+ - ["bb",2]
+
+
+
+
+
diff --git a/cases/integration_test/v040/test_job.yaml b/cases/integration_test/v040/test_job.yaml
new file mode 100644
index 00000000000..74b6a0fd4a4
--- /dev/null
+++ b/cases/integration_test/v040/test_job.yaml
@@ -0,0 +1,176 @@
+# Copyright 2021 4Paradigm
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+db: test_zw
+debugs: []
+cases:
+ -
+ id: 0
+ desc: show jobs
+ sqls:
+ - use __INTERNAL_DB;
+ - set @@SESSION.execute_mode = "offline";
+ - insert into JOB_INFO values (1,'SparkBatchSql','Running',11111,22222,'','local','application_1111','');
+ - show jobs;
+ expects:
+ columns: ["JOBID string","JOB_TYPE string","STATUS string"]
+ rows:
+ - ["JOB-11220021","OFFLINE LOAD","RUNNING"]
+ -
+ id: 1
+ desc: showjobs
+ inputs:
+ -
+ columns: ["c1 string","c2 smallint","c3 int","c4 timestamp"]
+ rows:
+ - ["aa",1,2,1590738989000]
+ sqls:
+ - use test_zw;
+ - set @@SESSION.execute_mode = "offline";
+ - insert into JOB_INFO values (1,'SparkBatchSql','Running',11111,22222,'','local','application_1111','');
+ - showjobs;
+ expects:
+ success: false
+ -
+ id: 2
+ desc: switch to a different db, then show jobs
+ inputs:
+ -
+ columns: ["c1 string","c2 smallint","c3 int","c4 timestamp"]
+ rows:
+ - ["aa",1,2,1590738989000]
+ sqls:
+ - use test_zw;
+ - set @@SESSION.execute_mode = "offline";
+ - insert into JOB_INFO values (1,'SparkBatchSql','Running',11111,22222,'','local','application_1111','');
+ - use other_db;
+ - show jobs;
+ expects:
+ columns: ["JOBID string","JOB_TYPE string","STATUS string"]
+ rows:
+ - ["JOB-11220021","OFFLINE LOAD","RUNNING"]
+ -
+ id: 3
+ desc: show job jobID
+ inputs:
+ -
+ columns: ["c1 string","c2 smallint","c3 int","c4 timestamp"]
+ rows:
+ - ["aa",1,2,1590738989000]
+ sqls:
+ - insert into JOB_INFO values (1,'SparkBatchSql','Running',11111,22222,'','local','application_1111','');
+ - show job 1;
+ expects:
+ columns: ["JOBID string","JOB_TYPE string","URL string","CONTENT string"]
+ rows:
+ - ["JOB-11220021","OFFLINE LOAD","xxxx","LOAD DATA INFILE"]
+ -
+ id: 4
+    desc: jobID does not exist
+ inputs:
+ -
+ columns: ["c1 string","c2 smallint","c3 int","c4 timestamp"]
+ rows:
+ - ["aa",1,2,1590738989000]
+ sqls:
+      - insert into JOB_INFO values (1,'SparkBatchSql','Running',11111,22222,'','local','application_1111','');
+ - show job 1111;
+ expects:
+ -
+ id: 5
+    desc: syntax error
+ inputs:
+ -
+ columns: ["c1 string","c2 smallint","c3 int","c4 timestamp"]
+ rows:
+ - ["aa",1,2,1590738989000]
+ sqls:
+ - insert into JOB_INFO values (1,'SparkBatchSql','Running',11111,22222,'','local','application_1111','');
+ - show jobe 1;
+ expects:
+ success: false
+ -
+ id: 6
+ desc: delete job jobID
+ inputs:
+ -
+ columns: ["c1 string","c2 smallint","c3 int","c4 timestamp"]
+ rows:
+ - ["aa",1,2,1590738989000]
+ sqls:
+ - delete job JOB-11220021;
+ expects:
+ -
+ id: 7
+    desc: jobID does not exist
+ inputs:
+ -
+ columns: ["c1 string","c2 smallint","c3 int","c4 timestamp"]
+ rows:
+ - ["aa",1,2,1590738989000]
+ sqls:
+ - delete job JOB-xxxxxx;
+ expects:
+ -
+ id: 8
+    desc: syntax error
+ inputs:
+ -
+ columns: ["c1 string","c2 smallint","c3 int","c4 timestamp"]
+ rows:
+ - ["aa",1,2,1590738989000]
+ sqls:
+ - delete jobe JOB-11220021;
+ expects:
+ -
+ id: 9
+ desc: stop job jobID
+ inputs:
+ -
+ columns: ["c1 string","c2 smallint","c3 int","c4 timestamp"]
+ rows:
+ - ["aa",1,2,1590738989000]
+ sqls:
+ - set @@SESSION.execute_mode="offline";
+ - load data infile 'hdfs://m7-common-cdh02:8022/user/zhaowei/openmldb/load_data/csv-import-10000-1.csv' into table test_smoke options(deep_copy=true,mode='append');
+ - stop job JOB-11220021;
+ expects:
+ columns: ["id int","job_state string","state string","start_time timestamp","end_time timestamp","parameter string","cluster string","application_id string","error string"]
+ rows:
+ - [1,"ImportOfflineData","STOPPED","","","load data infile 'hdfs://m7-common-cdh02:8022/user/zhaowei/openmldb/load_data/csv-import-10000-1.csv' into table test_smoke options(deep_copy=true,mode='append');",
+ "local","local-1640683224470",""]
+ -
+ id: 10
+    desc: jobID does not exist
+ inputs:
+ -
+ columns: ["c1 string","c2 smallint","c3 int","c4 timestamp"]
+ rows:
+ - ["aa",1,2,1590738989000]
+ sqls:
+ - stop job JOB-xxxxxx;
+ expects:
+ success: false
+ -
+ id: 11
+    desc: syntax error
+ inputs:
+ -
+ columns: ["c1 string","c2 smallint","c3 int","c4 timestamp"]
+ rows:
+ - ["aa",1,2,1590738989000]
+ sqls:
+ - stop jobe JOB-11220021;
+ expects:
+      success: false
\ No newline at end of file
diff --git a/cases/integration_test/v040/test_load_data.yaml b/cases/integration_test/v040/test_load_data.yaml
new file mode 100644
index 00000000000..41a446a8e76
--- /dev/null
+++ b/cases/integration_test/v040/test_load_data.yaml
@@ -0,0 +1,467 @@
+# Copyright 2021 4Paradigm
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+db: test_zw
+debugs: []
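+# The cases below exercise LOAD DATA INFILE from csv and parquet sources in both
+# online and offline execute mode, varying the deep_copy, mode, format and header
+# options; most cases end with SHOW JOBS and leave the expect block empty.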
+cases:
+ -
+ id: 0
+    desc: Load data - cluster version, EXECUTE_MODE=online, load parquet file
+ inputs:
+ -
+ columns: ["id int","job_state string","state string","start_time timestamp","end_time timestamp","parameter string","cluster string","application_id string","error string"]
+ create: |
+ create table {0}(
+ id int,
+ c1_smallint smallint,
+ c2_int int,
+ c3_bigint bigint,
+ c4_float float,
+ c5_double double,
+ c6_string string,
+ c7_timestamp timestamp,
+ c8_date date,
+ c9_bool bool,
+ index(key=c6_string,ts=c7_timestamp,ttl=0m,ttl_type=absolute)
+ )options(partitionnum = 1,replicanum = 1);
+ sqls:
+ - set @@SESSION.execute_mode = "online";
+ - LOAD DATA INFILE 'hdfs://m7-common-cdh02:8022/user/zhaowei/openmldb/load_data/user/zhaowei/openmldb/load_data/parquet' INTO TABLE {0};
+ - SHOW JOBS;
+ expect:
+ columns: ["id int","job_state string","state string","start_time timestamp","end_time timestamp","parameter string","cluster string","application_id string","error string"]
+
+ -
+ id: 1
+    desc: cluster version, EXECUTE_MODE=offline, load parquet file
+ inputs:
+ -
+ columns: ["id int","job_state string","state string","start_time timestamp","end_time timestamp","parameter string","cluster string","application_id string","error string"]
+ create: |
+ create table {0}(
+ id int,
+ c1_smallint smallint,
+ c2_int int,
+ c3_bigint bigint,
+ c4_float float,
+ c5_double double,
+ c6_string string,
+ c7_timestamp timestamp,
+ c8_date date,
+ c9_bool bool,
+ index(key=c6_string,ts=c7_timestamp,ttl=0m,ttl_type=absolute)
+ )options(partitionnum = 1,replicanum = 1);
+ sqls:
+ - set @@SESSION.execute_mode = "offline";
+ - LOAD DATA INFILE 'hdfs://m7-common-cdh02:8022/user/zhaowei/openmldb/load_data/user/zhaowei/openmldb/load_data/parquet' INTO TABLE {0} options(format='parquet',foo='bar',deep_copy=false,header=false,mode='append');
+ - SHOW JOBS;
+ expect:
+
+
+ -
+ id: 2
+    desc: cluster version, EXECUTE_MODE=online, load csv file, mode omitted (default)
+ inputs:
+ -
+ columns: ["id int","c1_smallint smallint","c2_int int","c3_bigint bigint","c4_float float","c5_double double","c6_string string","c7_timestamp bigint","c8_date date","c9_bool bool"]
+ create: |
+ create table {0}(
+ id int,
+ c1_smallint smallint,
+ c2_int int,
+ c3_bigint bigint,
+ c4_float float,
+ c5_double double,
+ c6_string string,
+ c7_timestamp bigint,
+ c8_date date,
+ c9_bool bool,
+ index(key=c6_string,ts=c7_timestamp,ttl=0m,ttl_type=absolute)
+ )options(partitionnum = 1,replicanum = 1);
+ sqls:
+ - set @@SESSION.execute_mode = "online";
+ - load data infile 'hdfs://m7-common-cdh02:8022/user/zhaowei/openmldb/load_data/csv-import-10000-1.csv' into table {0};
+ - SHOW JOBS;
+ expect:
+ columns: ["id int","job_state string","state string","start_time timestamp","end_time timestamp","parameter string","cluster string","application_id string","error string"]
+ rows:
+ - [1,"ImportOfflineData","FINISHED","","","load data infile 'hdfs://m7-common-cdh02:8022/user/zhaowei/openmldb/load_data/csv-import-10000-1.csv' into table test_smoke options(deep_copy=true,mode='append');",
+ "local","local-1640683224470",""]
+ -
+ id: 3
+    desc: cluster version, execute_mode=online, load csv file, mode=append
+ inputs:
+ -
+ columns: ["id int","c1_smallint smallint","c2_int int","c3_bigint bigint","c4_float float","c5_double double","c6_string string","c7_timestamp bigint","c8_date date","c9_bool bool"]
+ create: |
+ create table {0}(
+ id int,
+ c1_smallint smallint,
+ c2_int int,
+ c3_bigint bigint,
+ c4_float float,
+ c5_double double,
+ c6_string string,
+ c7_timestamp bigint,
+ c8_date date,
+ c9_bool bool,
+ index(key=c6_string,ts=c7_timestamp,ttl=0m,ttl_type=absolute)
+ )options(partitionnum = 1,replicanum = 1);
+ sqls:
+ - set @@SESSION.execute_mode = "online";
+ - load data infile 'hdfs://m7-common-cdh02:8022/user/zhaowei/openmldb/load_data/csv-import-10000-1.csv' into table {0} options(mode='append');
+ - SHOW JOBS;
+ expect:
+
+ -
+ id: 4
+    desc: cluster version, execute_mode=online, load csv file, mode=overwrite
+ inputs:
+ -
+ columns: ["id int","c1_smallint smallint","c2_int int","c3_bigint bigint","c4_float float","c5_double double","c6_string string","c7_timestamp bigint","c8_date date","c9_bool bool"]
+ create: |
+ create table {0}(
+ id int,
+ c1_smallint smallint,
+ c2_int int,
+ c3_bigint bigint,
+ c4_float float,
+ c5_double double,
+ c6_string string,
+ c7_timestamp bigint,
+ c8_date date,
+ c9_bool bool,
+ index(key=c6_string,ts=c7_timestamp,ttl=0m,ttl_type=absolute)
+ )options(partitionnum = 1,replicanum = 1);
+ sqls:
+ - set @@SESSION.execute_mode = "online";
+ - load data infile 'hdfs://m7-common-cdh02:8022/user/zhaowei/openmldb/load_data/csv-import-10000-1.csv' into table {0} options(mode='overwrite');
+ - SHOW JOBS;
+ expect:
+ -
+ id: 5
+    desc: cluster version, execute_mode=offline, load csv file, deep_copy=true, mode omitted (default), file does not exist
+ inputs:
+ -
+ columns: ["id int","c1_smallint smallint","c2_int int","c3_bigint bigint","c4_float float","c5_double double","c6_string string","c7_timestamp bigint","c8_date date","c9_bool bool"]
+ create: |
+ create table {0}(
+ id int,
+ c1_smallint smallint,
+ c2_int int,
+ c3_bigint bigint,
+ c4_float float,
+ c5_double double,
+ c6_string string,
+ c7_timestamp bigint,
+ c8_date date,
+ c9_bool bool,
+ index(key=c6_string,ts=c7_timestamp,ttl=0m,ttl_type=absolute)
+ )options(partitionnum = 1,replicanum = 1);
+ sqls:
+ - set @@SESSION.execute_mode = "offline";
+ - load data infile 'hdfs://m7-common-cdh02:8022/user/zhaowei/openmldb/load_data/csv-import-10000-1.csv' into table {0} options(deep_copy=true);
+ - SHOW JOBS;
+ expect:
+
+ -
+ id: 6
+    desc: cluster version, execute_mode=offline, load csv file, deep_copy=true, mode omitted (default), file already exists
+ inputs:
+ -
+ columns: ["id int","c1_smallint smallint","c2_int int","c3_bigint bigint","c4_float float","c5_double double","c6_string string","c7_timestamp bigint","c8_date date","c9_bool bool"]
+ create: |
+ create table {0}(
+ id int,
+ c1_smallint smallint,
+ c2_int int,
+ c3_bigint bigint,
+ c4_float float,
+ c5_double double,
+ c6_string string,
+ c7_timestamp bigint,
+ c8_date date,
+ c9_bool bool,
+ index(key=c6_string,ts=c7_timestamp,ttl=0m,ttl_type=absolute)
+ )options(partitionnum = 1,replicanum = 1);
+ sqls:
+ - set @@SESSION.execute_mode = "offline";
+ - load data infile 'hdfs://m7-common-cdh02:8022/user/zhaowei/openmldb/load_data/csv-import-10000-1.csv' into table {0} options(deep_copy=true);
+ - SHOW JOBS;
+ expect:
+
+ -
+ id: 7
+    desc: cluster version, execute_mode=offline, load csv file, deep_copy=true, mode=append
+ inputs:
+ -
+ columns: ["id int","c1_smallint smallint","c2_int int","c3_bigint bigint","c4_float float","c5_double double","c6_string string","c7_timestamp bigint","c8_date date","c9_bool bool"]
+ create: |
+ create table {0}(
+ id int,
+ c1_smallint smallint,
+ c2_int int,
+ c3_bigint bigint,
+ c4_float float,
+ c5_double double,
+ c6_string string,
+ c7_timestamp bigint,
+ c8_date date,
+ c9_bool bool,
+ index(key=c6_string,ts=c7_timestamp,ttl=0m,ttl_type=absolute)
+ )options(partitionnum = 1,replicanum = 1);
+ sqls:
+ - set @@SESSION.execute_mode = "offline";
+ - load data infile 'hdfs://m7-common-cdh02:8022/user/zhaowei/openmldb/load_data/csv-import-10000-1.csv' into table {0} options(deep_copy=true,mode='append');
+ - SHOW JOBS;
+ expect:
+ -
+ id: 8
+    desc: cluster version, execute_mode=offline, load csv file, deep_copy=true, mode=overwrite
+ inputs:
+ -
+ columns: ["id int","c1_smallint smallint","c2_int int","c3_bigint bigint","c4_float float","c5_double double","c6_string string","c7_timestamp bigint","c8_date date","c9_bool bool"]
+ create: |
+ create table {0}(
+ id int,
+ c1_smallint smallint,
+ c2_int int,
+ c3_bigint bigint,
+ c4_float float,
+ c5_double double,
+ c6_string string,
+ c7_timestamp bigint,
+ c8_date date,
+ c9_bool bool,
+ index(key=c6_string,ts=c7_timestamp,ttl=0m,ttl_type=absolute)
+ )options(partitionnum = 1,replicanum = 1);
+ sqls:
+ - set @@SESSION.execute_mode = "offline";
+ - load data infile 'hdfs://m7-common-cdh02:8022/user/zhaowei/openmldb/load_data/csv-import-10000-1.csv' into table {0} options(deep_copy=true,mode='overwrite');
+ - SHOW JOBS;
+ expect:
+ -
+ id: 9
+    desc: cluster version, execute_mode=offline, load csv file, deep_copy=false, mode=append
+ inputs:
+ -
+ columns: ["id int","c1_smallint smallint","c2_int int","c3_bigint bigint","c4_float float","c5_double double","c6_string string","c7_timestamp bigint","c8_date date","c9_bool bool"]
+ create: |
+ create table {0}(
+ id int,
+ c1_smallint smallint,
+ c2_int int,
+ c3_bigint bigint,
+ c4_float float,
+ c5_double double,
+ c6_string string,
+ c7_timestamp bigint,
+ c8_date date,
+ c9_bool bool,
+ index(key=c6_string,ts=c7_timestamp,ttl=0m,ttl_type=absolute)
+ )options(partitionnum = 1,replicanum = 1);
+ sqls:
+ - set @@SESSION.execute_mode = "offline";
+ - load data infile 'hdfs://m7-common-cdh02:8022/user/zhaowei/openmldb/load_data/csv-import-10000-1.csv' into table {0} options(deep_copy=false,mode='append');
+ - SHOW JOBS;
+ expect:
+ -
+ id: 10
+    desc: cluster version, execute_mode=offline, load csv file, deep_copy=false, mode=overwrite
+ inputs:
+ -
+ columns: ["id int","c1_smallint smallint","c2_int int","c3_bigint bigint","c4_float float","c5_double double","c6_string string","c7_timestamp bigint","c8_date date","c9_bool bool"]
+ create: |
+ create table {0}(
+ id int,
+ c1_smallint smallint,
+ c2_int int,
+ c3_bigint bigint,
+ c4_float float,
+ c5_double double,
+ c6_string string,
+ c7_timestamp bigint,
+ c8_date date,
+ c9_bool bool,
+ index(key=c6_string,ts=c7_timestamp,ttl=0m,ttl_type=absolute)
+ )options(partitionnum = 1,replicanum = 1);
+ sqls:
+ - set @@SESSION.execute_mode = "offline";
+ - load data infile 'hdfs://m7-common-cdh02:8022/user/zhaowei/openmldb/load_data/csv-import-10000-1.csv' into table {0} options(deep_copy=false,mode='overwrite');
+ - desc {0};
+ expect:
+ -
+ id: 11
+    desc: cluster version, execute_mode=offline, load csv file, deep_copy=false, mode omitted (default), not loaded before
+ inputs:
+ -
+ columns: ["id int","c1_smallint smallint","c2_int int","c3_bigint bigint","c4_float float","c5_double double","c6_string string","c7_timestamp bigint","c8_date date","c9_bool bool"]
+ create: |
+ create table {0}(
+ id int,
+ c1_smallint smallint,
+ c2_int int,
+ c3_bigint bigint,
+ c4_float float,
+ c5_double double,
+ c6_string string,
+ c7_timestamp bigint,
+ c8_date date,
+ c9_bool bool,
+ index(key=c6_string,ts=c7_timestamp,ttl=0m,ttl_type=absolute)
+ )options(partitionnum = 1,replicanum = 1);
+ sqls:
+ - set @@SESSION.execute_mode = "offline";
+ - load data infile 'hdfs://m7-common-cdh02:8022/user/zhaowei/openmldb/load_data/csv-import-10000-1.csv' into table {0} options(deep_copy=false);
+ - desc {0};
+ expect:
+ -
+ id: 12
+    desc: cluster version, execute_mode=offline, load csv file, deep_copy=false, mode omitted (default), already loaded once
+ inputs:
+ -
+ columns: ["id int","c1_smallint smallint","c2_int int","c3_bigint bigint","c4_float float","c5_double double","c6_string string","c7_timestamp bigint","c8_date date","c9_bool bool"]
+ create: |
+ create table {0}(
+ id int,
+ c1_smallint smallint,
+ c2_int int,
+ c3_bigint bigint,
+ c4_float float,
+ c5_double double,
+ c6_string string,
+ c7_timestamp bigint,
+ c8_date date,
+ c9_bool bool,
+ index(key=c6_string,ts=c7_timestamp,ttl=0m,ttl_type=absolute)
+ )options(partitionnum = 1,replicanum = 1);
+ sqls:
+ - set @@SESSION.execute_mode = "offline";
+ - load data infile 'hdfs://m7-common-cdh02:8022/user/zhaowei/openmldb/load_data/csv-import-10000-1.csv' into table {0} options(deep_copy=false);
+ - desc {0};
+ expect:
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+ -
+    id: 13
+    desc: cluster version, EXECUTE_MODE=offline, load parquet file, mode=duplicate
+ inputs:
+ -
+ columns: ["id int","job_state string","state string","start_time timestamp","end_time timestamp","parameter string","cluster string","application_id string","error string"]
+ create: |
+ create table {0}(
+ id int,
+ c1_smallint smallint,
+ c2_int int,
+ c3_bigint bigint,
+ c4_float float,
+ c5_double double,
+ c6_string string,
+ c7_timestamp timestamp,
+ c8_date date,
+ c9_bool bool,
+ index(key=c6_string,ts=c7_timestamp,ttl=0m,ttl_type=absolute)
+ )options(partitionnum = 1,replicanum = 1);
+ sqls:
+ - set @@SESSION.execute_mode = "offline";
+ - LOAD DATA INFILE 'hdfs://m7-common-cdh02:8022/user/zhaowei/openmldb/load_data/user/zhaowei/openmldb/load_data/parquet' INTO TABLE {0} options(format='parquet',foo='bar',deep_copy=false,header=false,mode='duplicate');
+ - SHOW JOBS;
+ expect:
+ -
+    id: 14
+    desc: cluster version, EXECUTE_MODE=online, load parquet file, mode=duplicate
+ inputs:
+ -
+ columns: ["id int","job_state string","state string","start_time timestamp","end_time timestamp","parameter string","cluster string","application_id string","error string"]
+ create: |
+ create table {0}(
+ id int,
+ c1_smallint smallint,
+ c2_int int,
+ c3_bigint bigint,
+ c4_float float,
+ c5_double double,
+ c6_string string,
+ c7_timestamp timestamp,
+ c8_date date,
+ c9_bool bool,
+ index(key=c6_string,ts=c7_timestamp,ttl=0m,ttl_type=absolute)
+ )options(partitionnum = 1,replicanum = 1);
+ sqls:
+ - set @@SESSION.execute_mode = "online";
+ - LOAD DATA INFILE 'hdfs://m7-common-cdh02:8022/user/zhaowei/openmldb/load_data/user/zhaowei/openmldb/load_data/parquet' INTO TABLE {0} options(format='parquet',foo='bar',deep_copy=false,header=false,mode='duplicate');
+ - SHOW JOBS;
+ expect:
+ -
+    id: 15
+    desc: cluster version, EXECUTE_MODE=offline, load parquet file, mode=symbolic_link
+ inputs:
+ -
+ columns: ["id int","job_state string","state string","start_time timestamp","end_time timestamp","parameter string","cluster string","application_id string","error string"]
+ create: |
+ create table {0}(
+ id int,
+ c1_smallint smallint,
+ c2_int int,
+ c3_bigint bigint,
+ c4_float float,
+ c5_double double,
+ c6_string string,
+ c7_timestamp timestamp,
+ c8_date date,
+ c9_bool bool,
+ index(key=c6_string,ts=c7_timestamp,ttl=0m,ttl_type=absolute)
+ )options(partitionnum = 1,replicanum = 1);
+ sqls:
+ - set @@SESSION.execute_mode = "offline";
+ - LOAD DATA INFILE 'hdfs://m7-common-cdh02:8022/user/zhaowei/openmldb/load_data/user/zhaowei/openmldb/load_data/parquet' INTO TABLE {0} options(format='parquet',deep_copy=false);
+ - SHOW JOBS;
+ expect:
+ -
+    id: 16
+    desc: cluster version, EXECUTE_MODE=online, load parquet file, mode=symbolic_link
+ inputs:
+ -
+ columns: ["c1 string","c2 smallint","c3 int","c4 timestamp"]
+ rows:
+ - ["aa",1,2,1590738989000]
+ sqls:
+ - set @@SESSION.execute_mode = "online";
+ - LOAD DATA INFILE 'hdfs://m7-common-cdh02:8022/user/zhaowei/openmldb/load_data/user/zhaowei/openmldb/load_data/parquet' INTO TABLE {0} options(format='parquet',foo='bar',deep_copy=false,header=false,mode='symbolic_link');
+ - SHOW JOBS;
+ expect:
+
+
+
diff --git a/cases/integration_test/v040/test_out_in_offline.yaml b/cases/integration_test/v040/test_out_in_offline.yaml
new file mode 100644
index 00000000000..c3fa963f585
--- /dev/null
+++ b/cases/integration_test/v040/test_out_in_offline.yaml
@@ -0,0 +1,893 @@
+# Copyright 2021 4Paradigm
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+db: test_zw
+debugs: ["数据里有null、空串、特殊字符"]
+cases:
+ -
+ id: 0
+    desc: data contains null, empty string and special characters
+ inputs:
+ -
+ columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"]
+ indexs: ["index1:c1:c7"]
+ rows:
+ - [1,"aa",1,2,3,1.1,2.1,1590738989000,"2020-05-01",true]
+ - [2,"null",2,21,31,1.2,2.2,1590738990000,"2020-05-02",false]
+ - [3,"",3,22,32,1.3,2.3,1590738991000,"2020-05-03",true]
+ - [4,null,3,22,32,1.3,2.3,1590738991000,"2020-05-03",true]
+ - [5,null,3,22,32,1.3,2.3,1590738991000,"2020-05-03",null]
+ - [6,"~!@#$%^&*()_+<",3,22,32,1.3,2.3,1590738991000,"2020-05-03",null]
+# -
+# columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"]
+# indexs: ["index1:c1:c7"]
+ sqls:
+ - select * from {0} into outfile '{0}.csv';
+# - load data infile '{0}.csv' into table {1};
+# - select * from {1};
+ expect:
+ count: 6
+ -
+ id: 1
+    desc: test all data types
+ inputs:
+ -
+ columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"]
+ indexs: ["index1:c1:c7"]
+ rows:
+ - [1,"aa",1,2,3,1.1,2.1,1590738989000,"2020-05-01",true]
+ - [2,"bb",2,21,31,1.2,2.2,1590738990000,"2020-05-02",false]
+ - [3,"cc",3,22,32,1.3,2.3,1590738991000,"2020-05-03",true]
+ -
+ columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"]
+ indexs: ["index1:c1:c7"]
+ sqls:
+ - select * from {0} into outfile '{0}.csv';
+ - load data infile '{0}.csv' into table {1};
+ - select * from {1};
+ expect:
+ columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"]
+ order: id
+ rows:
+ - [1,"aa",1,2,3,1.1,2.1,1590738989000,"2020-05-01",true]
+ - [2,"bb",2,21,31,1.2,2.2,1590738990000,"2020-05-02",false]
+ - [3,"cc",3,22,32,1.3,2.3,1590738991000,"2020-05-03",true]
+ -
+ id: 2
+    desc: export the result of a complex sql
+ inputs:
+ -
+ columns : ["id int", "card_no string","merchant_id int", "trx_time timestamp", "trx_amt float"]
+ indexs: ["index1:card_no:trx_time"]
+ rows:
+ - [1, "aaaaaaaaaa",1, 1590738989000, 1.1]
+ - [2, "aaaaaaaaaa",1, 1590738990000, 2.2]
+ - [3, "bb",10, 1590738990000, 3.3]
+ -
+ columns : ["crd_lst_isu_dte timestamp", "crd_nbr string"]
+ indexs: ["index2:crd_nbr:crd_lst_isu_dte"]
+ rows:
+ - [1590738988000, "aaaaaaaaaa"]
+ - [1590738990000, "aaaaaaaaaa"]
+ - [1590738989000, "cc"]
+ - [1590738992000, "cc"]
+ -
+ columns: ["id int", "card_no string", "trx_time timestamp", "card_no_prefix string","sum_trx_amt float", "count_merchant_id int64", "crd_lst_isu_dte timestamp","crd_nbr string"]
+ sqls:
+ - select * from
+ (select
+ id,
+ card_no,
+ trx_time,
+ substr(card_no, 1, 6) as card_no_prefix,
+ sum(trx_amt) over w30d as sum_trx_amt,
+ count(merchant_id) over w10d as count_merchant_id
+ from {0}
+ window w30d as (PARTITION BY {0}.card_no ORDER BY {0}.trx_time ROWS_RANGE BETWEEN 30d PRECEDING AND CURRENT ROW),
+ w10d as (PARTITION BY {0}.card_no ORDER BY {0}.trx_time ROWS_RANGE BETWEEN 10d PRECEDING AND CURRENT ROW)) as trx_fe
+ last join {1} order by {1}.crd_lst_isu_dte on trx_fe.card_no = {1}.crd_nbr and trx_fe.trx_time >= {1}.crd_lst_isu_dte
+ into outfile '{0}.csv';
+ - load data infile '{0}.csv' into table {2};
+ - select * from {2};
+ expect:
+ columns: ["id int", "card_no string", "trx_time timestamp", "card_no_prefix string","sum_trx_amt float", "count_merchant_id int64", "crd_lst_isu_dte timestamp","crd_nbr string"]
+ order: id
+ rows:
+ - [1, "aaaaaaaaaa", 1590738989000, "aaaaaa", 1.1, 1, 1590738988000, "aaaaaaaaaa"]
+ - [2, "aaaaaaaaaa", 1590738990000, "aaaaaa", 3.3, 2, 1590738990000, "aaaaaaaaaa"]
+ - [3, "bb", 1590738990000, "bb", 3.3, 1, null, null]
+ -
+ id: 3
+    desc: test all data types
+ inputs:
+ -
+ columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"]
+ indexs: ["index1:c1:c7"]
+ rows:
+ - [1,"aa",1,2,3,1.1,2.1,1590738989000,"2020-05-01",true]
+ - [2,"bb",2,21,31,1.2,2.2,1590738990000,"2020-05-02",false]
+ - [3,"cc",3,22,32,1.3,2.3,1590738991000,"2020-05-03",true]
+ -
+ columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"]
+ indexs: ["index1:c1:c7"]
+ sqls:
+ - select * from {0} into outfile '{0}.csv';
+ - load data infile '{0}.csv' into table {1};
+ - select * from {1};
+ expect:
+ columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"]
+ order: id
+ rows:
+ - [1,"aa",1,2,3,1.1,2.1,1590738989000,"2020-05-01",true]
+ - [2,"bb",2,21,31,1.2,2.2,1590738990000,"2020-05-02",false]
+ - [3,"cc",3,22,32,1.3,2.3,1590738991000,"2020-05-03",true]
+ -
+ id: 4
+    desc: query a table in another database
+ inputs:
+ -
+ db: db1
+ columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"]
+ indexs: ["index1:c1:c7"]
+ rows:
+ - [1,"aa",1,2,3,1.1,2.1,1590738989000,"2020-05-01",true]
+ - [2,"bb",2,21,31,1.2,2.2,1590738990000,"2020-05-02",false]
+ - [3,"cc",3,22,32,1.3,2.3,1590738991000,"2020-05-03",true]
+ -
+ columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"]
+ indexs: ["index1:c1:c7"]
+ sqls:
+ - select * from db1.{0} into outfile '{0}.csv';
+ - load data infile '{0}.csv' into table {1};
+ - select * from {1};
+ expect:
+ columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"]
+ order: id
+ rows:
+ - [1,"aa",1,2,3,1.1,2.1,1590738989000,"2020-05-01",true]
+ - [2,"bb",2,21,31,1.2,2.2,1590738990000,"2020-05-02",false]
+ - [3,"cc",3,22,32,1.3,2.3,1590738991000,"2020-05-03",true]
+ -
+ id: 5
+    desc: export the result of an insert
+ inputs:
+ -
+ columns : ["id int","c1 string","c7 timestamp"]
+ indexs: ["index1:c1:c7"]
+ sqls:
+ - insert into {0} values (1,"aa",1590738989000) outfile '{0}.csv';
+ expect:
+ success: false
+ -
+ id: 6
+    desc: sql execution error
+ inputs:
+ -
+ columns : ["id int","c1 string","c7 timestamp"]
+ indexs: ["index1:c1:c7"]
+ rows:
+ - [1,"aa",1590738989000]
+ sqls:
+ - select * from db1.{0} into outfile '{0}.csv';
+ expect:
+ success: false
+ -
+ id: 7
+    desc: default mode, the output file already exists
+ inputs:
+ -
+ columns : ["id int","c1 string","c7 timestamp"]
+ indexs: ["index1:c1:c7"]
+ rows:
+ - [1,"aa",1590738989000]
+ sqls:
+ - select * from {0} into outfile '{0}.csv';
+ - select * from {0} into outfile '{0}.csv';
+ expect:
+ success: false
+ -
+ id: 8
+    desc: mode=overwrite, export a larger dataset first, then a smaller one
+ inputs:
+ -
+ columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"]
+ indexs: ["index1:c1:c7"]
+ rows:
+ - [1,"aa",1,2,3,1.1,2.1,1590738989000,"2020-05-01",true]
+ - [2,"bb",2,21,31,1.2,2.2,1590738990000,"2020-05-02",false]
+ - [3,"cc",3,22,32,1.3,2.3,1590738991000,"2020-05-03",true]
+ -
+ columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"]
+ indexs: ["index1:c1:c7"]
+ rows:
+ - [1,"aa",1,2,3,1.1,2.1,1590738989000,"2020-05-01",true]
+ -
+ columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"]
+ indexs: ["index1:c1:c7"]
+ sqls:
+ - select * from {0} into outfile '{0}.csv';
+ - select * from {1} into outfile '{0}.csv' options(mode='overwrite');
+ - load data infile '{0}.csv' into table {2};
+ - select * from {2};
+ expect:
+ columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"]
+ order: id
+ rows:
+ - [1,"aa",1,2,3,1.1,2.1,1590738989000,"2020-05-01",true]
+ -
+ id: 9
+    desc: mode=append, export the same table twice
+ inputs:
+ -
+ columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"]
+ indexs: ["index1:c1:c7"]
+ rows:
+ - [1,"aa",1,2,3,1.1,2.1,1590738989000,"2020-05-01",true]
+ - [2,"bb",2,21,31,1.2,2.2,1590738990000,"2020-05-02",false]
+ - [3,"cc",3,22,32,1.3,2.3,1590738991000,"2020-05-03",true]
+ -
+ columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"]
+ indexs: ["index1:c1:c7"]
+ sqls:
+ - select * from {0} into outfile '{0}.csv';
+ - select * from {0} into outfile '{0}.csv' options(mode='append',header=false);
+ - load data infile '{0}.csv' into table {1};
+ - select * from {1};
+ expect:
+ columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"]
+ order: id
+ rows:
+ - [1,"aa",1,2,3,1.1,2.1,1590738989000,"2020-05-01",true]
+ - [2,"bb",2,21,31,1.2,2.2,1590738990000,"2020-05-02",false]
+ - [3,"cc",3,22,32,1.3,2.3,1590738991000,"2020-05-03",true]
+ - [1,"aa",1,2,3,1.1,2.1,1590738989000,"2020-05-01",true]
+ - [2,"bb",2,21,31,1.2,2.2,1590738990000,"2020-05-02",false]
+ - [3,"cc",3,22,32,1.3,2.3,1590738991000,"2020-05-03",true]
+ -
+ id: 10
+    desc: mode=append, export different tables, second export with header=false
+ inputs:
+ -
+ columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"]
+ indexs: ["index1:c1:c7"]
+ rows:
+ - [1,"aa",1,2,3,1.1,2.1,1590738989000,"2020-05-01",true]
+ - [2,"bb",2,21,31,1.2,2.2,1590738990000,"2020-05-02",false]
+ -
+ columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"]
+ indexs: ["index1:c1:c7"]
+ rows:
+ - [3,"cc",3,22,32,1.3,2.3,1590738991000,"2020-05-03",true]
+ -
+ columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"]
+ indexs: ["index1:c1:c7"]
+ sqls:
+ - select * from {0} into outfile '{0}.csv';
+ - select * from {1} into outfile '{0}.csv' options(mode='append',header=false);
+ - load data infile '{0}.csv' into table {2};
+ - select * from {2};
+ expect:
+ columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"]
+ order: id
+ rows:
+ - [1,"aa",1,2,3,1.1,2.1,1590738989000,"2020-05-01",true]
+ - [2,"bb",2,21,31,1.2,2.2,1590738990000,"2020-05-02",false]
+ - [3,"cc",3,22,32,1.3,2.3,1590738991000,"2020-05-03",true]
+ -
+ id: 11
+    desc: mode=append, export different tables, second export with header=true
+ inputs:
+ -
+ columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"]
+ indexs: ["index1:c1:c7"]
+ rows:
+ - [1,"aa",1,2,3,1.1,2.1,1590738989000,"2020-05-01",true]
+ - [2,"bb",2,21,31,1.2,2.2,1590738990000,"2020-05-02",false]
+ -
+ columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"]
+ indexs: ["index1:c1:c7"]
+ rows:
+ - [3,"cc",3,22,32,1.3,2.3,1590738991000,"2020-05-03",true]
+ -
+ columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"]
+ indexs: ["index1:c1:c7"]
+ sqls:
+ - select * from {0} into outfile '{0}.csv';
+ - select * from {1} into outfile '{0}.csv' options(mode='append',header=true);
+ expect:
+ cat:
+ path: "{0}.csv"
+ lines:
+ - id,c1,c2,c3,c4,c5,c6,c7,c8,c9
+ - 1,aa,1,2,3,1.100000,2.100000,1590738989000,2020-05-01,true
+ - 2,bb,2,21,31,1.200000,2.200000,1590738990000,2020-05-02,false
+ - id,c1,c2,c3,c4,c5,c6,c7,c8,c9
+ - 3,cc,3,22,32,1.300000,2.300000,1590738991000,2020-05-03,true
+ -
+ id: 12
+    desc: invalid option key
+ inputs:
+ -
+ columns : ["id int","c1 string","c7 timestamp"]
+ indexs: ["index1:c1:c7"]
+ rows:
+ - [1,"aa",1590738989000]
+ sqls:
+ - select * from {0} into outfile '{0}.csv' options(head=true);
+ expect:
+ success: false
+ -
+ id: 13
+    desc: invalid value for the header option
+ inputs:
+ -
+ columns : ["id int","c1 string","c7 timestamp"]
+ indexs: ["index1:c1:c7"]
+ rows:
+ - [1,"aa",1590738989000]
+ sqls:
+ - select * from {0} into outfile '{0}.csv' options(header='true');
+ expect:
+ success: false
+ -
+ id: 14
+    desc: format set to an unsupported format
+ inputs:
+ -
+ columns : ["id int","c1 string","c7 timestamp"]
+ indexs: ["index1:c1:c7"]
+ rows:
+ - [1,"aa",1590738989000]
+ sqls:
+ - select * from {0} into outfile '{0}.csv' options(format='txt');
+ expect:
+ success: false
+ -
+ id: 15
+    desc: delimiter is a special character
+ inputs:
+ -
+ columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"]
+ indexs: ["index1:c1:c7"]
+ rows:
+ - [1,"aa",1,2,3,1.1,2.1,1590738989000,"2020-05-01",true]
+ - [2,"bb",2,21,31,1.2,2.2,1590738990000,"2020-05-02",false]
+ - [3,"cc",3,22,32,1.3,2.3,1590738991000,"2020-05-03",true]
+ -
+ columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"]
+ indexs: ["index1:c1:c7"]
+ sqls:
+ - select * from {0} into outfile '{0}.csv' options(delimiter='@');
+ - load data infile '{0}.csv' into table {1} options(delimiter='@');
+ - select * from {1};
+ expect:
+ columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"]
+ order: id
+ rows:
+ - [1,"aa",1,2,3,1.1,2.1,1590738989000,"2020-05-01",true]
+ - [2,"bb",2,21,31,1.2,2.2,1590738990000,"2020-05-02",false]
+ - [3,"cc",3,22,32,1.3,2.3,1590738991000,"2020-05-03",true]
+ -
+ id: 16
+    desc: null_value is a special character string
+ inputs:
+ -
+ columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"]
+ indexs: ["index1:c1:c7"]
+ rows:
+ - [1,"aa",1,2,3,1.1,2.1,1590738989000,"2020-05-01",true]
+ - [2,"null",2,21,31,1.2,2.2,1590738990000,"2020-05-02",false]
+ - [3,null,3,22,32,1.3,2.3,1590738991000,"2020-05-03",null]
+ -
+ columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"]
+ indexs: ["index1:c1:c7"]
+ sqls:
+ - select * from {0} into outfile '{0}.csv' options(null_value='~!@#$%^&*()_+');
+ - load data infile '{0}.csv' into table {1} options(null_value='~!@#$%^&*()_+');
+ - select * from {1};
+ expect:
+ count: 3
+ -
+ id: 17
+    desc: string column contains null, empty string and "null"; null_value is ""
+ inputs:
+ -
+ columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"]
+ indexs: ["index1:c1:c7"]
+ rows:
+ - [1,"aa",1,2,3,1.1,2.1,1590738989000,"2020-05-01",true]
+ - [2,"null",2,21,31,1.2,2.2,1590738990000,"2020-05-02",false]
+ - [3,"",3,22,32,1.3,2.3,1590738991000,"2020-05-03",true]
+ - [4,null,3,22,32,1.3,2.3,1590738991000,"2020-05-03",true]
+ - [5,null,3,22,32,1.3,2.3,1590738991000,"2020-05-03",null]
+ - [6,"~!@#$%^&*()_+<",3,22,32,1.3,2.3,1590738991000,"2020-05-03",null]
+ sqls:
+ - select * from {0} into outfile '{0}.csv' options(null_value='');
+ expect:
+ cat:
+ path: "{0}.csv"
+ lines:
+ - id,c1,c2,c3,c4,c5,c6,c7,c8,c9
+ - 3,,3,22,32,1.300000,2.300000,1590738991000,2020-05-03,true
+ - 5,,3,22,32,1.300000,2.300000,1590738991000,2020-05-03,
+ - 4,,3,22,32,1.300000,2.300000,1590738991000,2020-05-03,true
+ - 1,aa,1,2,3,1.100000,2.100000,1590738989000,2020-05-01,true
+ - 6,~!@#$%^&*()_+<,3,22,32,1.300000,2.300000,1590738991000,2020-05-03,
+ - 2,null,2,21,31,1.200000,2.200000,1590738990000,2020-05-02,false
+ -
+ id: 18
+    desc: string column contains null, empty string and "null"; null_value is "null"
+ inputs:
+ -
+ columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"]
+ indexs: ["index1:c1:c7"]
+ rows:
+ - [1,"aa",1,2,3,1.1,2.1,1590738989000,"2020-05-01",true]
+ - [2,"null",2,21,31,1.2,2.2,1590738990000,"2020-05-02",false]
+ - [3,"",3,22,32,1.3,2.3,1590738991000,"2020-05-03",true]
+ - [4,null,3,22,32,1.3,2.3,1590738991000,"2020-05-03",true]
+ - [5,null,3,22,32,1.3,2.3,1590738991000,"2020-05-03",null]
+ - [6,"~!@#$%^&*()_+<",3,22,32,1.3,2.3,1590738991000,"2020-05-03",null]
+ -
+ columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"]
+ indexs: ["index1:c1:c7"]
+ sqls:
+ - select * from {0} into outfile '{0}.csv' options(null_value='null');
+ expect:
+ cat:
+ path: "{0}.csv"
+ lines:
+ - id,c1,c2,c3,c4,c5,c6,c7,c8,c9
+ - 3,,3,22,32,1.300000,2.300000,1590738991000,2020-05-03,true
+ - 5,null,3,22,32,1.300000,2.300000,1590738991000,2020-05-03,null
+ - 4,null,3,22,32,1.300000,2.300000,1590738991000,2020-05-03,true
+ - 1,aa,1,2,3,1.100000,2.100000,1590738989000,2020-05-01,true
+ - 6,~!@#$%^&*()_+<,3,22,32,1.300000,2.300000,1590738991000,2020-05-03,null
+ - 2,null,2,21,31,1.200000,2.200000,1590738990000,2020-05-02,false
+ -
+ id: 19
+    desc: export data with header=false
+ inputs:
+ -
+ columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"]
+ indexs: ["index1:c1:c7"]
+ rows:
+ - [1,"aa",1,2,3,1.1,2.1,1590738989000,"2020-05-01",true]
+ - [2,"bb",2,21,31,1.2,2.2,1590738990000,"2020-05-02",false]
+ - [3,"cc",3,22,32,1.3,2.3,1590738991000,"2020-05-03",true]
+ -
+ columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"]
+ indexs: ["index1:c1:c7"]
+ sqls:
+ - select * from {0} into outfile '{0}.csv' options(header=false);
+ - load data infile '{0}.csv' into table {1} options(header=false);
+ - select * from {1};
+ expect:
+ columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"]
+ order: id
+ rows:
+ - [1,"aa",1,2,3,1.1,2.1,1590738989000,"2020-05-01",true]
+ - [2,"bb",2,21,31,1.2,2.2,1590738990000,"2020-05-02",false]
+ - [3,"cc",3,22,32,1.3,2.3,1590738991000,"2020-05-03",true]
+ -
+ id: 20
+    desc: export data with format=csv
+ inputs:
+ -
+ columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"]
+ indexs: ["index1:c1:c7"]
+ rows:
+ - [1,"aa",1,2,3,1.1,2.1,1590738989000,"2020-05-01",true]
+ - [2,"bb",2,21,31,1.2,2.2,1590738990000,"2020-05-02",false]
+ - [3,"cc",3,22,32,1.3,2.3,1590738991000,"2020-05-03",true]
+ -
+ columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"]
+ indexs: ["index1:c1:c7"]
+ sqls:
+ - select * from {0} into outfile '{0}.csv' options(format='csv');
+ - load data infile '{0}.csv' into table {1} options(format='csv');
+ - select * from {1};
+ expect:
+ columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"]
+ order: id
+ rows:
+ - [1,"aa",1,2,3,1.1,2.1,1590738989000,"2020-05-01",true]
+ - [2,"bb",2,21,31,1.2,2.2,1590738990000,"2020-05-02",false]
+ - [3,"cc",3,22,32,1.3,2.3,1590738991000,"2020-05-03",true]
+ -
+ id: 21
+    desc: target directory does not exist
+ inputs:
+ -
+ columns : ["id int","c1 string","c7 timestamp"]
+ indexs: ["index1:c1:c7"]
+ rows:
+ - [1,"aa",1590738989000]
+ sqls:
+ - select * from {0} into outfile '/{0}/{0}.csv';
+ expect:
+ success: false
+ -
+ id: 22
+    desc: data type mismatch
+ inputs:
+ -
+ columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"]
+ indexs: ["index1:c1:c7"]
+ rows:
+ - [1,"aa",1,2,3,1.1,2.1,1590738989000,"2020-05-01",true]
+ - [2,"bb",2,21,31,1.2,2.2,1590738990000,"2020-05-02",false]
+ - [3,"cc",3,22,32,1.3,2.3,1590738991000,"2020-05-03",true]
+ -
+ columns : ["id int","c1 int","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"]
+ indexs: ["index1:c1:c7"]
+ sqls:
+ - select * from {0} into outfile '{0}.csv';
+ - load data infile '{0}.csv' into table {1};
+ expect:
+ success: false
+ -
+ id: 23
+    desc: export data with header=true
+ inputs:
+ -
+ columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"]
+ indexs: ["index1:c1:c7"]
+ rows:
+ - [1,"aa",1,2,3,1.1,2.1,1590738989000,"2020-05-01",true]
+ - [2,"bb",2,21,31,1.2,2.2,1590738990000,"2020-05-02",false]
+ - [3,"cc",3,22,32,1.3,2.3,1590738991000,"2020-05-03",true]
+ -
+ columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"]
+ indexs: ["index1:c1:c7"]
+ sqls:
+ - select * from {0} into outfile '{0}.csv' options(header=true);
+ - load data infile '{0}.csv' into table {1} options(header=true);
+ - select * from {1};
+ expect:
+ columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"]
+ order: id
+ rows:
+ - [1,"aa",1,2,3,1.1,2.1,1590738989000,"2020-05-01",true]
+ - [2,"bb",2,21,31,1.2,2.2,1590738990000,"2020-05-02",false]
+ - [3,"cc",3,22,32,1.3,2.3,1590738991000,"2020-05-03",true]
+ -
+ id: 24
+    desc: header=true but the csv has no header
+ inputs:
+ -
+ columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"]
+ indexs: ["index1:c1:c7"]
+ rows:
+ - [1,"aa",1,2,3,1.1,2.1,1590738989000,"2020-05-01",true]
+ - [2,"bb",2,21,31,1.2,2.2,1590738990000,"2020-05-02",false]
+ - [3,"cc",3,22,32,1.3,2.3,1590738991000,"2020-05-03",true]
+ -
+ columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"]
+ indexs: ["index1:c1:c7"]
+ sqls:
+ - select * from {0} into outfile '{0}.csv' options(header=false);
+ - load data infile '{0}.csv' into table {1} options(header=true);
+ expect:
+ success: false
+ -
+ id: 25
+    desc: header=false but the csv has a header
+ inputs:
+ -
+ columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"]
+ indexs: ["index1:c1:c7"]
+ rows:
+ - [1,"aa",1,2,3,1.1,2.1,1590738989000,"2020-05-01",true]
+ - [2,"bb",2,21,31,1.2,2.2,1590738990000,"2020-05-02",false]
+ - [3,"cc",3,22,32,1.3,2.3,1590738991000,"2020-05-03",true]
+ -
+ columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"]
+ indexs: ["index1:c1:c7"]
+ sqls:
+ - select * from {0} into outfile '{0}.csv' options(header=true);
+ - load data infile '{0}.csv' into table {1} options(header=false);
+ expect:
+ success: false
+ -
+ id: 26
+    desc: table does not exist
+ inputs:
+ -
+ columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"]
+ indexs: ["index1:c1:c7"]
+ rows:
+ - [1,"aa",1,2,3,1.1,2.1,1590738989000,"2020-05-01",true]
+ - [2,"bb",2,21,31,1.2,2.2,1590738990000,"2020-05-02",false]
+ - [3,"cc",3,22,32,1.3,2.3,1590738991000,"2020-05-03",true]
+ -
+ columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"]
+ indexs: ["index1:c1:c7"]
+ sqls:
+ - select * from {0} into outfile '{0}.csv' options(header=true);
+ - load data infile '{0}.csv' into table {1}11 options(header=true);
+ expect:
+ success: false
+ -
+ id: 27
+    desc: format=csv, csv-formatted file whose name does not end with .csv
+ inputs:
+ -
+ columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"]
+ indexs: ["index1:c1:c7"]
+ rows:
+ - [1,"aa",1,2,3,1.1,2.1,1590738989000,"2020-05-01",true]
+ - [2,"bb",2,21,31,1.2,2.2,1590738990000,"2020-05-02",false]
+ - [3,"cc",3,22,32,1.3,2.3,1590738991000,"2020-05-03",true]
+ -
+ columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"]
+ indexs: ["index1:c1:c7"]
+ sqls:
+ - select * from {0} into outfile '{0}.txt' ;
+ - load data infile '{0}.txt' into table {1} options(format='csv');
+ - select * from {1};
+ expect:
+ columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"]
+ order: id
+ rows:
+ - [1,"aa",1,2,3,1.1,2.1,1590738989000,"2020-05-01",true]
+ - [2,"bb",2,21,31,1.2,2.2,1590738990000,"2020-05-02",false]
+ - [3,"cc",3,22,32,1.3,2.3,1590738991000,"2020-05-03",true]
+ -
+ id: 28
+    desc: format set to an unsupported value
+ inputs:
+ -
+ columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"]
+ indexs: ["index1:c1:c7"]
+ rows:
+ - [1,"aa",1,2,3,1.1,2.1,1590738989000,"2020-05-01",true]
+ - [2,"bb",2,21,31,1.2,2.2,1590738990000,"2020-05-02",false]
+ - [3,"cc",3,22,32,1.3,2.3,1590738991000,"2020-05-03",true]
+ -
+ columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"]
+ indexs: ["index1:c1:c7"]
+ sqls:
+ - select * from {0} into outfile '{0}.csv';
+ - load data infile '{0}.csv' into table {1} options(format='txt');
+ expect:
+ success: false
+ -
+ id: 29
+    desc: wrong file path
+ inputs:
+ -
+ columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"]
+ indexs: ["index1:c1:c7"]
+ rows:
+ - [1,"aa",1,2,3,1.1,2.1,1590738989000,"2020-05-01",true]
+ - [2,"bb",2,21,31,1.2,2.2,1590738990000,"2020-05-02",false]
+ - [3,"cc",3,22,32,1.3,2.3,1590738991000,"2020-05-03",true]
+ -
+ columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"]
+ indexs: ["index1:c1:c7"]
+ sqls:
+ - select * from {0} into outfile '{0}.csv';
+ - load data infile '{0}1.csv' into table {1};
+ expect:
+ success: false
+ -
+ id: 30
+    desc: import into a table in another database
+ inputs:
+ -
+ columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"]
+ indexs: ["index1:c1:c7"]
+ rows:
+ - [1,"aa",1,2,3,1.1,2.1,1590738989000,"2020-05-01",true]
+ - [2,"bb",2,21,31,1.2,2.2,1590738990000,"2020-05-02",false]
+ - [3,"cc",3,22,32,1.3,2.3,1590738991000,"2020-05-03",true]
+ -
+ db: db1
+ columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"]
+ indexs: ["index1:c1:c7"]
+ sqls:
+ - select * from {0} into outfile '{0}.csv';
+ - load data infile '{0}.csv' into table db1.{1};
+ - select * from db1.{1};
+ expect:
+ columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"]
+ order: id
+ rows:
+ - [1,"aa",1,2,3,1.1,2.1,1590738989000,"2020-05-01",true]
+ - [2,"bb",2,21,31,1.2,2.2,1590738990000,"2020-05-02",false]
+ - [3,"cc",3,22,32,1.3,2.3,1590738991000,"2020-05-03",true]
+ -
+ id: 31
+    desc: export then import into the same table
+ inputs:
+ -
+ columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"]
+ indexs: ["index1:c1:c7"]
+ rows:
+ - [1,"aa",1,2,3,1.1,2.1,1590738989000,"2020-05-01",true]
+ - [2,"bb",2,21,31,1.2,2.2,1590738990000,"2020-05-02",false]
+ - [3,"cc",3,22,32,1.3,2.3,1590738991000,"2020-05-03",true]
+ sqls:
+ - select * from {0} into outfile '{0}.csv';
+ - load data infile '{0}.csv' into table {0};
+ - select * from {0};
+ expect:
+ columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"]
+ order: id
+ rows:
+ - [1,"aa",1,2,3,1.1,2.1,1590738989000,"2020-05-01",true]
+ - [2,"bb",2,21,31,1.2,2.2,1590738990000,"2020-05-02",false]
+ - [3,"cc",3,22,32,1.3,2.3,1590738991000,"2020-05-03",true]
+ - [1,"aa",1,2,3,1.1,2.1,1590738989000,"2020-05-01",true]
+ - [2,"bb",2,21,31,1.2,2.2,1590738990000,"2020-05-02",false]
+ - [3,"cc",3,22,32,1.3,2.3,1590738991000,"2020-05-03",true]
+ -
+ id: 32
+    desc: columns of the created table do not match the csv
+ inputs:
+ -
+ columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"]
+ indexs: ["index1:c1:c7"]
+ rows:
+ - [1,"aa",1,2,3,1.1,2.1,1590738989000,"2020-05-01",true]
+ - [2,"bb",2,21,31,1.2,2.2,1590738990000,"2020-05-02",false]
+ - [3,"cc",3,22,32,1.3,2.3,1590738991000,"2020-05-03",true]
+ -
+ columns : ["id int","c1 string","cc smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"]
+ indexs: ["index1:c1:c7"]
+ sqls:
+ - select * from {0} into outfile '{0}.csv';
+      - load data infile '{0}.csv' into table {1};
+ expect:
+ success: false
+ -
+ id: 33
+    desc: import into a table that already has data
+ inputs:
+ -
+ columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"]
+ indexs: ["index1:c1:c7"]
+ rows:
+ - [1,"aa",1,2,3,1.1,2.1,1590738989000,"2020-05-01",true]
+ - [2,"bb",2,21,31,1.2,2.2,1590738990000,"2020-05-02",false]
+ - [3,"cc",3,22,32,1.3,2.3,1590738991000,"2020-05-03",true]
+ -
+ columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"]
+ indexs: ["index1:c1:c7"]
+ rows:
+ - [1,"aa",1,2,3,1.1,2.1,1590738989000,"2020-05-01",true]
+ - [2,"bb",2,21,31,1.2,2.2,1590738990000,"2020-05-02",false]
+ - [3,"cc",3,22,32,1.3,2.3,1590738991000,"2020-05-03",true]
+ sqls:
+ - select * from {0} into outfile '{0}.csv';
+ - load data infile '{0}.csv' into table {1};
+ - select * from {1};
+ expect:
+ columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"]
+ order: id
+ rows:
+ - [1,"aa",1,2,3,1.1,2.1,1590738989000,"2020-05-01",true]
+ - [2,"bb",2,21,31,1.2,2.2,1590738990000,"2020-05-02",false]
+ - [3,"cc",3,22,32,1.3,2.3,1590738991000,"2020-05-03",true]
+ - [1,"aa",1,2,3,1.1,2.1,1590738989000,"2020-05-01",true]
+ - [2,"bb",2,21,31,1.2,2.2,1590738990000,"2020-05-02",false]
+ - [3,"cc",3,22,32,1.3,2.3,1590738991000,"2020-05-03",true]
+ -
+ id: 34
+    desc: delimiter is "," and the data contains ","
+ inputs:
+ -
+ columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"]
+ indexs: ["index1:c1:c7"]
+ rows:
+ - [1,"aa",1,2,3,1.1,2.1,1590738989000,"2020-05-01",true]
+ - [2,"b,b",2,21,31,1.2,2.2,1590738990000,"2020-05-02",false]
+ - [3,"cc",3,22,32,1.3,2.3,1590738991000,"2020-05-03",true]
+ -
+ columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"]
+ indexs: ["index1:c1:c7"]
+ sqls:
+ - select * from {0} into outfile '{0}.csv';
+ - load data infile '{0}.csv' into table {1} options(delimiter=',');
+ expect:
+ success: false
+ -
+ id: 35
+    desc: import with null_value=null
+ inputs:
+ -
+ columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"]
+ indexs: ["index1:c1:c7"]
+ rows:
+ - [1,"aa",1,2,3,1.1,2.1,1590738989000,"2020-05-01",true]
+ - [2,"null",2,21,31,1.2,2.2,1590738990000,"2020-05-02",false]
+ - [3,null,3,22,32,1.3,2.3,1590738991000,"2020-05-03",null]
+ -
+ columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"]
+ indexs: ["index1:c1:c7"]
+ sqls:
+ - select * from {0} into outfile '{0}.csv' options(null_value='null');
+ - load data infile '{0}.csv' into table {1} options(null_value='null');
+ - select * from {1};
+ expect:
+ count: 3
+ -
+ id: 36
+    desc: import with null_value set to an empty string
+ inputs:
+ -
+ columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"]
+ indexs: ["index1:c1:c7"]
+ rows:
+ - [1,"aa",1,2,3,1.1,2.1,1590738989000,"2020-05-01",true]
+ - [2,"null",2,21,31,1.2,2.2,1590738990000,"2020-05-02",false]
+ - [3,null,3,22,32,1.3,2.3,1590738991000,"2020-05-03",null]
+ -
+ columns : ["id int","c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"]
+ indexs: ["index1:c1:c7"]
+ sqls:
+ - select * from {0} into outfile '{0}.csv' options(null_value='');
+ - load data infile '{0}.csv' into table {1} options(null_value='');
+ - select * from {1};
+ expect:
+ count: 3
+ -
+ id: 37
+    desc: drop the table, then import again
+ inputs:
+ -
+ columns : ["id int","c1 string","c7 timestamp"]
+ indexs: ["index1:c1:c7"]
+ rows:
+ - [1,"aa",1590738989000]
+ - [2,"bb",1590738990000]
+ - [3,"cc",1590738991000]
+ -
+ columns : ["id int","c1 string","c7 timestamp"]
+ indexs: ["index1:c1:c7"]
+ sqls:
+ - select * from {0} into outfile '{0}.csv';
+ - load data infile '{0}.csv' into table {1};
+ - drop table {1};
+ - create table {1}(
+ id int,
+ c1 string,
+ c7 timestamp,
+ index(key=(c1),ts=c7))options(partitionnum=1,replicanum=1);
+ - load data infile '{0}.csv' into table {1};
+ - select * from {1};
+ expect:
+ columns : ["id int","c1 string","c7 timestamp"]
+ order: id
+ rows:
+ - [1,"aa",1590738989000]
+ - [2,"bb",1590738990000]
+ - [3,"cc",1590738991000]
+ -
+ id: 38
+    desc: invalid mode value
+ inputs:
+ -
+ columns : ["id int","c1 string","c7 timestamp"]
+ indexs: ["index1:c1:c7"]
+ rows:
+ - [1,"aa",1590738989000]
+ sqls:
+ - select * from {0} into outfile '{0}.csv' options(mode='true');
+ expect:
+ success: false
+
+
+
diff --git a/cases/integration_test/window/error_window.yaml b/cases/integration_test/window/error_window.yaml
new file mode 100644
index 00000000000..aee1f832e7e
--- /dev/null
+++ b/cases/integration_test/window/error_window.yaml
@@ -0,0 +1,370 @@
+# Copyright 2021 4Paradigm
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+db: test_zw
+debugs: []
+version: 0.5.0
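+# Negative cases: each window definition below is expected to be rejected, e.g. a
+# missing ORDER BY / PARTITION BY or an unsupported partition/order-by column type.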
+cases:
+ - id: 0
+ desc: no order by
+ inputs:
+ - columns: [ "id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date" ]
+ indexs: [ "index1:c8:c4" ]
+ rows:
+ - [1,"aa",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ sql: |
+ SELECT id, c1, c4, count(c4) OVER w1 as w1_c4_count FROM {0} WINDOW w1 AS (PARTITION BY {0}.c8 ROWS BETWEEN 2 PRECEDING AND CURRENT ROW);
+ expect:
+ success: false
+ - id: 1
+ desc: no partition by
+ inputs:
+ - columns: [ "id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date" ]
+ indexs: [ "index1:c8:c4" ]
+ rows:
+ - [1,"aa",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ sql: |
+ SELECT id, c1, c4, count(c4) OVER w1 as w1_c4_count FROM {0} WINDOW w1 AS (ORDER BY {0}.c4 ROWS BETWEEN 2 PRECEDING AND CURRENT ROW);
+ expect:
+ success: false
+ -
+ id: 2
+    desc: partition by float - no index hit - not supported in rtidb
+ mode: offline-unsupport
+ inputs:
+ -
+ columns : ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"]
+ indexs: ["index1:c1:c7"]
+ rows:
+ - [1,"aa",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,31,1.1,2.2,1590738991000,"2020-05-02"]
+ - [3,"cc",20,32,1.1,2.3,1590738992000,"2020-05-03"]
+ - [4,"dd",20,33,1.1,2.4,1590738993000,"2020-05-04"]
+ - [5,"ee",21,34,1.2,2.5,1590738994000,"2020-05-05"]
+ sql: |
+ SELECT id, c1, c5, sum(c4) OVER w1 as w1_c4_sum FROM {0} WINDOW w1 AS (PARTITION BY {0}.c5 ORDER BY {0}.c7 ROWS BETWEEN 2 PRECEDING AND CURRENT ROW);
+ expect:
+ success: false
+ - id: 3
+    desc: partition by double - no index hit - not supported in rtidb
+ mode: offline-unsupport
+ inputs:
+ - columns: ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"]
+ indexs: ["index1:c1:c7"]
+ rows:
+ - [1,"aa",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,31,1.1,2.1,1590738991000,"2020-05-02"]
+ - [3,"cc",20,32,1.1,2.1,1590738992000,"2020-05-03"]
+ - [4,"dd",20,33,1.1,2.1,1590738993000,"2020-05-04"]
+ - [5,"ee",21,34,1.2,2.2,1590738994000,"2020-05-05"]
+ sql: |
+ SELECT id, c1, c6, sum(c4) OVER w1 as w1_c4_sum FROM {0} WINDOW w1 AS (PARTITION BY {0}.c6 ORDER BY {0}.c7 ROWS BETWEEN 2 PRECEDING AND CURRENT ROW);
+ expect:
+ success: false
+ -
+ id: 4
+    desc: order by string
+ inputs:
+ -
+ columns : ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"]
+ indexs: ["index1:c1:c7"]
+ rows:
+ - [1,"aa",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [3,"cc",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [4,"dd",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [5,"ee",21,31,1.2,2.2,1590738991000,"2020-05-02"]
+ sql: |
+ SELECT id, c1, c4, count(c4) OVER w1 as w1_c4_count FROM {0} WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c1 ROWS BETWEEN 2 PRECEDING AND CURRENT ROW);
+ expect:
+ success: false
+ -
+ id: 5
+    desc: order by float
+ mode: offline-unsupport
+ inputs:
+ -
+ columns : ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"]
+ indexs: ["index1:c8:c7"]
+ rows:
+ - [1,"aa",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,31,1.2,2.1,1590738990000,"2020-05-01"]
+ - [3,"cc",20,32,1.3,2.1,1590738990000,"2020-05-01"]
+ - [4,"dd",20,33,1.4,2.1,1590738990000,"2020-05-01"]
+ - [5,"ee",21,34,1.5,2.2,1590738991000,"2020-05-02"]
+ sql: |
+ SELECT id, c1, c5, count(c4) OVER w1 as w1_c4_count FROM {0} WINDOW w1 AS (PARTITION BY {0}.c8 ORDER BY {0}.c5 ROWS BETWEEN 2 PRECEDING AND CURRENT ROW);
+ expect:
+ success: false
+ -
+ id: 6
+    desc: order by double
+ mode: offline-unsupport
+ inputs:
+ -
+ columns : ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"]
+ indexs: ["index1:c1:c7"]
+ rows:
+ - [1,"aa",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,31,1.2,2.2,1590738990000,"2020-05-01"]
+ - [3,"cc",20,32,1.3,2.3,1590738990000,"2020-05-01"]
+ - [4,"dd",20,33,1.4,2.4,1590738990000,"2020-05-01"]
+ - [5,"ee",21,34,1.5,2.5,1590738991000,"2020-05-02"]
+ sql: |
+ SELECT id, c1, c6, count(c4) OVER w1 as w1_c4_count FROM {0} WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c6 ROWS BETWEEN 2 PRECEDING AND CURRENT ROW);
+ expect:
+ success: false
+ -
+ id: 7
+    desc: date as order by
+ inputs:
+ -
+ columns : ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"]
+ indexs: ["index1:c1:c7"]
+ rows:
+ - [1,"aa",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,31,1.2,2.2,1590738990000,"2020-05-02"]
+ - [3,"cc",20,32,1.3,2.3,1590738990000,"2020-05-03"]
+ - [4,"dd",20,33,1.4,2.4,1590738990000,"2020-05-04"]
+ - [5,"ee",21,34,1.5,2.5,1590738991000,"2020-05-05"]
+ sql: |
+ SELECT id, c1, c8, count(c4) OVER w1 as w1_c4_count FROM {0} WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c8 ROWS BETWEEN 2 PRECEDING AND CURRENT ROW);
+ expect:
+ success: false
+ -
+ id: 33
+    desc: int as order by
+ inputs:
+ -
+ columns : ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"]
+ indexs: ["index1:c1:c7"]
+ rows:
+ - [1,"aa",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [3,"cc",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [4,"dd",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [5,"ee",21,31,1.2,2.2,1590738991000,"2020-05-02"]
+ sql: |
+ SELECT id, c1, c4, count(c4) OVER w1 as w1_c4_count FROM {0} WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c3 ROWS BETWEEN 2 PRECEDING AND CURRENT ROW);
+ expect:
+ success: false
+ -
+ id: 34
+    desc: smallint as order by
+ inputs:
+ -
+ columns : ["id int","c1 string","c3 smallint","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"]
+ indexs: ["index1:c1:c7"]
+ rows:
+ - [1,"aa",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [3,"cc",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [4,"dd",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [5,"ee",21,31,1.2,2.2,1590738991000,"2020-05-02"]
+ sql: |
+ SELECT id, c1, c4, count(c4) OVER w1 as w1_c4_count FROM {0} WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c3 ROWS BETWEEN 2 PRECEDING AND CURRENT ROW);
+ expect:
+ success: false
+ -
+ id: 35
+    desc: bool as order by
+ inputs:
+ -
+ columns : ["id int","c1 string","c2 bool","c3 smallint","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"]
+ indexs: ["index1:c1:c7"]
+ rows:
+ - [1,"aa",true,20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",true,20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [3,"cc",true,20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [4,"dd",true,20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [5,"ee",false,21,31,1.2,2.2,1590738991000,"2020-05-02"]
+ sql: |
+ SELECT id, c1, c4, count(c4) OVER w1 as w1_c4_count FROM {0} WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c2 ROWS BETWEEN 2 PRECEDING AND CURRENT ROW);
+ expect:
+ success: false
+ -
+ id: 8
+    desc: BETWEEN with a time unit
+ inputs:
+ -
+ columns : ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"]
+ indexs: ["index1:c3:c7"]
+ rows:
+ - [1,"aa",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,31,1.2,2.2,1590738991000,"2020-05-02"]
+ - [3,"cc",20,32,1.3,2.3,1590738992000,"2020-05-03"]
+ - [4,"dd",20,33,1.4,2.4,1590738993000,"2020-05-04"]
+ - [5,"ee",21,34,1.5,2.5,1590738994000,"2020-05-05"]
+ sql: |
+ SELECT id, c1, c3, sum(c4) OVER w1 as w1_c4_sum FROM {0} WINDOW w1 AS (PARTITION BY {0}.c3 ORDER BY {0}.c7 ROWS BETWEEN 2s PRECEDING AND CURRENT ROW);
+ expect:
+ success: false
+ -
+ id: 9
+    desc: window name does not exist
+ inputs:
+ -
+ columns : ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"]
+ indexs: ["index1:c1:c7"]
+ rows:
+ - [1,"aa",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,31,1.2,2.2,1590738991000,"2020-05-02"]
+ - [3,"cc",20,32,1.3,2.3,1590738992000,"2020-05-03"]
+ - [4,"dd",20,33,1.4,2.4,1590738993000,"2020-05-04"]
+ - [5,"ee",21,34,1.5,2.5,1590738994000,"2020-05-05"]
+ sql: |
+ SELECT id, c1, c3, sum(c4) OVER w2 as w1_c4_sum FROM {0} WINDOW w1 AS (PARTITION BY {0}.c3 ORDER BY {0}.c7 ROWS BETWEEN 2 PRECEDING AND CURRENT ROW);
+ expect:
+ success: false
+ -
+ id: 10
+    desc: table name used in the window does not exist
+ inputs:
+ -
+ columns : ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"]
+ indexs: ["index1:c1:c7"]
+ rows:
+ - [1,"aa",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,31,1.2,2.2,1590738991000,"2020-05-02"]
+ - [3,"cc",20,32,1.3,2.3,1590738992000,"2020-05-03"]
+ - [4,"dd",20,33,1.4,2.4,1590738993000,"2020-05-04"]
+ - [5,"ee",21,34,1.5,2.5,1590738994000,"2020-05-05"]
+ sql: |
+ SELECT id, c1, c3, sum(c4) OVER w1 as w1_c4_sum FROM {0} WINDOW w1 AS (PARTITION BY {0}1.c3 ORDER BY {0}1.c7 ROWS BETWEEN 2 PRECEDING AND CURRENT ROW);
+ expect:
+ success: false
+ -
+ id: 11
+    desc: column name used in the window does not exist
+ inputs:
+ -
+ columns : ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"]
+ indexs: ["index1:c1:c7"]
+ rows:
+ - [1,"aa",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,31,1.2,2.2,1590738991000,"2020-05-02"]
+ - [3,"cc",20,32,1.3,2.3,1590738992000,"2020-05-03"]
+ - [4,"dd",20,33,1.4,2.4,1590738993000,"2020-05-04"]
+ - [5,"ee",21,34,1.5,2.5,1590738994000,"2020-05-05"]
+ sql: |
+ SELECT id, c1, c3, sum(c4) OVER w1 as w1_c4_sum FROM {0} WINDOW w1 AS (PARTITION BY {0}.c33 ORDER BY {0}.c7 ROWS BETWEEN 2 PRECEDING AND CURRENT ROW);
+ expect:
+ success: false
+ -
+ id: 12
+ desc: window1 expression + window2 expression
+    tags: ["currently outside the feature boundary, @chenjing plans to support expressions that depend on windows of the same type"]
+ inputs:
+ -
+ columns: ["c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"]
+ indexs: ["index1:c1:c7"]
+ rows:
+ - ["aa",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - ["aa",21,31,1.2,2.2,1590738991000,"2020-05-02"]
+ - ["aa",22,32,1.3,2.3,1590738992000,"2020-05-03"]
+ - ["bb",23,33,1.4,2.4,1590738993000,"2020-05-04"]
+ - ["bb",24,34,1.5,2.5,1590738994000,"2020-05-05"]
+ sql: |
+ SELECT c1, c3, c4,
+ (sum(c4) over w1 + sum(c3) over w2) as sum_c3_c4_w1 FROM {0}
+ WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS BETWEEN 2 PRECEDING AND CURRENT ROW),
+ w2 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS BETWEEN 10 PRECEDING AND CURRENT ROW);
+ expect:
+ success: false
+ -
+ id: 13
+    desc: ROWS window does not support MAXSIZE
+ inputs:
+ -
+ columns : ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"]
+ indexs: ["index1:c1:c7"]
+ rows:
+ - [1,"aa",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,31,1.2,2.2,1590738991000,"2020-05-02"]
+ - [3,"cc",20,32,1.3,2.3,1590738992000,"2020-05-03"]
+ - [4,"dd",20,33,1.4,2.4,1590738993000,"2020-05-04"]
+ - [5,"ee",21,34,1.5,2.5,1590738994000,"2020-05-05"]
+ sql: |
+ SELECT id, c1, c3, sum(c4) OVER w1 as w1_c4_sum FROM {0} WINDOW
+ w1 AS (PARTITION BY {0}.c3 ORDER BY {0}.c7 ROWS BETWEEN 2 PRECEDING AND CURRENT ROW MAXSIZE 10);
+ expect:
+ success: false
+ -
+ id: 14
+    desc: window name does not exist
+ inputs:
+ -
+ columns : ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"]
+ indexs: ["index1:c1:c7"]
+ rows:
+ - [1,"aa",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,31,1.2,2.2,1590738991000,"2020-05-02"]
+ - [3,"cc",20,32,1.3,2.3,1590738992000,"2020-05-03"]
+ - [4,"dd",20,33,1.4,2.4,1590738993000,"2020-05-04"]
+ - [5,"ee",21,34,1.5,2.5,1590738994000,"2020-05-05"]
+ sql: |
+ SELECT id, c1, c3, sum(c4) OVER w2 as w1_c4_sum FROM {0} WINDOW w1 AS (PARTITION BY {0}.c3 ORDER BY {0}.c7 ROWS BETWEEN 2 PRECEDING AND CURRENT ROW);
+ expect:
+ success: false
+ -
+ id: 15
+    desc: table name used in the window does not exist
+ inputs:
+ -
+ columns : ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"]
+ indexs: ["index1:c1:c7"]
+ rows:
+ - [1,"aa",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,31,1.2,2.2,1590738991000,"2020-05-02"]
+ - [3,"cc",20,32,1.3,2.3,1590738992000,"2020-05-03"]
+ - [4,"dd",20,33,1.4,2.4,1590738993000,"2020-05-04"]
+ - [5,"ee",21,34,1.5,2.5,1590738994000,"2020-05-05"]
+ sql: |
+ SELECT id, c1, c3, sum(c4) OVER w1 as w1_c4_sum FROM {0} WINDOW w1 AS (PARTITION BY {0}1.c3 ORDER BY {0}1.c7 ROWS_RANGE BETWEEN 2s PRECEDING AND CURRENT ROW);
+ expect:
+ success: false
+ -
+ id: 16
+    desc: column name used in the window does not exist
+ inputs:
+ -
+ columns : ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"]
+ indexs: ["index1:c1:c7"]
+ rows:
+ - [1,"aa",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,31,1.2,2.2,1590738991000,"2020-05-02"]
+ - [3,"cc",20,32,1.3,2.3,1590738992000,"2020-05-03"]
+ - [4,"dd",20,33,1.4,2.4,1590738993000,"2020-05-04"]
+ - [5,"ee",21,34,1.5,2.5,1590738994000,"2020-05-05"]
+ sql: |
+ SELECT id, c1, c3, sum(c4) OVER w1 as w1_c4_sum FROM {0} WINDOW w1 AS (PARTITION BY {0}.c33 ORDER BY {0}.c7 ROWS_RANGE BETWEEN 2s PRECEDING AND CURRENT ROW);
+ expect:
+ success: false
+ -
+ id: 37
+ desc: no frame
+ inputs:
+ -
+ columns: ["c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"]
+ indexs: ["index1:c1:c7"]
+ rows:
+ - ["aa",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - ["aa",21,31,1.2,2.2,1590738991000,"2020-05-02"]
+ - ["aa",22,32,1.3,2.3,1590738992000,"2020-05-03"]
+ - ["aa",23,33,1.4,2.4,1590738993000,"2020-05-04"]
+ - ["bb",24,34,1.5,2.5,1590738994000,"2020-05-05"]
+ sql: |
+ SELECT c1, c3, sum(c4) OVER w1 as w1_c4_sum FROM {0} WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7);
+ expect:
+ success: false
diff --git a/cases/integration_test/window/test_current_row.yaml b/cases/integration_test/window/test_current_row.yaml
new file mode 100644
index 00000000000..50128918b8b
--- /dev/null
+++ b/cases/integration_test/window/test_current_row.yaml
@@ -0,0 +1,1516 @@
+# Copyright 2021 4Paradigm
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+db: test_zw
+debugs: []
+version: 0.6.0
+cases:
+ - id: 0
+ desc: rows-current_row
+ inputs:
+ - columns: [ "c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date" ]
+ indexs: [ "index1:c1:c7" ]
+ rows:
+ - [ "aa",20,30,1.1,2.1,1590738990000,"2020-05-01" ]
+ - [ "aa",21,31,1.2,2.2,1590738991000,"2020-05-02" ]
+ - [ "aa",22,32,1.3,2.3,1590738992000,"2020-05-03" ]
+ - [ "aa",23,33,1.4,2.4,1590738993000,"2020-05-04" ]
+ - [ "bb",24,34,1.5,2.5,1590738994000,"2020-05-05" ]
+ sql: |
+ SELECT c1, c3, sum(c4) OVER w1 as w1_c4_sum FROM {0} WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS BETWEEN 2 PRECEDING AND CURRENT ROW EXCLUDE CURRENT_ROW);
+ expect:
+ order: c3
+ columns: [ "c1 string","c3 int","w1_c4_sum bigint" ]
+ rows:
+ - [ "aa",20,null ]
+ - [ "aa",21,30 ]
+ - [ "aa",22,61 ]
+ - [ "aa",23,63 ]
+ - [ "bb",24,null ]
+ - id: 1
+ desc: rows_range-current_row
+ inputs:
+ - columns: [ "c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date" ]
+ indexs: [ "index1:c1:c7" ]
+ rows:
+ - [ "aa",20,30,1.1,2.1,1590738990000,"2020-05-01" ]
+ - [ "aa",21,31,1.2,2.2,1590738991000,"2020-05-02" ]
+ - [ "aa",22,32,1.3,2.3,1590738992000,"2020-05-03" ]
+ - [ "aa",23,33,1.4,2.4,1590738993000,"2020-05-04" ]
+ - [ "bb",24,34,1.5,2.5,1590738994000,"2020-05-05" ]
+ sql: |
+ SELECT c1, c3, sum(c4) OVER w1 as w1_c4_sum FROM {0} WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS_RANGE BETWEEN 2s PRECEDING AND CURRENT ROW EXCLUDE CURRENT_ROW);
+ expect:
+ order: c3
+ columns: [ "c1 string","c3 int","w1_c4_sum bigint" ]
+ rows:
+ - [ "aa",20,null ]
+ - [ "aa",21,30 ]
+ - [ "aa",22,61 ]
+ - [ "aa",23,63 ]
+ - [ "bb",24,null ]
+ - id: 2
+    desc: rows-current_row-data with the same ts as the current row
+ mode: disk-unsupport
+ inputs:
+ - columns: [ "c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date" ]
+ indexs: [ "index1:c1:c7" ]
+ rows:
+ - [ "aa",20,30,1.1,2.1,1590738990000,"2020-05-01" ]
+ - [ "aa",21,31,1.2,2.2,1590738990000,"2020-05-02" ]
+ - [ "aa",22,32,1.3,2.3,1590738992000,"2020-05-03" ]
+ - [ "aa",23,33,1.4,2.4,1590738993000,"2020-05-04" ]
+ - [ "bb",24,34,1.5,2.5,1590738994000,"2020-05-05" ]
+ sql: |
+ SELECT c1, c3, sum(c4) OVER w1 as w1_c4_sum FROM {0} WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS BETWEEN 2 PRECEDING AND CURRENT ROW EXCLUDE CURRENT_ROW);
+ expect:
+ order: c3
+ columns: [ "c1 string","c3 int","w1_c4_sum bigint" ]
+ rows:
+ - [ "aa",20,null ]
+ - [ "aa",21,30 ]
+ - [ "aa",22,61 ]
+ - [ "aa",23,63 ]
+ - [ "bb",24,null ]
+ - id: 3
+    desc: rows_range-current_row-data with the same ts as the current row
+ mode: disk-unsupport
+ inputs:
+ - columns: [ "c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date" ]
+ indexs: [ "index1:c1:c7" ]
+ rows:
+ - [ "aa",20,30,1.1,2.1,1590738990000,"2020-05-01" ]
+ - [ "aa",21,31,1.2,2.2,1590738990000,"2020-05-02" ]
+ - [ "aa",22,32,1.3,2.3,1590738991000,"2020-05-03" ]
+ - [ "aa",23,33,1.4,2.4,1590738993000,"2020-05-04" ]
+ - [ "bb",24,34,1.5,2.5,1590738994000,"2020-05-05" ]
+ sql: |
+ SELECT c1, c3, sum(c4) OVER w1 as w1_c4_sum FROM {0} WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS_RANGE BETWEEN 2s PRECEDING AND CURRENT ROW EXCLUDE CURRENT_ROW);
+ expect:
+ order: c3
+ columns: [ "c1 string","c3 int","w1_c4_sum bigint" ]
+ rows:
+ - [ "aa",20,null ]
+ - [ "aa",21,30 ]
+ - [ "aa",22,61 ]
+ - [ "aa",23,32 ]
+ - [ "bb",24,null ]
+ - id: 4
+    desc: rows-history-only window-current_row
+ inputs:
+ - columns: [ "c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date" ]
+ indexs: [ "index1:c1:c7" ]
+ rows:
+ - [ "aa",20,30,1.1,2.1,1590738990000,"2020-05-01" ]
+ - [ "aa",21,31,1.2,2.2,1590738991000,"2020-05-02" ]
+ - [ "aa",22,32,1.3,2.3,1590738992000,"2020-05-03" ]
+ - [ "aa",23,33,1.4,2.4,1590738993000,"2020-05-04" ]
+ - [ "bb",24,34,1.5,2.5,1590738994000,"2020-05-05" ]
+ sql: |
+ SELECT c1, c3, sum(c4) OVER w1 as w1_c4_sum FROM {0} WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS BETWEEN 2 PRECEDING AND 1 PRECEDING EXCLUDE CURRENT_ROW);
+ expect:
+ order: c3
+ columns: [ "c1 string","c3 int","w1_c4_sum bigint" ]
+ rows:
+ - [ "aa",20,null ]
+ - [ "aa",21,30 ]
+ - [ "aa",22,61 ]
+ - [ "aa",23,63 ]
+ - [ "bb",24,null ]
+ - id: 5
+    desc: rows_range-history-only window-current_row
+ inputs:
+ - columns: [ "c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date" ]
+ indexs: [ "index1:c1:c7" ]
+ rows:
+ - [ "aa",20,30,1.1,2.1,1590738990000,"2020-05-01" ]
+ - [ "aa",21,31,1.2,2.2,1590738991000,"2020-05-02" ]
+ - [ "aa",22,32,1.3,2.3,1590738992000,"2020-05-03" ]
+ - [ "aa",23,33,1.4,2.4,1590738993000,"2020-05-04" ]
+ - [ "bb",24,34,1.5,2.5,1590738994000,"2020-05-05" ]
+ sql: |
+ SELECT c1, c3, sum(c4) OVER w1 as w1_c4_sum FROM {0} WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS_RANGE BETWEEN 2s PRECEDING AND 1s PRECEDING EXCLUDE CURRENT_ROW);
+ expect:
+ order: c3
+ columns: [ "c1 string","c3 int","w1_c4_sum bigint" ]
+ rows:
+ - [ "aa",20,null ]
+ - [ "aa",21,30 ]
+ - [ "aa",22,61 ]
+ - [ "aa",23,63 ]
+ - [ "bb",24,null ]
+ - id: 6
+ desc: rows-current_row-ts=0
+ mode: disk-unsupport
+ inputs:
+ - columns: [ "c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date" ]
+ indexs: [ "index1:c1:c7" ]
+ rows:
+ - [ "aa",20,30,1.1,2.1,0,"2020-05-01" ]
+ - [ "aa",21,31,1.2,2.2,0,"2020-05-02" ]
+ - [ "aa",22,32,1.3,2.3,1590738992000,"2020-05-03" ]
+ - [ "aa",23,33,1.4,2.4,1590738993000,"2020-05-04" ]
+ - [ "bb",24,34,1.5,2.5,1590738994000,"2020-05-05" ]
+ sql: |
+ SELECT c1, c3, sum(c4) OVER w1 as w1_c4_sum FROM {0} WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS BETWEEN 2 PRECEDING AND CURRENT ROW EXCLUDE CURRENT_ROW);
+ expect:
+ order: c3
+ columns: [ "c1 string","c3 int","w1_c4_sum bigint" ]
+ rows:
+ - [ "aa",20,null ]
+ - [ "aa",21,30 ]
+ - [ "aa",22,61 ]
+ - [ "aa",23,63 ]
+ - [ "bb",24,null ]
+ - id: 7
+ desc: rows_range-current_row-ts=0
+ inputs:
+ - columns: [ "c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date" ]
+ indexs: [ "index1:c1:c7" ]
+ rows:
+ - [ "aa",20,30,1.1,2.1,0,"2020-05-01" ]
+ - [ "aa",21,31,1.2,2.2,0,"2020-05-02" ]
+ - [ "aa",22,32,1.3,2.3,1590738992000,"2020-05-03" ]
+ - [ "aa",23,33,1.4,2.4,1590738993000,"2020-05-04" ]
+ - [ "bb",24,34,1.5,2.5,1590738994000,"2020-05-05" ]
+ sql: |
+ SELECT c1, c3, sum(c4) OVER w1 as w1_c4_sum FROM {0} WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS_RANGE BETWEEN 2s PRECEDING AND CURRENT ROW EXCLUDE CURRENT_ROW);
+ expect:
+ order: c3
+ columns: [ "c1 string","c3 int","w1_c4_sum bigint" ]
+ rows:
+ - [ "aa",20,null ]
+ - [ "aa",21,30 ]
+ - [ "aa",22,null ]
+ - [ "aa",23,32 ]
+ - [ "bb",24,null ]
+ - id: 8
+ desc: rows-current_row-ts=-1
+    tags: ["TODO","negative ts is problematic, verify again once it is supported"]
+ inputs:
+ - columns: [ "c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 bigint","c8 date" ]
+ indexs: [ "index1:c1:c7" ]
+ rows:
+ - [ "aa",20,30,1.1,2.1,-1,"2020-05-01" ]
+ - [ "aa",21,31,1.2,2.2,-1,"2020-05-02" ]
+ - [ "aa",22,32,1.3,2.3,1590738992000,"2020-05-03" ]
+ - [ "aa",23,33,1.4,2.4,1590738993000,"2020-05-04" ]
+ - [ "bb",24,34,1.5,2.5,1590738994000,"2020-05-05" ]
+ sql: |
+ SELECT c1, c3, sum(c4) OVER w1 as w1_c4_sum FROM {0} WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS BETWEEN 2 PRECEDING AND CURRENT ROW EXCLUDE CURRENT_ROW);
+ expect:
+ order: c3
+ columns: [ "c1 string","c3 int","w1_c4_sum bigint" ]
+ rows:
+ - [ "aa",20,null ]
+ - [ "aa",21,30 ]
+ - [ "aa",22,61 ]
+ - [ "aa",23,63 ]
+ - [ "bb",24,null ]
+ - id: 9
+ desc: rows_range-current_row-ts=-1
+    tags: ["TODO","negative ts is problematic, verify again once it is supported"]
+ inputs:
+ - columns: [ "c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 bigint","c8 date" ]
+ indexs: [ "index1:c1:c7" ]
+ rows:
+ - [ "aa",20,30,1.1,2.1,-1,"2020-05-01" ]
+ - [ "aa",21,31,1.2,2.2,-1,"2020-05-02" ]
+ - [ "aa",22,32,1.3,2.3,1590738992000,"2020-05-03" ]
+ - [ "aa",23,33,1.4,2.4,1590738993000,"2020-05-04" ]
+ - [ "bb",24,34,1.5,2.5,1590738994000,"2020-05-05" ]
+ sql: |
+ SELECT c1, c3, sum(c4) OVER w1 as w1_c4_sum FROM {0} WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS_RANGE BETWEEN 2s PRECEDING AND CURRENT ROW EXCLUDE CURRENT_ROW);
+ expect:
+ order: c3
+ columns: [ "c1 string","c3 int","w1_c4_sum bigint" ]
+ rows:
+ - [ "aa",20,null ]
+ - [ "aa",21,30 ]
+ - [ "aa",22,null ]
+ - [ "aa",23,32 ]
+ - [ "bb",24,null ]
+ - id: 10
+    desc: rows-current_row-ts is negative and 0
+    tags: ["TODO","negative ts is problematic, verify again once it is supported"]
+ inputs:
+ - columns: [ "c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 bigint","c8 date" ]
+ indexs: [ "index1:c1:c7" ]
+ rows:
+ - [ "aa",20,30,1.1,2.1,-1000,"2020-05-01" ]
+ - [ "aa",21,31,1.2,2.2,0,"2020-05-02" ]
+ - [ "aa",22,32,1.3,2.3,1590738992000,"2020-05-03" ]
+ - [ "aa",23,33,1.4,2.4,1590738993000,"2020-05-04" ]
+ - [ "bb",24,34,1.5,2.5,1590738994000,"2020-05-05" ]
+ sql: |
+ SELECT c1, c3, sum(c4) OVER w1 as w1_c4_sum FROM {0} WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS BETWEEN 2 PRECEDING AND CURRENT ROW EXCLUDE CURRENT_ROW);
+ expect:
+ order: c3
+ columns: [ "c1 string","c3 int","w1_c4_sum bigint" ]
+ rows:
+ - [ "aa",20,null ]
+ - [ "aa",21,30 ]
+ - [ "aa",22,61 ]
+ - [ "aa",23,63 ]
+ - [ "bb",24,null ]
+ - id: 11
+    desc: rows_range-current_row-ts is negative and 0
+    tags: ["TODO","negative ts is problematic, verify again once it is supported"]
+ inputs:
+ - columns: [ "c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 bigint","c8 date" ]
+ indexs: [ "index1:c1:c7" ]
+ rows:
+ - [ "aa",20,30,1.1,2.1,-1000,"2020-05-01" ]
+ - [ "aa",21,31,1.2,2.2,0,"2020-05-02" ]
+ - [ "aa",22,32,1.3,2.3,1590738992000,"2020-05-03" ]
+ - [ "aa",23,33,1.4,2.4,1590738993000,"2020-05-04" ]
+ - [ "bb",24,34,1.5,2.5,1590738994000,"2020-05-05" ]
+ sql: |
+ SELECT c1, c3, sum(c4) OVER w1 as w1_c4_sum FROM {0} WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS_RANGE BETWEEN 2s PRECEDING AND CURRENT ROW EXCLUDE CURRENT_ROW);
+ expect:
+ order: c3
+ columns: [ "c1 string","c3 int","w1_c4_sum bigint" ]
+ rows:
+ - [ "aa",20,null ]
+ - [ "aa",21,30 ]
+ - [ "aa",22,null ]
+ - [ "aa",23,32 ]
+ - [ "bb",24,null ]
+ - id: 12
+ desc: rows-open-current_row
+ mode: disk-unsupport
+ inputs:
+ - columns: [ "c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date" ]
+ indexs: [ "index1:c1:c7" ]
+ rows:
+ - [ "aa",20,30,1.1,2.1,0,"2020-05-01" ]
+ - [ "aa",21,31,1.2,2.2,0,"2020-05-02" ]
+ - [ "aa",22,32,1.3,2.3,1590738992000,"2020-05-03" ]
+ - [ "aa",23,33,1.4,2.4,1590738993000,"2020-05-04" ]
+ - [ "bb",24,34,1.5,2.5,1590738994000,"2020-05-05" ]
+ sql: |
+ SELECT c1, c3, sum(c4) OVER w1 as w1_c4_sum FROM {0} WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS BETWEEN 2 PRECEDING AND 0 OPEN PRECEDING EXCLUDE CURRENT_ROW);
+ expect:
+ order: c3
+ columns: [ "c1 string","c3 int","w1_c4_sum bigint" ]
+ rows:
+ - [ "aa",20,null ]
+ - [ "aa",21,30 ]
+ - [ "aa",22,61 ]
+ - [ "aa",23,63 ]
+ - [ "bb",24,null ]
+ - id: 13
+ desc: rows_range-open-current_row-ts=0
+    tags: ["TODO","bug, verify after the fix"]
+ inputs:
+ - columns: [ "c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date" ]
+ indexs: [ "index1:c1:c7" ]
+ rows:
+ - [ "aa",20,30,1.1,2.1,0,"2020-05-01" ]
+ - [ "aa",21,31,1.2,2.2,0,"2020-05-02" ]
+ - [ "aa",22,32,1.3,2.3,1590738992000,"2020-05-03" ]
+ - [ "aa",23,33,1.4,2.4,1590738993000,"2020-05-04" ]
+ - [ "aa",24,34,1.5,2.5,1590738993000,"2020-05-05" ]
+ sql: |
+ SELECT c1, c3, sum(c4) OVER w1 as w1_c4_sum FROM {0} WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS_RANGE BETWEEN 2s PRECEDING AND 0s OPEN PRECEDING EXCLUDE CURRENT_ROW);
+ expect:
+ order: c3
+ columns: [ "c1 string","c3 int","w1_c4_sum bigint" ]
+ rows:
+ - [ "aa",20,null ]
+ - [ "aa",21,null ]
+ - [ "aa",22,null ]
+ - [ "aa",23,32 ]
+ - [ "aa",24,32 ]
+ - id: 14
+    desc: rows_range-current_row-maxsize smaller than the window
+ inputs:
+ - columns: [ "c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date" ]
+ indexs: [ "index1:c1:c7" ]
+ rows:
+ - [ "aa",20,30,1.1,2.1,1590738990000,"2020-05-01" ]
+ - [ "aa",21,31,1.2,2.2,1590738991000,"2020-05-02" ]
+ - [ "aa",22,32,1.3,2.3,1590738992000,"2020-05-03" ]
+ - [ "aa",23,33,1.4,2.4,1590738993000,"2020-05-04" ]
+ - [ "bb",24,34,1.5,2.5,1590738994000,"2020-05-05" ]
+ sql: |
+ SELECT c1, c3, sum(c4) OVER w1 as w1_c4_sum FROM {0} WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS_RANGE BETWEEN 3s PRECEDING AND CURRENT ROW MAXSIZE 2 EXCLUDE CURRENT_ROW);
+ expect:
+ order: c3
+ columns: [ "c1 string","c3 int","w1_c4_sum bigint" ]
+ rows:
+ - [ "aa",20,null ]
+ - [ "aa",21,30 ]
+ - [ "aa",22,61 ]
+ - [ "aa",23,63 ]
+ - [ "bb",24,null ]
+ - id: 15
+    desc: rows_range-current_row-maxsize larger than the window
+ inputs:
+ - columns: [ "c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date" ]
+ indexs: [ "index1:c1:c7" ]
+ rows:
+ - [ "aa",20,30,1.1,2.1,1590738990000,"2020-05-01" ]
+ - [ "aa",21,31,1.2,2.2,1590738991000,"2020-05-02" ]
+ - [ "aa",22,32,1.3,2.3,1590738992000,"2020-05-03" ]
+ - [ "aa",23,33,1.4,2.4,1590738993000,"2020-05-04" ]
+ - [ "bb",24,34,1.5,2.5,1590738994000,"2020-05-05" ]
+ sql: |
+ SELECT c1, c3, sum(c4) OVER w1 as w1_c4_sum FROM {0} WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS_RANGE BETWEEN 2s PRECEDING AND CURRENT ROW MAXSIZE 3 EXCLUDE CURRENT_ROW);
+ expect:
+ order: c3
+ columns: [ "c1 string","c3 int","w1_c4_sum bigint" ]
+ rows:
+ - [ "aa",20,null ]
+ - [ "aa",21,30 ]
+ - [ "aa",22,61 ]
+ - [ "aa",23,63 ]
+ - [ "bb",24,null ]
+ - id: 16
+ desc: rows-current_row-current_time
+ mode: disk-unsupport
+ inputs:
+ - columns: [ "c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date" ]
+ indexs: [ "index1:c1:c7" ]
+ rows:
+ - [ "aa",20,30,1.1,2.1,1590738990000,"2020-05-01" ]
+ - [ "aa",21,31,1.2,2.2,1590738990000,"2020-05-02" ]
+ - [ "aa",22,32,1.3,2.3,1590738992000,"2020-05-03" ]
+ - [ "aa",23,33,1.4,2.4,1590738993000,"2020-05-04" ]
+ - [ "bb",24,34,1.5,2.5,1590738994000,"2020-05-05" ]
+ sql: |
+ SELECT c1, c3, sum(c4) OVER w1 as w1_c4_sum FROM {0} WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS BETWEEN 2 PRECEDING AND CURRENT ROW EXCLUDE CURRENT_TIME EXCLUDE CURRENT_ROW);
+ expect:
+ order: c3
+ columns: [ "c1 string","c3 int","w1_c4_sum bigint" ]
+ rows:
+ - [ "aa",20,null ]
+ - [ "aa",21,null ]
+ - [ "aa",22,61 ]
+ - [ "aa",23,63 ]
+ - [ "bb",24,null ]
+ - id: 17
+ desc: rows_range-current_row-current_time
+ mode: disk-unsupport
+ inputs:
+ - columns: [ "c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date" ]
+ indexs: [ "index1:c1:c7" ]
+ rows:
+ - [ "aa",20,30,1.1,2.1,1590738990000,"2020-05-01" ]
+ - [ "aa",21,31,1.2,2.2,1590738990000,"2020-05-02" ]
+ - [ "aa",22,32,1.3,2.3,1590738991000,"2020-05-03" ]
+ - [ "aa",23,33,1.4,2.4,1590738993000,"2020-05-04" ]
+ - [ "bb",24,34,1.5,2.5,1590738994000,"2020-05-05" ]
+ sql: |
+ SELECT c1, c3, sum(c4) OVER w1 as w1_c4_sum FROM {0} WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS_RANGE BETWEEN 2s PRECEDING AND CURRENT ROW EXCLUDE CURRENT_ROW EXCLUDE CURRENT_TIME);
+ expect:
+ order: c3
+ columns: [ "c1 string","c3 int","w1_c4_sum bigint" ]
+ rows:
+ - [ "aa",20,null ]
+ - [ "aa",21,null ]
+ - [ "aa",22,61 ]
+ - [ "aa",23,32 ]
+ - [ "bb",24,null ]
+ - id: 18
+ desc: window union rows-current_row-instance_not_in_window
+ inputs:
+ - columns: ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"]
+ indexs: ["index1:c1:c7"]
+ rows:
+ - [1,"aa",20,30,1.1,2.1,1590738993000,"2020-05-01"]
+ - [4,"dd",20,33,1.4,2.4,1590738994000,"2020-05-04"]
+ - [5,"ee",21,34,1.5,2.5,1590738995000,"2020-05-05"]
+ - columns: ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"]
+ indexs: ["index1:c3:c7"]
+ rows:
+ - [2,"bb",20,31,1.2,2.2,1590738991000,"2020-05-02"]
+ - [3,"cc",20,32,1.3,2.3,1590738992000,"2020-05-03"]
+ sql: |
+ SELECT id, c1, c3, sum(c4) OVER w1 as w1_c4_sum FROM {0} WINDOW
+ w1 AS (UNION {1} PARTITION BY {0}.c3 ORDER BY {0}.c7 ROWS BETWEEN 2 PRECEDING AND CURRENT ROW EXCLUDE CURRENT_ROW INSTANCE_NOT_IN_WINDOW);
+ expect:
+ order: id
+ columns: ["id int","c1 string","c3 int","w1_c4_sum bigint"]
+ rows:
+ - [1,"aa",20,63]
+ - [4,"dd",20,63]
+ - [5,"ee",21,null]
+ - id: 19
+ desc: window union rows_range-current_row-instance_not_in_window
+ inputs:
+ - columns: ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"]
+ indexs: ["index1:c1:c7"]
+ rows:
+ - [1,"aa",20,30,1.1,2.1,1590738993000,"2020-05-01"]
+ - [4,"dd",20,33,1.4,2.4,1590738994000,"2020-05-04"]
+ - [5,"ee",21,34,1.5,2.5,1590738995000,"2020-05-05"]
+ - columns: ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"]
+ indexs: ["index1:c3:c7"]
+ rows:
+ - [2,"bb",20,31,1.2,2.2,1590738991000,"2020-05-02"]
+ - [3,"cc",20,32,1.3,2.3,1590738992000,"2020-05-03"]
+ sql: |
+ SELECT id, c1, c3, sum(c4) OVER w1 as w1_c4_sum FROM {0} WINDOW
+ w1 AS (UNION {1} PARTITION BY {0}.c3 ORDER BY {0}.c7 ROWS_RANGE BETWEEN 2s PRECEDING AND CURRENT ROW EXCLUDE CURRENT_ROW INSTANCE_NOT_IN_WINDOW);
+ expect:
+ order: id
+ columns: ["id int","c1 string","c3 int","w1_c4_sum bigint"]
+ rows:
+ - [1,"aa",20,63]
+ - [4,"dd",20,32]
+ - [5,"ee",21,null]
+ - id: 20
+ desc: window union rows-current_row
+ inputs:
+ - columns: ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"]
+ indexs: ["index1:c3:c7"]
+ rows:
+ - [1,"aa",20,30,1.1,2.1,1590738993000,"2020-05-01"]
+ - [4,"dd",20,33,1.4,2.4,1590738994000,"2020-05-04"]
+ - [5,"ee",21,34,1.5,2.5,1590738995000,"2020-05-05"]
+ - columns: ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"]
+ indexs: ["index1:c3:c7"]
+ rows:
+ - [2,"bb",20,31,1.2,2.2,1590738991000,"2020-05-02"]
+ - [3,"cc",20,32,1.3,2.3,1590738992000,"2020-05-03"]
+ sql: |
+ SELECT id, c1, c3, sum(c4) OVER w1 as w1_c4_sum FROM {0} WINDOW
+ w1 AS (UNION {1} PARTITION BY {0}.c3 ORDER BY {0}.c7 ROWS BETWEEN 2 PRECEDING AND CURRENT ROW EXCLUDE CURRENT_ROW);
+ expect:
+ order: id
+ columns: ["id int","c1 string","c3 int","w1_c4_sum bigint"]
+ rows:
+ - [1,"aa",20,63]
+ - [4,"dd",20,62]
+ - [5,"ee",21,null]
+ - id: 21
+ desc: window union rows_range-current_row
+ inputs:
+ - columns: ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"]
+ indexs: ["index1:c3:c7"]
+ rows:
+ - [1,"aa",20,30,1.1,2.1,1590738993000,"2020-05-01"]
+ - [4,"dd",20,33,1.4,2.4,1590738994000,"2020-05-04"]
+ - [5,"ee",21,34,1.5,2.5,1590738995000,"2020-05-05"]
+ - columns: ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"]
+ indexs: ["index1:c3:c7"]
+ rows:
+ - [2,"bb",20,31,1.2,2.2,1590738991000,"2020-05-02"]
+ - [3,"cc",20,32,1.3,2.3,1590738992000,"2020-05-03"]
+ sql: |
+ SELECT id, c1, c3, sum(c4) OVER w1 as w1_c4_sum FROM {0} WINDOW
+ w1 AS (UNION {1} PARTITION BY {0}.c3 ORDER BY {0}.c7 ROWS_RANGE BETWEEN 2s PRECEDING AND CURRENT ROW EXCLUDE CURRENT_ROW);
+ expect:
+ order: id
+ columns: ["id int","c1 string","c3 int","w1_c4_sum bigint"]
+ rows:
+ - [1,"aa",20,63]
+ - [4,"dd",20,62]
+ - [5,"ee",21,null]
+ - id: 22
+    desc: rows window with open/maxsize/instance_not_in_window/current_time/current_row
+ inputs:
+ - columns: ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"]
+ indexs: ["index1:c1:c7"]
+ rows:
+ - [1,"aa",20,30,1.1,2.1,1590738993000,"2020-05-01"]
+ - [4,"dd",20,33,1.4,2.4,1590738994000,"2020-05-04"]
+ - [5,"ee",21,34,1.5,2.5,1590738995000,"2020-05-05"]
+ - columns: ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"]
+ indexs: ["index1:c3:c7"]
+ rows:
+ - [2,"bb",20,31,1.2,2.2,1590738991000,"2020-05-02"]
+ - [3,"cc",20,32,1.3,2.3,1590738992000,"2020-05-03"]
+ - [6,"cc",20,35,1.3,2.3,1590738993000,"2020-05-03"]
+ sql: |
+ SELECT id, c1, c3, sum(c4) OVER w1 as w1_c4_sum FROM {0} WINDOW
+ w1 AS (UNION {1} PARTITION BY {0}.c3 ORDER BY {0}.c7 ROWS BETWEEN 2 PRECEDING AND 0 OPEN PRECEDING EXCLUDE CURRENT_ROW EXCLUDE CURRENT_TIME INSTANCE_NOT_IN_WINDOW);
+ expect:
+ order: id
+ columns: ["id int","c1 string","c3 int","w1_c4_sum bigint"]
+ rows:
+ - [1,"aa",20,63]
+ - [4,"dd",20,67]
+ - [5,"ee",21,null]
+ - id: 23
+    desc: rows_range window with open/maxsize/instance_not_in_window/current_time/current_row
+ inputs:
+ - columns: ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"]
+ indexs: ["index1:c1:c7"]
+ rows:
+ - [1,"aa",20,30,1.1,2.1,1590738993000,"2020-05-01"]
+ - [4,"dd",20,33,1.4,2.4,1590738994000,"2020-05-04"]
+ - [5,"ee",21,34,1.5,2.5,1590738995000,"2020-05-05"]
+ - columns: ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"]
+ indexs: ["index1:c3:c7"]
+ rows:
+ - [2,"bb",20,31,1.2,2.2,1590738991000,"2020-05-02"]
+ - [3,"cc",20,32,1.3,2.3,1590738992000,"2020-05-03"]
+ - [6,"cc",20,35,1.3,2.3,1590738993000,"2020-05-03"]
+ sql: |
+ SELECT id, c1, c3, sum(c4) OVER w1 as w1_c4_sum FROM {0} WINDOW
+ w1 AS (UNION {1} PARTITION BY {0}.c3 ORDER BY {0}.c7 ROWS_RANGE BETWEEN 2s PRECEDING AND 0s OPEN PRECEDING MAXSIZE 1 EXCLUDE CURRENT_ROW EXCLUDE CURRENT_TIME INSTANCE_NOT_IN_WINDOW);
+ expect:
+ order: id
+ columns: ["id int","c1 string","c3 int","w1_c4_sum bigint"]
+ rows:
+ - [1,"aa",20,32]
+ - [4,"dd",20,35]
+ - [5,"ee",21,null]
+ - id: 24
+ desc: rows-lag-current_row
+ inputs:
+ - columns: [ "c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date" ]
+ indexs: [ "index1:c1:c7" ]
+ rows:
+ - [ "aa",20,30,1.1,2.1,1590738990000,"2020-05-01" ]
+ - [ "aa",21,31,1.2,2.2,1590738991000,"2020-05-02" ]
+ - [ "aa",22,32,1.3,2.3,1590738992000,"2020-05-03" ]
+ - [ "aa",23,33,1.4,2.4,1590738993000,"2020-05-04" ]
+ - [ "bb",24,34,1.5,2.5,1590738994000,"2020-05-05" ]
+ sql: |
+ SELECT c1, c3, lag(c4,2) OVER w1 as w1_c4_sum FROM {0} WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS BETWEEN 2 PRECEDING AND CURRENT ROW EXCLUDE CURRENT_ROW);
+ expect:
+ order: c3
+ columns: [ "c1 string","c3 int","w1_c4_sum bigint" ]
+ rows:
+ - [ "aa",20,null ]
+ - [ "aa",21,null ]
+ - [ "aa",22,30 ]
+ - [ "aa",23,31 ]
+ - [ "bb",24,null ]
+ - id: 25
+ desc: rows_range-lag-current_row
+ inputs:
+ - columns: [ "c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date" ]
+ indexs: [ "index1:c1:c7" ]
+ rows:
+ - [ "aa",20,30,1.1,2.1,1590738990000,"2020-05-01" ]
+ - [ "aa",21,31,1.2,2.2,1590738991000,"2020-05-02" ]
+ - [ "aa",22,32,1.3,2.3,1590738992000,"2020-05-03" ]
+ - [ "aa",23,33,1.4,2.4,1590738993000,"2020-05-04" ]
+ - [ "bb",24,34,1.5,2.5,1590738994000,"2020-05-05" ]
+ sql: |
+ SELECT c1, c3, lag(c4,2) OVER w1 as w1_c4_sum FROM {0} WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS_RANGE BETWEEN 2s PRECEDING AND CURRENT ROW EXCLUDE CURRENT_ROW);
+ expect:
+ order: c3
+ columns: [ "c1 string","c3 int","w1_c4_sum bigint" ]
+ rows:
+ - [ "aa",20,null ]
+ - [ "aa",21,null ]
+ - [ "aa",22,30 ]
+ - [ "aa",23,31 ]
+ - [ "bb",24,null ]
+ - id: 26
+ desc: rows-at-current_row
+ inputs:
+ - columns: [ "c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date" ]
+ indexs: [ "index1:c1:c7" ]
+ rows:
+ - [ "aa",20,30,1.1,2.1,1590738990000,"2020-05-01" ]
+ - [ "aa",21,31,1.2,2.2,1590738991000,"2020-05-02" ]
+ - [ "aa",22,32,1.3,2.3,1590738992000,"2020-05-03" ]
+ - [ "aa",23,33,1.4,2.4,1590738993000,"2020-05-04" ]
+ - [ "bb",24,34,1.5,2.5,1590738994000,"2020-05-05" ]
+ sql: |
+ SELECT c1, c3, at(c4,2) OVER w1 as w1_c4_sum FROM {0} WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS BETWEEN 2 PRECEDING AND CURRENT ROW EXCLUDE CURRENT_ROW);
+ expect:
+ order: c3
+ columns: [ "c1 string","c3 int","w1_c4_sum bigint" ]
+ rows:
+ - [ "aa",20,null ]
+ - [ "aa",21,null ]
+ - [ "aa",22,30 ]
+ - [ "aa",23,31 ]
+ - [ "bb",24,null ]
+ - id: 27
+ desc: rows_range-at-current_row
+ inputs:
+ - columns: [ "c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date" ]
+ indexs: [ "index1:c1:c7" ]
+ rows:
+ - [ "aa",20,30,1.1,2.1,1590738990000,"2020-05-01" ]
+ - [ "aa",21,31,1.2,2.2,1590738991000,"2020-05-02" ]
+ - [ "aa",22,32,1.3,2.3,1590738992000,"2020-05-03" ]
+ - [ "aa",23,33,1.4,2.4,1590738993000,"2020-05-04" ]
+ - [ "bb",24,34,1.5,2.5,1590738994000,"2020-05-05" ]
+ sql: |
+ SELECT c1, c3, at(c4,2) OVER w1 as w1_c4_sum FROM {0} WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS_RANGE BETWEEN 2s PRECEDING AND CURRENT ROW EXCLUDE CURRENT_ROW);
+ expect:
+ order: c3
+ columns: [ "c1 string","c3 int","w1_c4_sum bigint" ]
+ rows:
+ - [ "aa",20,null ]
+ - [ "aa",21,null ]
+ - [ "aa",22,30 ]
+ - [ "aa",23,31 ]
+ - [ "bb",24,null ]
+ - id: 28
+    desc: two windows, one rows and one rows_range, with current_row
+ inputs:
+ - columns: [ "c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date" ]
+ indexs: [ "index1:c1:c7" ]
+ rows:
+ - [ "aa",20,30,1.1,2.1,1590738990000,"2020-05-01" ]
+ - [ "aa",21,31,1.2,2.2,1590738991000,"2020-05-02" ]
+ - [ "aa",22,32,1.3,2.3,1590738992000,"2020-05-03" ]
+ - [ "aa",23,33,1.4,2.4,1590738993000,"2020-05-04" ]
+ - [ "bb",24,34,1.5,2.5,1590738994000,"2020-05-05" ]
+ sql: |
+ SELECT c1, c3, sum(c4) OVER w1 as w1_c4_sum, count(c5) OVER w2 as w2_c5_count FROM {0}
+ WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS BETWEEN 2 PRECEDING AND CURRENT ROW EXCLUDE CURRENT_ROW),
+ w2 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 rows_range BETWEEN 2s PRECEDING AND CURRENT ROW EXCLUDE CURRENT_ROW);
+ expect:
+ order: c3
+ columns: [ "c1 string","c3 int","w1_c4_sum bigint","w2_c5_count bigint" ]
+ rows:
+ - [ "aa",20,null,0 ]
+ - [ "aa",21,30,1 ]
+ - [ "aa",22,61,2 ]
+ - [ "aa",23,63,2 ]
+ - [ "bb",24,null,0 ]
+ - id: 29
+    desc: current_row in lowercase
+ inputs:
+ - columns: [ "c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date" ]
+ indexs: [ "index1:c1:c7" ]
+ rows:
+ - [ "aa",20,30,1.1,2.1,1590738990000,"2020-05-01" ]
+ - [ "aa",21,31,1.2,2.2,1590738991000,"2020-05-02" ]
+ - [ "aa",22,32,1.3,2.3,1590738992000,"2020-05-03" ]
+ - [ "aa",23,33,1.4,2.4,1590738993000,"2020-05-04" ]
+ - [ "bb",24,34,1.5,2.5,1590738994000,"2020-05-05" ]
+ sql: |
+ SELECT c1, c3, sum(c4) OVER w1 as w1_c4_sum FROM {0} WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS BETWEEN 2 PRECEDING AND CURRENT ROW exclude current_row);
+ expect:
+ order: c3
+ columns: [ "c1 string","c3 int","w1_c4_sum bigint" ]
+ rows:
+ - [ "aa",20,null ]
+ - [ "aa",21,30 ]
+ - [ "aa",22,61 ]
+ - [ "aa",23,63 ]
+ - [ "bb",24,null ]
+ - id: 30
+    desc: maxsize in the wrong position
+ inputs:
+ - columns: [ "c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date" ]
+ indexs: [ "index1:c1:c7" ]
+ rows:
+ - [ "aa",20,30,1.1,2.1,1590738990000,"2020-05-01" ]
+ - [ "aa",21,31,1.2,2.2,1590738991000,"2020-05-02" ]
+ - [ "aa",22,32,1.3,2.3,1590738992000,"2020-05-03" ]
+ - [ "aa",23,33,1.4,2.4,1590738993000,"2020-05-04" ]
+ - [ "bb",24,34,1.5,2.5,1590738994000,"2020-05-05" ]
+ sql: |
+ SELECT c1, c3, sum(c4) OVER w1 as w1_c4_sum FROM {0} WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS_RANGE BETWEEN 3s PRECEDING AND CURRENT ROW EXCLUDE CURRENT_ROW MAXSIZE 2);
+ expect:
+ success: false
+ - id: 31
+    desc: rows-history-only window-current_row-ts=0
+ mode: disk-unsupport
+ inputs:
+ - columns: [ "c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date" ]
+ indexs: [ "index1:c1:c7" ]
+ rows:
+ - [ "aa",20,30,1.1,2.1,0,"2020-05-01" ]
+ - [ "aa",21,31,1.2,2.2,0,"2020-05-02" ]
+ - [ "aa",22,32,1.3,2.3,1590738992000,"2020-05-03" ]
+ - [ "aa",23,33,1.4,2.4,1590738993000,"2020-05-04" ]
+ - [ "bb",24,34,1.5,2.5,1590738994000,"2020-05-05" ]
+ sql: |
+ SELECT c1, c3, sum(c4) OVER w1 as w1_c4_sum FROM {0} WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS BETWEEN 2 PRECEDING AND 1 PRECEDING EXCLUDE CURRENT_ROW);
+ expect:
+ order: c3
+ columns: [ "c1 string","c3 int","w1_c4_sum bigint" ]
+ rows:
+ - [ "aa",20,null ]
+ - [ "aa",21,30 ]
+ - [ "aa",22,61 ]
+ - [ "aa",23,63 ]
+ - [ "bb",24,null ]
+ - id: 32
+    desc: rows_range-history-only window-current_row-ts=0
+    tags: ["TODO","bug, verify after the fix"]
+ inputs:
+ - columns: [ "c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date" ]
+ indexs: [ "index1:c1:c7" ]
+ rows:
+ - [ "aa",20,30,1.1,2.1,0,"2020-05-01" ]
+ - [ "aa",21,31,1.2,2.2,0,"2020-05-02" ]
+ - [ "aa",22,32,1.3,2.3,1000,"2020-05-03" ]
+ - [ "aa",23,33,1.4,2.4,2000,"2020-05-04" ]
+ - [ "bb",24,34,1.5,2.5,1590738994000,"2020-05-05" ]
+ sql: |
+ SELECT c1, c3, sum(c4) OVER w1 as w1_c4_sum FROM {0} WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS_RANGE BETWEEN 2s PRECEDING AND 1s PRECEDING EXCLUDE CURRENT_ROW);
+ expect:
+ order: c3
+ columns: [ "c1 string","c3 int","w1_c4_sum bigint" ]
+ rows:
+ - [ "aa",20,null ]
+ - [ "aa",21,null ]
+ - [ "aa",22,61 ]
+ - [ "aa",23,93 ]
+ - [ "bb",24,null ]
+
+ ###################################################
+ # tests for window attribute 'EXCLUDE CURRENT_ROW'
+ # - id: 20 - 23: exclude current_row window + lag window
+ # - id: 24 - 30: exclude current_row window + (maxsize, exclude current_time, instance_not_in_window)
+ ###################################################
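+  #
+  # An illustrative sketch (not an executed case) of the attribute under test:
+  # with a frame such as
+  #   ROWS BETWEEN 2 PRECEDING AND CURRENT ROW EXCLUDE CURRENT_ROW
+  # the current row is dropped from its own frame, so for partition "aa" with
+  # c4 = 30, 31, 32 the running sum(c4) becomes null, 30, 61 rather than the
+  # 30, 61, 93 produced without EXCLUDE CURRENT_ROW (compare case id 0 above).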
+ - id: 20
+ desc: |
+      rows_range window union with exclude current_row. batch mode is not supported, see 1807
+ mode: batch-unsupport
+ request_plan: |
+ SIMPLE_PROJECT(sources=(id, cnt, mv, mi, l1))
+ REQUEST_JOIN(type=kJoinTypeConcat)
+ PROJECT(type=Aggregation)
+ REQUEST_UNION(EXCLUDE_CURRENT_ROW, partition_keys=(), orders=(ASC), range=(ts, 3000 PRECEDING, 0 PRECEDING), index_keys=(g))
+ +-UNION(partition_keys=(), orders=(ASC), range=(ts, 3000 PRECEDING, 0 PRECEDING), index_keys=(g))
+ RENAME(name=t1)
+ DATA_PROVIDER(type=Partition, table=t2, index=idx)
+ DATA_PROVIDER(request=t1)
+ DATA_PROVIDER(type=Partition, table=t1, index=idx)
+ PROJECT(type=Aggregation)
+ REQUEST_UNION(partition_keys=(), orders=(ASC), rows=(ts, 1 PRECEDING, 0 CURRENT), index_keys=(g))
+ +-UNION(partition_keys=(), orders=(ASC), rows=(ts, 1 PRECEDING, 0 CURRENT), index_keys=(g))
+ RENAME(name=t1)
+ DATA_PROVIDER(type=Partition, table=t2, index=idx)
+ DATA_PROVIDER(request=t1)
+ DATA_PROVIDER(type=Partition, table=t1, index=idx)
+ inputs:
+ - name: t1
+ columns:
+ - id int
+ - ts timestamp
+ - g int
+ - val int
+ indexs:
+ - idx:g:ts
+ data: |
+ 1, 100, 111, 21
+ 2, 100, 111, 5
+ - name: t2
+ columns:
+ - id int
+ - ts timestamp
+ - g int
+ - val int
+ indexs:
+ - idx:g:ts
+ data: |
+ 1, 99, 111, 233
+ 1, 100, 111, 200
+ 1, 101, 111, 17
+ sql: |
+ select
+ id, count(val) over w as cnt,
+ max(val) over w as mv,
+ min(val) over w as mi,
+ lag(val, 1) over w as l1
+ from t1 window w as(
+ union t2
+ partition by `g` order by `ts`
+ rows_range between 3s preceding and 0s preceding EXCLUDE CURRENT_ROW);
+ expect:
+ columns:
+ - id int
+ - cnt int64
+ - mv int
+ - mi int
+ - l1 int
+ order: id
+ data: |
+ 1, 2, 233, 200, 200
+ 2, 3, 233, 21, 21
+ - id: 21
+ desc: |
+ rows_range window union with exclude current_row and exclude current_time
+ mode: batch-unsupport,disk-unsupport
+ inputs:
+ - name: t1
+ columns:
+ - id int
+ - ts timestamp
+ - g int
+ - val int
+ indexs:
+ - idx:g:ts
+ data: |
+ 1, 100, 111, 21
+ 2, 100, 111, 5
+ 3, 101, 111, 40
+ - name: t2
+ columns:
+ - id int
+ - ts timestamp
+ - g int
+ - val int
+ indexs:
+ - idx:g:ts
+ data: |
+ 1, 99, 111, 233
+ 1, 100, 111, 200
+ 1, 101, 111, 17
+ request_plan: |
+ SIMPLE_PROJECT(sources=(id, cnt, mv, mi, l1))
+ REQUEST_JOIN(type=kJoinTypeConcat)
+ PROJECT(type=Aggregation)
+ REQUEST_UNION(EXCLUDE_CURRENT_TIME, EXCLUDE_CURRENT_ROW, partition_keys=(), orders=(ASC), range=(ts, 3000 PRECEDING, 0 PRECEDING), index_keys=(g))
+ +-UNION(partition_keys=(), orders=(ASC), range=(ts, 3000 PRECEDING, 0 PRECEDING), index_keys=(g))
+ RENAME(name=t1)
+ DATA_PROVIDER(type=Partition, table=t2, index=idx)
+ DATA_PROVIDER(request=t1)
+ DATA_PROVIDER(type=Partition, table=t1, index=idx)
+ PROJECT(type=Aggregation)
+ REQUEST_UNION(EXCLUDE_CURRENT_TIME, partition_keys=(), orders=(ASC), rows=(ts, 1 PRECEDING, 0 CURRENT), index_keys=(g))
+ +-UNION(partition_keys=(), orders=(ASC), rows=(ts, 1 PRECEDING, 0 CURRENT), index_keys=(g))
+ RENAME(name=t1)
+ DATA_PROVIDER(type=Partition, table=t2, index=idx)
+ DATA_PROVIDER(request=t1)
+ DATA_PROVIDER(type=Partition, table=t1, index=idx)
+ sql: |
+ select
+ id, count(val) over w as cnt,
+ max(val) over w as mv,
+ min(val) over w as mi,
+ lag(val, 1) over w as l1
+ from t1 window w as(
+ union t2
+ partition by `g` order by `ts`
+ rows_range between 3s preceding and 0s preceding
+ EXCLUDE CURRENT_ROW EXCLUDE CURRENT_TIME);
+ expect:
+ columns:
+ - id int
+ - cnt int64
+ - mv int
+ - mi int
+ - l1 int
+ order: id
+ data: |
+ 1, 1, 233, 233, 233
+ 2, 1, 233, 233, 233
+ 3, 4, 233, 5, 5
+ - id: 22
+ desc: |
+ rows_range window union with exclude current_row and instance_not_in_window
+ mode: batch-unsupport
+ inputs:
+ - name: t1
+ columns:
+ - id int
+ - ts timestamp
+ - g int
+ - val int
+ indexs:
+ - idx:g:ts
+ data: |
+ 1, 100, 111, 21
+ 2, 100, 111, 5
+ - name: t2
+ columns:
+ - id int
+ - ts timestamp
+ - g int
+ - val int
+ indexs:
+ - idx:g:ts
+ data: |
+ 1, 99, 111, 233
+ 1, 100, 111, 200
+ 1, 101, 111, 17
+ request_plan: |
+ SIMPLE_PROJECT(sources=(id, cnt, mv, mi, l1))
+ REQUEST_JOIN(type=kJoinTypeConcat)
+ PROJECT(type=Aggregation)
+ REQUEST_UNION(EXCLUDE_CURRENT_ROW, INSTANCE_NOT_IN_WINDOW, partition_keys=(g), orders=(ts ASC), range=(ts, 3000 PRECEDING, 0 PRECEDING), index_keys=)
+ +-UNION(partition_keys=(), orders=(ASC), range=(ts, 3000 PRECEDING, 0 PRECEDING), index_keys=(g))
+ RENAME(name=t1)
+ DATA_PROVIDER(type=Partition, table=t2, index=idx)
+ DATA_PROVIDER(request=t1)
+ DATA_PROVIDER(table=t1)
+ PROJECT(type=Aggregation)
+ REQUEST_UNION(INSTANCE_NOT_IN_WINDOW, partition_keys=(g), orders=(ts ASC), rows=(ts, 1 PRECEDING, 0 CURRENT), index_keys=)
+ +-UNION(partition_keys=(), orders=(ASC), rows=(ts, 1 PRECEDING, 0 CURRENT), index_keys=(g))
+ RENAME(name=t1)
+ DATA_PROVIDER(type=Partition, table=t2, index=idx)
+ DATA_PROVIDER(request=t1)
+ DATA_PROVIDER(table=t1)
+ sql: |
+ select
+ id, count(val) over w as cnt,
+ max(val) over w as mv,
+ min(val) over w as mi,
+ lag(val, 1) over w as l1
+ from t1 window w as(
+ union t2
+ partition by `g` order by `ts`
+ rows_range between 3s preceding and 0s preceding
+ EXCLUDE CURRENT_ROW INSTANCE_NOT_IN_WINDOW);
+ expect:
+ columns:
+ - id int
+ - cnt int64
+ - mv int
+ - mi int
+ - l1 int
+ order: id
+ data: |
+ 1, 2, 233, 200, 200
+ 2, 2, 233, 200, 200
+ - id: 23
+ desc: |
+ rows_range window union with exclude current_row, instance_not_in_window and exclude_current_time
+ mode: batch-unsupport
+ inputs:
+ - name: t1
+ columns:
+ - id int
+ - ts timestamp
+ - g int
+ - val int
+ indexs:
+ - idx:g:ts
+ data: |
+ 1, 100, 111, 21
+ 2, 100, 111, 5
+ 3, 101, 111, 40
+ 4, 102, 111, 0
+ - name: t2
+ columns:
+ - id int
+ - ts timestamp
+ - g int
+ - val int
+ indexs:
+ - idx:g:ts
+ data: |
+ 1, 99, 111, 233
+ 1, 100, 111, 200
+ 1, 101, 111, 17
+ request_plan: |
+ SIMPLE_PROJECT(sources=(id, cnt, mv, mi, l1))
+ REQUEST_JOIN(type=kJoinTypeConcat)
+ PROJECT(type=Aggregation)
+ REQUEST_UNION(EXCLUDE_CURRENT_TIME, EXCLUDE_CURRENT_ROW, INSTANCE_NOT_IN_WINDOW, partition_keys=(g), orders=(ts ASC), range=(ts, 3000 PRECEDING, 0 PRECEDING), index_keys=)
+ +-UNION(partition_keys=(), orders=(ASC), range=(ts, 3000 PRECEDING, 0 PRECEDING), index_keys=(g))
+ RENAME(name=t1)
+ DATA_PROVIDER(type=Partition, table=t2, index=idx)
+ DATA_PROVIDER(request=t1)
+ DATA_PROVIDER(table=t1)
+ PROJECT(type=Aggregation)
+ REQUEST_UNION(EXCLUDE_CURRENT_TIME, INSTANCE_NOT_IN_WINDOW, partition_keys=(g), orders=(ts ASC), rows=(ts, 1 PRECEDING, 0 CURRENT), index_keys=)
+ +-UNION(partition_keys=(), orders=(ASC), rows=(ts, 1 PRECEDING, 0 CURRENT), index_keys=(g))
+ RENAME(name=t1)
+ DATA_PROVIDER(type=Partition, table=t2, index=idx)
+ DATA_PROVIDER(request=t1)
+ DATA_PROVIDER(table=t1)
+ sql: |
+ select
+ id, count(val) over w as cnt,
+ max(val) over w as mv,
+ min(val) over w as mi,
+ lag(val, 1) over w as l1
+ from t1 window w as(
+ union t2
+ partition by `g` order by `ts`
+ rows_range between 3s preceding and 0s preceding
+ EXCLUDE CURRENT_ROW INSTANCE_NOT_IN_WINDOW EXCLUDE CURRENT_TIME);
+ expect:
+ columns:
+ - id int
+ - cnt int64
+ - mv int
+ - mi int
+ - l1 int
+ order: id
+ data: |
+ 1, 1, 233, 233, 233
+ 2, 1, 233, 233, 233
+ 3, 2, 233, 200, 200
+ 4, 3, 233, 17, 17
+
+ # rows_range union window with exclude current_row, single window
+ - id: 24
+ desc: |
+ rows_range union window with exclude_current_row
+ mode: disk-unsupport
+ inputs:
+ - name: t1
+ columns:
+ - id int
+ - ts timestamp
+ - g int
+ - val int
+ indexs:
+ - idx:g:ts
+ data: |
+ 1, 100, 111, 21
+ 2, 100, 111, 5
+ 3, 101, 111, 0
+ 4, 102, 111, -1
+ - name: t2
+ columns:
+ - id int
+ - ts timestamp
+ - g int
+ - val int
+ indexs:
+ - idx:g:ts
+ data: |
+ 1, 99, 111, 233
+ 1, 100, 111, 200
+ 1, 101, 111, 17
+ batch_plan: |
+ PROJECT(type=WindowAggregation, EXCLUDE_CURRENT_ROW)
+ +-WINDOW(partition_keys=(), orders=(ASC), range=(ts, 3000 PRECEDING, 0 PRECEDING))
+ +-UNION(partition_keys=(), orders=(ASC), range=(ts, 3000 PRECEDING, 0 PRECEDING))
+ RENAME(name=t1)
+ DATA_PROVIDER(type=Partition, table=t2, index=idx)
+ DATA_PROVIDER(type=Partition, table=t1, index=idx)
+ request_plan: |
+ PROJECT(type=Aggregation)
+ REQUEST_UNION(EXCLUDE_CURRENT_ROW, partition_keys=(), orders=(ASC), range=(ts, 3000 PRECEDING, 0 PRECEDING), index_keys=(g))
+ +-UNION(partition_keys=(), orders=(ASC), range=(ts, 3000 PRECEDING, 0 PRECEDING), index_keys=(g))
+ RENAME(name=t1)
+ DATA_PROVIDER(type=Partition, table=t2, index=idx)
+ DATA_PROVIDER(request=t1)
+ DATA_PROVIDER(type=Partition, table=t1, index=idx)
+ sql: |
+ select
+ id, count(val) over w as cnt,
+ max(val) over w as mv,
+ min(val) over w as mi,
+ from t1 window w as(
+ union t2
+ partition by `g` order by `ts`
+ rows_range between 3s preceding and 0s preceding
+ EXCLUDE CURRENT_ROW);
+ expect:
+ columns:
+ - id int
+ - cnt int64
+ - mv int
+ - mi int
+ order: id
+ data: |
+ 1, 2, 233, 200
+ 2, 3, 233, 21
+ 3, 5, 233, 5
+ 4, 6, 233, 0
+ - id: 25
+ desc: |
+ rows_range union window with exclude_current_row and exclude_current_time
+ mode: disk-unsupport
+ inputs:
+ - name: t1
+ columns:
+ - id int
+ - ts timestamp
+ - g int
+ - val int
+ indexs:
+ - idx:g:ts
+ data: |
+ 1, 100, 111, 21
+ 2, 100, 111, 5
+ 3, 101, 111, 0
+ 4, 102, 111, 0
+ - name: t2
+ columns:
+ - id int
+ - ts timestamp
+ - g int
+ - val int
+ indexs:
+ - idx:g:ts
+ data: |
+ 1, 99, 111, 233
+ 1, 100, 111, 200
+ 1, 101, 111, 17
+ batch_plan: |
+ PROJECT(type=WindowAggregation, EXCLUDE_CURRENT_TIME, EXCLUDE_CURRENT_ROW)
+ +-WINDOW(partition_keys=(), orders=(ASC), range=(ts, 3000 PRECEDING, 0 CURRENT))
+ +-UNION(partition_keys=(), orders=(ASC), range=(ts, 3000 PRECEDING, 0 CURRENT))
+ RENAME(name=t1)
+ DATA_PROVIDER(type=Partition, table=t2, index=idx)
+ DATA_PROVIDER(type=Partition, table=t1, index=idx)
+ request_plan: |
+ PROJECT(type=Aggregation)
+ REQUEST_UNION(EXCLUDE_CURRENT_TIME, EXCLUDE_CURRENT_ROW, partition_keys=(), orders=(ASC), range=(ts, 3000 PRECEDING, 0 CURRENT), index_keys=(g))
+ +-UNION(partition_keys=(), orders=(ASC), range=(ts, 3000 PRECEDING, 0 CURRENT), index_keys=(g))
+ RENAME(name=t1)
+ DATA_PROVIDER(type=Partition, table=t2, index=idx)
+ DATA_PROVIDER(request=t1)
+ DATA_PROVIDER(type=Partition, table=t1, index=idx)
+ sql: |
+ select
+ id, count(val) over w as cnt,
+ max(val) over w as mv,
+ min(val) over w as mi,
+ from t1 window w as(
+ union t2
+ partition by `g` order by `ts`
+ rows_range between 3s preceding AND CURRENT ROW
+ EXCLUDE CURRENT_ROW EXCLUDE CURRENT_TIME);
+ expect:
+ columns:
+ - id int
+ - cnt int64
+ - mv int
+ - mi int
+ order: id
+ data: |
+ 1, 1, 233, 233
+ 2, 1, 233, 233
+ 3, 4, 233, 5
+ 4, 6, 233, 0
+ - id: 26
+ desc: |
+ rows_range union window with exclude_current_row and instance_not_in_window
+ inputs:
+ - name: t1
+ columns:
+ - id int
+ - ts timestamp
+ - g int
+ - val int
+ indexs:
+ - idx:g:ts
+ data: |
+ 1, 100, 111, 21
+ 2, 100, 111, 5
+ 3, 101, 111, 0
+ 4, 102, 111, 0
+ - name: t2
+ columns:
+ - id int
+ - ts timestamp
+ - g int
+ - val int
+ indexs:
+ - idx:g:ts
+ data: |
+ 1, 99, 111, 233
+ 1, 100, 111, 200
+ 1, 101, 111, 17
+    # with instance_not_in_window, the main table scan is not index-optimized (plain table provider in the plans below)
+ batch_plan: |
+ PROJECT(type=WindowAggregation, EXCLUDE_CURRENT_ROW, INSTANCE_NOT_IN_WINDOW)
+ +-WINDOW(partition_keys=(g), orders=(ts ASC), range=(ts, 3000 PRECEDING, 0 CURRENT))
+ +-UNION(partition_keys=(), orders=(ASC), range=(ts, 3000 PRECEDING, 0 CURRENT))
+ RENAME(name=t1)
+ DATA_PROVIDER(type=Partition, table=t2, index=idx)
+ DATA_PROVIDER(table=t1)
+ request_plan: |
+ PROJECT(type=Aggregation)
+ REQUEST_UNION(EXCLUDE_CURRENT_ROW, INSTANCE_NOT_IN_WINDOW, partition_keys=(g), orders=(ts ASC), range=(ts, 3000 PRECEDING, 0 CURRENT), index_keys=)
+ +-UNION(partition_keys=(), orders=(ASC), range=(ts, 3000 PRECEDING, 0 CURRENT), index_keys=(g))
+ RENAME(name=t1)
+ DATA_PROVIDER(type=Partition, table=t2, index=idx)
+ DATA_PROVIDER(request=t1)
+ DATA_PROVIDER(table=t1)
+ sql: |
+ select
+ id, count(val) over w as cnt,
+ max(val) over w as mv,
+ min(val) over w as mi,
+ from t1 window w as(
+ union t2
+ partition by `g` order by `ts`
+ rows_range between 3s preceding AND CURRENT ROW
+ EXCLUDE CURRENT_ROW INSTANCE_NOT_IN_WINDOW);
+ expect:
+ columns:
+ - id int
+ - cnt int64
+ - mv int
+ - mi int
+ order: id
+ data: |
+ 1, 2, 233, 200
+ 2, 2, 233, 200
+ 3, 3, 233, 17
+ 4, 3, 233, 17
+ - id: 27
+ desc: |
+ rows_range union window with exclude_current_row, exclude current_time and instance_not_in_window
+ inputs:
+ - name: t1
+ columns:
+ - id int
+ - ts timestamp
+ - g int
+ - val int
+ indexs:
+ - idx:g:ts
+ data: |
+ 1, 100, 111, 21
+ 2, 100, 111, 5
+ 3, 101, 111, 0
+ 4, 102, 111, 0
+ - name: t2
+ columns:
+ - id int
+ - ts timestamp
+ - g int
+ - val int
+ indexs:
+ - idx:g:ts
+ data: |
+ 1, 99, 111, 233
+ 1, 100, 111, 200
+ 1, 101, 111, 17
+ batch_plan: |
+ PROJECT(type=WindowAggregation, EXCLUDE_CURRENT_TIME, EXCLUDE_CURRENT_ROW, INSTANCE_NOT_IN_WINDOW)
+ +-WINDOW(partition_keys=(g), orders=(ts ASC), range=(ts, 3000 PRECEDING, 0 CURRENT))
+ +-UNION(partition_keys=(), orders=(ASC), range=(ts, 3000 PRECEDING, 0 CURRENT))
+ RENAME(name=t1)
+ DATA_PROVIDER(type=Partition, table=t2, index=idx)
+ DATA_PROVIDER(table=t1)
+ request_plan: |
+ PROJECT(type=Aggregation)
+ REQUEST_UNION(EXCLUDE_CURRENT_TIME, EXCLUDE_CURRENT_ROW, INSTANCE_NOT_IN_WINDOW, partition_keys=(g), orders=(ts ASC), range=(ts, 3000 PRECEDING, 0 CURRENT), index_keys=)
+ +-UNION(partition_keys=(), orders=(ASC), range=(ts, 3000 PRECEDING, 0 CURRENT), index_keys=(g))
+ RENAME(name=t1)
+ DATA_PROVIDER(type=Partition, table=t2, index=idx)
+ DATA_PROVIDER(request=t1)
+ DATA_PROVIDER(table=t1)
+ sql: |
+ select
+ id, count(val) over w as cnt,
+ max(val) over w as mv,
+ min(val) over w as mi,
+ from t1 window w as(
+ union t2
+ partition by `g` order by `ts`
+ rows_range between 3s preceding AND CURRENT ROW
+ EXCLUDE CURRENT_ROW INSTANCE_NOT_IN_WINDOW EXCLUDE CURRENT_TIME);
+ expect:
+ columns:
+ - id int
+ - cnt int64
+ - mv int
+ - mi int
+ order: id
+ data: |
+ 1, 1, 233, 233
+ 2, 1, 233, 233
+ 3, 2, 233, 200
+ 4, 3, 233, 17
+ - id: 28
+ desc: |
+ rows_range union window with exclude_current_row, exclude current_time, instance_not_in_window and maxsize
+ inputs:
+ - name: t1
+ columns:
+ - id int
+ - ts timestamp
+ - g int
+ - val int
+ indexs:
+ - idx:g:ts
+ data: |
+ 1, 100, 111, 21
+ 2, 100, 111, 5
+ 3, 101, 111, 0
+ 4, 102, 111, 0
+ - name: t2
+ columns:
+ - id int
+ - ts timestamp
+ - g int
+ - val int
+ indexs:
+ - idx:g:ts
+ data: |
+ 1, 99, 111, 233
+ 1, 100, 111, 200
+ 1, 101, 111, 17
+ batch_plan: |
+ PROJECT(type=WindowAggregation, EXCLUDE_CURRENT_TIME, EXCLUDE_CURRENT_ROW, INSTANCE_NOT_IN_WINDOW)
+ +-WINDOW(partition_keys=(g), orders=(ts ASC), range=(ts, 3000 PRECEDING, 0 CURRENT, maxsize=2))
+ +-UNION(partition_keys=(), orders=(ASC), range=(ts, 3000 PRECEDING, 0 CURRENT, maxsize=2))
+ RENAME(name=t1)
+ DATA_PROVIDER(type=Partition, table=t2, index=idx)
+ DATA_PROVIDER(table=t1)
+ request_plan: |
+ PROJECT(type=Aggregation)
+ REQUEST_UNION(EXCLUDE_CURRENT_TIME, EXCLUDE_CURRENT_ROW, INSTANCE_NOT_IN_WINDOW, partition_keys=(g), orders=(ts ASC), range=(ts, 3000 PRECEDING, 0 CURRENT, maxsize=2), index_keys=)
+ +-UNION(partition_keys=(), orders=(ASC), range=(ts, 3000 PRECEDING, 0 CURRENT, maxsize=2), index_keys=(g))
+ RENAME(name=t1)
+ DATA_PROVIDER(type=Partition, table=t2, index=idx)
+ DATA_PROVIDER(request=t1)
+ DATA_PROVIDER(table=t1)
+ sql: |
+ select
+ id, count(val) over w as cnt,
+ max(val) over w as mv,
+ min(val) over w as mi,
+ from t1 window w as(
+ union t2
+ partition by `g` order by `ts`
+ rows_range between 3s preceding AND CURRENT ROW
+ MAXSIZE 2
+ EXCLUDE CURRENT_ROW INSTANCE_NOT_IN_WINDOW EXCLUDE CURRENT_TIME);
+ expect:
+ columns:
+ - id int
+ - cnt int64
+ - mv int
+ - mi int
+ order: id
+ data: |
+ 1, 1, 233, 233
+ 2, 1, 233, 233
+ 3, 2, 233, 200
+ 4, 2, 200, 17
+ - id: 29
+ desc: |
+ rows_range union window with exclude_current_row, instance_not_in_window and maxsize
+ inputs:
+ - name: t1
+ columns:
+ - id int
+ - ts timestamp
+ - g int
+ - val int
+ indexs:
+ - idx:g:ts
+ data: |
+ 1, 100, 111, 21
+ 2, 100, 111, 5
+ 3, 101, 111, 0
+ 4, 102, 111, 0
+ - name: t2
+ columns:
+ - id int
+ - ts timestamp
+ - g int
+ - val int
+ indexs:
+ - idx:g:ts
+ data: |
+ 1, 99, 111, 233
+ 1, 100, 111, 200
+ 1, 101, 111, 17
+ batch_plan: |
+ PROJECT(type=WindowAggregation, EXCLUDE_CURRENT_ROW, INSTANCE_NOT_IN_WINDOW)
+ +-WINDOW(partition_keys=(g), orders=(ts ASC), range=(ts, 3000 PRECEDING, 0 CURRENT, maxsize=2))
+ +-UNION(partition_keys=(), orders=(ASC), range=(ts, 3000 PRECEDING, 0 CURRENT, maxsize=2))
+ RENAME(name=t1)
+ DATA_PROVIDER(type=Partition, table=t2, index=idx)
+ DATA_PROVIDER(table=t1)
+ request_plan: |
+ PROJECT(type=Aggregation)
+ REQUEST_UNION(EXCLUDE_CURRENT_ROW, INSTANCE_NOT_IN_WINDOW, partition_keys=(g), orders=(ts ASC), range=(ts, 3000 PRECEDING, 0 CURRENT, maxsize=2), index_keys=)
+ +-UNION(partition_keys=(), orders=(ASC), range=(ts, 3000 PRECEDING, 0 CURRENT, maxsize=2), index_keys=(g))
+ RENAME(name=t1)
+ DATA_PROVIDER(type=Partition, table=t2, index=idx)
+ DATA_PROVIDER(request=t1)
+ DATA_PROVIDER(table=t1)
+ sql: |
+ select
+ id, count(val) over w as cnt,
+ max(val) over w as mv,
+ min(val) over w as mi,
+ from t1 window w as(
+ union t2
+ partition by `g` order by `ts`
+ rows_range between 3s preceding AND CURRENT ROW
+ MAXSIZE 2
+ EXCLUDE CURRENT_ROW INSTANCE_NOT_IN_WINDOW);
+ expect:
+ columns:
+ - id int
+ - cnt int64
+ - mv int
+ - mi int
+ order: id
+ data: |
+ 1, 2, 233, 200
+ 2, 2, 233, 200
+ 3, 2, 200, 17
+ 4, 2, 200, 17
+ - id: 30
+ desc: |
+ rows_range union window with exclude_current_row, exclude_current_time and maxsize
+ mode: disk-unsupport
+ inputs:
+ - name: t1
+ columns:
+ - id int
+ - ts timestamp
+ - g int
+ - val int
+ indexs:
+ - idx:g:ts
+ data: |
+ 1, 100, 111, 21
+ 2, 100, 111, 5
+ 3, 101, 111, 0
+ 4, 102, 111, 0
+ - name: t2
+ columns:
+ - id int
+ - ts timestamp
+ - g int
+ - val int
+ indexs:
+ - idx:g:ts
+ data: |
+ 1, 99, 111, 233
+ 1, 100, 111, 200
+ 1, 101, 111, 17
+ batch_plan: |
+ PROJECT(type=WindowAggregation, EXCLUDE_CURRENT_TIME, EXCLUDE_CURRENT_ROW)
+ +-WINDOW(partition_keys=(), orders=(ASC), range=(ts, 3000 PRECEDING, 0 CURRENT, maxsize=2))
+ +-UNION(partition_keys=(), orders=(ASC), range=(ts, 3000 PRECEDING, 0 CURRENT, maxsize=2))
+ RENAME(name=t1)
+ DATA_PROVIDER(type=Partition, table=t2, index=idx)
+ DATA_PROVIDER(type=Partition, table=t1, index=idx)
+ request_plan: |
+ PROJECT(type=Aggregation)
+ REQUEST_UNION(EXCLUDE_CURRENT_TIME, EXCLUDE_CURRENT_ROW, partition_keys=(), orders=(ASC), range=(ts, 3000 PRECEDING, 0 CURRENT, maxsize=2), index_keys=(g))
+ +-UNION(partition_keys=(), orders=(ASC), range=(ts, 3000 PRECEDING, 0 CURRENT, maxsize=2), index_keys=(g))
+ RENAME(name=t1)
+ DATA_PROVIDER(type=Partition, table=t2, index=idx)
+ DATA_PROVIDER(request=t1)
+ DATA_PROVIDER(type=Partition, table=t1, index=idx)
+ sql: |
+ select
+ id, count(val) over w as cnt,
+ max(val) over w as mv,
+ min(val) over w as mi,
+ from t1 window w as(
+ union t2
+ partition by `g` order by `ts`
+ rows_range between 3s preceding AND CURRENT ROW
+ MAXSIZE 2
+ EXCLUDE CURRENT_ROW EXCLUDE CURRENT_TIME);
+ expect:
+ columns:
+ - id int
+ - cnt int64
+ - mv int
+ - mi int
+ order: id
+ data: |
+ 1, 1, 233, 233
+ 2, 1, 233, 233
+ 3, 2, 21, 5
+ 4, 2, 17, 0
\ No newline at end of file
diff --git a/cases/integration_test/window/test_maxsize.yaml b/cases/integration_test/window/test_maxsize.yaml
new file mode 100644
index 00000000000..3a9744cf019
--- /dev/null
+++ b/cases/integration_test/window/test_maxsize.yaml
@@ -0,0 +1,747 @@
+# Copyright 2021 4Paradigm
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+db: test_zw
+debugs: []
+version: 0.5.0
+cases:
+ -
+ id: 0
+ desc: maxsize smaller than the window size
+ inputs:
+ -
+ columns : ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"]
+ indexs: ["index1:c1:c7"]
+ rows:
+ - [1,"aa",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"aa",21,31,1.2,2.2,1590738990001,"2020-05-02"]
+ - [3,"aa",22,32,1.3,2.3,1590738990002,"2020-05-03"]
+ - [4,"aa",23,33,1.4,2.4,1590738990003,"2020-05-04"]
+ - [5,"aa",24,34,1.5,2.5,1590738990004,"2020-05-05"]
+ sql: |
+ SELECT id, c1, sum(c4) OVER w1 as w1_c4_sum FROM {0} WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS_RANGE BETWEEN 6 PRECEDING AND CURRENT ROW MAXSIZE 3);
+ expect:
+ order: id
+ columns: ["id int","c1 string","w1_c4_sum bigint"]
+ rows:
+ - [1,"aa",30]
+ - [2,"aa",61]
+ - [3,"aa",93]
+ - [4,"aa",96]
+ - [5,"aa",99]
+ -
+ id: 1
+ desc: maxsize larger than the window size
+ inputs:
+ -
+ columns : ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"]
+ indexs: ["index1:c1:c7"]
+ rows:
+ - [1,"aa",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"aa",21,31,1.2,2.2,1590738990001,"2020-05-02"]
+ - [3,"aa",22,32,1.3,2.3,1590738990002,"2020-05-03"]
+ - [4,"aa",23,33,1.4,2.4,1590738990003,"2020-05-04"]
+ - [5,"aa",24,34,1.5,2.5,1590738990004,"2020-05-05"]
+ sql: |
+ SELECT id, c1, sum(c4) OVER w1 as w1_c4_sum FROM {0} WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS_RANGE BETWEEN 2 PRECEDING AND CURRENT ROW MAXSIZE 5);
+ expect:
+ order: id
+ columns: ["id int","c1 string","w1_c4_sum bigint"]
+ rows:
+ - [1,"aa",30]
+ - [2,"aa",61]
+ - [3,"aa",93]
+ - [4,"aa",96]
+ - [5,"aa",99]
+ -
+ id: 2
+ desc: maxsize equal to the window size
+ inputs:
+ -
+ columns : ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"]
+ indexs: ["index1:c1:c7"]
+ rows:
+ - [1,"aa",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"aa",21,31,1.2,2.2,1590738990001,"2020-05-02"]
+ - [3,"aa",22,32,1.3,2.3,1590738990002,"2020-05-03"]
+ - [4,"aa",23,33,1.4,2.4,1590738990003,"2020-05-04"]
+ - [5,"aa",24,34,1.5,2.5,1590738990004,"2020-05-05"]
+ sql: |
+ SELECT id, c1, sum(c4) OVER w1 as w1_c4_sum FROM {0} WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS_RANGE BETWEEN 2 PRECEDING AND CURRENT ROW MAXSIZE 3);
+ expect:
+ order: id
+ columns: ["id int","c1 string","w1_c4_sum bigint"]
+ rows:
+ - [1,"aa",30]
+ - [2,"aa",61]
+ - [3,"aa",93]
+ - [4,"aa",96]
+ - [5,"aa",99]
+ -
+ id: 3
+ desc: maxsize=0
+ inputs:
+ -
+ columns : ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"]
+ indexs: ["index1:c1:c7"]
+ rows:
+ - [1,"aa",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ sql: |
+ SELECT id, c1, sum(c4) OVER w1 as w1_c4_sum FROM {0} WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS_RANGE BETWEEN 2 PRECEDING AND CURRENT ROW MAXSIZE 0);
+ expect:
+ success: false
+ -
+ id: 4
+ desc: maxsize=1
+ inputs:
+ -
+ columns : ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"]
+ indexs: ["index1:c1:c7"]
+ rows:
+ - [1,"aa",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"aa",21,31,1.2,2.2,1590738990001,"2020-05-02"]
+ - [3,"aa",22,32,1.3,2.3,1590738990002,"2020-05-03"]
+ - [4,"aa",23,33,1.4,2.4,1590738990003,"2020-05-04"]
+ - [5,"aa",24,34,1.5,2.5,1590738990004,"2020-05-05"]
+ sql: |
+ SELECT id, c1, sum(c4) OVER w1 as w1_c4_sum FROM {0} WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS_RANGE BETWEEN 2 PRECEDING AND CURRENT ROW MAXSIZE 1);
+ expect:
+ order: id
+ columns: ["id int","c1 string","w1_c4_sum bigint"]
+ rows:
+ - [1,"aa",30]
+ - [2,"aa",31]
+ - [3,"aa",32]
+ - [4,"aa",33]
+ - [5,"aa",34]
+ -
+ id: 5
+ desc: maxsize=-1
+ inputs:
+ -
+ columns : ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"]
+ indexs: ["index1:c1:c7"]
+ rows:
+ - [1,"aa",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ sql: |
+ SELECT id, c1, sum(c4) OVER w1 as w1_c4_sum FROM {0} WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS_RANGE BETWEEN 2 PRECEDING AND CURRENT ROW MAXSIZE -1);
+ expect:
+ success: false
+ -
+ id: 6
+ desc: pure-history window with maxsize
+ version: 0.6.0
+ inputs:
+ -
+ columns : ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"]
+ indexs: ["index1:c1:c7"]
+ rows:
+ - [1,"aa",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"aa",21,31,1.2,2.2,1590738990001,"2020-05-02"]
+ - [3,"aa",22,32,1.3,2.3,1590738990002,"2020-05-03"]
+ - [4,"aa",23,33,1.4,2.4,1590738990003,"2020-05-04"]
+ - [5,"aa",24,34,1.5,2.5,1590738990004,"2020-05-05"]
+ sql: |
+ SELECT id, c1, sum(c4) OVER w1 as w1_c4_sum FROM {0} WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS_RANGE BETWEEN 5 PRECEDING AND 1 PRECEDING MAXSIZE 3);
+ expect:
+ order: id
+ columns: ["id int","c1 string","w1_c4_sum bigint"]
+ rows:
+ - [1,"aa", NULL]
+ - [2,"aa",30]
+ - [3,"aa",61]
+ - [4,"aa",93]
+ - [5,"aa",96]
+ -
+ id: 7
+ desc: no data enters the window with maxsize
+ version: 0.6.0
+ inputs:
+ -
+ columns : ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"]
+ indexs: ["index1:c1:c7"]
+ rows:
+ - [1,"aa",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"aa",21,31,1.2,2.2,1590738990001,"2020-05-02"]
+ - [3,"aa",22,32,1.3,2.3,1590738990002,"2020-05-03"]
+ sql: |
+ SELECT id, c1, sum(c4) OVER w1 as w1_c4_sum FROM {0} WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS_RANGE BETWEEN 5 PRECEDING AND 3 PRECEDING MAXSIZE 3);
+ expect:
+ order: id
+ columns: ["id int","c1 string","w1_c4_sum bigint"]
+ rows:
+ - [1,"aa", NULL]
+ - [2,"aa", NULL]
+ - [3,"aa", NULL]
+ -
+ id: 8
+ desc: two pks, both with more rows than maxsize
+ inputs:
+ -
+ columns : ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"]
+ indexs: ["index1:c1:c7"]
+ rows:
+ - [1,"aa",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"aa",21,31,1.2,2.2,1590738990001,"2020-05-02"]
+ - [3,"aa",22,32,1.3,2.3,1590738990002,"2020-05-03"]
+ - [4,"bb",23,33,1.4,2.4,1590738990003,"2020-05-04"]
+ - [5,"bb",24,34,1.5,2.5,1590738990004,"2020-05-05"]
+ - [6,"bb",24,35,1.5,2.5,1590738990004,"2020-05-05"]
+ sql: |
+ SELECT id, c1, sum(c4) OVER w1 as w1_c4_sum FROM {0} WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS_RANGE BETWEEN 3 PRECEDING AND 0 PRECEDING MAXSIZE 2);
+ expect:
+ order: id
+ columns: ["id int","c1 string","w1_c4_sum bigint"]
+ rows:
+ - [1,"aa",30]
+ - [2,"aa",61]
+ - [3,"aa",63]
+ - [4,"bb",33]
+ - [5,"bb",67]
+ - [6,"bb",69]
+ -
+ id: 9
+ desc: two pks, one with more rows than maxsize and one with fewer
+ inputs:
+ -
+ columns : ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"]
+ indexs: ["index1:c1:c7"]
+ rows:
+ - [1,"aa",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"aa",21,31,1.2,2.2,1590738990001,"2020-05-02"]
+ - [3,"aa",22,32,1.3,2.3,1590738990002,"2020-05-03"]
+ - [4,"bb",23,33,1.4,2.4,1590738990003,"2020-05-04"]
+ - [5,"bb",24,34,1.5,2.5,1590738990004,"2020-05-05"]
+ sql: |
+ SELECT id, c1, sum(c4) OVER w1 as w1_c4_sum FROM {0} WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS_RANGE BETWEEN 3 PRECEDING AND 0 PRECEDING MAXSIZE 2);
+ expect:
+ order: id
+ columns: ["id int","c1 string","w1_c4_sum bigint"]
+ rows:
+ - [1,"aa",30]
+ - [2,"aa",61]
+ - [3,"aa",63]
+ - [4,"bb",33]
+ - [5,"bb",67]
+ -
+ id: 10
+ desc: two windows with the same maxsize
+ inputs:
+ -
+ columns : ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"]
+ indexs: ["index1:c1:c7"]
+ rows:
+ - [1,"aa",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"aa",21,31,1.2,2.2,1590738990001,"2020-05-02"]
+ - [3,"aa",22,32,1.3,2.3,1590738990002,"2020-05-03"]
+ - [4,"aa",23,33,1.4,2.4,1590738990003,"2020-05-04"]
+ - [5,"aa",24,34,1.5,2.5,1590738990004,"2020-05-05"]
+ sql: |
+ SELECT id, c1,
+ sum(c4) OVER w1 as w1_c4_sum,
+ sum(c4) OVER w2 as w2_c4_sum
+ FROM {0}
+ WINDOW
+ w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS_RANGE BETWEEN 6 PRECEDING AND CURRENT ROW MAXSIZE 3),
+ w2 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS_RANGE BETWEEN 6 PRECEDING AND CURRENT ROW MAXSIZE 3)
+ ;
+ expect:
+ order: id
+ columns: ["id int","c1 string","w1_c4_sum bigint","w2_c4_sum bigint"]
+ rows:
+ - [1,"aa",30,30]
+ - [2,"aa",61,61]
+ - [3,"aa",93,93]
+ - [4,"aa",96,96]
+ - [5,"aa",99,99]
+ -
+ id: 11
+ desc: two windows with different maxsize
+ inputs:
+ -
+ columns : ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"]
+ indexs: ["index1:c1:c7"]
+ rows:
+ - [1,"aa",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"aa",21,31,1.2,2.2,1590738990001,"2020-05-02"]
+ - [3,"aa",22,32,1.3,2.3,1590738990002,"2020-05-03"]
+ - [4,"aa",23,33,1.4,2.4,1590738990003,"2020-05-04"]
+ - [5,"aa",24,34,1.5,2.5,1590738990004,"2020-05-05"]
+ sql: |
+ SELECT id, c1,
+ sum(c4) OVER w1 as w1_c4_sum,
+ sum(c4) OVER w2 as w2_c4_sum
+ FROM {0}
+ WINDOW
+ w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS_RANGE BETWEEN 6 PRECEDING AND CURRENT ROW MAXSIZE 3),
+ w2 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS_RANGE BETWEEN 6 PRECEDING AND CURRENT ROW MAXSIZE 2)
+ ;
+ expect:
+ order: id
+ columns: ["id int","c1 string","w1_c4_sum bigint","w2_c4_sum bigint"]
+ rows:
+ - [1,"aa",30,30]
+ - [2,"aa",61,61]
+ - [3,"aa",93,63]
+ - [4,"aa",96,65]
+ - [5,"aa",99,67]
+ -
+ id: 12
+ desc: two windows with different keys and the same maxsize
+ inputs:
+ -
+ columns : ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"]
+ indexs: ["index1:c1:c7","index2:c3:c7"]
+ rows:
+ - [1,"aa",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"aa",20,31,1.2,2.2,1590738990001,"2020-05-02"]
+ - [3,"aa",20,32,1.3,2.3,1590738990002,"2020-05-03"]
+ - [4,"aa",21,33,1.4,2.4,1590738990003,"2020-05-04"]
+ - [5,"aa",21,34,1.5,2.5,1590738990004,"2020-05-05"]
+ sql: |
+ SELECT id, c1,
+ sum(c4) OVER w1 as w1_c4_sum,
+ sum(c4) OVER w2 as w2_c4_sum
+ FROM {0}
+ WINDOW
+ w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS_RANGE BETWEEN 6 PRECEDING AND CURRENT ROW MAXSIZE 3),
+ w2 AS (PARTITION BY {0}.c3 ORDER BY {0}.c7 ROWS_RANGE BETWEEN 6 PRECEDING AND CURRENT ROW MAXSIZE 3)
+ ;
+ expect:
+ order: id
+ columns: ["id int","c1 string","w1_c4_sum bigint","w2_c4_sum bigint"]
+ rows:
+ - [1,"aa",30,30]
+ - [2,"aa",61,61]
+ - [3,"aa",93,93]
+ - [4,"aa",96,33]
+ - [5,"aa",99,67]
+ -
+ id: 13
+ desc: two windows with different ts columns and the same maxsize
+ inputs:
+ -
+ columns : ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"]
+ indexs: ["index1:c1:c7","index2:c1:c4"]
+ rows:
+ - [1,"aa",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"aa",21,31,1.2,2.2,1590738990001,"2020-05-02"]
+ - [3,"aa",22,32,1.3,2.3,1590738990002,"2020-05-03"]
+ - [4,"aa",23,33,1.4,2.4,1590738990003,"2020-05-04"]
+ - [5,"aa",24,34,1.5,2.5,1590738990004,"2020-05-05"]
+ sql: |
+ SELECT id, c1,
+ sum(c4) OVER w1 as w1_c4_sum,
+ sum(c4) OVER w2 as w2_c4_sum
+ FROM {0}
+ WINDOW
+ w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS_RANGE BETWEEN 6 PRECEDING AND CURRENT ROW MAXSIZE 3),
+ w2 AS (PARTITION BY {0}.c1 ORDER BY {0}.c4 ROWS_RANGE BETWEEN 6 PRECEDING AND CURRENT ROW MAXSIZE 3)
+ ;
+ expect:
+ order: id
+ columns: ["id int","c1 string","w1_c4_sum bigint","w2_c4_sum bigint"]
+ rows:
+ - [1,"aa",30,30]
+ - [2,"aa",61,61]
+ - [3,"aa",93,93]
+ - [4,"aa",96,96]
+ - [5,"aa",99,99]
+ -
+ id: 14
+ desc: two windows, one with maxsize and one without
+ inputs:
+ -
+ columns : ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"]
+ indexs: ["index1:c1:c7"]
+ rows:
+ - [1,"aa",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"aa",21,31,1.2,2.2,1590738990001,"2020-05-02"]
+ - [3,"aa",22,32,1.3,2.3,1590738990002,"2020-05-03"]
+ - [4,"aa",23,33,1.4,2.4,1590738990003,"2020-05-04"]
+ - [5,"aa",24,34,1.5,2.5,1590738990004,"2020-05-05"]
+ sql: |
+ SELECT id, c1,
+ sum(c4) OVER w1 as w1_c4_sum,
+ sum(c4) OVER w2 as w2_c4_sum
+ FROM {0}
+ WINDOW
+ w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS_RANGE BETWEEN 6 PRECEDING AND CURRENT ROW MAXSIZE 3),
+ w2 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS_RANGE BETWEEN 6 PRECEDING AND CURRENT ROW)
+ ;
+ expect:
+ order: id
+ columns: ["id int","c1 string","w1_c4_sum bigint","w2_c4_sum bigint"]
+ rows:
+ - [1,"aa",30,30]
+ - [2,"aa",61,61]
+ - [3,"aa",93,93]
+ - [4,"aa",96,126]
+ - [5,"aa",99,160]
+ -
+ id: 15
+ desc: two windows with different keys and different maxsize
+ inputs:
+ -
+ columns : ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"]
+ indexs: ["index1:c1:c7","index2:c3:c7"]
+ rows:
+ - [1,"aa",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"aa",20,31,1.2,2.2,1590738990001,"2020-05-02"]
+ - [3,"aa",20,32,1.3,2.3,1590738990002,"2020-05-03"]
+ - [4,"aa",21,33,1.4,2.4,1590738990003,"2020-05-04"]
+ - [5,"aa",21,34,1.5,2.5,1590738990004,"2020-05-05"]
+ sql: |
+ SELECT id, c1,
+ sum(c4) OVER w1 as w1_c4_sum,
+ sum(c4) OVER w2 as w2_c4_sum
+ FROM {0}
+ WINDOW
+ w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS_RANGE BETWEEN 6 PRECEDING AND CURRENT ROW MAXSIZE 3),
+ w2 AS (PARTITION BY {0}.c3 ORDER BY {0}.c7 ROWS_RANGE BETWEEN 6 PRECEDING AND CURRENT ROW MAXSIZE 2)
+ ;
+ expect:
+ order: id
+ columns: ["id int","c1 string","w1_c4_sum bigint","w2_c4_sum bigint"]
+ rows:
+ - [1,"aa",30,30]
+ - [2,"aa",61,61]
+ - [3,"aa",93,63]
+ - [4,"aa",96,33]
+ - [5,"aa",99,67]
+ -
+ id: 16
+ desc: two windows with different ts columns and different maxsize
+ inputs:
+ -
+ columns : ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"]
+ indexs: ["index1:c1:c7","index2:c1:c4"]
+ rows:
+ - [1,"aa",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"aa",21,31,1.2,2.2,1590738990001,"2020-05-02"]
+ - [3,"aa",22,32,1.3,2.3,1590738990002,"2020-05-03"]
+ - [4,"aa",23,33,1.4,2.4,1590738990003,"2020-05-04"]
+ - [5,"aa",24,34,1.5,2.5,1590738990004,"2020-05-05"]
+ sql: |
+ SELECT id, c1,
+ sum(c4) OVER w1 as w1_c4_sum,
+ sum(c4) OVER w2 as w2_c4_sum
+ FROM {0}
+ WINDOW
+ w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS_RANGE BETWEEN 6 PRECEDING AND CURRENT ROW MAXSIZE 3),
+ w2 AS (PARTITION BY {0}.c1 ORDER BY {0}.c4 ROWS_RANGE BETWEEN 6 PRECEDING AND CURRENT ROW MAXSIZE 4)
+ ;
+ expect:
+ order: id
+ columns: ["id int","c1 string","w1_c4_sum bigint","w2_c4_sum bigint"]
+ rows:
+ - [1,"aa",30,30]
+ - [2,"aa",61,61]
+ - [3,"aa",93,93]
+ - [4,"aa",96,126]
+ - [5,"aa",99,130]
+ -
+ id: 17
+ desc: two windows with the same key, one maxsize larger than the window and one smaller
+ inputs:
+ -
+ columns : ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"]
+ indexs: ["index1:c1:c7"]
+ rows:
+ - [1,"aa",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"aa",21,31,1.2,2.2,1590738990001,"2020-05-02"]
+ - [3,"aa",22,32,1.3,2.3,1590738990002,"2020-05-03"]
+ - [4,"aa",23,33,1.4,2.4,1590738990003,"2020-05-04"]
+ - [5,"aa",24,34,1.5,2.5,1590738990004,"2020-05-05"]
+ sql: |
+ SELECT id, c1,
+ sum(c4) OVER w1 as w1_c4_sum,
+ sum(c4) OVER w2 as w2_c4_sum
+ FROM {0}
+ WINDOW
+ w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS_RANGE BETWEEN 3 PRECEDING AND CURRENT ROW MAXSIZE 5),
+ w2 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS_RANGE BETWEEN 3 PRECEDING AND CURRENT ROW MAXSIZE 3)
+ ;
+ expect:
+ order: id
+ columns: ["id int","c1 string","w1_c4_sum bigint","w2_c4_sum bigint"]
+ rows:
+ - [1,"aa",30,30]
+ - [2,"aa",61,61]
+ - [3,"aa",93,93]
+ - [4,"aa",126,96]
+ - [5,"aa",130,99]
+ -
+ id: 18
+ desc: two windows with different keys, one maxsize larger than the window and one smaller
+ inputs:
+ -
+ columns : ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"]
+ indexs: ["index1:c1:c7","index2:c3:c7"]
+ rows:
+ - [1,"aa",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"aa",20,31,1.2,2.2,1590738990001,"2020-05-02"]
+ - [3,"aa",20,32,1.3,2.3,1590738990002,"2020-05-03"]
+ - [4,"aa",20,33,1.4,2.4,1590738990003,"2020-05-04"]
+ - [5,"aa",21,34,1.5,2.5,1590738990004,"2020-05-05"]
+ sql: |
+ SELECT id, c1,
+ sum(c4) OVER w1 as w1_c4_sum,
+ sum(c4) OVER w2 as w2_c4_sum
+ FROM {0}
+ WINDOW
+ w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS_RANGE BETWEEN 3 PRECEDING AND CURRENT ROW MAXSIZE 5),
+ w2 AS (PARTITION BY {0}.c3 ORDER BY {0}.c7 ROWS_RANGE BETWEEN 3 PRECEDING AND CURRENT ROW MAXSIZE 3)
+ ;
+ expect:
+ order: id
+ columns: ["id int","c1 string","w1_c4_sum bigint","w2_c4_sum bigint"]
+ rows:
+ - [1,"aa",30,30]
+ - [2,"aa",61,61]
+ - [3,"aa",93,93]
+ - [4,"aa",126,96]
+ - [5,"aa",130,34]
+ -
+ id: 19
+ desc: union combined with maxsize
+ mode: cluster-unsupport
+ tags: ["cluster-执行失败"]
+ inputs:
+ -
+ columns : ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"]
+ indexs: ["index1:c1:c7"]
+ rows:
+ - [1,"aa",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"aa",21,31,1.2,2.2,1590738990001,"2020-05-01"]
+ - [5,"aa",24,34,1.5,2.5,1590738990004,"2020-05-02"]
+ -
+ columns : ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"]
+ indexs: ["index1:c1:c7"]
+ rows:
+ - [3,"aa",22,32,1.3,2.3,1590738990002,"2020-05-01"]
+ - [4,"aa",23,33,1.4,2.4,1590738990003,"2020-05-01"]
+ sql: |
+ SELECT id, c1, sum(c4) OVER w1 as w1_c4_sum FROM {0} WINDOW
+ w1 AS (UNION {1} PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS_RANGE BETWEEN 3 PRECEDING AND CURRENT ROW MAXSIZE 2);
+ expect:
+ order: id
+ columns: ["id int","c1 string","w1_c4_sum bigint"]
+ rows:
+ - [1,"aa",30]
+ - [2,"aa",61]
+ - [5,"aa",67]
+ -
+ id: 20
+ desc: union combined with maxsize, two windows
+ mode: cluster-unsupport
+ tags: ["cluster-执行失败"]
+ inputs:
+ -
+ columns : ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"]
+ indexs: ["index1:c1:c7"]
+ rows:
+ - [1,"aa",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"aa",21,31,1.2,2.2,1590738990001,"2020-05-01"]
+ - [5,"aa",24,34,1.5,2.5,1590738990004,"2020-05-02"]
+ -
+ columns : ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"]
+ indexs: ["index1:c1:c7"]
+ rows:
+ - [3,"aa",22,32,1.3,2.3,1590738990002,"2020-05-01"]
+ - [4,"aa",23,33,1.4,2.4,1590738990003,"2020-05-01"]
+ sql: |
+ SELECT id, c1, sum(c4) OVER w1 as w1_c4_sum, sum(c4) OVER w2 as w2_c4_sum FROM {0} WINDOW
+ w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS BETWEEN 2 PRECEDING AND CURRENT ROW),
+ w2 AS (UNION {1} PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS_RANGE BETWEEN 3 PRECEDING AND CURRENT ROW MAXSIZE 2);
+ expect:
+ order: id
+ columns: ["id int","c1 string","w1_c4_sum bigint","w2_c4_sum bigint"]
+ rows:
+ - [1,"aa",30,30]
+ - [2,"aa",61,61]
+ - [5,"aa",95,67]
+ -
+ id: 21
+ desc: union+maxsize+INSTANCE_NOT_IN_WINDOW
+ inputs:
+ -
+ columns : ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"]
+ indexs: ["index1:c1:c7"]
+ rows:
+ - [1,"aa",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"aa",21,31,1.2,2.2,1590738990001,"2020-05-01"]
+ - [5,"aa",24,34,1.5,2.5,1590738990004,"2020-05-02"]
+ -
+ columns : ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"]
+ indexs: ["index1:c1:c7"]
+ rows:
+ - [3,"aa",22,32,1.3,2.3,1590738990002,"2020-05-01"]
+ - [4,"aa",23,33,1.4,2.4,1590738990003,"2020-05-01"]
+ sql: |
+ SELECT id, c1, sum(c4) OVER w1 as w1_c4_sum FROM {0} WINDOW
+ w1 AS (UNION {1} PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS_RANGE BETWEEN 3 PRECEDING AND CURRENT ROW MAXSIZE 2 INSTANCE_NOT_IN_WINDOW);
+ expect:
+ order: id
+ columns: ["id int","c1 string","w1_c4_sum bigint"]
+ rows:
+ - [1,"aa",30]
+ - [2,"aa",31]
+ - [5,"aa",67]
+ -
+ id: 22
+ desc: union subquery combined with maxsize
+ mode: cluster-unsupport
+ tags: ["cluster-执行失败"]
+ inputs:
+ -
+ columns : ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"]
+ indexs: ["index1:c1:c7"]
+ rows:
+ - [1,"aa",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"aa",21,31,1.2,2.2,1590738990001,"2020-05-01"]
+ - [5,"aa",24,34,1.5,2.5,1590738990004,"2020-05-02"]
+ -
+ columns : ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"]
+ indexs: ["index1:c1:c7"]
+ rows:
+ - [3,"aa",22,32,1.3,2.3,1590738990002,"2020-05-01"]
+ - [4,"aa",23,33,1.4,2.4,1590738990003,"2020-05-01"]
+ sql: |
+ SELECT id, c1, sum(c4) OVER w1 as w1_c4_sum FROM {0} WINDOW
+ w1 AS (UNION (select * from {1}) PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS_RANGE BETWEEN 3 PRECEDING AND CURRENT ROW MAXSIZE 2);
+ expect:
+ order: id
+ columns: ["id int","c1 string","w1_c4_sum bigint"]
+ rows:
+ - [1,"aa",30]
+ - [2,"aa",61]
+ - [5,"aa",67]
+ -
+ id: 23-1
+ desc: lastjoin combined with maxsize
+ inputs:
+ -
+ columns : ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"]
+ indexs: ["index1:c1:c7"]
+ rows:
+ - [1,"aa",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"aa",21,31,1.2,2.2,1590738990001,"2020-05-02"]
+ - [3,"aa",22,32,1.3,2.3,1590738990002,"2020-05-03"]
+ - [4,"bb",23,33,1.4,2.4,1590738990003,"2020-05-04"]
+ - [5,"bb",24,34,1.5,2.5,1590738990004,"2020-05-05"]
+ -
+ columns: ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"]
+ indexs: ["index1:c1:c7"]
+ rows:
+ - [1,"aa",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"aa",20,31,1.1,2.1,1590738990001,"2020-05-01"]
+ - [3,"aa",20,32,1.1,2.1,1590738990002,"2020-05-01"]
+ - [4,"bb",20,33,1.1,2.1,1590738990003,"2020-05-01"]
+ - [5,"bb",21,34,1.2,2.2,1590738990004,"2020-05-02"]
+ sql: |
+ select {0}.id,{0}.c1,{0}.c3,{1}.c4,
+ sum({1}.c4) OVER w1 as w1_c4_sum
+ from {0}
+ last join {1} ORDER BY {1}.c7 on {0}.c1={1}.c1
+ WINDOW
+ w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS_RANGE BETWEEN 3 PRECEDING AND CURRENT ROW MAXSIZE 2)
+ ;
+ expect:
+ order: id
+ columns: ["id int","c1 string","c3 int","c4 bigint","w1_c4_sum bigint"]
+ rows:
+ - [1,"aa",20,32,32]
+ - [2,"aa",21,32,64]
+ - [3,"aa",22,32,64]
+ - [4,"bb",23,34,34]
+ - [5,"bb",24,34,68]
+ -
+ id: 24
+ desc: union of multiple tables combined with maxsize
+ mode: cluster-unsupport
+ tags: ["cluster-执行失败"]
+ inputs:
+ -
+ columns : ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"]
+ indexs: ["index1:c1:c7"]
+ rows:
+ - [1,"aa",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"aa",21,31,1.2,2.2,1590738990001,"2020-05-01"]
+ - [5,"aa",24,34,1.5,2.5,1590738990004,"2020-05-02"]
+ -
+ columns : ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"]
+ indexs: ["index1:c1:c7"]
+ rows:
+ - [3,"aa",22,32,1.3,2.3,1590738990002,"2020-05-01"]
+ -
+ columns : ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"]
+ indexs: ["index1:c1:c7"]
+ rows:
+ - [4,"aa",23,33,1.4,2.4,1590738990003,"2020-05-01"]
+ sql: |
+ SELECT id, c1, sum(c4) OVER w1 as w1_c4_sum FROM {0} WINDOW
+ w1 AS (UNION {1},{2} PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS_RANGE BETWEEN 3 PRECEDING AND CURRENT ROW MAXSIZE 2);
+ expect:
+ order: id
+ columns: ["id int","c1 string","w1_c4_sum bigint"]
+ rows:
+ - [1,"aa",30]
+ - [2,"aa",61]
+ - [5,"aa",67]
+ -
+ id: 25
+ desc: maxsize-rows
+ inputs:
+ -
+ columns : ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"]
+ indexs: ["index1:c1:c7"]
+ rows:
+ - [1,"aa",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ sql: |
+ SELECT id, c1, sum(c4) OVER w1 as w1_c4_sum FROM {0} WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS BETWEEN 6 PRECEDING AND CURRENT ROW MAXSIZE 3);
+ expect:
+ success: false
+ -
+ id: 26
+ desc: two unions with different maxsize
+ inputs:
+ -
+ columns : ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"]
+ indexs: ["index1:c1:c7"]
+ rows:
+ - [1,"aa",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"aa",21,31,1.2,2.2,1590738990001,"2020-05-01"]
+ - [5,"aa",24,34,1.5,2.5,1590738990004,"2020-05-02"]
+ -
+ columns : ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"]
+ indexs: ["index1:c1:c7"]
+ rows:
+ - [3,"aa",22,32,1.3,2.3,1590738990002,"2020-05-01"]
+ -
+ columns : ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"]
+ indexs: ["index1:c1:c7"]
+ rows:
+ - [4,"aa",23,33,1.4,2.4,1590738990003,"2020-05-01"]
+ sql: |
+ SELECT id, c1, sum(c4) OVER w1 as w1_c4_sum, sum(c4) OVER w2 as w2_c4_sum FROM {0} WINDOW
+ w1 AS (UNION {1} PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS_RANGE BETWEEN 5 PRECEDING AND CURRENT ROW MAXSIZE 4),
+ w2 AS (UNION {1},{2} PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS_RANGE BETWEEN 5 PRECEDING AND CURRENT ROW MAXSIZE 2);
+ expect:
+ order: id
+ columns: ["id int","c1 string","w1_c4_sum bigint","w2_c4_sum bigint"]
+ rows:
+ - [1,"aa",30,30]
+ - [2,"aa",61,61]
+ - [5,"aa",127,67]
diff --git a/cases/integration_test/window/test_window.yaml b/cases/integration_test/window/test_window.yaml
new file mode 100644
index 00000000000..3a23ef33577
--- /dev/null
+++ b/cases/integration_test/window/test_window.yaml
@@ -0,0 +1,1221 @@
+# Copyright 2021 4Paradigm
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+db: test_zw
+debugs: []
+version: 0.5.0
+cases:
+ -
+ id: 0
+ desc: one pk, window size larger than all the data
+ inputs:
+ -
+ columns : ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"]
+ indexs: ["index1:c1:c7"]
+ rows:
+ - [1,"aa",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"aa",21,31,1.2,2.2,1590738990001,"2020-05-02"]
+ - [3,"aa",22,32,1.3,2.3,1590738990002,"2020-05-03"]
+ - [4,"aa",23,33,1.4,2.4,1590738990003,"2020-05-04"]
+ - [5,"aa",24,34,1.5,2.5,1590738990004,"2020-05-05"]
+ dataProvider:
+ - ["ROWS","ROWS_RANGE"]
+ sql: |
+ SELECT id, c1, sum(c4) OVER w1 as w1_c4_sum FROM {0} WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 d[0] BETWEEN 6 PRECEDING AND CURRENT ROW);
+ expect:
+ order: id
+ columns: ["id int","c1 string","w1_c4_sum bigint"]
+ rows:
+ - [1,"aa",30]
+ - [2,"aa",61]
+ - [3,"aa",93]
+ - [4,"aa",126]
+ - [5,"aa",160]
+ -
+ id: 1
+ desc: one pk, window size equal to all the data
+ inputs:
+ -
+ columns : ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"]
+ indexs: ["index1:c1:c7"]
+ rows:
+ - [1,"aa",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"aa",21,31,1.2,2.2,1590738990001,"2020-05-02"]
+ - [3,"aa",22,32,1.3,2.3,1590738990002,"2020-05-03"]
+ - [4,"aa",23,33,1.4,2.4,1590738990003,"2020-05-04"]
+ - [5,"aa",24,34,1.5,2.5,1590738990004,"2020-05-05"]
+ dataProvider:
+ - ["ROWS","ROWS_RANGE"]
+ sql: |
+ SELECT id, c1, sum(c4) OVER w1 as w1_c4_sum FROM {0} WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 d[0] BETWEEN 4 PRECEDING AND CURRENT ROW);
+ expect:
+ order: id
+ columns: ["id int","c1 string","w1_c4_sum bigint"]
+ rows:
+ - [1,"aa",30]
+ - [2,"aa",61]
+ - [3,"aa",93]
+ - [4,"aa",126]
+ - [5,"aa",160]
+ -
+ id: 2
+ desc: one pk, window size smaller than all the data
+ inputs:
+ -
+ columns : ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"]
+ indexs: ["index1:c1:c7"]
+ rows:
+ - [1,"aa",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"aa",21,31,1.2,2.2,1590738990001,"2020-05-02"]
+ - [3,"aa",22,32,1.3,2.3,1590738990002,"2020-05-03"]
+ - [4,"aa",23,33,1.4,2.4,1590738990003,"2020-05-04"]
+ - [5,"aa",24,34,1.5,2.5,1590738990004,"2020-05-05"]
+ dataProvider:
+ - ["ROWS","ROWS_RANGE"]
+ sql: |
+ SELECT id, c1, sum(c4) OVER w1 as w1_c4_sum FROM {0} WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 d[0] BETWEEN 2 PRECEDING AND CURRENT ROW);
+ expect:
+ order: id
+ columns: ["id int","c1 string","w1_c4_sum bigint"]
+ rows:
+ - [1,"aa",30]
+ - [2,"aa",61]
+ - [3,"aa",93]
+ - [4,"aa",96]
+ - [5,"aa",99]
+ -
+ id: 3
+ desc: one pk, none of the data falls inside the window
+ version: 0.6.0
+ inputs:
+ -
+ columns : ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"]
+ indexs: ["index1:c1:c7"]
+ rows:
+ - [1,"aa",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"aa",21,31,1.2,2.2,1590738990001,"2020-05-02"]
+ - [3,"aa",22,32,1.3,2.3,1590738990002,"2020-05-03"]
+ dataProvider:
+ - ["ROWS","ROWS_RANGE"]
+ sql: |
+ SELECT id, c1, sum(c4) OVER w1 as w1_c4_sum FROM {0} WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 d[0] BETWEEN 5 PRECEDING AND 3 PRECEDING);
+ expect:
+ order: id
+ columns: ["id int","c1 string","w1_c4_sum bigint"]
+ rows:
+ - [1,"aa",null]
+ - [2,"aa",null]
+ - [3,"aa",null]
+ -
+ id: 4
+ desc: window contains only the current row
+ inputs:
+ -
+ columns : ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"]
+ indexs: ["index1:c1:c7"]
+ rows:
+ - [1,"aa",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"aa",21,31,1.2,2.2,1590738990001,"2020-05-02"]
+ - [3,"aa",22,32,1.3,2.3,1590738990002,"2020-05-03"]
+ dataProvider:
+ - ["ROWS","ROWS_RANGE"]
+ sql: |
+ SELECT id, c1, sum(c4) OVER w1 as w1_c4_sum FROM {0} WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 d[0] BETWEEN 0 PRECEDING AND CURRENT ROW);
+ expect:
+ order: id
+ columns: ["id int","c1 string","w1_c4_sum bigint"]
+ rows:
+ - [1,"aa",30]
+ - [2,"aa",31]
+ - [3,"aa",32]
+ -
+ id: 5
+ desc: window contains only the current row
+ inputs:
+ -
+ columns : ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"]
+ indexs: ["index1:c1:c7"]
+ rows:
+ - [1,"aa",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"aa",21,31,1.2,2.2,1590738990001,"2020-05-02"]
+ - [3,"aa",22,32,1.3,2.3,1590738990002,"2020-05-03"]
+ dataProvider:
+ - ["ROWS","ROWS_RANGE"]
+ sql: |
+ SELECT id, c1, sum(c4) OVER w1 as w1_c4_sum FROM {0} WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 d[0] BETWEEN 0 PRECEDING AND CURRENT ROW);
+ expect:
+ order: id
+ columns: ["id int","c1 string","w1_c4_sum bigint"]
+ rows:
+ - [1,"aa",30]
+ - [2,"aa",31]
+ - [3,"aa",32]
+ -
+ id: 6
+ desc: data enters the window only at the last row
+ version: 0.6.0
+ inputs:
+ -
+ columns : ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"]
+ indexs: ["index1:c1:c7"]
+ rows:
+ - [1,"aa",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"aa",21,31,1.2,2.2,1590738990001,"2020-05-02"]
+ - [3,"aa",22,32,1.3,2.3,1590738990002,"2020-05-03"]
+ dataProvider:
+ - ["ROWS","ROWS_RANGE"]
+ sql: |
+ SELECT id, c1, sum(c4) OVER w1 as w1_c4_sum FROM {0} WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 d[0] BETWEEN 3 PRECEDING AND 2 PRECEDING);
+ expect:
+ order: id
+ columns: ["id int","c1 string","w1_c4_sum bigint"]
+ rows:
+ - [1,"aa",null]
+ - [2,"aa",null]
+ - [3,"aa",30]
+ -
+ id: 7
+ desc: pure-history window, sliding
+ version: 0.6.0
+ inputs:
+ -
+ columns : ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"]
+ indexs: ["index1:c1:c7"]
+ rows:
+ - [1,"aa",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"aa",21,31,1.2,2.2,1590738990001,"2020-05-02"]
+ - [3,"aa",22,32,1.3,2.3,1590738990002,"2020-05-03"]
+ - [4,"aa",23,33,1.4,2.4,1590738990003,"2020-05-04"]
+ - [5,"aa",24,34,1.5,2.5,1590738990004,"2020-05-05"]
+ dataProvider:
+ - ["ROWS","ROWS_RANGE"]
+ sql: |
+ SELECT id, c1, sum(c4) OVER w1 as w1_c4_sum FROM {0} WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 d[0] BETWEEN 2 PRECEDING AND 1 PRECEDING);
+ expect:
+ order: id
+ columns: ["id int","c1 string","w1_c4_sum bigint"]
+ rows:
+ - [1,"aa",null]
+ - [2,"aa",30]
+ - [3,"aa",61]
+ - [4,"aa",63]
+ - [5,"aa",65]
+ -
+ id: 8
+ desc: two pks, one never enters the window and one slides
+ version: 0.6.0
+ inputs:
+ -
+ columns : ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"]
+ indexs: ["index1:c1:c7"]
+ rows:
+ - [1,"aa",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"aa",21,31,1.2,2.2,1590738990001,"2020-05-02"]
+ - [3,"aa",22,32,1.3,2.3,1590738990002,"2020-05-03"]
+ - [4,"aa",23,33,1.4,2.4,1590738990003,"2020-05-04"]
+ - [5,"bb",24,34,1.5,2.5,1590738990004,"2020-05-05"]
+ dataProvider:
+ - ["ROWS","ROWS_RANGE"]
+ sql: |
+ SELECT id, c1, sum(c4) OVER w1 as w1_c4_sum FROM {0} WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 d[0] BETWEEN 2 PRECEDING AND 1 PRECEDING);
+ expect:
+ order: id
+ columns: ["id int","c1 string","w1_c4_sum bigint"]
+ rows:
+ - [1,"aa",null]
+ - [2,"aa",30]
+ - [3,"aa",61]
+ - [4,"aa",63]
+ - [5,"bb",null]
+ -
+ id: 9
+ desc: two pks, one fully inside the window and one sliding
+ inputs:
+ -
+ columns : ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"]
+ indexs: ["index1:c1:c7"]
+ rows:
+ - [1,"aa",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"aa",21,31,1.2,2.2,1590738990001,"2020-05-02"]
+ - [3,"aa",22,32,1.3,2.3,1590738990002,"2020-05-03"]
+ - [4,"aa",23,33,1.4,2.4,1590738990003,"2020-05-04"]
+ - [5,"bb",24,34,1.5,2.5,1590738990004,"2020-05-05"]
+ dataProvider:
+ - ["ROWS","ROWS_RANGE"]
+ sql: |
+ SELECT id, c1, sum(c4) OVER w1 as w1_c4_sum FROM {0} WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 d[0] BETWEEN 2 PRECEDING AND 0 PRECEDING);
+ expect:
+ order: id
+ columns: ["id int","c1 string","w1_c4_sum bigint"]
+ rows:
+ - [1,"aa",30]
+ - [2,"aa",61]
+ - [3,"aa",93]
+ - [4,"aa",96]
+ - [5,"bb",34]
+ -
+ id: 10
+ desc: both pks slide
+ inputs:
+ -
+ columns : ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"]
+ indexs: ["index1:c1:c7"]
+ rows:
+ - [1,"aa",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"aa",21,31,1.2,2.2,1590738990001,"2020-05-02"]
+ - [3,"aa",22,32,1.3,2.3,1590738990002,"2020-05-03"]
+ - [4,"aa",23,33,1.4,2.4,1590738990003,"2020-05-04"]
+ - [5,"bb",24,34,1.5,2.5,1590738990004,"2020-05-05"]
+ - [6,"bb",24,35,1.5,2.5,1590738990005,"2020-05-05"]
+ - [7,"bb",24,36,1.5,2.5,1590738990006,"2020-05-05"]
+ - [8,"bb",24,37,1.5,2.5,1590738990007,"2020-05-05"]
+ dataProvider:
+ - ["ROWS","ROWS_RANGE"]
+ sql: |
+ SELECT id, c1, sum(c4) OVER w1 as w1_c4_sum FROM {0} WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 d[0] BETWEEN 1 PRECEDING AND 0 PRECEDING);
+ expect:
+ order: id
+ columns: ["id int","c1 string","w1_c4_sum bigint"]
+ rows:
+ - [1,"aa",30]
+ - [2,"aa",61]
+ - [3,"aa",63]
+ - [4,"aa",65]
+ - [5,"bb",34]
+ - [6,"bb",69]
+ - [7,"bb",71]
+ - [8,"bb",73]
+ -
+ id: 11
+ desc: ts column out of order
+ mode: request-unsupport
+ inputs:
+ -
+ columns : ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"]
+ indexs: ["index1:c1:c7"]
+ rows:
+ - [4,"aa",23,33,1.4,2.4,1590738990003,"2020-05-04"]
+ - [1,"aa",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [3,"aa",22,32,1.3,2.3,1590738990002,"2020-05-03"]
+ - [2,"aa",21,31,1.2,2.2,1590738990001,"2020-05-02"]
+ - [5,"aa",24,34,1.5,2.5,1590738990004,"2020-05-05"]
+ dataProvider:
+ - ["ROWS","ROWS_RANGE"]
+ sql: |
+ SELECT id, c1, sum(c4) OVER w1 as w1_c4_sum FROM {0} WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 d[0] BETWEEN 2 PRECEDING AND CURRENT ROW);
+ expect:
+ order: id
+ columns: ["id int","c1 string","w1_c4_sum bigint"]
+ rows:
+ - [1,"aa",30]
+ - [2,"aa",61]
+ - [3,"aa",93]
+ - [4,"aa",96]
+ - [5,"aa",99]
+ -
+ id: 12
+ desc: ts column out of order
+ mode: batch-unsupport
+ inputs:
+ -
+ columns : ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"]
+ indexs: ["index1:c1:c7"]
+ rows:
+ - [4,"aa",23,33,1.4,2.4,1590738990003,"2020-05-04"]
+ - [1,"aa",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [3,"aa",22,32,1.3,2.3,1590738990002,"2020-05-03"]
+ - [2,"aa",21,31,1.2,2.2,1590738990001,"2020-05-02"]
+ - [5,"aa",24,34,1.5,2.5,1590738990004,"2020-05-05"]
+ dataProvider:
+ - ["ROWS","ROWS_RANGE"]
+ sql: |
+ SELECT id, c1, sum(c4) OVER w1 as w1_c4_sum FROM {0} WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 d[0] BETWEEN 2 PRECEDING AND CURRENT ROW);
+ expect:
+ order: id
+ columns: ["id int","c1 string","w1_c4_sum bigint"]
+ rows:
+ - [1,"aa",30]
+ - [2,"aa",61]
+ - [3,"aa",62]
+ - [4,"aa",33]
+ - [5,"aa",99]
+ -
+ id: 13
+ desc: identical ts values
+ mode: disk-unsupport
+ inputs:
+ -
+ columns : ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"]
+ indexs: ["index1:c1:c7"]
+ rows:
+ - [1,"aa",23,33,1.4,2.4,1590738990000,"2020-05-04"]
+ - [2,"aa",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [3,"aa",22,32,1.3,2.3,1590738990002,"2020-05-03"]
+ - [4,"aa",21,31,1.2,2.2,1590738990005,"2020-05-02"]
+ - [5,"aa",24,34,1.5,2.5,1590738990005,"2020-05-05"]
+ dataProvider:
+ - ["ROWS","ROWS_RANGE"]
+ sql: |
+ SELECT id, c1, count(c4) OVER w1 as w1_c4_sum FROM {0} WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 d[0] BETWEEN 2 PRECEDING AND CURRENT ROW);
+ expect:
+ order: id
+ columns: ["id int","c1 string","w1_c4_sum bigint"]
+ expectProvider:
+ 0:
+ rows:
+ - [1,"aa",1]
+ - [2,"aa",2]
+ - [3,"aa",3]
+ - [4,"aa",3]
+ - [5,"aa",3]
+ 1:
+ rows:
+ - [1,"aa",1]
+ - [2,"aa",2]
+ - [3,"aa",3]
+ - [4,"aa",1]
+ - [5,"aa",2]
+ -
+ id: 14
+ desc: the previous row slides out of the window each time
+ inputs:
+ -
+ columns : ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"]
+ indexs: ["index1:c1:c7"]
+ rows:
+ - [1,"aa",23,30,1.4,2.4,1590738990000,"2020-05-01"]
+ - [2,"aa",20,31,1.1,2.1,1590738990003,"2020-05-02"]
+ - [3,"aa",22,32,1.3,2.3,1590738990006,"2020-05-03"]
+ - [4,"aa",21,33,1.2,2.2,1590738990009,"2020-05-04"]
+ - [5,"aa",24,34,1.5,2.5,1590738990012,"2020-05-05"]
+ dataProvider:
+ - ["ROWS","ROWS_RANGE"]
+ sql: |
+ SELECT id, c1, count(c4) OVER w1 as w1_c4_sum FROM {0} WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 d[0] BETWEEN 2 PRECEDING AND CURRENT ROW);
+ expect:
+ order: id
+ columns: ["id int","c1 string","w1_c4_sum bigint"]
+ expectProvider:
+ 0:
+ rows:
+ - [1,"aa",1]
+ - [2,"aa",2]
+ - [3,"aa",3]
+ - [4,"aa",3]
+ - [5,"aa",3]
+ 1:
+ rows:
+ - [1,"aa",1]
+ - [2,"aa",1]
+ - [3,"aa",1]
+ - [4,"aa",1]
+ - [5,"aa",1]
+ -
+ id: 15
+ desc: pk contains null
+ inputs:
+ -
+ columns : ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"]
+ indexs: ["index1:c1:c7"]
+ rows:
+ - [1,"aa",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,null,21,31,1.2,2.2,1590738990001,"2020-05-02"]
+ - [3,null,22,32,1.3,2.3,1590738990002,"2020-05-03"]
+ - [4,null,23,33,1.4,2.4,1590738990003,"2020-05-04"]
+ - [5,"aa",24,34,1.5,2.5,1590738990004,"2020-05-05"]
+ dataProvider:
+ - ["ROWS","ROWS_RANGE"]
+ sql: |
+ SELECT id, c1, sum(c4) OVER w1 as w1_c4_sum FROM {0} WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 d[0] BETWEEN 1 PRECEDING AND CURRENT ROW);
+ expect:
+ order: id
+ columns: ["id int","c1 string","w1_c4_sum bigint"]
+ expectProvider:
+ 0:
+ rows:
+ - [1,"aa",30]
+ - [2,null,31]
+ - [3,null,63]
+ - [4,null,65]
+ - [5,"aa",64]
+ 1:
+ rows:
+ - [1,"aa",30]
+ - [2,null,31]
+ - [3,null,63]
+ - [4,null,65]
+ - [5,"aa",34]
+ -
+ id: 16
+ desc: pk contains an empty string
+ mode: cli-unsupport
+ inputs:
+ -
+ columns : ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"]
+ indexs: ["index1:c1:c7"]
+ rows:
+ - [1,"aa",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"",21,31,1.2,2.2,1590738990001,"2020-05-02"]
+ - [3,"",22,32,1.3,2.3,1590738990002,"2020-05-03"]
+ - [4,"",23,33,1.4,2.4,1590738990003,"2020-05-04"]
+ - [5,"aa",24,34,1.5,2.5,1590738990004,"2020-05-05"]
+ dataProvider:
+ - ["ROWS","ROWS_RANGE"]
+ sql: |
+ SELECT id, c1, sum(c4) OVER w1 as w1_c4_sum FROM {0} WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 d[0] BETWEEN 1 PRECEDING AND CURRENT ROW);
+ expect:
+ order: id
+ columns: ["id int","c1 string","w1_c4_sum bigint"]
+ expectProvider:
+ 0:
+ rows:
+ - [1,"aa",30]
+ - [2,"",31]
+ - [3,"",63]
+ - [4,"",65]
+ - [5,"aa",64]
+ 1:
+ rows:
+ - [1,"aa",30]
+ - [2,"",31]
+ - [3,"",63]
+ - [4,"",65]
+ - [5,"aa",34]
+ -
+ id: 17
+ desc: pk contains an empty string and null
+ mode: cli-unsupport
+ inputs:
+ -
+ columns : ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"]
+ indexs: ["index1:c1:c7"]
+ rows:
+ - [1,null,20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"",21,31,1.2,2.2,1590738990001,"2020-05-02"]
+ - [3,"",22,32,1.3,2.3,1590738990002,"2020-05-03"]
+ - [4,"",23,33,1.4,2.4,1590738990003,"2020-05-04"]
+ - [5,null,24,34,1.5,2.5,1590738990004,"2020-05-05"]
+ dataProvider:
+ - ["ROWS","ROWS_RANGE"]
+ sql: |
+ SELECT id, c1, sum(c4) OVER w1 as w1_c4_sum FROM {0} WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 d[0] BETWEEN 1 PRECEDING AND CURRENT ROW);
+ expect:
+ order: id
+ columns: ["id int","c1 string","w1_c4_sum bigint"]
+ expectProvider:
+ 0:
+ rows:
+ - [1,null,30]
+ - [2,"",31]
+ - [3,"",63]
+ - [4,"",65]
+ - [5,null,64]
+ 1:
+ rows:
+ - [1,null,30]
+ - [2,"",31]
+ - [3,"",63]
+ - [4,"",65]
+ - [5,null,34]
+ -
+ id: 18
+ desc: two windows with the same pk, same ts and the same aggregate function
+ inputs:
+ -
+ columns : ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"]
+ indexs: ["index1:c1:c7"]
+ rows:
+ - [1,"aa",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"aa",21,31,1.2,2.2,1590738990001,"2020-05-02"]
+ - [3,"aa",22,32,1.3,2.3,1590738990002,"2020-05-03"]
+ - [4,"aa",23,33,1.4,2.4,1590738990003,"2020-05-04"]
+ - [5,"aa",24,34,1.5,2.5,1590738990004,"2020-05-05"]
+ dataProvider:
+ - ["ROWS","ROWS_RANGE"]
+ sql: |
+ SELECT id, c1, sum(c4) OVER w1 as w1_c4_sum, sum(c4) OVER w2 as w2_c4_sum FROM {0} WINDOW
+ w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 d[0] BETWEEN 2 PRECEDING AND CURRENT ROW),
+ w2 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 d[0] BETWEEN 2 PRECEDING AND CURRENT ROW);
+ expect:
+ order: id
+ columns: ["id int","c1 string","w1_c4_sum bigint","w2_c4_sum bigint"]
+ rows:
+ - [1,"aa",30,30]
+ - [2,"aa",61,61]
+ - [3,"aa",93,93]
+ - [4,"aa",96,96]
+ - [5,"aa",99,99]
+ -
+ id: 19
+ desc: two windows with the same pk, same ts and different columns
+ inputs:
+ -
+ columns : ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"]
+ indexs: ["index1:c1:c7"]
+ rows:
+ - [1,"aa",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"aa",21,31,1.2,2.2,1590738990001,"2020-05-02"]
+ - [3,"aa",22,32,1.3,2.3,1590738990002,"2020-05-03"]
+ - [4,"aa",23,33,1.4,2.4,1590738990003,"2020-05-04"]
+ - [5,"aa",24,34,1.5,2.5,1590738990004,"2020-05-05"]
+ dataProvider:
+ - ["ROWS","ROWS_RANGE"]
+ sql: |
+ SELECT id, c1, sum(c4) OVER w1 as w1_c4_sum, sum(c3) OVER w2 as w2_c3_sum FROM {0} WINDOW
+ w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 d[0] BETWEEN 2 PRECEDING AND CURRENT ROW),
+ w2 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 d[0] BETWEEN 2 PRECEDING AND CURRENT ROW);
+ expect:
+ order: id
+ columns: ["id int","c1 string","w1_c4_sum bigint","w2_c3_sum int"]
+ rows:
+ - [1,"aa",30,20]
+ - [2,"aa",61,41]
+ - [3,"aa",93,63]
+ - [4,"aa",96,66]
+ - [5,"aa",99,69]
+ -
+ id: 20
+ desc: two windows with the same pk, same ts and different functions
+ inputs:
+ -
+ columns : ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"]
+ indexs: ["index1:c1:c7"]
+ rows:
+ - [1,"aa",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"aa",21,31,1.2,2.2,1590738990001,"2020-05-02"]
+ - [3,"aa",22,32,1.3,2.3,1590738990002,"2020-05-03"]
+ - [4,"aa",23,33,1.4,2.4,1590738990003,"2020-05-04"]
+ - [5,"aa",24,34,1.5,2.5,1590738990004,"2020-05-05"]
+ dataProvider:
+ - ["ROWS","ROWS_RANGE"]
+ sql: |
+ SELECT id, c1, sum(c4) OVER w1 as w1_c4_sum, count(c4) OVER w2 as w2_c4_count FROM {0} WINDOW
+ w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 d[0] BETWEEN 2 PRECEDING AND CURRENT ROW),
+ w2 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 d[0] BETWEEN 2 PRECEDING AND CURRENT ROW);
+ expect:
+ order: id
+ columns: ["id int","c1 string","w1_c4_sum bigint","w2_c4_count bigint"]
+ rows:
+ - [1,"aa",30,1]
+ - [2,"aa",61,2]
+ - [3,"aa",93,3]
+ - [4,"aa",96,3]
+ - [5,"aa",99,3]
+ -
+ id: 21
+ desc: sum exceeds the int range
+ inputs:
+ -
+ columns : ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"]
+ indexs: ["index1:c1:c7"]
+ rows:
+ - [1,"aa",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"aa",21,31,1.2,2.2,1590738990001,"2020-05-02"]
+ - [3,"aa",22,32,1.3,2.3,1590738990002,"2020-05-03"]
+ - [4,"aa",23,33,1.4,2.4,1590738990003,"2020-05-04"]
+ - [5,"aa",2147483647,34,1.5,2.5,1590738990004,"2020-05-05"]
+ dataProvider:
+ - ["ROWS","ROWS_RANGE"]
+ sql: |
+ SELECT id, c1, sum(c3) OVER w1 as w1_c3_sum FROM {0} WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 d[0] BETWEEN 2 PRECEDING AND CURRENT ROW);
+ expect:
+ order: id
+ columns: ["id int","c1 string","w1_c3_sum int"]
+ rows:
+ - [1,"aa",20]
+ - [2,"aa",41]
+ - [3,"aa",63]
+ - [4,"aa",66]
+ - [5,"aa",-2147483604]
+ -
+ id: 22
+ desc: two windows with the same pk, different ts and the same aggregate function
+ inputs:
+ -
+ columns : ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"]
+ indexs: ["index1:c1:c7","index2:c1:c4"]
+ rows:
+ - [1,"aa",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"aa",21,31,1.2,2.2,1590738990001,"2020-05-02"]
+ - [3,"aa",22,32,1.3,2.3,1590738990002,"2020-05-03"]
+ - [4,"aa",23,33,1.4,2.4,1590738990003,"2020-05-04"]
+ - [5,"aa",24,34,1.5,2.5,1590738990004,"2020-05-05"]
+ dataProvider:
+ - ["ROWS","ROWS_RANGE"]
+ sql: |
+ SELECT id, c1, sum(c4) OVER w1 as w1_c4_sum, sum(c4) OVER w2 as w2_c4_sum FROM {0} WINDOW
+ w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 d[0] BETWEEN 2 PRECEDING AND CURRENT ROW),
+ w2 AS (PARTITION BY {0}.c1 ORDER BY {0}.c4 d[0] BETWEEN 1 PRECEDING AND CURRENT ROW);
+ expect:
+ order: id
+ columns: ["id int","c1 string","w1_c4_sum bigint","w2_c4_sum bigint"]
+ rows:
+ - [1,"aa",30,30]
+ - [2,"aa",61,61]
+ - [3,"aa",93,63]
+ - [4,"aa",96,65]
+ - [5,"aa",99,67]
+ -
+ id: 23
+ desc: two windows with different pks, same ts and the same aggregate function
+ inputs:
+ -
+ columns : ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"]
+ indexs: ["index1:c1:c7","index2:c8:c7"]
+ rows:
+ - [1,"aa",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"aa",21,31,1.2,2.2,1590738990001,"2020-05-01"]
+ - [3,"aa",22,32,1.3,2.3,1590738990002,"2020-05-01"]
+ - [4,"aa",23,33,1.4,2.4,1590738990003,"2020-05-01"]
+ - [5,"aa",24,34,1.5,2.5,1590738990004,"2020-05-01"]
+ dataProvider:
+ - ["ROWS","ROWS_RANGE"]
+ sql: |
+ SELECT id, c1, sum(c4) OVER w1 as w1_c4_sum, sum(c4) OVER w2 as w2_c4_sum FROM {0} WINDOW
+ w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 d[0] BETWEEN 2 PRECEDING AND CURRENT ROW),
+ w2 AS (PARTITION BY {0}.c8 ORDER BY {0}.c7 d[0] BETWEEN 2 PRECEDING AND CURRENT ROW);
+ expect:
+ order: id
+ columns: ["id int","c1 string","w1_c4_sum bigint","w2_c4_sum bigint"]
+ rows:
+ - [1,"aa",30,30]
+ - [2,"aa",61,61]
+ - [3,"aa",93,93]
+ - [4,"aa",96,96]
+ - [5,"aa",99,99]
+ -
+ id: 24
+ desc: two windows with different pks, same ts and the same aggregate function; one window covers two pks
+ inputs:
+ -
+ columns : ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"]
+ indexs: ["index1:c1:c7","index2:c8:c7"]
+ rows:
+ - [1,"aa",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"aa",21,31,1.2,2.2,1590738990001,"2020-05-01"]
+ - [3,"aa",22,32,1.3,2.3,1590738990002,"2020-05-01"]
+ - [4,"aa",23,33,1.4,2.4,1590738990003,"2020-05-01"]
+ - [5,"aa",24,34,1.5,2.5,1590738990004,"2020-05-02"]
+ dataProvider:
+ - ["ROWS","ROWS_RANGE"]
+ sql: |
+ SELECT id, c1, sum(c4) OVER w1 as w1_c4_sum, sum(c4) OVER w2 as w2_c4_sum FROM {0} WINDOW
+ w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 d[0] BETWEEN 2 PRECEDING AND CURRENT ROW),
+ w2 AS (PARTITION BY {0}.c8 ORDER BY {0}.c7 d[0] BETWEEN 2 PRECEDING AND CURRENT ROW);
+ expect:
+ order: id
+ columns: ["id int","c1 string","w1_c4_sum bigint","w2_c4_sum bigint"]
+ rows:
+ - [1,"aa",30,30]
+ - [2,"aa",61,61]
+ - [3,"aa",93,93]
+ - [4,"aa",96,96]
+ - [5,"aa",99,34]
+ -
+ id: 25
+ desc: two windows with different pks, different ts and the same aggregate function
+ inputs:
+ -
+ columns : ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"]
+ indexs: ["index1:c1:c7","index2:c8:c4"]
+ rows:
+ - [1,"aa",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"aa",21,31,1.2,2.2,1590738990001,"2020-05-01"]
+ - [3,"aa",22,32,1.3,2.3,1590738990002,"2020-05-01"]
+ - [4,"aa",23,33,1.4,2.4,1590738990003,"2020-05-01"]
+ - [5,"aa",24,34,1.5,2.5,1590738990004,"2020-05-02"]
+ dataProvider:
+ - ["ROWS","ROWS_RANGE"]
+ sql: |
+ SELECT id, c1, sum(c4) OVER w1 as w1_c4_sum, sum(c4) OVER w2 as w2_c4_sum FROM {0} WINDOW
+ w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 d[0] BETWEEN 2 PRECEDING AND CURRENT ROW),
+ w2 AS (PARTITION BY {0}.c8 ORDER BY {0}.c4 d[0] BETWEEN 2 PRECEDING AND CURRENT ROW);
+ expect:
+ order: id
+ columns: ["id int","c1 string","w1_c4_sum bigint","w2_c4_sum bigint"]
+ rows:
+ - [1,"aa",30,30]
+ - [2,"aa",61,61]
+ - [3,"aa",93,93]
+ - [4,"aa",96,96]
+ - [5,"aa",99,34]
+ -
+ id: 26
+ desc: two windows with different ts, one with all rows inside the window and one with none
+ inputs:
+ -
+ columns : ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"]
+ indexs: ["index1:c1:c7","index1:c1:c4"]
+ rows:
+ - [1,"aa",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"aa",21,31,1.2,2.2,1590738990001,"2020-05-01"]
+ - [3,"aa",22,32,1.3,2.3,1590738990002,"2020-05-01"]
+ - [4,"aa",23,33,1.4,2.4,1590738990003,"2020-05-01"]
+ - [5,"aa",24,34,1.5,2.5,1590738990004,"2020-05-02"]
+ dataProvider:
+ - ["ROWS","ROWS_RANGE"]
+ sql: |
+ SELECT id, c1, sum(c4) OVER w1 as w1_c4_sum, sum(c4) OVER w2 as w2_c4_sum FROM {0} WINDOW
+ w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 d[0] BETWEEN 5 PRECEDING AND CURRENT ROW),
+ w2 AS (PARTITION BY {0}.c1 ORDER BY {0}.c4 d[0] BETWEEN 6 PRECEDING AND 5 PRECEDING);
+ expect:
+ order: id
+ columns: ["id int","c1 string","w1_c4_sum bigint","w2_c4_sum bigint"]
+ rows:
+ - [1,"aa",30,null]
+ - [2,"aa",61,null]
+ - [3,"aa",93,null]
+ - [4,"aa",126,null]
+ - [5,"aa",160,null]
+ -
+ id: 27
+ desc: two windows, one with union and one without
+ mode: rtidb-batch-unsupport,cluster-unsupport
+ tags: ["cluster-执行失败"]
+ inputs:
+ -
+ columns : ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"]
+ indexs: ["index1:c1:c7"]
+ rows:
+ - [1,"aa",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"aa",21,31,1.2,2.2,1590738990001,"2020-05-01"]
+ - [5,"aa",24,34,1.5,2.5,1590738990004,"2020-05-02"]
+ -
+ columns : ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"]
+ indexs: ["index1:c1:c7"]
+ rows:
+ - [3,"aa",22,32,1.3,2.3,1590738990002,"2020-05-01"]
+ - [4,"aa",23,33,1.4,2.4,1590738990003,"2020-05-01"]
+ dataProvider:
+ - ["ROWS","ROWS_RANGE"]
+ sql: |
+ SELECT id, c1, sum(c4) OVER w1 as w1_c4_sum, sum(c4) OVER w2 as w2_c4_sum FROM {0} WINDOW
+ w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 d[0] BETWEEN 2 PRECEDING AND CURRENT ROW),
+ w2 AS (UNION {1} PARTITION BY {0}.c1 ORDER BY {0}.c7 d[0] BETWEEN 2 PRECEDING AND CURRENT ROW);
+ expect:
+ order: id
+ columns: ["id int","c1 string","w1_c4_sum bigint","w2_c4_sum bigint"]
+ expectProvider:
+ 0:
+ rows:
+ - [1,"aa",30,30]
+ - [2,"aa",61,61]
+ - [5,"aa",95,99]
+ 1:
+ rows:
+ - [1,"aa",30,30]
+ - [2,"aa",61,61]
+ - [5,"aa",34,99]
+ -
+ id: 28
+ desc: two windows, one unions one table and the other unions two tables
+ mode: rtidb-batch-unsupport,cluster-unsupport
+ tags: ["cluster-执行失败"]
+ inputs:
+ -
+ columns : ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"]
+ indexs: ["index1:c1:c7"]
+ rows:
+ - [1,"aa",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"aa",21,31,1.2,2.2,1590738990001,"2020-05-01"]
+ - [5,"aa",24,34,1.5,2.5,1590738990004,"2020-05-02"]
+ -
+ columns : ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"]
+ indexs: ["index1:c1:c7"]
+ rows:
+ - [3,"aa",22,32,1.3,2.3,1590738990002,"2020-05-01"]
+ -
+ columns : ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"]
+ indexs: ["index1:c1:c7"]
+ rows:
+ - [4,"aa",23,33,1.4,2.4,1590738990003,"2020-05-01"]
+ dataProvider:
+ - ["ROWS","ROWS_RANGE"]
+ sql: |
+ SELECT id, c1, sum(c4) OVER w1 as w1_c4_sum, sum(c4) OVER w2 as w2_c4_sum FROM {0} WINDOW
+ w1 AS (UNION {1} PARTITION BY {0}.c1 ORDER BY {0}.c7 d[0] BETWEEN 2 PRECEDING AND CURRENT ROW),
+ w2 AS (UNION {1},{2} PARTITION BY {0}.c1 ORDER BY {0}.c7 d[0] BETWEEN 2 PRECEDING AND CURRENT ROW);
+ expect:
+ order: id
+ columns: ["id int","c1 string","w1_c4_sum bigint","w2_c4_sum bigint"]
+ expectProvider:
+ 0:
+ rows:
+ - [1,"aa",30,30]
+ - [2,"aa",61,61]
+ - [5,"aa",97,99]
+ 1:
+ rows:
+ - [1,"aa",30,30]
+ - [2,"aa",61,61]
+ - [5,"aa",66,99]
+ -
+ id: 29
+ desc: two windows, one with union and one with INSTANCE_NOT_IN_WINDOW
+ mode: rtidb-batch-unsupport,cluster-unsupport
+ tags: ["cluster-执行失败"]
+ inputs:
+ -
+ columns : ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"]
+ indexs: ["index1:c1:c7"]
+ rows:
+ - [1,"aa",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"aa",21,31,1.2,2.2,1590738990001,"2020-05-01"]
+ - [4,"aa",23,33,1.4,2.4,1590738990003,"2020-05-01"]
+ - [5,"aa",24,34,1.5,2.5,1590738990004,"2020-05-02"]
+ -
+ columns : ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"]
+ indexs: ["index1:c1:c7"]
+ rows:
+ - [3,"aa",22,32,1.3,2.3,1590738990002,"2020-05-01"]
+ dataProvider:
+ - ["ROWS","ROWS_RANGE"]
+ sql: |
+ SELECT id, c1, sum(c4) OVER w1 as w1_c4_sum, sum(c4) OVER w2 as w2_c4_sum FROM {0} WINDOW
+ w1 AS (UNION {1} PARTITION BY {0}.c1 ORDER BY {0}.c7 d[0] BETWEEN 2 PRECEDING AND CURRENT ROW),
+ w2 AS (UNION {1} PARTITION BY {0}.c1 ORDER BY {0}.c7 d[0] BETWEEN 2 PRECEDING AND CURRENT ROW INSTANCE_NOT_IN_WINDOW);
+ expect:
+ order: id
+ columns: ["id int","c1 string","w1_c4_sum bigint","w2_c4_sum bigint"]
+ rows:
+ - [1,"aa",30,30]
+ - [2,"aa",61,31]
+ - [4,"aa",96,65]
+ - [5,"aa",99,66]
+ -
+ id: 30
+ desc: two windows, one unions a table and the other unions a subquery
+ mode: rtidb-batch-unsupport,cluster-unsupport
+ tags: ["cluster-执行失败"]
+ inputs:
+ -
+ columns : ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"]
+ indexs: ["index1:c1:c7"]
+ rows:
+ - [1,"aa",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"aa",21,31,1.2,2.2,1590738990001,"2020-05-01"]
+ - [5,"aa",24,34,1.5,2.5,1590738990004,"2020-05-02"]
+ -
+ columns : ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"]
+ indexs: ["index1:c1:c7"]
+ rows:
+ - [3,"aa",22,32,1.3,2.3,1590738990002,"2020-05-01"]
+ - [4,"aa",23,33,1.4,2.4,1590738990003,"2020-05-01"]
+ dataProvider:
+ - ["ROWS","ROWS_RANGE"]
+ sql: |
+ SELECT id, c1, sum(c4) OVER w1 as w1_c4_sum, sum(c4) OVER w2 as w2_c4_sum FROM {0} WINDOW
+ w1 AS (UNION {1} PARTITION BY {0}.c1 ORDER BY {0}.c7 d[0] BETWEEN 2 PRECEDING AND CURRENT ROW),
+ w2 AS (UNION (select * from {1}) PARTITION BY {0}.c1 ORDER BY {0}.c7 d[0] BETWEEN 2 PRECEDING AND CURRENT ROW);
+ expect:
+ order: id
+ columns: ["id int","c1 string","w1_c4_sum bigint","w2_c4_sum bigint"]
+ rows:
+ - [1,"aa",30,30]
+ - [2,"aa",61,61]
+ - [5,"aa",99,99]
+ -
+ id: 31
+ desc: multiple windows - rows
+ mode: rtidb-batch-unsupport,cluster-unsupport
+ tags: ["cluster-执行失败", "@chenjing batch online fix for multi window with union", "@tobe batch offline fix"]
+ inputs:
+ -
+ columns : ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"]
+ indexs: ["index1:c1:c7"]
+ rows:
+ - [1,"aa",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [5,"aa",24,34,1.5,2.5,1590738990004,"2020-05-02"]
+ -
+ columns : ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"]
+ indexs: ["index1:c1:c7"]
+ rows:
+ - [2,"aa",21,31,1.2,2.2,1590738990001,"2020-05-01"]
+ -
+ columns : ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"]
+ indexs: ["index1:c1:c7"]
+ rows:
+ - [3,"aa",22,32,1.3,2.3,1590738990002,"2020-05-01"]
+ -
+ columns : ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"]
+ indexs: ["index1:c1:c7"]
+ rows:
+ - [4,"aa",23,33,1.4,2.4,1590738990003,"2020-05-01"]
+ sql: |
+ SELECT id, c1, sum(c4) OVER w1 as w1_c4_sum, sum(c4) OVER w2 as w2_c4_sum FROM {0} WINDOW
+ w1 AS (UNION {1},{2},{3} PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS BETWEEN 4 PRECEDING AND CURRENT ROW),
+ w2 AS (UNION {1},{2} PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS BETWEEN 4 PRECEDING AND 1 PRECEDING);
+ expect:
+ order: id
+ columns: ["id int","c1 string","w1_c4_sum bigint","w2_c4_sum bigint"]
+ rows:
+ - [1,"aa",30,0]
+ - [5,"aa",160,93]
+ -
+ id: 32
+ desc: multiple windows with different time units
+ mode: cluster-unsupport
+ tags: ["cluster-执行失败"]
+ inputs:
+ -
+ columns : ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"]
+ indexs: ["index1:c1:c7"]
+ rows:
+ - [1,"aa",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"aa",20,31,1.1,2.1,1590738990001,"2020-05-01"]
+ - [3,"aa",20,32,1.1,2.1,1590738992000,"2020-05-01"]
+ - [4,"aa",20,33,1.1,2.1,1590739110000,"2020-05-01"]
+ - [5,"aa",20,34,1.1,2.1,1590746190000,"2020-05-01"]
+ - [6,"aa",20,35,1.1,2.1,1590911790000,"2020-05-01"]
+ -
+ columns: ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"]
+ indexs: ["index1:c1:c7"]
+ rows:
+ - [1,"aa",20,30,1.1,2.1,1590738991000,"2020-05-01"]
+ - [2,"aa",21,31,1.2,2.2,1590738993000,"2020-05-02"]
+ -
+ columns : ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"]
+ indexs: ["index1:c1:c7"]
+ rows:
+ - [1,"aa",20,30,1.1,2.1,1590739050000,"2020-05-01"]
+ - [2,"aa",20,31,1.1,2.1,1590739170000,"2020-05-01"]
+ -
+ columns : ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"]
+ indexs: ["index1:c1:c7"]
+ rows:
+ - [1,"aa",20,30,1.1,2.1,1590742590000,"2020-05-01"]
+ - [2,"aa",20,31,1.1,2.1,1590749790000,"2020-05-01"]
+ -
+ columns : ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"]
+ indexs: ["index1:c1:c7"]
+ rows:
+ - [1,"aa",20,30,1.1,2.1,1590825390000,"2020-05-01"]
+ - [2,"aa",20,31,1.1,2.1,1590998190000,"2020-05-01"]
+ sql: |
+ SELECT id, c1,
+ sum(c4) OVER w1 as w1_c4_sum,
+ sum(c4) OVER w2 as w2_c4_sum,
+ sum(c4) OVER w3 as w3_c4_sum,
+ sum(c4) OVER w4 as w4_c4_sum,
+ sum(c4) OVER w5 as w5_c4_sum
+ FROM {0} WINDOW
+ w1 AS (UNION {1},{2},{3},{4} PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS_RANGE BETWEEN 2 PRECEDING AND CURRENT ROW),
+ w2 AS (UNION {1},{2},{3},{4} PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS_RANGE BETWEEN 2s PRECEDING AND CURRENT ROW),
+ w3 AS (UNION {1},{2},{3},{4} PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS_RANGE BETWEEN 2m PRECEDING AND CURRENT ROW),
+ w4 AS (UNION {1},{2},{3},{4} PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS_RANGE BETWEEN 2h PRECEDING AND CURRENT ROW),
+ w5 AS (UNION {1},{2},{3},{4} PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS_RANGE BETWEEN 2d PRECEDING AND CURRENT ROW);
+ expect:
+ order: id
+ columns: ["id int","c1 string","w1_c4_sum bigint","w2_c4_sum bigint","w3_c4_sum bigint","w4_c4_sum bigint","w5_c4_sum bigint"]
+ rows:
+ - [1,"aa",30,30,30,30,30]
+ - [2,"aa",61,61,61,61,61]
+ - [3,"aa",32,123,123,123,123]
+ - [4,"aa",33,33,217,217,217]
+ - [5,"aa",34,34,34,312,312]
+ - [6,"aa",35,35,35,35,408]
+
+ - id: 33
+ desc: |
+ first_value results in two rows_range windows, see https://github.com/4paradigm/OpenMLDB/issues/1587
+ inputs:
+ - columns: [ "id int","ts timestamp","group1 string","val1 int" ]
+ indexs: [ "index1:group1:ts" ]
+ name: t1
+ data: |
+ 1, 1612130400000, g1, 1
+ 2, 1612130401000, g1, 2
+ 3, 1612130402000, g1, 3
+ 4, 1612130403000, g1, 4
+ 5, 1612130404000, g1, 5
+ 6, 1612130404000, g2, 4
+ 7, 1612130405000, g2, 3
+ 8, 1612130406000, g2, 2
+ sql: |
+ select
+ `id`,
+ `val1`,
+ first_value(val1) over w1 as agg1,
+ first_value(val1) over w2 as agg2,
+ from `t1` WINDOW
+ w1 as (partition by `group1` order by `ts` rows_range between 5s preceding and 0s preceding),
+ w2 as (partition by `group1` order by `ts` rows_range between 5s preceding and 1s preceding);
+ expect:
+ columns: ["id int", "val1 int", "agg1 int", "agg2 int"]
+ order: id
+ rows:
+ - [1, 1, 1, NULL]
+ - [2, 2, 2, 1]
+ - [3, 3, 3, 2]
+ - [4, 4, 4, 3]
+ - [5, 5, 5, 4]
+ - [6, 4, 4, NULL]
+ - [7, 3, 3, 4]
+ - [8, 2, 2, 3]
+
+ - id: 34
+ desc: |
+ first_value results in two rows windows
+ inputs:
+ - columns: [ "id int","ts timestamp","group1 string","val1 int" ]
+ indexs: [ "index1:group1:ts" ]
+ name: t1
+ rows:
+ - [1, 1612130400000, g1, 1]
+ - [2, 1612130401000, g1, 2]
+ - [3, 1612130402000, g1, 3]
+ - [4, 1612130403000, g1, 4]
+ - [5, 1612130404000, g1, 5]
+ - [6, 1612130404000, g2, 4]
+ - [7, 1612130405000, g2, 3]
+ - [8, 1612130406000, g2, 2]
+ sql: |
+ select
+ `id`,
+ `val1`,
+ first_value(val1) over w1 as agg1,
+ first_value(val1) over w2 as agg2,
+ from `t1` WINDOW
+ w1 as (partition by `group1` order by `ts` rows between 5 preceding and 0 preceding),
+ w2 as (partition by `group1` order by `ts` rows between 5 preceding and 1 preceding);
+ expect:
+ columns: ["id int", "val1 int", "agg1 int", "agg2 int"]
+ order: id
+ rows:
+ - [1, 1, 1, NULL]
+ - [2, 2, 2, 1]
+ - [3, 3, 3, 2]
+ - [4, 4, 4, 3]
+ - [5, 5, 5, 4]
+ - [6, 4, 4, NULL]
+ - [7, 3, 3, 4]
+ - [8, 2, 2, 3]
+
+ - id: 35
+ desc: |
+ first_value results in rows/rows_range windows
+ inputs:
+ - columns: [ "id int","ts timestamp","group1 string","val1 int" ]
+ indexs: [ "index1:group1:ts" ]
+ name: t1
+ data: |
+ 1, 1612130400000, g1, 1
+ 2, 1612130401000, g1, 2
+ 3, 1612130402000, g1, 3
+ 4, 1612130403000, g1, 4
+ 5, 1612130404000, g1, 5
+ 6, 1612130404000, g2, 4
+ 7, 1612130405000, g2, 3
+ 8, 1612130406000, g2, 2
+ sql: |
+ select
+ `id`,
+ `val1`,
+ first_value(val1) over w1 as agg1,
+ first_value(val1) over w2 as agg2,
+ from `t1` WINDOW
+ w1 as (partition by `group1` order by `ts` rows_range between 5s preceding and 0s preceding),
+ w2 as (partition by `group1` order by `ts` rows between 5 preceding and 1 preceding);
+ expect:
+ columns: ["id int", "val1 int", "agg1 int", "agg2 int"]
+ order: id
+ rows:
+ - [1, 1, 1, NULL]
+ - [2, 2, 2, 1]
+ - [3, 3, 3, 2]
+ - [4, 4, 4, 3]
+ - [5, 5, 5, 4]
+ - [6, 4, 4, NULL]
+ - [7, 3, 3, 4]
+ - [8, 2, 2, 3]
+
+ - id: 36
+ version: 0.6.0
+ desc: |
+ correctness for window functions over window whose border is open
+ inputs:
+ - name: t1
+ columns:
+ - id int
+ - ts timestamp
+ - g int
+ - val int
+ indexs:
+ - idx:g:ts
+ data: |
+ 1, 99, 111, 21
+ 2, 100, 111, 22
+ 3, 101, 111, 23
+ sql: |
+ select
+ id,
+ count(val) over w as cnt,
+ max(val) over w as mv,
+ min(val) over w as mi,
+ lag(val, 1) over w as l1
+ from t1 window w as(
+ partition by `g` order by `ts`
+ ROWS between 3 OPEN preceding and 0 OPEN PRECEDING);
+ expect:
+ columns:
+ - id int
+ - cnt int64
+ - mv int
+ - mi int
+ - l1 int
+ order: id
+ data: |
+ 1, 0, NULL, NULL, NULL
+ 2, 1, 21, 21, 21
+ 3, 2, 22, 21, 22
+
+ - id: 37
+ version: 0.6.0
+ desc: |
+ correctness for rows_range window functions over window whose border is open
+ inputs:
+ - name: t1
+ columns:
+ - id int
+ - ts timestamp
+ - g int
+ - val int
+ indexs:
+ - idx:g:ts
+ data: |
+ 1, 99000, 111, 21
+ 2, 100000, 111, 22
+ 3, 101000, 111, 23
+ sql: |
+ select
+ id,
+ count(val) over w as cnt,
+ max(val) over w as mv,
+ min(val) over w as mi,
+ lag(val, 1) over w as l1
+ from t1 window w as(
+ partition by `g` order by `ts`
+ ROWS_RANGE between 2s OPEN PRECEDING and 0s OPEN preceding);
+ expect:
+ columns:
+ - id int
+ - cnt int64
+ - mv int
+ - mi int
+ - l1 int
+ order: id
+ data: |
+ 1, 0, NULL, NULL, NULL
+ 2, 1, 21, 21, 21
+ 3, 1, 22, 22, 22
diff --git a/cases/integration_test/window/test_window_exclude_current_time.yaml b/cases/integration_test/window/test_window_exclude_current_time.yaml
new file mode 100644
index 00000000000..46f3eeec19f
--- /dev/null
+++ b/cases/integration_test/window/test_window_exclude_current_time.yaml
@@ -0,0 +1,762 @@
+# Copyright 2021 4Paradigm
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+db: test_zw
+version: 0.5.0
+debugs: []
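+# Reading guide for the cases below, inferred from their expected outputs rather than a
+# formal definition: EXCLUDE CURRENT_TIME drops the other rows that share the current
+# row's timestamp from the window while keeping the current row itself; OPEN PRECEDING
+# makes the lower window bound exclusive; MAXSIZE n caps the window at n rows.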
+cases:
+ - id: 0
+ mode: disk-unsupport
+ desc: ROWS_RANGE Window OPEN PRECEDING EXCLUDE CURRENT_TIME
+ inputs:
+ - columns: [ "c1 string","c3 int","c4 double","c7 timestamp" ]
+ indexs: [ "index1:c1:c7" ]
+ rows:
+ - [ "aa",-2, 1.0, 0 ]
+ - [ "aa",-1, 1.0, 0 ]
+ - [ "aa",0, 1.0, 0 ]
+ - [ "aa",1, 1.0, 1590738990000 ]
+ - [ "aa",2, 1.0, 1590738990000 ]
+ - [ "aa",3, 1.0, 1590738992000 ]
+ - [ "aa",4, 1.0, 1590738993000 ]
+ - [ "aa",5, 1.0, 1590738994000 ]
+ - [ "aa",6, 1.0, 1590738994000 ]
+ - [ "aa",7, 1.0, 1590738999000 ]
+ - [ "aa",8, 1.0, 1590739001000 ]
+ - [ "aa",9, 1.0, 1590739002000 ]
+ sql: |
+ SELECT c1, c3, c7,
+ sum(c4) OVER w1 as w1_c4_sum
+ FROM {0} WINDOW
+ w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS_RANGE BETWEEN 4s OPEN PRECEDING AND CURRENT ROW EXCLUDE CURRENT_TIME);
+ expect:
+ order: c3
+ columns: [ "c1 string", "c3 int", "c7 timestamp", "w1_c4_sum double" ]
+ rows:
+ - [ "aa", -2, 0, 1.0 ]
+ - [ "aa", -1, 0, 1.0 ]
+ - [ "aa", 0, 0, 1.0 ]
+ - [ "aa", 1, 1590738990000, 1.0 ]
+ - [ "aa", 2, 1590738990000, 1.0 ]
+ - [ "aa", 3, 1590738992000, 3.0 ]
+ - [ "aa", 4, 1590738993000, 4.0 ]
+ - [ "aa", 5, 1590738994000, 3.0 ]
+ - [ "aa", 6, 1590738994000, 3.0 ]
+ - [ "aa", 7, 1590738999000, 1.0 ]
+ - [ "aa", 8, 1590739001000, 2.0 ]
+ - [ "aa", 9, 1590739002000, 3.0 ]
+ - id: 1
+ desc: ROWS_RANGE Window with MaxSize 2 OPEN PRECEDING EXCLUDE CURRENT_TIME
+ inputs:
+ - columns: [ "c1 string","c3 int","c4 double","c7 timestamp" ]
+ indexs: [ "index1:c1:c7" ]
+ rows:
+ - [ "aa",1, 1.0, 1590738990000 ]
+ - [ "aa",2, 1.0, 1590738990000 ]
+ - [ "aa",3, 1.0, 1590738992000 ]
+ - [ "aa",4, 1.0, 1590738993000 ]
+ - [ "aa",5, 1.0, 1590738994000 ]
+ - [ "aa",6, 1.0, 1590738994000 ]
+ - [ "aa",7, 1.0, 1590738999000 ]
+ - [ "aa",8, 1.0, 1590739001000 ]
+ - [ "aa",9, 1.0, 1590739002000 ]
+ sql: |
+ SELECT c1, c3, c7,
+ sum(c4) OVER w1 as w1_c4_sum
+ FROM {0} WINDOW
+ w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS_RANGE BETWEEN 4s OPEN PRECEDING AND CURRENT ROW MAXSIZE 2 EXCLUDE CURRENT_TIME);
+ expect:
+ order: c3
+ columns: [ "c1 string", "c3 int", "c7 timestamp", "w1_c4_sum double" ]
+ rows:
+ - [ "aa", 1, 1590738990000, 1.0 ]
+ - [ "aa", 2, 1590738990000, 1.0 ]
+ - [ "aa", 3, 1590738992000, 2.0 ]
+ - [ "aa", 4, 1590738993000, 2.0 ]
+ - [ "aa", 5, 1590738994000, 2.0 ]
+ - [ "aa", 6, 1590738994000, 2.0 ]
+ - [ "aa", 7, 1590738999000, 1.0 ]
+ - [ "aa", 8, 1590739001000, 2.0 ]
+ - [ "aa", 9, 1590739002000, 2.0 ]
+ - id: 2
+ desc: ROWS_RANGE Window with MaxSize 10 OPEN PRECEDING EXCLUDE CURRENT_TIME
+ mode: disk-unsupport
+ inputs:
+ - columns: [ "c1 string","c3 int","c4 double","c7 timestamp" ]
+ indexs: [ "index1:c1:c7" ]
+ rows:
+ - [ "aa",-1, 1.0, 0 ]
+ - [ "aa", 0, 1.0, 0 ]
+ - [ "aa", 1, 1.0, 1590738990000 ]
+ - [ "aa", 2, 1.0, 1590738990000 ]
+ - [ "aa", 3, 1.0, 1590738992000 ]
+ - [ "aa", 4, 1.0, 1590738993000 ]
+ - [ "aa", 5, 1.0, 1590738994000 ]
+ - [ "aa", 6, 1.0, 1590738994000 ]
+ - [ "aa", 7, 1.0, 1590738999000 ]
+ - [ "aa", 8, 1.0, 1590739001000 ]
+ - [ "aa", 9, 1.0, 1590739002000 ]
+ sql: |
+ SELECT c1, c3, c7,
+ sum(c4) OVER w1 as w1_c4_sum
+ FROM {0} WINDOW
+ w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS_RANGE BETWEEN 4s OPEN PRECEDING AND CURRENT ROW MAXSIZE 10 EXCLUDE CURRENT_TIME);
+ expect:
+ order: c3
+ columns: [ "c1 string", "c3 int", "c7 timestamp", "w1_c4_sum double" ]
+ rows:
+ - [ "aa",-1, 0, 1.0 ]
+ - [ "aa", 0, 0, 1.0 ]
+ - [ "aa", 1, 1590738990000, 1.0 ]
+ - [ "aa", 2, 1590738990000, 1.0 ]
+ - [ "aa", 3, 1590738992000, 3.0 ]
+ - [ "aa", 4, 1590738993000, 4.0 ]
+ - [ "aa", 5, 1590738994000, 3.0 ]
+ - [ "aa", 6, 1590738994000, 3.0 ]
+ - [ "aa", 7, 1590738999000, 1.0 ]
+ - [ "aa", 8, 1590739001000, 2.0 ]
+ - [ "aa", 9, 1590739002000, 3.0 ]
+ - id: 3
+ desc: ROWS Window OPEN PRECEDING EXCLUDE CURRENT_TIME
+ mode: disk-unsupport
+ inputs:
+ - columns: [ "c1 string","c3 int","c4 double","c7 timestamp" ]
+ indexs: [ "index1:c1:c7" ]
+ rows:
+ - [ "aa",-1, 1.0, 0]
+ - [ "aa", 0, 1.0, 0]
+ - [ "aa", 1, 1.0, 1590738990000 ]
+ - [ "aa", 2, 1.0, 1590738990000 ]
+ - [ "aa", 3, 1.0, 1590738992000 ]
+ - [ "aa", 4, 1.0, 1590738993000 ]
+ - [ "aa", 5, 1.0, 1590738994000 ]
+ - [ "aa", 6, 1.0, 1590738994000 ]
+ - [ "aa", 7, 1.0, 1590738999000 ]
+ - [ "aa", 8, 1.0, 1590739001000 ]
+ - [ "aa", 9, 1.0, 1590739002000 ]
+ sql: |
+ SELECT c1, c3, c7,
+ sum(c4) OVER w1 as w1_c4_sum
+ FROM {0} WINDOW
+ w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS BETWEEN 7 OPEN PRECEDING AND CURRENT ROW EXCLUDE CURRENT_TIME);
+ expect:
+ order: c3
+ columns: [ "c1 string", "c3 int", "c7 timestamp", "w1_c4_sum double" ]
+ rows:
+ - [ "aa",-1, 0, 1.0 ]
+ - [ "aa", 0, 0, 1.0 ]
+ - [ "aa", 1, 1590738990000, 3.0 ]
+ - [ "aa", 2, 1590738990000, 3.0 ]
+ - [ "aa", 3, 1590738992000, 5.0 ]
+ - [ "aa", 4, 1590738993000, 6.0 ]
+ - [ "aa", 5, 1590738994000, 7.0 ]
+ - [ "aa", 6, 1590738994000, 7.0 ]
+ - [ "aa", 7, 1590738999000, 7.0 ]
+ - [ "aa", 8, 1590739001000, 7.0 ]
+ - [ "aa", 9, 1590739002000, 7.0 ]
+ - id: 4
+ desc: ROWS_RANGE and ROWS Window OPEN PRECEDING EXCLUDE CURRENT_TIME
+ mode: disk-unsupport
+ inputs:
+ - columns: [ "c1 string","c3 int","c4 double","c7 timestamp" ]
+ indexs: [ "index1:c1:c7" ]
+ rows:
+ - [ "aa",1, 1.0, 1590738990000 ]
+ - [ "aa",2, 1.0, 1590738990000 ]
+ - [ "aa",3, 1.0, 1590738992000 ]
+ - [ "aa",4, 1.0, 1590738993000 ]
+ - [ "aa",5, 1.0, 1590738994000 ]
+ - [ "aa",6, 1.0, 1590738994000 ]
+ - [ "aa",7, 1.0, 1590738999000 ]
+ - [ "aa",8, 1.0, 1590739001000 ]
+ - [ "aa",9, 1.0, 1590739002000 ]
+ sql: |
+ SELECT c1, c3, c7,
+ sum(c4) OVER w1 as w1_c4_sum,
+ sum(c4) OVER w2 as w2_c4_sum
+ FROM {0} WINDOW
+ w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS_RANGE BETWEEN 4s OPEN PRECEDING AND CURRENT ROW EXCLUDE CURRENT_TIME),
+ w2 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS BETWEEN 7 OPEN PRECEDING AND CURRENT ROW EXCLUDE CURRENT_TIME);
+ expect:
+ order: c3
+ columns: [ "c1 string", "c3 int", "c7 timestamp", "w1_c4_sum double", "w2_c4_sum double" ]
+ rows:
+ - [ "aa", 1, 1590738990000, 1.0, 1.0 ]
+ - [ "aa", 2, 1590738990000, 1.0, 1.0 ]
+ - [ "aa", 3, 1590738992000, 3.0, 3.0 ]
+ - [ "aa", 4, 1590738993000, 4.0, 4.0 ]
+ - [ "aa", 5, 1590738994000, 3.0, 5.0 ]
+ - [ "aa", 6, 1590738994000, 3.0, 5.0 ]
+ - [ "aa", 7, 1590738999000, 1.0, 7.0 ]
+ - [ "aa", 8, 1590739001000, 2.0, 7.0 ]
+ - [ "aa", 9, 1590739002000, 3.0, 7.0 ]
+
+ - id: 5
+ mode: offline-unsupport,disk-unsupport
+ desc: ROWS_RANGE Window and EXCLUDE CURRENT_TIME Window
+ inputs:
+ - columns: [ "c1 string","c3 int","c4 double","c7 timestamp" ]
+ indexs: [ "index1:c1:c7" ]
+ rows:
+ - [ "aa",1, 1.0, 1590738990000 ]
+ - [ "aa",2, 1.0, 1590738990000 ]
+ - [ "aa",3, 1.0, 1590738992000 ]
+ - [ "aa",4, 1.0, 1590738993000 ]
+ - [ "aa",5, 1.0, 1590738994000 ]
+ - [ "aa",6, 1.0, 1590738994000 ]
+ - [ "aa",7, 1.0, 1590738999000 ]
+ - [ "aa",8, 1.0, 1590739001000 ]
+ - [ "aa",9, 1.0, 1590739002000 ]
+ sql: |
+ SELECT c1, c3, c7,
+ sum(c4) OVER w1 as w1_c4_sum,
+ sum(c4) OVER w2 as w2_c4_sum
+ FROM {0} WINDOW
+ w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS_RANGE BETWEEN 3s PRECEDING AND CURRENT ROW),
+ w2 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS_RANGE BETWEEN 3s PRECEDING AND CURRENT ROW EXCLUDE CURRENT_TIME);
+ expect:
+ order: c3
+ columns: [ "c1 string", "c3 int", "c7 timestamp", "w1_c4_sum double", "w2_c4_sum double" ]
+ rows:
+ - [ "aa", 1, 1590738990000, 1.0, 1.0 ]
+ - [ "aa", 2, 1590738990000, 2.0, 1.0 ]
+ - [ "aa", 3, 1590738992000, 3.0, 3.0 ]
+ - [ "aa", 4, 1590738993000, 4.0, 4.0 ]
+ - [ "aa", 5, 1590738994000, 3.0, 3.0 ]
+ - [ "aa", 6, 1590738994000, 4.0, 3.0 ]
+ - [ "aa", 7, 1590738999000, 1.0, 1.0 ]
+ - [ "aa", 8, 1590739001000, 2.0, 2.0 ]
+ - [ "aa", 9, 1590739002000, 3.0, 3.0 ]
+ - id: 6
+ desc: ROWS_RANGE Window with MaxSize 2 and EXCLUDE CURRENT_TIME Window
+ mode: offline-unsupport,disk-unsupport
+ inputs:
+ - columns: [ "c1 string","c3 int","c4 double","c7 timestamp" ]
+ indexs: [ "index1:c1:c7" ]
+ rows:
+ - [ "aa",1, 1.0, 1590738990000 ]
+ - [ "aa",2, 1.0, 1590738990000 ]
+ - [ "aa",3, 1.0, 1590738992000 ]
+ - [ "aa",4, 1.0, 1590738993000 ]
+ - [ "aa",5, 1.0, 1590738994000 ]
+ - [ "aa",6, 1.0, 1590738994000 ]
+ - [ "aa",7, 1.0, 1590738999000 ]
+ - [ "aa",8, 1.0, 1590739001000 ]
+ - [ "aa",9, 1.0, 1590739002000 ]
+ sql: |
+ SELECT c1, c3, c7,
+ sum(c4) OVER w1 as w1_c4_sum,
+ sum(c4) OVER w2 as w2_c4_sum
+ FROM {0} WINDOW
+ w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS_RANGE BETWEEN 3s PRECEDING AND CURRENT ROW MAXSIZE 2),
+ w2 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS_RANGE BETWEEN 3s PRECEDING AND CURRENT ROW MAXSIZE 2 EXCLUDE CURRENT_TIME);
+ expect:
+ order: c3
+ columns: [ "c1 string", "c3 int", "c7 timestamp", "w1_c4_sum double", "w2_c4_sum double" ]
+ rows:
+ - [ "aa", 1, 1590738990000, 1.0, 1.0 ]
+ - [ "aa", 2, 1590738990000, 2.0, 1.0 ]
+ - [ "aa", 3, 1590738992000, 2.0, 2.0 ]
+ - [ "aa", 4, 1590738993000, 2.0, 2.0 ]
+ - [ "aa", 5, 1590738994000, 2.0, 2.0 ]
+ - [ "aa", 6, 1590738994000, 2.0, 2.0 ]
+ - [ "aa", 7, 1590738999000, 1.0, 1.0 ]
+ - [ "aa", 8, 1590739001000, 2.0, 2.0 ]
+ - [ "aa", 9, 1590739002000, 2.0, 2.0 ]
+ - id: 7
+ desc: ROWS_RANGE Window with MaxSize 10 and EXCLUDE CURRENT_TIME Window
+ mode: offline-unsupport,disk-unsupport
+ inputs:
+ - columns: [ "c1 string","c3 int","c4 double","c7 timestamp" ]
+ indexs: [ "index1:c1:c7" ]
+ rows:
+ - [ "aa",1, 1.0, 1590738990000 ]
+ - [ "aa",2, 1.0, 1590738990000 ]
+ - [ "aa",3, 1.0, 1590738992000 ]
+ - [ "aa",4, 1.0, 1590738993000 ]
+ - [ "aa",5, 1.0, 1590738994000 ]
+ - [ "aa",6, 1.0, 1590738994000 ]
+ - [ "aa",7, 1.0, 1590738999000 ]
+ - [ "aa",8, 1.0, 1590739001000 ]
+ - [ "aa",9, 1.0, 1590739002000 ]
+ sql: |
+ SELECT c1, c3, c7,
+ sum(c4) OVER w1 as w1_c4_sum,
+ sum(c4) OVER w2 as w2_c4_sum
+ FROM {0} WINDOW
+ w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS_RANGE BETWEEN 3s PRECEDING AND CURRENT ROW MAXSIZE 10),
+ w2 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS_RANGE BETWEEN 3s PRECEDING AND CURRENT ROW MAXSIZE 10 EXCLUDE CURRENT_TIME);
+ expect:
+ order: c3
+ columns: [ "c1 string", "c3 int", "c7 timestamp", "w1_c4_sum double", "w2_c4_sum double" ]
+ rows:
+ - [ "aa", 1, 1590738990000, 1.0, 1.0 ]
+ - [ "aa", 2, 1590738990000, 2.0, 1.0 ]
+ - [ "aa", 3, 1590738992000, 3.0, 3.0 ]
+ - [ "aa", 4, 1590738993000, 4.0, 4.0 ]
+ - [ "aa", 5, 1590738994000, 3.0, 3.0 ]
+ - [ "aa", 6, 1590738994000, 4.0, 3.0 ]
+ - [ "aa", 7, 1590738999000, 1.0, 1.0 ]
+ - [ "aa", 8, 1590739001000, 2.0, 2.0 ]
+ - [ "aa", 9, 1590739002000, 3.0, 3.0 ]
+ - id: 8
+ desc: ROWS Window and EXCLUDE CURRENT_TIME Window
+ mode: offline-unsupport,disk-unsupport
+ inputs:
+ - columns: [ "c1 string","c3 int","c4 double","c7 timestamp" ]
+ indexs: [ "index1:c1:c7" ]
+ rows:
+ - [ "aa",1, 1.0, 1590738990000 ]
+ - [ "aa",2, 1.0, 1590738990000 ]
+ - [ "aa",3, 1.0, 1590738992000 ]
+ - [ "aa",4, 1.0, 1590738993000 ]
+ - [ "aa",5, 1.0, 1590738994000 ]
+ - [ "aa",6, 1.0, 1590738994000 ]
+ - [ "aa",7, 1.0, 1590738999000 ]
+ - [ "aa",8, 1.0, 1590739001000 ]
+ - [ "aa",9, 1.0, 1590739002000 ]
+ sql: |
+ SELECT c1, c3, c7,
+ sum(c4) OVER w1 as w1_c4_sum,
+ sum(c4) OVER w2 as w2_c4_sum
+ FROM {0} WINDOW
+ w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS BETWEEN 6 PRECEDING AND CURRENT ROW),
+ w2 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS BETWEEN 6 PRECEDING AND CURRENT ROW EXCLUDE CURRENT_TIME);
+ expect:
+ order: c3
+ columns: [ "c1 string", "c3 int", "c7 timestamp", "w1_c4_sum double", "w2_c4_sum double" ]
+ rows:
+ - [ "aa", 1, 1590738990000, 1.0, 1.0 ]
+ - [ "aa", 2, 1590738990000, 2.0, 1.0 ]
+ - [ "aa", 3, 1590738992000, 3.0, 3.0 ]
+ - [ "aa", 4, 1590738993000, 4.0, 4.0 ]
+ - [ "aa", 5, 1590738994000, 5.0, 5.0 ]
+ - [ "aa", 6, 1590738994000, 6.0, 5.0 ]
+ - [ "aa", 7, 1590738999000, 7.0, 7.0 ]
+ - [ "aa", 8, 1590739001000, 7.0, 7.0 ]
+ - [ "aa", 9, 1590739002000, 7.0, 7.0 ]
+ - id: 9
+ desc: ROWS_RANGE and ROWS Window and EXCLUDE CURRENT_TIME Window
+ mode: offline-unsupport,disk-unsupport
+ inputs:
+ - columns: [ "c1 string","c3 int","c4 double","c7 timestamp" ]
+ indexs: [ "index1:c1:c7" ]
+ rows:
+ - [ "aa",1, 1.0, 1590738990000 ]
+ - [ "aa",2, 1.0, 1590738990000 ]
+ - [ "aa",3, 1.0, 1590738992000 ]
+ - [ "aa",4, 1.0, 1590738993000 ]
+ - [ "aa",5, 1.0, 1590738994000 ]
+ - [ "aa",6, 1.0, 1590738994000 ]
+ - [ "aa",7, 1.0, 1590738999000 ]
+ - [ "aa",8, 1.0, 1590739001000 ]
+ - [ "aa",9, 1.0, 1590739002000 ]
+ sql: |
+ SELECT c1, c3, c7,
+ sum(c4) OVER w1 as w1_c4_sum,
+ sum(c4) OVER w2 as w2_c4_sum,
+ sum(c4) OVER w3 as w3_c4_sum,
+ sum(c4) OVER w4 as w4_c4_sum,
+ sum(c4) OVER w5 as w5_c4_sum,
+ sum(c4) OVER w6 as w6_c4_sum
+ FROM {0} WINDOW
+ w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS_RANGE BETWEEN 3s PRECEDING AND CURRENT ROW),
+ w2 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS_RANGE BETWEEN 3s PRECEDING AND CURRENT ROW EXCLUDE CURRENT_TIME),
+ w3 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS_RANGE BETWEEN 3s PRECEDING AND CURRENT ROW MAXSIZE 2),
+ w4 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS_RANGE BETWEEN 3s PRECEDING AND CURRENT ROW MAXSIZE 2 EXCLUDE CURRENT_TIME),
+ w5 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS BETWEEN 6 PRECEDING AND CURRENT ROW),
+ w6 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS BETWEEN 6 PRECEDING AND CURRENT ROW EXCLUDE CURRENT_TIME);
+ expect:
+ order: c3
+ columns: [ "c1 string", "c3 int", "c7 timestamp", "w1_c4_sum double", "w2_c4_sum double",
+ "w3_c4_sum double", "w4_c4_sum double",
+ "w5_c4_sum double", "w6_c4_sum double" ]
+ rows:
+ - [ "aa", 1, 1590738990000, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0 ]
+ - [ "aa", 2, 1590738990000, 2.0, 1.0, 2.0, 1.0, 2.0, 1.0 ]
+ - [ "aa", 3, 1590738992000, 3.0, 3.0, 2.0, 2.0, 3.0, 3.0 ]
+ - [ "aa", 4, 1590738993000, 4.0, 4.0, 2.0, 2.0, 4.0, 4.0 ]
+ - [ "aa", 5, 1590738994000, 3.0, 3.0, 2.0, 2.0, 5.0, 5.0 ]
+ - [ "aa", 6, 1590738994000, 4.0, 3.0, 2.0, 2.0, 6.0, 5.0 ]
+ - [ "aa", 7, 1590738999000, 1.0, 1.0, 1.0, 1.0, 7.0, 7.0 ]
+ - [ "aa", 8, 1590739001000, 2.0, 2.0, 2.0, 2.0, 7.0, 7.0 ]
+ - [ "aa", 9, 1590739002000, 3.0, 3.0, 2.0, 2.0, 7.0, 7.0 ]
+
+ - id: 10
+ desc: ROWS_RANGE Window OPEN PRECEDING and EXCLUDE CURRENT_TIME Window
+ mode: offline-unsupport,disk-unsupport
+ inputs:
+ - columns: [ "c1 string","c3 int","c4 double","c7 timestamp" ]
+ indexs: [ "index1:c1:c7" ]
+ rows:
+ - [ "aa",1, 1.0, 1590738990000 ]
+ - [ "aa",2, 1.0, 1590738990000 ]
+ - [ "aa",3, 1.0, 1590738992000 ]
+ - [ "aa",4, 1.0, 1590738993000 ]
+ - [ "aa",5, 1.0, 1590738994000 ]
+ - [ "aa",6, 1.0, 1590738994000 ]
+ - [ "aa",7, 1.0, 1590738999000 ]
+ - [ "aa",8, 1.0, 1590739001000 ]
+ - [ "aa",9, 1.0, 1590739002000 ]
+ sql: |
+ SELECT c1, c3, c7,
+ sum(c4) OVER w1 as w1_c4_sum,
+ sum(c4) OVER w2 as w2_c4_sum
+ FROM {0} WINDOW
+ w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS_RANGE BETWEEN 4s OPEN PRECEDING AND CURRENT ROW),
+ w2 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS_RANGE BETWEEN 4s OPEN PRECEDING AND CURRENT ROW EXCLUDE CURRENT_TIME);
+ expect:
+ order: c3
+ columns: [ "c1 string", "c3 int", "c7 timestamp", "w1_c4_sum double", "w2_c4_sum double" ]
+ rows:
+ - [ "aa", 1, 1590738990000, 1.0, 1.0 ]
+ - [ "aa", 2, 1590738990000, 2.0, 1.0 ]
+ - [ "aa", 3, 1590738992000, 3.0, 3.0 ]
+ - [ "aa", 4, 1590738993000, 4.0, 4.0 ]
+ - [ "aa", 5, 1590738994000, 3.0, 3.0 ]
+ - [ "aa", 6, 1590738994000, 4.0, 3.0 ]
+ - [ "aa", 7, 1590738999000, 1.0, 1.0 ]
+ - [ "aa", 8, 1590739001000, 2.0, 2.0 ]
+ - [ "aa", 9, 1590739002000, 3.0, 3.0 ]
+ - id: 11
+ desc: ROWS_RANGE Window with MaxSize 2 OPEN PRECEDING and EXCLUDE CURRENT_TIME Window
+ mode: offline-unsupport,disk-unsupport
+ inputs:
+ - columns: [ "c1 string","c3 int","c4 double","c7 timestamp" ]
+ indexs: [ "index1:c1:c7" ]
+ rows:
+ - [ "aa",1, 1.0, 1590738990000 ]
+ - [ "aa",2, 1.0, 1590738990000 ]
+ - [ "aa",3, 1.0, 1590738992000 ]
+ - [ "aa",4, 1.0, 1590738993000 ]
+ - [ "aa",5, 1.0, 1590738994000 ]
+ - [ "aa",6, 1.0, 1590738994000 ]
+ - [ "aa",7, 1.0, 1590738999000 ]
+ - [ "aa",8, 1.0, 1590739001000 ]
+ - [ "aa",9, 1.0, 1590739002000 ]
+ sql: |
+ SELECT c1, c3, c7,
+ sum(c4) OVER w1 as w1_c4_sum,
+ sum(c4) OVER w2 as w2_c4_sum
+ FROM {0} WINDOW
+ w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS_RANGE BETWEEN 4s OPEN PRECEDING AND CURRENT ROW MAXSIZE 2),
+ w2 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS_RANGE BETWEEN 4s OPEN PRECEDING AND CURRENT ROW MAXSIZE 2 EXCLUDE CURRENT_TIME);
+ expect:
+ order: c3
+ columns: [ "c1 string", "c3 int", "c7 timestamp", "w1_c4_sum double", "w2_c4_sum double" ]
+ rows:
+ - [ "aa", 1, 1590738990000, 1.0, 1.0 ]
+ - [ "aa", 2, 1590738990000, 2.0, 1.0 ]
+ - [ "aa", 3, 1590738992000, 2.0, 2.0 ]
+ - [ "aa", 4, 1590738993000, 2.0, 2.0 ]
+ - [ "aa", 5, 1590738994000, 2.0, 2.0 ]
+ - [ "aa", 6, 1590738994000, 2.0, 2.0 ]
+ - [ "aa", 7, 1590738999000, 1.0, 1.0 ]
+ - [ "aa", 8, 1590739001000, 2.0, 2.0 ]
+ - [ "aa", 9, 1590739002000, 2.0, 2.0 ]
+ - id: 12
+ desc: ROWS_RANGE Window with MaxSize 10 OPEN PRECEDING and EXCLUDE CURRENT_TIME Window
+ mode: offline-unsupport,disk-unsupport
+ inputs:
+ - columns: [ "c1 string","c3 int","c4 double","c7 timestamp" ]
+ indexs: [ "index1:c1:c7" ]
+ rows:
+ - [ "aa",1, 1.0, 1590738990000 ]
+ - [ "aa",2, 1.0, 1590738990000 ]
+ - [ "aa",3, 1.0, 1590738992000 ]
+ - [ "aa",4, 1.0, 1590738993000 ]
+ - [ "aa",5, 1.0, 1590738994000 ]
+ - [ "aa",6, 1.0, 1590738994000 ]
+ - [ "aa",7, 1.0, 1590738999000 ]
+ - [ "aa",8, 1.0, 1590739001000 ]
+ - [ "aa",9, 1.0, 1590739002000 ]
+ sql: |
+ SELECT c1, c3, c7,
+ sum(c4) OVER w1 as w1_c4_sum,
+ sum(c4) OVER w2 as w2_c4_sum
+ FROM {0} WINDOW
+ w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS_RANGE BETWEEN 4s OPEN PRECEDING AND CURRENT ROW MAXSIZE 10),
+ w2 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS_RANGE BETWEEN 4s OPEN PRECEDING AND CURRENT ROW MAXSIZE 10 EXCLUDE CURRENT_TIME);
+ expect:
+ order: c3
+ columns: [ "c1 string", "c3 int", "c7 timestamp", "w1_c4_sum double", "w2_c4_sum double" ]
+ rows:
+ - [ "aa", 1, 1590738990000, 1.0, 1.0 ]
+ - [ "aa", 2, 1590738990000, 2.0, 1.0 ]
+ - [ "aa", 3, 1590738992000, 3.0, 3.0 ]
+ - [ "aa", 4, 1590738993000, 4.0, 4.0 ]
+ - [ "aa", 5, 1590738994000, 3.0, 3.0 ]
+ - [ "aa", 6, 1590738994000, 4.0, 3.0 ]
+ - [ "aa", 7, 1590738999000, 1.0, 1.0 ]
+ - [ "aa", 8, 1590739001000, 2.0, 2.0 ]
+ - [ "aa", 9, 1590739002000, 3.0, 3.0 ]
+ - id: 13
+ desc: ROWS Window OPEN PRECEDING and EXCLUDE CURRENT_TIME Window
+ mode: offline-unsupport,disk-unsupport
+ inputs:
+ - columns: [ "c1 string","c3 int","c4 double","c7 timestamp" ]
+ indexs: [ "index1:c1:c7" ]
+ rows:
+ - [ "aa",1, 1.0, 1590738990000 ]
+ - [ "aa",2, 1.0, 1590738990000 ]
+ - [ "aa",3, 1.0, 1590738992000 ]
+ - [ "aa",4, 1.0, 1590738993000 ]
+ - [ "aa",5, 1.0, 1590738994000 ]
+ - [ "aa",6, 1.0, 1590738994000 ]
+ - [ "aa",7, 1.0, 1590738999000 ]
+ - [ "aa",8, 1.0, 1590739001000 ]
+ - [ "aa",9, 1.0, 1590739002000 ]
+ sql: |
+ SELECT c1, c3, c7,
+ sum(c4) OVER w1 as w1_c4_sum,
+ sum(c4) OVER w2 as w2_c4_sum
+ FROM {0} WINDOW
+ w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS BETWEEN 7 OPEN PRECEDING AND CURRENT ROW),
+ w2 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS BETWEEN 7 OPEN PRECEDING AND CURRENT ROW EXCLUDE CURRENT_TIME);
+ expect:
+ order: c3
+ columns: [ "c1 string", "c3 int", "c7 timestamp", "w1_c4_sum double", "w2_c4_sum double" ]
+ rows:
+ - [ "aa", 1, 1590738990000, 1.0, 1.0 ]
+ - [ "aa", 2, 1590738990000, 2.0, 1.0 ]
+ - [ "aa", 3, 1590738992000, 3.0, 3.0 ]
+ - [ "aa", 4, 1590738993000, 4.0, 4.0 ]
+ - [ "aa", 5, 1590738994000, 5.0, 5.0 ]
+ - [ "aa", 6, 1590738994000, 6.0, 5.0 ]
+ - [ "aa", 7, 1590738999000, 7.0, 7.0 ]
+ - [ "aa", 8, 1590739001000, 7.0, 7.0 ]
+ - [ "aa", 9, 1590739002000, 7.0, 7.0 ]
+ - id: 14
+ desc: ROWS_RANGE and ROWS Window OPEN PRECEDING and EXCLUDE CURRENT_TIME Window
+ mode: offline-unsupport,disk-unsupport
+ inputs:
+ - columns: [ "c1 string","c3 int","c4 double","c7 timestamp" ]
+ indexs: [ "index1:c1:c7" ]
+ rows:
+ - [ "aa",1, 1.0, 1590738990000 ]
+ - [ "aa",2, 1.0, 1590738990000 ]
+ - [ "aa",3, 1.0, 1590738992000 ]
+ - [ "aa",4, 1.0, 1590738993000 ]
+ - [ "aa",5, 1.0, 1590738994000 ]
+ - [ "aa",6, 1.0, 1590738994000 ]
+ - [ "aa",7, 1.0, 1590738999000 ]
+ - [ "aa",8, 1.0, 1590739001000 ]
+ - [ "aa",9, 1.0, 1590739002000 ]
+ sql: |
+ SELECT c1, c3, c7,
+ sum(c4) OVER w1 as w1_c4_sum,
+ sum(c4) OVER w2 as w2_c4_sum,
+ sum(c4) OVER w3 as w3_c4_sum,
+ sum(c4) OVER w4 as w4_c4_sum,
+ sum(c4) OVER w5 as w5_c4_sum,
+ sum(c4) OVER w6 as w6_c4_sum
+ FROM {0} WINDOW
+ w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS_RANGE BETWEEN 4s OPEN PRECEDING AND CURRENT ROW),
+ w2 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS_RANGE BETWEEN 4s OPEN PRECEDING AND CURRENT ROW EXCLUDE CURRENT_TIME),
+ w3 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS_RANGE BETWEEN 4s OPEN PRECEDING AND CURRENT ROW MAXSIZE 2),
+ w4 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS_RANGE BETWEEN 4s OPEN PRECEDING AND CURRENT ROW MAXSIZE 2 EXCLUDE CURRENT_TIME),
+ w5 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS BETWEEN 7 OPEN PRECEDING AND CURRENT ROW),
+ w6 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS BETWEEN 7 OPEN PRECEDING AND CURRENT ROW EXCLUDE CURRENT_TIME);
+ expect:
+ order: c3
+ columns: [ "c1 string", "c3 int", "c7 timestamp", "w1_c4_sum double", "w2_c4_sum double",
+ "w3_c4_sum double", "w4_c4_sum double",
+ "w5_c4_sum double", "w6_c4_sum double" ]
+ rows:
+ - [ "aa", 1, 1590738990000, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0 ]
+ - [ "aa", 2, 1590738990000, 2.0, 1.0, 2.0, 1.0, 2.0, 1.0 ]
+ - [ "aa", 3, 1590738992000, 3.0, 3.0, 2.0, 2.0, 3.0, 3.0 ]
+ - [ "aa", 4, 1590738993000, 4.0, 4.0, 2.0, 2.0, 4.0, 4.0 ]
+ - [ "aa", 5, 1590738994000, 3.0, 3.0, 2.0, 2.0, 5.0, 5.0 ]
+ - [ "aa", 6, 1590738994000, 4.0, 3.0, 2.0, 2.0, 6.0, 5.0 ]
+ - [ "aa", 7, 1590738999000, 1.0, 1.0, 1.0, 1.0, 7.0, 7.0 ]
+ - [ "aa", 8, 1590739001000, 2.0, 2.0, 2.0, 2.0, 7.0, 7.0 ]
+ - [ "aa", 9, 1590739002000, 3.0, 3.0, 2.0, 2.0, 7.0, 7.0 ]
+ - id: 16
+ desc: ROWS_RANGE and ROWS Window, mix of all window variants
+ mode: offline-unsupport,disk-unsupport
+ inputs:
+ - columns: [ "c1 string","c3 int","c4 double","c7 timestamp" ]
+ indexs: [ "index1:c1:c7" ]
+ rows:
+ - [ "aa",1, 1.0, 1590738990000 ]
+ - [ "aa",2, 1.0, 1590738990000 ]
+ - [ "aa",3, 1.0, 1590738992000 ]
+ - [ "aa",4, 1.0, 1590738993000 ]
+ - [ "aa",5, 1.0, 1590738994000 ]
+ - [ "aa",6, 1.0, 1590738994000 ]
+ - [ "aa",7, 1.0, 1590738999000 ]
+ - [ "aa",8, 1.0, 1590739001000 ]
+ - [ "aa",9, 1.0, 1590739002000 ]
+ sql: |
+ SELECT c1, c3, c7,
+ sum(c4) OVER w1 as w1_c4_sum,
+ sum(c4) OVER w2 as w2_c4_sum,
+ sum(c4) OVER w3 as w3_c4_sum,
+ sum(c4) OVER w4 as w4_c4_sum,
+ sum(c4) OVER w5 as w5_c4_sum,
+ sum(c4) OVER w6 as w6_c4_sum,
+ sum(c4) OVER w7 as w7_c4_sum,
+ sum(c4) OVER w8 as w8_c4_sum,
+ sum(c4) OVER w9 as w9_c4_sum,
+ sum(c4) OVER w10 as w10_c4_sum,
+ sum(c4) OVER w11 as w11_c4_sum,
+ sum(c4) OVER w12 as w12_c4_sum
+ FROM {0} WINDOW
+ w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS_RANGE BETWEEN 3s PRECEDING AND CURRENT ROW),
+ w2 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS_RANGE BETWEEN 3s PRECEDING AND CURRENT ROW EXCLUDE CURRENT_TIME),
+ w3 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS_RANGE BETWEEN 3s PRECEDING AND CURRENT ROW MAXSIZE 2),
+ w4 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS_RANGE BETWEEN 3s PRECEDING AND CURRENT ROW MAXSIZE 2 EXCLUDE CURRENT_TIME),
+ w5 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS BETWEEN 6 PRECEDING AND CURRENT ROW),
+ w6 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS BETWEEN 6 PRECEDING AND CURRENT ROW EXCLUDE CURRENT_TIME),
+ w7 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS_RANGE BETWEEN 4s OPEN PRECEDING AND CURRENT ROW),
+ w8 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS_RANGE BETWEEN 4s OPEN PRECEDING AND CURRENT ROW EXCLUDE CURRENT_TIME),
+ w9 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS_RANGE BETWEEN 4s OPEN PRECEDING AND CURRENT ROW MAXSIZE 2),
+ w10 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS_RANGE BETWEEN 4s OPEN PRECEDING AND CURRENT ROW MAXSIZE 2 EXCLUDE CURRENT_TIME),
+ w11 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS BETWEEN 7 OPEN PRECEDING AND CURRENT ROW),
+ w12 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS BETWEEN 7 OPEN PRECEDING AND CURRENT ROW EXCLUDE CURRENT_TIME);
+ expect:
+ order: c3
+ columns: [ "c1 string", "c3 int", "c7 timestamp",
+ "w1_c4_sum double", "w2_c4_sum double",
+ "w3_c4_sum double", "w4_c4_sum double",
+ "w5_c4_sum double", "w6_c4_sum double",
+ "w7_c4_sum double", "w8_c4_sum double",
+ "w9_c4_sum double", "w10_c4_sum double",
+ "w11_c4_sum double", "w12_c4_sum double" ]
+ rows:
+ - [ "aa", 1, 1590738990000, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0 ]
+ - [ "aa", 2, 1590738990000, 2.0, 1.0, 2.0, 1.0, 2.0, 1.0, 2.0, 1.0, 2.0, 1.0, 2.0, 1.0 ]
+ - [ "aa", 3, 1590738992000, 3.0, 3.0, 2.0, 2.0, 3.0, 3.0, 3.0, 3.0, 2.0, 2.0, 3.0, 3.0 ]
+ - [ "aa", 4, 1590738993000, 4.0, 4.0, 2.0, 2.0, 4.0, 4.0, 4.0, 4.0, 2.0, 2.0, 4.0, 4.0 ]
+ - [ "aa", 5, 1590738994000, 3.0, 3.0, 2.0, 2.0, 5.0, 5.0, 3.0, 3.0, 2.0, 2.0, 5.0, 5.0 ]
+ - [ "aa", 6, 1590738994000, 4.0, 3.0, 2.0, 2.0, 6.0, 5.0, 4.0, 3.0, 2.0, 2.0, 6.0, 5.0 ]
+ - [ "aa", 7, 1590738999000, 1.0, 1.0, 1.0, 1.0, 7.0, 7.0, 1.0, 1.0, 1.0, 1.0, 7.0, 7.0 ]
+ - [ "aa", 8, 1590739001000, 2.0, 2.0, 2.0, 2.0, 7.0, 7.0, 2.0, 2.0, 2.0, 2.0, 7.0, 7.0 ]
+ - [ "aa", 9, 1590739002000, 3.0, 3.0, 2.0, 2.0, 7.0, 7.0, 3.0, 3.0, 2.0, 2.0, 7.0, 7.0 ]
+ - id: 17
+ desc: ROWS Window with same timestamp
+ mode: offline-unsupport,disk-unsupport
+ inputs:
+ - columns: [ "c1 string","c3 int","c4 double","c7 timestamp" ]
+ indexs: [ "index1:c1:c7" ]
+ rows:
+ - [ "aa",1, 1.0, 1590738990000 ]
+ - [ "aa",2, 1.0, 1590738990000 ]
+ - [ "aa",3, 1.0, 1590738990000 ]
+ - [ "aa",4, 1.0, 1590738990000 ]
+ - [ "aa",5, 1.0, 1590738990000 ]
+ - [ "aa",6, 1.0, 1590738990000 ]
+ - [ "aa",7, 1.0, 1590738991000 ]
+ - [ "aa",8, 1.0, 1590738992000 ]
+ - [ "aa",9, 1.0, 1590738993000 ]
+ sql: |
+ SELECT c1, c3, c7,
+ sum(c4) OVER w1 as w1_c4_sum
+ FROM {0} WINDOW
+ w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS BETWEEN 3 PRECEDING AND CURRENT ROW);
+ expect:
+ order: c3
+ columns: [ "c1 string", "c3 int", "c7 timestamp", "w1_c4_sum double"]
+ rows:
+ - [ "aa", 1, 1590738990000, 1.0]
+ - [ "aa", 2, 1590738990000, 2.0]
+ - [ "aa", 3, 1590738990000, 3.0]
+ - [ "aa", 4, 1590738990000, 4.0]
+ - [ "aa", 5, 1590738990000, 4.0]
+ - [ "aa", 6, 1590738990000, 4.0]
+ - [ "aa", 7, 1590738991000, 4.0]
+ - [ "aa", 8, 1590738992000, 4.0]
+ - [ "aa", 9, 1590738993000, 4.0]
+ - id: 18
+ desc: ROWS Window with same timestamp EXCLUDE CURRENT_TIME
+ mode: disk-unsupport
+ inputs:
+ - columns: [ "c1 string","c3 int","c4 double","c7 timestamp" ]
+ indexs: [ "index1:c1:c7" ]
+ rows:
+ - [ "aa",1, 1.0, 1590738990000 ]
+ - [ "aa",2, 1.0, 1590738990000 ]
+ - [ "aa",3, 1.0, 1590738990000 ]
+ - [ "aa",4, 1.0, 1590738990000 ]
+ - [ "aa",5, 1.0, 1590738990000 ]
+ - [ "aa",6, 1.0, 1590738990000 ]
+ - [ "aa",7, 1.0, 1590738991000 ]
+ - [ "aa",8, 1.0, 1590738992000 ]
+ - [ "aa",9, 1.0, 1590738993000 ]
+ sql: |
+ SELECT c1, c3, c7,
+ sum(c4) OVER w1 as w1_c4_sum
+ FROM {0} WINDOW
+ w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS BETWEEN 3 PRECEDING AND CURRENT ROW EXCLUDE CURRENT_TIME);
+ expect:
+ order: c3
+ columns: [ "c1 string", "c3 int", "c7 timestamp", "w1_c4_sum double"]
+ rows:
+ - [ "aa", 1, 1590738990000, 1.0]
+ - [ "aa", 2, 1590738990000, 1.0]
+ - [ "aa", 3, 1590738990000, 1.0]
+ - [ "aa", 4, 1590738990000, 1.0]
+ - [ "aa", 5, 1590738990000, 1.0]
+ - [ "aa", 6, 1590738990000, 1.0]
+ - [ "aa", 7, 1590738991000, 4.0]
+ - [ "aa", 8, 1590738992000, 4.0]
+ - [ "aa", 9, 1590738993000, 4.0]
+ - id: 19
+ desc: ROWS, ROWS_RANGE Window, Normal Window, OPEN Window, EXCLUDE CURRENT_TIME Window
+ mode: batch-unsupport,disk-unsupport
+ tags: ["@chendihao, @baoxinqi, 测试的时候spark需要保证输入数据滑入顺序"]
+ inputs:
+ - columns: [ "c1 string","c3 int","c4 double","c7 timestamp" ]
+ indexs: [ "index1:c1:c7" ]
+ rows:
+ - [ "aa",1, 1.0, 1590738990000 ]
+ - [ "aa",2, 1.0, 1590738990000 ]
+ - [ "aa",3, 1.0, 1590738990000 ]
+ - [ "aa",4, 1.0, 1590738990000 ]
+ - [ "aa",5, 1.0, 1590738990000 ]
+ - [ "aa",6, 1.0, 1590738990000 ]
+ - [ "aa",7, 1.0, 1590738991000 ]
+ - [ "aa",8, 1.0, 1590738992000 ]
+ - [ "aa",9, 1.0, 1590738993000 ]
+ sql: |
+ SELECT c1, c3, c7,
+ sum(c4) OVER w1 as w1_c4_sum,
+ sum(c4) OVER w2 as w2_c4_sum,
+ sum(c4) OVER w3 as w3_c4_sum,
+ sum(c4) OVER w4 as w4_c4_sum,
+ sum(c4) OVER w5 as w5_c4_sum,
+ sum(c4) OVER w6 as w6_c4_sum
+ FROM {0} WINDOW
+ w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS BETWEEN 3 PRECEDING AND CURRENT ROW),
+ w2 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS BETWEEN 3 OPEN PRECEDING AND CURRENT ROW),
+ w3 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS BETWEEN 3 OPEN PRECEDING AND CURRENT ROW EXCLUDE CURRENT_TIME),
+ w4 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS_RANGE BETWEEN 3s PRECEDING AND CURRENT ROW),
+ w5 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS_RANGE BETWEEN 3s OPEN PRECEDING AND CURRENT ROW),
+ w6 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS_RANGE BETWEEN 3s OPEN PRECEDING AND CURRENT ROW EXCLUDE CURRENT_TIME);
+
+ expect:
+ order: c3
+ columns: [ "c1 string", "c3 int", "c7 timestamp", "w1_c4_sum double", "w2_c4_sum double", "w3_c4_sum double",
+ "w4_c4_sum double", "w5_c4_sum double", "w6_c4_sum double"]
+ rows:
+ - [ "aa", 1, 1590738990000, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0]
+ - [ "aa", 2, 1590738990000, 2.0, 2.0, 1.0, 2.0, 2.0, 1.0]
+ - [ "aa", 3, 1590738990000, 3.0, 3.0, 1.0, 3.0, 3.0, 1.0]
+ - [ "aa", 4, 1590738990000, 4.0, 3.0, 1.0, 4.0, 4.0, 1.0]
+ - [ "aa", 5, 1590738990000, 4.0, 3.0, 1.0, 5.0, 5.0, 1.0]
+ - [ "aa", 6, 1590738990000, 4.0, 3.0, 1.0, 6.0, 6.0, 1.0]
+ - [ "aa", 7, 1590738991000, 4.0, 3.0, 3.0, 7.0, 7.0, 7.0]
+ - [ "aa", 8, 1590738992000, 4.0, 3.0, 3.0, 8.0, 8.0, 8.0]
+ - [ "aa", 9, 1590738993000, 4.0, 3.0, 3.0, 9.0, 3.0, 3.0]
diff --git a/cases/integration_test/window/test_window_row.yaml b/cases/integration_test/window/test_window_row.yaml
new file mode 100644
index 00000000000..f5ca19ae890
--- /dev/null
+++ b/cases/integration_test/window/test_window_row.yaml
@@ -0,0 +1,847 @@
+# Copyright 2021 4Paradigm
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+db: test_zw
+debugs: []
+version: 0.5.0
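+# Conventions assumed by the cases in this file (a brief, non-normative summary): {0}
+# refers to the table built from the first entry under `inputs`, and each `indexs` element
+# follows the pattern index_name:key_column:ts_column, with multiple key columns joined
+# by `|` (for example "index1:c1|c3:c7").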
+cases:
+ -
+ id: 0
+ desc: string as partition by
+ inputs:
+ -
+ columns: ["c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"]
+ indexs: ["index1:c1:c7"]
+ rows:
+ - ["aa",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - ["aa",21,31,1.2,2.2,1590738991000,"2020-05-02"]
+ - ["aa",22,32,1.3,2.3,1590738992000,"2020-05-03"]
+ - ["aa",23,33,1.4,2.4,1590738993000,"2020-05-04"]
+ - ["bb",24,34,1.5,2.5,1590738994000,"2020-05-05"]
+ sql: |
+ SELECT c1, c3, sum(c4) OVER w1 as w1_c4_sum FROM {0} WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS BETWEEN 2 PRECEDING AND CURRENT ROW);
+ expect:
+ order: c3
+ columns: ["c1 string","c3 int","w1_c4_sum bigint"]
+ rows:
+ - ["aa",20,30]
+ - ["aa",21,61]
+ - ["aa",22,93]
+ - ["aa",23,96]
+ - ["bb",24,34]
+ -
+ id: 1
+ desc: int as partition by
+ inputs:
+ -
+ columns : ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"]
+ indexs: ["index1:c3:c7"]
+ rows:
+ - [1,"aa",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,31,1.2,2.2,1590738991000,"2020-05-02"]
+ - [3,"cc",20,32,1.3,2.3,1590738992000,"2020-05-03"]
+ - [4,"dd",20,33,1.4,2.4,1590738993000,"2020-05-04"]
+ - [5,"ee",21,34,1.5,2.5,1590738994000,"2020-05-05"]
+ sql: |
+ SELECT id, c1, c3, sum(c4) OVER w1 as w1_c4_sum FROM {0} WINDOW w1 AS (PARTITION BY {0}.c3 ORDER BY {0}.c7 ROWS BETWEEN 2 PRECEDING AND CURRENT ROW);
+ expect:
+ order: id
+ columns: ["id int","c1 string","c3 int","w1_c4_sum bigint"]
+ rows:
+ - [1,"aa",20,30]
+ - [2,"bb",20,61]
+ - [3,"cc",20,93]
+ - [4,"dd",20,96]
+ - [5,"ee",21,34]
+ -
+ id: 2
+ desc: float as partition by - no index hit
+ mode: rtidb-unsupport
+ inputs:
+ -
+ columns : ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"]
+ indexs: ["index1:c1:c7"]
+ rows:
+ - [1,"aa",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,31,1.1,2.2,1590738991000,"2020-05-02"]
+ - [3,"cc",20,32,1.1,2.3,1590738992000,"2020-05-03"]
+ - [4,"dd",20,33,1.1,2.4,1590738993000,"2020-05-04"]
+ - [5,"ee",21,34,1.2,2.5,1590738994000,"2020-05-05"]
+ sql: |
+ SELECT id, c1, c5, sum(c4) OVER w1 as w1_c4_sum FROM {0} WINDOW w1 AS (PARTITION BY {0}.c5 ORDER BY {0}.c7 ROWS BETWEEN 2 PRECEDING AND CURRENT ROW);
+ expect:
+ order: id
+ columns: ["id int","c1 string","c5 float","w1_c4_sum bigint"]
+ rows:
+ - [1,"aa",1.1,30]
+ - [2,"bb",1.1,61]
+ - [3,"cc",1.1,93]
+ - [4,"dd",1.1,96]
+ - [5,"ee",1.2,34]
+ -
+ id: 3
+ desc: double as partition by - no index hit
+ mode: rtidb-unsupport
+ inputs:
+ -
+ columns : ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"]
+ indexs: ["index1:c1:c7"]
+ rows:
+ - [1,"aa",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,31,1.1,2.1,1590738991000,"2020-05-02"]
+ - [3,"cc",20,32,1.1,2.1,1590738992000,"2020-05-03"]
+ - [4,"dd",20,33,1.1,2.1,1590738993000,"2020-05-04"]
+ - [5,"ee",21,34,1.2,2.2,1590738994000,"2020-05-05"]
+ sql: |
+ SELECT id, c1, c6, sum(c4) OVER w1 as w1_c4_sum FROM {0} WINDOW w1 AS (PARTITION BY {0}.c6 ORDER BY {0}.c7 ROWS BETWEEN 2 PRECEDING AND CURRENT ROW);
+ expect:
+ order: id
+ columns: ["id int","c1 string","c6 double","w1_c4_sum bigint"]
+ rows:
+ - [1,"aa",2.1,30]
+ - [2,"bb",2.1,61]
+ - [3,"cc",2.1,93]
+ - [4,"dd",2.1,96]
+ - [5,"ee",2.2,34]
+ -
+ id: 4
+ desc: date as partition by
+ inputs:
+ -
+ columns : ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"]
+ indexs: ["index1:c8:c7"]
+ rows:
+ - [1,"aa",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,31,1.1,2.1,1590738991000,"2020-05-01"]
+ - [3,"cc",20,32,1.1,2.1,1590738992000,"2020-05-01"]
+ - [4,"dd",20,33,1.1,2.1,1590738993000,"2020-05-01"]
+ - [5,"ee",21,34,1.2,2.2,1590738994000,"2020-05-02"]
+ sql: |
+ SELECT id, c1, c8, sum(c4) OVER w1 as w1_c4_sum FROM {0} WINDOW w1 AS (PARTITION BY {0}.c8 ORDER BY {0}.c7 ROWS BETWEEN 2 PRECEDING AND CURRENT ROW);
+ expect:
+ order: id
+ columns: ["id int","c1 string","c8 date","w1_c4_sum bigint"]
+ rows:
+ - [1,"aa","2020-05-01",30]
+ - [2,"bb","2020-05-01",61]
+ - [3,"cc","2020-05-01",93]
+ - [4,"dd","2020-05-01",96]
+ - [5,"ee","2020-05-02",34]
+ -
+ id: 5
+ desc: timestamp as partition by
+ inputs:
+ -
+ columns : ["id bigint","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"]
+ indexs: ["index1:c7:id"]
+ rows:
+ - [1,"aa",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,31,1.1,2.1,1590738990000,"2020-05-01"]
+ - [3,"cc",20,32,1.1,2.1,1590738990000,"2020-05-01"]
+ - [4,"dd",20,33,1.1,2.1,1590738990000,"2020-05-01"]
+ - [5,"ee",21,34,1.2,2.2,1590738991000,"2020-05-02"]
+ sql: |
+ SELECT id, c1, c7, sum(c4) OVER w1 as w1_c4_sum FROM {0} WINDOW w1 AS (PARTITION BY {0}.c7 ORDER BY {0}.id ROWS BETWEEN 2 PRECEDING AND CURRENT ROW);
+ expect:
+ order: id
+ columns: ["id bigint","c1 string","c7 timestamp","w1_c4_sum bigint"]
+ rows:
+ - [1,"aa",1590738990000,30]
+ - [2,"bb",1590738990000,61]
+ - [3,"cc",1590738990000,93]
+ - [4,"dd",1590738990000,96]
+ - [5,"ee",1590738991000,34]
+ -
+ id: 6
+ desc: bigint as partition by
+ inputs:
+ -
+ columns : ["id bigint","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"]
+ indexs: ["index1:c4:id"]
+ rows:
+ - [1,"aa",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [3,"cc",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [4,"dd",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [5,"ee",21,31,1.2,2.2,1590738991000,"2020-05-02"]
+ sql: |
+ SELECT id, c1, c4, count(c4) OVER w1 as w1_c4_count FROM {0} WINDOW w1 AS (PARTITION BY {0}.c4 ORDER BY {0}.id ROWS BETWEEN 2 PRECEDING AND CURRENT ROW);
+ expect:
+ order: id
+ columns: ["id bigint","c1 string","c4 bigint","w1_c4_count bigint"]
+ rows:
+ - [1,"aa",30,1]
+ - [2,"bb",30,2]
+ - [3,"cc",30,3]
+ - [4,"dd",30,3]
+ - [5,"ee",31,1]
+ -
+ id: 7
+ desc: bigint as order by
+ inputs:
+ -
+ columns : ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"]
+ indexs: ["index1:c8:c4"]
+ rows:
+ - [1,"aa",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,31,1.1,2.1,1590738990000,"2020-05-01"]
+ - [3,"cc",20,32,1.1,2.1,1590738990000,"2020-05-01"]
+ - [4,"dd",20,33,1.1,2.1,1590738990000,"2020-05-01"]
+ - [5,"ee",21,34,1.2,2.2,1590738991000,"2020-05-02"]
+ sql: |
+ SELECT id, c1, c4, count(c4) OVER w1 as w1_c4_count FROM {0} WINDOW w1 AS (PARTITION BY {0}.c8 ORDER BY {0}.c4 ROWS BETWEEN 2 PRECEDING AND CURRENT ROW);
+ expect:
+ order: id
+ columns: ["id int","c1 string","c4 bigint","w1_c4_count bigint"]
+ rows:
+ - [1,"aa",30,1]
+ - [2,"bb",31,2]
+ - [3,"cc",32,3]
+ - [4,"dd",33,3]
+ - [5,"ee",34,1]
+ -
+ id: 8
+ desc: multiple partition keys
+ inputs:
+ -
+ columns : ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"]
+ indexs: ["index1:c1:c7"]
+ rows:
+ - [1,"aa",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"aa",20,31,1.2,2.2,1590738991000,"2020-05-02"]
+ - [3,"aa",20,32,1.3,2.3,1590738992000,"2020-05-03"]
+ - [4,"aa",20,33,1.4,2.4,1590738993000,"2020-05-04"]
+ - [5,"aa",24,34,1.5,2.5,1590738994000,"2020-05-05"]
+ - [6,"bb",24,35,1.5,2.5,1590738995000,"2020-05-06"]
+ sql: |
+ SELECT id, c1, c3, sum(c4) OVER w1 as w1_c4_sum FROM {0} WINDOW w1 AS (PARTITION BY {0}.c1,{0}.c3 ORDER BY {0}.c7 ROWS BETWEEN 2 PRECEDING AND CURRENT ROW);
+ expect:
+ order: id
+ columns: ["id int","c1 string","c3 int","w1_c4_sum bigint"]
+ rows:
+ - [1,"aa",20,30]
+ - [2,"aa",20,61]
+ - [3,"aa",20,93]
+ - [4,"aa",20,96]
+ - [5,"aa",24,34]
+ - [6,"bb",24,35]
+ -
+ id: 9
+ desc: both partition keys covered by the index
+ inputs:
+ -
+ columns : ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"]
+ indexs: ["index1:c1|c3:c7"]
+ rows:
+ - [1,"aa",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"aa",20,31,1.2,2.2,1590738991000,"2020-05-02"]
+ - [3,"aa",20,32,1.3,2.3,1590738992000,"2020-05-03"]
+ - [4,"aa",20,33,1.4,2.4,1590738993000,"2020-05-04"]
+ - [5,"aa",24,34,1.5,2.5,1590738994000,"2020-05-05"]
+ - [6,"bb",24,35,1.5,2.5,1590738995000,"2020-05-06"]
+ sql: |
+ SELECT id, c1, c3, sum(c4) OVER w1 as w1_c4_sum FROM {0} WINDOW
+ w1 AS (PARTITION BY {0}.c1,{0}.c3 ORDER BY {0}.c7 ROWS BETWEEN 2 PRECEDING AND CURRENT ROW);
+ expect:
+ order: id
+ columns: ["id int","c1 string","c3 int","w1_c4_sum bigint"]
+ rows:
+ - [1,"aa",20,30]
+ - [2,"aa",20,61]
+ - [3,"aa",20,93]
+ - [4,"aa",20,96]
+ - [5,"aa",24,34]
+ - [6,"bb",24,35]
+ -
+ id: 9-2
+ desc: both partition keys covered by indexes (two separate indexes)
+ inputs:
+ -
+ columns : ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"]
+ indexs: ["index1:c1:c7","index2:c3:c7"]
+ rows:
+ - [1,"aa",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"aa",20,31,1.2,2.2,1590738991000,"2020-05-02"]
+ - [3,"aa",20,32,1.3,2.3,1590738992000,"2020-05-03"]
+ - [4,"aa",20,33,1.4,2.4,1590738993000,"2020-05-04"]
+ - [5,"aa",24,34,1.5,2.5,1590738994000,"2020-05-05"]
+ - [6,"bb",24,35,1.5,2.5,1590738995000,"2020-05-06"]
+ sql: |
+ SELECT id, c1, c3, sum(c4) OVER w1 as w1_c4_sum FROM {0} WINDOW
+ w1 AS (PARTITION BY {0}.c1,{0}.c3 ORDER BY {0}.c7 ROWS BETWEEN 2 PRECEDING AND CURRENT ROW);
+ expect:
+ order: id
+ columns: ["id int","c1 string","c3 int","w1_c4_sum bigint"]
+ rows:
+ - [1,"aa",20,30]
+ - [2,"aa",20,61]
+ - [3,"aa",20,93]
+ - [4,"aa",20,96]
+ - [5,"aa",24,34]
+ - [6,"bb",24,35]
+ -
+ id: 10
+ desc: multiple windows with the same partition key
+ inputs:
+ -
+ columns : ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"]
+ indexs: ["index1:c3:c7"]
+ rows:
+ - [1,"aa",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,31,1.2,2.2,1590738991000,"2020-05-02"]
+ - [3,"cc",20,32,1.3,2.3,1590738992000,"2020-05-03"]
+ - [4,"dd",20,33,1.4,2.4,1590738993000,"2020-05-04"]
+ - [5,"ee",21,34,1.5,2.5,1590738994000,"2020-05-05"]
+ sql: |
+ SELECT id, c1, c3, sum(c4) OVER w1 as w1_c4_sum, count(c4) OVER w2 as w2_c4_count FROM {0} WINDOW
+ w1 AS (PARTITION BY {0}.c3 ORDER BY {0}.c7 ROWS BETWEEN 2 PRECEDING AND CURRENT ROW),
+ w2 AS (PARTITION BY {0}.c3 ORDER BY {0}.c7 ROWS BETWEEN 2 PRECEDING AND CURRENT ROW);
+ expect:
+ order: id
+ columns: ["id int","c1 string","c3 int","w1_c4_sum bigint","w2_c4_count bigint"]
+ rows:
+ - [1,"aa",20,30,1]
+ - [2,"bb",20,61,2]
+ - [3,"cc",20,93,3]
+ - [4,"dd",20,96,3]
+ - [5,"ee",21,34,1]
+ -
+ id: 11
+ desc: multiple windows with different partition keys
+ inputs:
+ -
+ columns : ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"]
+ indexs: ["index1:c3:c7", "index2:c1:c7"]
+ rows:
+ - [1,"aa",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"aa",20,31,1.2,2.2,1590738991000,"2020-05-02"]
+ - [3,"cc",20,32,1.3,2.3,1590738992000,"2020-05-03"]
+ - [4,"cc",20,33,1.4,2.4,1590738993000,"2020-05-04"]
+ - [5,"ee",21,34,1.5,2.5,1590738994000,"2020-05-05"]
+ sql: |
+ SELECT id, c1, c3, sum(c4) OVER w1 as w1_c4_sum, count(c4) OVER w2 as w2_c4_count FROM {0} WINDOW
+ w1 AS (PARTITION BY c3 ORDER BY c7 ROWS BETWEEN 2 PRECEDING AND CURRENT ROW),
+ w2 AS (PARTITION BY c1 ORDER BY c7 ROWS BETWEEN 2 PRECEDING AND CURRENT ROW);
+ expect:
+ order: id
+ columns: ["id int","c1 string","c3 int","w1_c4_sum bigint","w2_c4_count bigint"]
+ rows:
+ - [1,"aa",20,30,1]
+ - [2,"aa",20,61,2]
+ - [3,"cc",20,93,1]
+ - [4,"cc",20,96,2]
+ - [5,"ee",21,34,1]
+ -
+ id: 12
+ desc: multiple windows with partition keys in table.column form
+ inputs:
+ -
+ columns : ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"]
+ indexs: ["index1:c1:c7", "index2:c3:c7"]
+ rows:
+ - [1,"aa",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"aa",20,31,1.2,2.2,1590738991000,"2020-05-02"]
+ - [3,"cc",20,32,1.3,2.3,1590738992000,"2020-05-03"]
+ - [4,"cc",20,33,1.4,2.4,1590738993000,"2020-05-04"]
+ - [5,"ee",21,34,1.5,2.5,1590738994000,"2020-05-05"]
+ sql: |
+ SELECT id, c1, c3, sum(c4) OVER w1 as w1_c4_sum, count(c4) OVER w2 as w2_c4_count FROM {0} WINDOW
+ w1 AS (PARTITION BY {0}.c3 ORDER BY {0}.c7 ROWS BETWEEN 2 PRECEDING AND CURRENT ROW),
+ w2 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS BETWEEN 2 PRECEDING AND CURRENT ROW);
+ expect:
+ order: id
+ columns: ["id int","c1 string","c3 int","w1_c4_sum bigint","w2_c4_count bigint"]
+ rows:
+ - [1,"aa",20,30,1]
+ - [2,"aa",20,61,2]
+ - [3,"cc",20,93,1]
+ - [4,"cc",20,96,2]
+ - [5,"ee",21,34,1]
+ -
+ id: 13-1
+ desc: multiple windows with different ts columns, data inserted out of time order, expected results for batch mode
+ mode: request-unsupport
+ inputs:
+ -
+ columns : ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"]
+ indexs: ["index1:c3:c7", "index2:c3:c4"]
+ rows:
+ - [1,"aa",20,33,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,31,1.2,2.2,1590738991000,"2020-05-02"]
+ - [3,"cc",20,30,1.3,2.3,1590738992000,"2020-05-03"]
+ - [4,"dd",20,32,1.4,2.4,1590738993000,"2020-05-04"]
+ - [5,"ee",21,34,1.5,2.5,1590738994000,"2020-05-05"]
+ sql: |
+ SELECT id, c1, c3, sum(c4) OVER w1 as w1_c4_sum, count(c4) OVER w2 as w2_c4_count FROM {0} WINDOW
+ w1 AS (PARTITION BY {0}.c3 ORDER BY {0}.c7 ROWS BETWEEN 2 PRECEDING AND CURRENT ROW),
+ w2 AS (PARTITION BY {0}.c3 ORDER BY {0}.c4 ROWS BETWEEN 2 PRECEDING AND CURRENT ROW);
+ expect:
+ order: id
+ columns: ["id int","c1 string","c3 int","w1_c4_sum bigint","w2_c4_count bigint"]
+ rows:
+ - [1,"aa",20,33,3]
+ - [2,"bb",20,64,2]
+ - [3,"cc",20,94,1]
+ - [4,"dd",20,93,3]
+ - [5,"ee",21,34,1]
+ -
+ id: 13-2
+ desc: multiple windows with different ts columns, data inserted out of time order, expected results for request mode
+ mode: batch-unsupport
+ inputs:
+ -
+ columns : ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"]
+ indexs: ["index1:c3:c7", "index2:c3:c4"]
+ rows:
+ - [1,"aa",20,33,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,31,1.2,2.2,1590738991000,"2020-05-02"]
+ - [3,"cc",20,30,1.3,2.3,1590738992000,"2020-05-03"]
+ - [4,"dd",20,32,1.4,2.4,1590738993000,"2020-05-04"]
+ - [5,"ee",21,34,1.5,2.5,1590738994000,"2020-05-05"]
+ sql: |
+ SELECT id, c1, c3, sum(c4) OVER w1 as w1_c4_sum, count(c4) OVER w2 as w2_c4_count FROM {0} WINDOW
+ w1 AS (PARTITION BY {0}.c3 ORDER BY {0}.c7 ROWS BETWEEN 2 PRECEDING AND CURRENT ROW),
+ w2 AS (PARTITION BY {0}.c3 ORDER BY {0}.c4 ROWS BETWEEN 2 PRECEDING AND CURRENT ROW);
+ expect:
+ order: id
+ columns: ["id int","c1 string","c3 int","w1_c4_sum bigint","w2_c4_count bigint"]
+ rows:
+ - [1,"aa",20,33,1]
+ - [2,"bb",20,64,1]
+ - [3,"cc",20,94,1]
+ - [4,"dd",20,93,3]
+ - [5,"ee",21,34,1]
+ -
+ id: 13-3
+ desc: multiple windows with different ts columns, data inserted in time order
+ inputs:
+ -
+ columns : ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"]
+ indexs: ["index1:c3:c7", "index2:c3:c4"]
+ rows:
+ - [1,"aa",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,31,1.2,2.2,1590738991000,"2020-05-02"]
+ - [3,"cc",20,32,1.3,2.3,1590738992000,"2020-05-03"]
+ - [4,"dd",20,33,1.4,2.4,1590738993000,"2020-05-04"]
+ - [5,"ee",21,34,1.5,2.5,1590738994000,"2020-05-05"]
+ sql: |
+ SELECT id, c1, c3, sum(c4) OVER w1 as w1_c4_sum, count(c4) OVER w2 as w2_c4_count FROM {0} WINDOW
+ w1 AS (PARTITION BY {0}.c3 ORDER BY {0}.c7 ROWS BETWEEN 2 PRECEDING AND CURRENT ROW),
+ w2 AS (PARTITION BY {0}.c3 ORDER BY {0}.c4 ROWS BETWEEN 2 PRECEDING AND CURRENT ROW);
+ expect:
+ order: id
+ columns: ["id int","c1 string","c3 int","w1_c4_sum bigint","w2_c4_count bigint"]
+ rows:
+ - [1,"aa",20,30,1]
+ - [2,"bb",20,61,2]
+ - [3,"cc",20,93,3]
+ - [4,"dd",20,96,3]
+ - [5,"ee",21,34,1]
+ -
+ id: 14
+ desc: two windows, both partition keys are indexed columns
+ inputs:
+ -
+ columns : ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"]
+ indexs: ["index1:c1:c7","index2:c3:c7"]
+ rows:
+ - [1,"aa",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"aa",20,31,1.2,2.2,1590738991000,"2020-05-02"]
+ - [3,"cc",20,32,1.3,2.3,1590738992000,"2020-05-03"]
+ - [4,"cc",20,33,1.4,2.4,1590738993000,"2020-05-04"]
+ - [5,"ee",21,34,1.5,2.5,1590738994000,"2020-05-05"]
+ sql: |
+ SELECT id, c1, c3, sum(c4) OVER w1 as w1_c4_sum, count(c4) OVER w2 as w2_c4_count FROM {0} WINDOW
+ w1 AS (PARTITION BY {0}.c3 ORDER BY {0}.c7 ROWS BETWEEN 2 PRECEDING AND CURRENT ROW),
+ w2 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS BETWEEN 2 PRECEDING AND CURRENT ROW);
+ expect:
+ order: id
+ columns: ["id int","c1 string","c3 int","w1_c4_sum bigint","w2_c4_count bigint"]
+ rows:
+ - [1,"aa",20,30,1]
+ - [2,"aa",20,61,2]
+ - [3,"cc",20,93,1]
+ - [4,"cc",20,96,2]
+ - [5,"ee",21,34,1]
+ -
+ id: 15
+ desc: two windows, one with a single partition key and one with two partition keys
+ inputs:
+ -
+ columns : ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"]
+ indexs: ["index1:c1:c7","index2:c3:c7"]
+ rows:
+ - [1,"aa",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"aa",20,31,1.2,2.2,1590738991000,"2020-05-02"]
+ - [3,"cc",20,32,1.3,2.3,1590738992000,"2020-05-03"]
+ - [4,"cc",20,33,1.4,2.4,1590738993000,"2020-05-04"]
+ - [5,"cc",21,34,1.5,2.5,1590738994000,"2020-05-05"]
+ sql: |
+ SELECT id, c1, c3, sum(c4) OVER w1 as w1_c4_sum, count(c4) OVER w2 as w2_c4_count FROM {0} WINDOW
+ w1 AS (PARTITION BY {0}.c3 ORDER BY {0}.c7 ROWS BETWEEN 2 PRECEDING AND CURRENT ROW),
+ w2 AS (PARTITION BY {0}.c1,{0}.c3 ORDER BY {0}.c7 ROWS BETWEEN 2 PRECEDING AND CURRENT ROW);
+ expect:
+ order: id
+ columns: ["id int","c1 string","c3 int","w1_c4_sum bigint","w2_c4_count bigint"]
+ rows:
+ - [1,"aa",20,30,1]
+ - [2,"aa",20,61,2]
+ - [3,"cc",20,93,1]
+ - [4,"cc",20,96,2]
+ - [5,"cc",21,34,1]
+ -
+ id: 16
+ desc: all window aggregations
+ inputs:
+ -
+ columns : ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"]
+ indexs: ["index1:c3:c7"]
+ rows:
+ - [1,"aa",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,31,1.2,2.2,1590738991000,"2020-05-02"]
+ - [3,"cc",20,32,1.3,2.3,1590738992000,"2020-05-03"]
+ - [4,"dd",20,33,1.4,2.4,1590738993000,"2020-05-04"]
+ - [5,"ee",21,34,1.5,2.5,1590738994000,"2020-05-05"]
+ sql: |
+ SELECT id, count(c3) OVER w1 as w1_c3_count, sum(c4) OVER w1 as w1_c4_sum FROM {0} WINDOW w1 AS (PARTITION BY {0}.c3 ORDER BY {0}.c7 ROWS BETWEEN 2 PRECEDING AND CURRENT ROW);
+ expect:
+ columns: ["id int", "w1_c3_count bigint","w1_c4_sum bigint"]
+ order: id
+ rows:
+ - [1, 1,30]
+ - [2, 2,61]
+ - [3, 3,93]
+ - [4, 3,96]
+ - [5, 1,34]
+ -
+ id: 17
+ desc: combined with LIMIT
+ mode: request-unsupport
+ tags: ["TODO", "LIMIT批模式没有确定性输出"]
+ inputs:
+ -
+ columns : ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"]
+ indexs: ["index1:c3:c7"]
+ rows:
+ - [1,"aa",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,31,1.2,2.2,1590738991000,"2020-05-02"]
+ - [3,"cc",20,32,1.3,2.3,1590738992000,"2020-05-03"]
+ - [4,"dd",20,33,1.4,2.4,1590738993000,"2020-05-04"]
+ - [5,"ee",21,34,1.5,2.5,1590738994000,"2020-05-05"]
+ sql: |
+ SELECT id, c1, c3, sum(c4) OVER w1 as w1_c4_sum FROM {0} WINDOW w1 AS (PARTITION BY {0}.c3 ORDER BY {0}.c7 ROWS BETWEEN 2 PRECEDING AND CURRENT ROW) limit 2;
+ expect:
+ order: id
+ columns: ["id int","c1 string","c3 int","w1_c4_sum bigint"]
+ rows:
+ - [1,"aa",20,30]
+ - [5,"ee",21,34]
+ -
+ id: 18
+ desc: window aggregation result used without an alias
+ mode: cli-unsupport
+ inputs:
+ -
+ columns : ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"]
+ indexs: ["index1:c3:c7"]
+ rows:
+ - [1,"aa",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,31,1.2,2.2,1590738991000,"2020-05-02"]
+ - [3,"cc",20,32,1.3,2.3,1590738992000,"2020-05-03"]
+ - [4,"dd",20,33,1.4,2.4,1590738993000,"2020-05-04"]
+ - [5,"ee",21,34,1.5,2.5,1590738994000,"2020-05-05"]
+ sql: |
+ SELECT id, c1, c3, sum(c4) OVER w1 FROM {0} WINDOW w1 AS (PARTITION BY {0}.c3 ORDER BY {0}.c7 ROWS BETWEEN 2 PRECEDING AND CURRENT ROW);
+ expect:
+ order: id
+ columns: ["id int","c1 string","c3 int","sum(c4)over w1 bigint"]
+ rows:
+ - [1,"aa",20,30]
+ - [2,"bb",20,61]
+ - [3,"cc",20,93]
+ - [4,"dd",20,96]
+ - [5,"ee",21,34]
+ -
+ id: 19
+ desc: case when window expression then window expression else null end
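+ # Note: lag(c1, 0) is assumed to return the current row's c1, so the sum(c4) OVER w1 branch only fires for rows with c1 == "aa"; the "bb" rows fall through to NULL, as the expected rows below show.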
+ inputs:
+ -
+ columns: ["c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"]
+ indexs: ["index1:c1:c7"]
+ rows:
+ - ["aa",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - ["aa",21,31,1.2,2.2,1590738991000,"2020-05-02"]
+ - ["aa",22,32,1.3,2.3,1590738992000,"2020-05-03"]
+ - ["bb",23,33,1.4,2.4,1590738993000,"2020-05-04"]
+ - ["bb",24,34,1.5,2.5,1590738994000,"2020-05-05"]
+ sql: |
+ SELECT c1, c4,
+ case when lag(c1, 0) OVER w1 == "aa" then sum(c4) over w1
+ else null end
+ as sum_c1_w1 FROM {0}
+ WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS BETWEEN 2 PRECEDING AND CURRENT ROW);
+ expect:
+ order: c4
+ columns: ["c1 string","c4 bigint","sum_c1_w1 bigint"]
+ rows:
+ - ["aa",30,30]
+ - ["aa",31,61]
+ - ["aa",32,93]
+ - ["bb",33,NULL]
+ - ["bb",34,NULL]
+ -
+ id: 20
+ desc: case when window expr then window expr else window expr
+ inputs:
+ -
+ columns: ["c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"]
+ indexs: ["index1:c1:c7"]
+ rows:
+ - ["aa",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - ["aa",21,31,1.2,2.2,1590738991000,"2020-05-02"]
+ - ["aa",22,32,1.3,2.3,1590738992000,"2020-05-03"]
+ - ["bb",23,33,1.4,2.4,1590738993000,"2020-05-04"]
+ - ["bb",24,34,1.5,2.5,1590738994000,"2020-05-05"]
+ sql: |
+ SELECT c1, c4,
+ case when lag(c1, 0) OVER w1 == "aa" then sum(c4) over w1
+ else min(c4) over w1 end
+ as sum_c1_w1 FROM {0}
+ WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS BETWEEN 2 PRECEDING AND CURRENT ROW);
+ expect:
+ order: c4
+ columns: ["c1 string","c4 bigint","sum_c1_w1 bigint"]
+ rows:
+ - ["aa",30,30]
+ - ["aa",31,61]
+ - ["aa",32,93]
+ - ["bb",33,33]
+ - ["bb",34,33]
+ -
+ id: 21
+ desc: case when simple expression then window expression else null end
+ inputs:
+ -
+ columns: ["c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"]
+ indexs: ["index1:c1:c7"]
+ rows:
+ - ["aa",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - ["aa",21,31,1.2,2.2,1590738991000,"2020-05-02"]
+ - ["aa",22,32,1.3,2.3,1590738992000,"2020-05-03"]
+ - ["bb",23,33,1.4,2.4,1590738993000,"2020-05-04"]
+ - ["bb",24,34,1.5,2.5,1590738994000,"2020-05-05"]
+ sql: |
+ SELECT c1, c4,
+ case when c1 == "aa" then sum(c4) over w1
+ else null end
+ as sum_c1_w1 FROM {0}
+ WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS BETWEEN 2 PRECEDING AND CURRENT ROW);
+ expect:
+ order: c4
+ columns: ["c1 string","c4 bigint","sum_c1_w1 bigint"]
+ rows:
+ - ["aa",30,30]
+ - ["aa",31,61]
+ - ["aa",32,93]
+ - ["bb",33,NULL]
+ - ["bb",34,NULL]
+ -
+ id: 22
+ desc: window expression + window expression
+ inputs:
+ -
+ columns: ["c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"]
+ indexs: ["index1:c1:c7"]
+ rows:
+ - ["aa",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - ["aa",21,31,1.2,2.2,1590738991000,"2020-05-02"]
+ - ["aa",22,32,1.3,2.3,1590738992000,"2020-05-03"]
+ - ["bb",23,33,1.4,2.4,1590738993000,"2020-05-04"]
+ - ["bb",24,34,1.5,2.5,1590738994000,"2020-05-05"]
+ sql: |
+ SELECT c1, c3, c4,
+ (sum(c4) over w1 + sum(c3) over w1) as sum_c3_c4_w1 FROM {0}
+ WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS BETWEEN 2 PRECEDING AND CURRENT ROW);
+ expect:
+ order: c4
+ columns: ["c1 string","c3 int", "c4 bigint","sum_c3_c4_w1 bigint"]
+ rows:
+ - ["aa",20, 30, 50]
+ - ["aa",21, 31, 102]
+ - ["aa",22, 32, 156]
+ - ["bb",23, 33, 56]
+ - ["bb",24, 34, 114]
+
+ -
+ id: 28
+ desc: anonymous window
+ inputs:
+ -
+ columns: ["c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"]
+ indexs: ["index1:c1:c7"]
+ rows:
+ - ["aa",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - ["aa",21,31,1.2,2.2,1590738991000,"2020-05-02"]
+ - ["aa",22,32,1.3,2.3,1590738992000,"2020-05-03"]
+ - ["aa",23,33,1.4,2.4,1590738993000,"2020-05-04"]
+ - ["bb",24,34,1.5,2.5,1590738994000,"2020-05-05"]
+ sql: |
+ SELECT c1, c3, sum(c4) OVER (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS BETWEEN 2 PRECEDING AND CURRENT ROW) as w1_c4_sum FROM {0};
+ expect:
+ order: c3
+ columns: ["c1 string","c3 int","w1_c4_sum bigint"]
+ rows:
+ - ["aa",20,30]
+ - ["aa",21,61]
+ - ["aa",22,93]
+ - ["aa",23,96]
+ - ["bb",24,34]
+ -
+ id: 29
+ desc: anonymous window without parentheses
+ inputs:
+ -
+ columns: ["c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"]
+ indexs: ["index1:c1:c7"]
+ rows:
+ - ["aa",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - ["aa",21,31,1.2,2.2,1590738991000,"2020-05-02"]
+ - ["aa",22,32,1.3,2.3,1590738992000,"2020-05-03"]
+ - ["aa",23,33,1.4,2.4,1590738993000,"2020-05-04"]
+ - ["bb",24,34,1.5,2.5,1590738994000,"2020-05-05"]
+ sql: |
+ SELECT c1, c3, sum(c4) OVER PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS BETWEEN 2 PRECEDING AND CURRENT ROW as w1_c4_sum FROM {0};
+ expect:
+ success: false
+ -
+ id: 30
+ desc: smallint as partition by
+ inputs:
+ -
+ columns : ["id int","c1 string","c3 smallint","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"]
+ indexs: ["index1:c3:c7"]
+ rows:
+ - [1,"aa",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,31,1.2,2.2,1590738991000,"2020-05-02"]
+ - [3,"cc",20,32,1.3,2.3,1590738992000,"2020-05-03"]
+ - [4,"dd",20,33,1.4,2.4,1590738993000,"2020-05-04"]
+ - [5,"ee",21,34,1.5,2.5,1590738994000,"2020-05-05"]
+ sql: |
+ SELECT id, c1, c3, sum(c4) OVER w1 as w1_c4_sum FROM {0} WINDOW w1 AS (PARTITION BY {0}.c3 ORDER BY {0}.c7 ROWS BETWEEN 2 PRECEDING AND CURRENT ROW);
+ expect:
+ order: id
+ columns: ["id int","c1 string","c3 smallint","w1_c4_sum bigint"]
+ rows:
+ - [1,"aa",20,30]
+ - [2,"bb",20,61]
+ - [3,"cc",20,93]
+ - [4,"dd",20,96]
+ - [5,"ee",21,34]
+ -
+ id: 31
+ desc: bool as partition by
+ inputs:
+ -
+ columns : ["id int","c1 string","c2 bool","c3 smallint","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"]
+ indexs: ["index1:c2:c7"]
+ rows:
+ - [1,"aa",true,20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",true,20,31,1.2,2.2,1590738991000,"2020-05-02"]
+ - [3,"cc",true,20,32,1.3,2.3,1590738992000,"2020-05-03"]
+ - [4,"dd",true,20,33,1.4,2.4,1590738993000,"2020-05-04"]
+ - [5,"ee",false,21,34,1.5,2.5,1590738994000,"2020-05-05"]
+ sql: |
+ SELECT id, c1, c2, sum(c4) OVER w1 as w1_c4_sum FROM {0} WINDOW w1 AS (PARTITION BY {0}.c2 ORDER BY {0}.c7 ROWS BETWEEN 2 PRECEDING AND CURRENT ROW);
+ expect:
+ order: id
+ columns: ["id int","c1 string","c2 bool","w1_c4_sum bigint"]
+ rows:
+ - [1,"aa",true,30]
+ - [2,"bb",true,61]
+ - [3,"cc",true,93]
+ - [4,"dd",true,96]
+ - [5,"ee",false,34]
+ -
+ id: 38
+ desc: rows 1-2
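+ # The frame ROWS BETWEEN 2 PRECEDING AND 1 PRECEDING excludes the current row, so the first row of each partition has an empty window and w1_c4_sum is NULL.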
+ version: 0.6.0
+ inputs:
+ -
+ columns: ["c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"]
+ indexs: ["index1:c1:c7"]
+ rows:
+ - ["aa",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - ["aa",21,31,1.2,2.2,1590738991000,"2020-05-02"]
+ - ["aa",22,32,1.3,2.3,1590738992000,"2020-05-03"]
+ - ["aa",23,33,1.4,2.4,1590738993000,"2020-05-04"]
+ - ["bb",24,34,1.5,2.5,1590738994000,"2020-05-05"]
+ sql: |
+ SELECT c1, c3, sum(c4) OVER w1 as w1_c4_sum FROM {0} WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS BETWEEN 2 PRECEDING AND 1 PRECEDING);
+ expect:
+ order: c3
+ columns: ["c1 string","c3 int","w1_c4_sum bigint"]
+ rows:
+ - ["aa",20,NULL]
+ - ["aa",21,30]
+ - ["aa",22,61]
+ - ["aa",23,63]
+ - ["bb",24,NULL]
+ -
+ id: 39
+ desc: rows 0-2
+ inputs:
+ -
+ columns: ["c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"]
+ indexs: ["index1:c1:c7"]
+ rows:
+ - ["aa",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - ["aa",21,31,1.2,2.2,1590738991000,"2020-05-02"]
+ - ["aa",22,32,1.3,2.3,1590738992000,"2020-05-03"]
+ - ["aa",23,33,1.4,2.4,1590738993000,"2020-05-04"]
+ - ["bb",24,34,1.5,2.5,1590738994000,"2020-05-05"]
+ sql: |
+ SELECT c1, c3, sum(c4) OVER w1 as w1_c4_sum FROM {0} WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS BETWEEN 2 PRECEDING AND 0 PRECEDING);
+ expect:
+ order: c3
+ columns: ["c1 string","c3 int","w1_c4_sum bigint"]
+ rows:
+ - ["aa",20,30]
+ - ["aa",21,61]
+ - ["aa",22,93]
+ - ["aa",23,96]
+ - ["bb",24,34]
+ -
+ id: 40
+ desc: rows -1-2
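+ # Judging from the expected rows, a -1 PRECEDING upper bound appears to behave like CURRENT ROW here: the output matches case 39 (0 PRECEDING) above.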
+ inputs:
+ -
+ columns: ["c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"]
+ indexs: ["index1:c1:c7"]
+ rows:
+ - ["aa",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - ["aa",21,31,1.2,2.2,1590738991000,"2020-05-02"]
+ - ["aa",22,32,1.3,2.3,1590738992000,"2020-05-03"]
+ - ["aa",23,33,1.4,2.4,1590738993000,"2020-05-04"]
+ - ["bb",24,34,1.5,2.5,1590738994000,"2020-05-05"]
+ sql: |
+ SELECT c1, c3, sum(c4) OVER w1 as w1_c4_sum FROM {0} WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS BETWEEN 2 PRECEDING AND -1 PRECEDING);
+ expect:
+ order: c3
+ columns: ["c1 string","c3 int","w1_c4_sum bigint"]
+ rows:
+ - ["aa",20,30]
+ - ["aa",21,61]
+ - ["aa",22,93]
+ - ["aa",23,96]
+ - ["bb",24,34]
diff --git a/cases/integration_test/window/test_window_row_range.yaml b/cases/integration_test/window/test_window_row_range.yaml
new file mode 100644
index 00000000000..a2763c48b4f
--- /dev/null
+++ b/cases/integration_test/window/test_window_row_range.yaml
@@ -0,0 +1,1411 @@
+# Copyright 2021 4Paradigm
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+db: test_zw
+debugs: []
+version: 0.5.0
+cases:
+ - id: 0
+ desc: string as partition by
+ inputs:
+ - columns: [ "c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date" ]
+ indexs: [ "index1:c1:c7" ]
+ rows:
+ - [ "aa",20,30,1.1,2.1,1590738990000,"2020-05-01" ]
+ - [ "aa",21,31,1.2,2.2,1590738991000,"2020-05-02" ]
+ - [ "aa",22,32,1.3,2.3,1590738992000,"2020-05-03" ]
+ - [ "aa",23,33,1.4,2.4,1590738993000,"2020-05-04" ]
+ - [ "bb",24,34,1.5,2.5,1590738994000,"2020-05-05" ]
+ sql: |
+ SELECT c1, c3, sum(c4) OVER w1 as w1_c4_sum FROM {0} WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS_RANGE BETWEEN 2s PRECEDING AND CURRENT ROW);
+ expect:
+ order: c3
+ columns: [ "c1 string","c3 int","w1_c4_sum bigint" ]
+ rows:
+ - [ "aa",20,30 ]
+ - [ "aa",21,61 ]
+ - [ "aa",22,93 ]
+ - [ "aa",23,96 ]
+ - [ "bb",24,34 ]
+ - id: 1
+ desc: int as partition by
+ inputs:
+ - columns: [ "id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date" ]
+ indexs: [ "index1:c3:c7" ]
+ rows:
+ - [ 1,"aa",20,30,1.1,2.1,1590738990000,"2020-05-01" ]
+ - [ 2,"bb",20,31,1.2,2.2,1590738991000,"2020-05-02" ]
+ - [ 3,"cc",20,32,1.3,2.3,1590738992000,"2020-05-03" ]
+ - [ 4,"dd",20,33,1.4,2.4,1590738993000,"2020-05-04" ]
+ - [ 5,"ee",21,34,1.5,2.5,1590738994000,"2020-05-05" ]
+ sql: |
+ SELECT id, c1, c3, sum(c4) OVER w1 as w1_c4_sum FROM {0} WINDOW w1 AS (PARTITION BY {0}.c3 ORDER BY {0}.c7 ROWS_RANGE BETWEEN 2s PRECEDING AND CURRENT ROW);
+ expect:
+ order: id
+ columns: [ "id int","c1 string","c3 int","w1_c4_sum bigint" ]
+ rows:
+ - [ 1,"aa",20,30 ]
+ - [ 2,"bb",20,61 ]
+ - [ 3,"cc",20,93 ]
+ - [ 4,"dd",20,96 ]
+ - [ 5,"ee",21,34 ]
+ - id: 2
+ desc: float as partition by - index not hit
+ mode: rtidb-unsupport
+ inputs:
+ - columns: [ "id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date" ]
+ indexs: [ "index1:c1:c7" ]
+ rows:
+ - [ 1,"aa",20,30,1.1,2.1,1590738990000,"2020-05-01" ]
+ - [ 2,"bb",20,31,1.1,2.2,1590738991000,"2020-05-02" ]
+ - [ 3,"cc",20,32,1.1,2.3,1590738992000,"2020-05-03" ]
+ - [ 4,"dd",20,33,1.1,2.4,1590738993000,"2020-05-04" ]
+ - [ 5,"ee",21,34,1.2,2.5,1590738994000,"2020-05-05" ]
+ sql: |
+ SELECT id, c1, c5, sum(c4) OVER w1 as w1_c4_sum FROM {0} WINDOW w1 AS (PARTITION BY {0}.c5 ORDER BY {0}.c7 ROWS_RANGE BETWEEN 2s PRECEDING AND CURRENT ROW);
+ expect:
+ order: id
+ columns: [ "id int","c1 string","c5 float","w1_c4_sum bigint" ]
+ rows:
+ - [ 1,"aa",1.1,30 ]
+ - [ 2,"bb",1.1,61 ]
+ - [ 3,"cc",1.1,93 ]
+ - [ 4,"dd",1.1,96 ]
+ - [ 5,"ee",1.2,34 ]
+ - id: 3
+ desc: double as partition by - index not hit
+ mode: rtidb-unsupport
+ inputs:
+ - columns: [ "id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date" ]
+ indexs: [ "index1:c1:c7" ]
+ rows:
+ - [ 1,"aa",20,30,1.1,2.1,1590738990000,"2020-05-01" ]
+ - [ 2,"bb",20,31,1.1,2.1,1590738991000,"2020-05-02" ]
+ - [ 3,"cc",20,32,1.1,2.1,1590738992000,"2020-05-03" ]
+ - [ 4,"dd",20,33,1.1,2.1,1590738993000,"2020-05-04" ]
+ - [ 5,"ee",21,34,1.2,2.2,1590738994000,"2020-05-05" ]
+ sql: |
+ SELECT id, c1, c6, sum(c4) OVER w1 as w1_c4_sum FROM {0} WINDOW w1 AS (PARTITION BY {0}.c6 ORDER BY {0}.c7 ROWS_RANGE BETWEEN 2s PRECEDING AND CURRENT ROW);
+ expect:
+ order: id
+ columns: [ "id int","c1 string","c6 double","w1_c4_sum bigint" ]
+ rows:
+ - [ 1,"aa",2.1,30 ]
+ - [ 2,"bb",2.1,61 ]
+ - [ 3,"cc",2.1,93 ]
+ - [ 4,"dd",2.1,96 ]
+ - [ 5,"ee",2.2,34 ]
+ - id: 4
+ desc: date as partition by
+ inputs:
+ - columns: [ "id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date" ]
+ indexs: [ "index1:c8:c7" ]
+ rows:
+ - [ 1,"aa",20,30,1.1,2.1,1590738990000,"2020-05-01" ]
+ - [ 2,"bb",20,31,1.1,2.1,1590738991000,"2020-05-01" ]
+ - [ 3,"cc",20,32,1.1,2.1,1590738992000,"2020-05-01" ]
+ - [ 4,"dd",20,33,1.1,2.1,1590738993000,"2020-05-01" ]
+ - [ 5,"ee",21,34,1.2,2.2,1590738994000,"2020-05-02" ]
+ sql: |
+ SELECT id, c1, c8, sum(c4) OVER w1 as w1_c4_sum FROM {0} WINDOW w1 AS (PARTITION BY {0}.c8 ORDER BY {0}.c7 ROWS_RANGE BETWEEN 2s PRECEDING AND CURRENT ROW);
+ expect:
+ order: id
+ columns: [ "id int","c1 string","c8 date","w1_c4_sum bigint" ]
+ rows:
+ - [ 1,"aa","2020-05-01",30 ]
+ - [ 2,"bb","2020-05-01",61 ]
+ - [ 3,"cc","2020-05-01",93 ]
+ - [ 4,"dd","2020-05-01",96 ]
+ - [ 5,"ee","2020-05-02",34 ]
+ - id: 5
+ desc: timestamp as partition by
+ inputs:
+ - columns: [ "id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 timestamp" ]
+ indexs: [ "index1:c7:c9" ]
+ rows:
+ - [ 1,"aa",20,30,1.1,2.1,1590738990000,"2020-05-01",1590738990000 ]
+ - [ 2,"bb",20,31,1.1,2.1,1590738990000,"2020-05-01",1590738991000 ]
+ - [ 3,"cc",20,32,1.1,2.1,1590738990000,"2020-05-01",1590738992000 ]
+ - [ 4,"dd",20,33,1.1,2.1,1590738990000,"2020-05-01",1590738993000 ]
+ - [ 5,"ee",21,34,1.2,2.2,1590738991000,"2020-05-02",1590738994000 ]
+ sql: |
+ SELECT id, c1, c7, sum(c4) OVER w1 as w1_c4_sum FROM {0} WINDOW w1 AS (PARTITION BY {0}.c7 ORDER BY {0}.c9 ROWS_RANGE BETWEEN 2s PRECEDING AND CURRENT ROW);
+ expect:
+ order: id
+ columns: [ "id int","c1 string","c7 timestamp","w1_c4_sum bigint" ]
+ rows:
+ - [ 1,"aa",1590738990000,30 ]
+ - [ 2,"bb",1590738990000,61 ]
+ - [ 3,"cc",1590738990000,93 ]
+ - [ 4,"dd",1590738990000,96 ]
+ - [ 5,"ee",1590738991000,34 ]
+ - id: 6
+ desc: bigint as partition by
+ inputs:
+ - columns: [ "id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date" ]
+ indexs: [ "index1:c4:c7" ]
+ rows:
+ - [ 1,"aa",20,30,1.1,2.1,1590738990000,"2020-05-01" ]
+ - [ 2,"bb",20,30,1.1,2.1,1590738991000,"2020-05-01" ]
+ - [ 3,"cc",20,30,1.1,2.1,1590738992000,"2020-05-01" ]
+ - [ 4,"dd",20,30,1.1,2.1,1590738993000,"2020-05-01" ]
+ - [ 5,"ee",21,31,1.2,2.2,1590738994000,"2020-05-02" ]
+ sql: |
+ SELECT id, c1, c4, count(c4) OVER w1 as w1_c4_count FROM {0} WINDOW w1 AS (PARTITION BY {0}.c4 ORDER BY {0}.c7 ROWS_RANGE BETWEEN 2s PRECEDING AND CURRENT ROW);
+ expect:
+ order: id
+ columns: [ "id int","c1 string","c4 bigint","w1_c4_count bigint" ]
+ rows:
+ - [ 1,"aa",30,1 ]
+ - [ 2,"bb",30,2 ]
+ - [ 3,"cc",30,3 ]
+ - [ 4,"dd",30,3 ]
+ - [ 5,"ee",31,1 ]
+ - id: 7
+ desc: string as order by
+ inputs:
+ - columns: [ "id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date" ]
+ indexs: [ "index1:c1:c7" ]
+ rows:
+ - [ 1,"aa",20,30,1.1,2.1,1590738990000,"2020-05-01" ]
+ - [ 2,"bb",20,30,1.1,2.1,1590738991000,"2020-05-01" ]
+ - [ 3,"cc",20,30,1.1,2.1,1590738992000,"2020-05-01" ]
+ - [ 4,"dd",20,30,1.1,2.1,1590738993000,"2020-05-01" ]
+ - [ 5,"ee",21,31,1.2,2.2,1590738994000,"2020-05-02" ]
+ sql: |
+ SELECT id, c1, c4, count(c4) OVER w1 as w1_c4_count FROM {0} WINDOW w1 AS (PARTITION BY {0}.c3 ORDER BY {0}.c1 ROWS_RANGE BETWEEN 2s PRECEDING AND CURRENT ROW);
+ expect:
+ success: false
+
+ -
+ id: 8
+ desc: bigint as order by - no time unit
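+ # With a bigint ORDER BY and no time unit, the 2 PRECEDING bound appears to be taken as a plain value offset on c4 (rows whose c4 is within 2 of the current row's c4), per the expected counts below.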
+ inputs:
+ - columns: [ "id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date" ]
+ indexs: [ "index1:c3:c4" ]
+ rows:
+ - [ 1,"aa",20,30,1.1,2.1,1590738990000,"2020-05-01" ]
+ - [ 2,"bb",20,31,1.1,2.1,1590738990000,"2020-05-01" ]
+ - [ 3,"cc",20,32,1.1,2.1,1590738990000,"2020-05-01" ]
+ - [ 4,"dd",20,33,1.1,2.1,1590738990000,"2020-05-01" ]
+ - [ 5,"ee",21,34,1.2,2.2,1590738991000,"2020-05-02" ]
+ sql: |
+ SELECT id, c1, c4, count(c4) OVER w1 as w1_c4_count FROM {0} WINDOW w1 AS (PARTITION BY {0}.c3 ORDER BY {0}.c4 ROWS_RANGE BETWEEN 2 PRECEDING AND CURRENT ROW);
+ expect:
+ order: id
+ columns: [ "id int","c1 string","c4 bigint","w1_c4_count bigint" ]
+ rows:
+ - [ 1,"aa",30,1 ]
+ - [ 2,"bb",31,2 ]
+ - [ 3,"cc",32,3 ]
+ - [ 4,"dd",33,3 ]
+ - [ 5,"ee",34,1 ]
+ - id: 8-2
+ desc: int as order by - ts column not hit
+ mode: rtidb-unsupport,cli-unsupport
+ inputs:
+ - columns: [ "id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date" ]
+ indexs: [ "index1:c8:c7" ]
+ rows:
+ - [ 1,"aa",20,30,1.1,2.1,1590738990000,"2020-05-01" ]
+ - [ 2,"bb",21,31,1.1,2.1,1590738990000,"2020-05-01" ]
+ - [ 3,"cc",22,32,1.1,2.1,1590738990000,"2020-05-01" ]
+ - [ 4,"dd",23,33,1.1,2.1,1590738990000,"2020-05-01" ]
+ - [ 5,"ee",24,34,1.2,2.2,1590738991000,"2020-05-02" ]
+ sql: |
+ SELECT id, c1, c4, count(c4) OVER w1 as w1_c4_count FROM {0} WINDOW w1 AS (PARTITION BY {0}.c8 ORDER BY {0}.c3 ROWS_RANGE BETWEEN 2 PRECEDING AND CURRENT ROW);
+ expect:
+ order: id
+ columns: [ "id int","c1 string","c4 bigint","w1_c4_count bigint" ]
+ rows:
+ - [ 1,"aa",30,1 ]
+ - [ 2,"bb",31,2 ]
+ - [ 3,"cc",32,3 ]
+ - [ 4,"dd",33,3 ]
+ - [ 5,"ee",34,1 ]
+ - id: 8-3
+ desc: bigint as order by - with time unit
+ inputs:
+ - columns: [ "id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date" ]
+ indexs: [ "index1:c3:c4" ]
+ rows:
+ - [ 1,"aa",20,30,1.1,2.1,1590738990000,"2020-05-01" ]
+ - [ 2,"bb",20,31,1.1,2.1,1590738990000,"2020-05-01" ]
+ - [ 3,"cc",20,32,1.1,2.1,1590738990000,"2020-05-01" ]
+ - [ 4,"dd",20,33,1.1,2.1,1590738990000,"2020-05-01" ]
+ - [ 5,"ee",21,34,1.2,2.2,1590738991000,"2020-05-02" ]
+ sql: |
+ SELECT id, c1, c4, count(c4) OVER w1 as w1_c4_count FROM {0} WINDOW w1 AS (PARTITION BY {0}.c3 ORDER BY {0}.c4 ROWS_RANGE BETWEEN 2s PRECEDING AND CURRENT ROW);
+ expect:
+ order: id
+ columns: [ "id int","c1 string","c4 bigint","w1_c4_count bigint" ]
+ rows:
+ - [ 1,"aa",30,1 ]
+ - [ 2,"bb",31,2 ]
+ - [ 3,"cc",32,3 ]
+ - [ 4,"dd",33,4 ]
+ - [ 5,"ee",34,1 ]
+ - id: 8-4
+ desc: int as order by - with time unit - index not hit
+ mode: rtidb-unsupport,cli-unsupport
+ inputs:
+ - columns: [ "id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date" ]
+ indexs: [ "index1:c1:c7" ]
+ rows:
+ - [ 1,"aa",20,30,1.1,2.1,1590738990000,"2020-05-01" ]
+ - [ 2,"bb",21,31,1.1,2.1,1590738990000,"2020-05-01" ]
+ - [ 3,"cc",22,32,1.1,2.1,1590738990000,"2020-05-01" ]
+ - [ 4,"dd",23,33,1.1,2.1,1590738990000,"2020-05-01" ]
+ - [ 5,"ee",24,34,1.2,2.2,1590738991000,"2020-05-02" ]
+ sql: |
+ SELECT id, c1, c4, count(c4) OVER w1 as w1_c4_count FROM {0} WINDOW w1 AS (PARTITION BY {0}.c8 ORDER BY {0}.c3 ROWS_RANGE BETWEEN 2s PRECEDING AND CURRENT ROW);
+ expect:
+ order: id
+ columns: [ "id int","c1 string","c4 bigint","w1_c4_count bigint" ]
+ rows:
+ - [ 1,"aa",30,1 ]
+ - [ 2,"bb",31,2 ]
+ - [ 3,"cc",32,3 ]
+ - [ 4,"dd",33,4 ]
+ - [ 5,"ee",34,1 ]
+ - id: 9
+ desc: float as order by
+ mode: rtidb-unsupport
+ inputs:
+ - columns: [ "id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date" ]
+ indexs: [ "index1:c8:c7" ]
+ rows:
+ - [ 1,"aa",20,30,1.1,2.1,1590738990000,"2020-05-01" ]
+ - [ 2,"bb",20,31,1.2,2.1,1590738990000,"2020-05-01" ]
+ - [ 3,"cc",20,32,1.3,2.1,1590738990000,"2020-05-01" ]
+ - [ 4,"dd",20,33,1.4,2.1,1590738990000,"2020-05-01" ]
+ - [ 5,"ee",21,34,1.5,2.2,1590738991000,"2020-05-02" ]
+ sql: |
+ SELECT id, c1, c5, count(c4) OVER w1 as w1_c4_count FROM {0} WINDOW w1 AS (PARTITION BY {0}.c8 ORDER BY {0}.c5 ROWS_RANGE BETWEEN 2s PRECEDING AND CURRENT ROW);
+ expect:
+ success: false
+ - id: 10
+ desc: double as order by
+ inputs:
+ - columns: [ "id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date" ]
+ indexs: [ "index1:c1:c7" ]
+ rows:
+ - [ 1,"aa",20,30,1.1,2.1,1590738990000,"2020-05-01" ]
+ - [ 2,"bb",20,31,1.2,2.2,1590738990000,"2020-05-01" ]
+ - [ 3,"cc",20,32,1.3,2.3,1590738990000,"2020-05-01" ]
+ - [ 4,"dd",20,33,1.4,2.4,1590738990000,"2020-05-01" ]
+ - [ 5,"ee",21,34,1.5,2.5,1590738991000,"2020-05-02" ]
+ sql: |
+ SELECT id, c1, c6, count(c4) OVER w1 as w1_c4_count FROM {0} WINDOW w1 AS (PARTITION BY {0}.c8 ORDER BY {0}.c6 ROWS_RANGE BETWEEN 2s PRECEDING AND CURRENT ROW);
+ expect:
+ success: false
+ - id: 11
+ desc: date as order by - index not hit
+ mode: offline-unsupport
+ inputs:
+ - columns: [ "id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date" ]
+ indexs: [ "index1:c1:c7" ]
+ rows:
+ - [ 1,"aa",20,30,1.1,2.1,1590738990000,"2020-05-01" ]
+ - [ 2,"bb",20,31,1.2,2.2,1590738990000,"2020-05-02" ]
+ - [ 3,"cc",20,32,1.3,2.3,1590738990000,"2020-05-03" ]
+ - [ 4,"dd",20,33,1.4,2.4,1590738990000,"2020-05-04" ]
+ - [ 5,"ee",21,34,1.5,2.5,1590738991000,"2020-05-05" ]
+ sql: |
+ SELECT id, c1, c8, count(c4) OVER w1 as w1_c4_count FROM {0} WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c8 ROWS_RANGE BETWEEN 2s PRECEDING AND CURRENT ROW);
+ expect:
+ success: false
+ - id: 12
+ desc: multiple pks
+ inputs:
+ - columns: [ "id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date" ]
+ indexs: [ "index1:c1|c3:c7" ]
+ rows:
+ - [ 1,"aa",20,30,1.1,2.1,1590738990000,"2020-05-01" ]
+ - [ 2,"aa",20,31,1.2,2.2,1590738991000,"2020-05-02" ]
+ - [ 3,"aa",20,32,1.3,2.3,1590738992000,"2020-05-03" ]
+ - [ 4,"aa",20,33,1.4,2.4,1590738993000,"2020-05-04" ]
+ - [ 5,"aa",24,34,1.5,2.5,1590738994000,"2020-05-05" ]
+ - [ 6,"bb",24,35,1.5,2.5,1590738995000,"2020-05-06" ]
+ sql: |
+ SELECT id, c1, c3, sum(c4) OVER w1 as w1_c4_sum FROM {0} WINDOW w1 AS (PARTITION BY {0}.c1,{0}.c3 ORDER BY {0}.c7 ROWS_RANGE BETWEEN 2s PRECEDING AND CURRENT ROW);
+ expect:
+ order: id
+ columns: [ "id int","c1 string","c3 int","w1_c4_sum bigint" ]
+ rows:
+ - [ 1,"aa",20,30 ]
+ - [ 2,"aa",20,61 ]
+ - [ 3,"aa",20,93 ]
+ - [ 4,"aa",20,96 ]
+ - [ 5,"aa",24,34 ]
+ - [ 6,"bb",24,35 ]
+ - id: 13
+ desc: both pks use indexes
+ inputs:
+ - columns: [ "id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date" ]
+ indexs: [ "index1:c1|c3:c7","index2:c3:c7" ]
+ rows:
+ - [ 1,"aa",20,30,1.1,2.1,1590738990000,"2020-05-01" ]
+ - [ 2,"aa",20,31,1.2,2.2,1590738991000,"2020-05-02" ]
+ - [ 3,"aa",20,32,1.3,2.3,1590738992000,"2020-05-03" ]
+ - [ 4,"aa",20,33,1.4,2.4,1590738993000,"2020-05-04" ]
+ - [ 5,"aa",24,34,1.5,2.5,1590738994000,"2020-05-05" ]
+ - [ 6,"bb",24,35,1.5,2.5,1590738995000,"2020-05-06" ]
+ sql: |
+ SELECT id, c1, c3, sum(c4) OVER w1 as w1_c4_sum FROM {0} WINDOW
+ w1 AS (PARTITION BY {0}.c1,{0}.c3 ORDER BY {0}.c7 ROWS_RANGE BETWEEN 2s PRECEDING AND CURRENT ROW);
+ expect:
+ order: id
+ columns: [ "id int","c1 string","c3 int","w1_c4_sum bigint" ]
+ rows:
+ - [ 1,"aa",20,30 ]
+ - [ 2,"aa",20,61 ]
+ - [ 3,"aa",20,93 ]
+ - [ 4,"aa",20,96 ]
+ - [ 5,"aa",24,34 ]
+ - [ 6,"bb",24,35 ]
+ - id: 14
+ desc: multiple windows with the same pk
+ inputs:
+ - columns: [ "id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date" ]
+ indexs: [ "index1:c3:c7" ]
+ rows:
+ - [ 1,"aa",20,30,1.1,2.1,1590738990000,"2020-05-01" ]
+ - [ 2,"bb",20,31,1.2,2.2,1590738991000,"2020-05-02" ]
+ - [ 3,"cc",20,32,1.3,2.3,1590738992000,"2020-05-03" ]
+ - [ 4,"dd",20,33,1.4,2.4,1590738993000,"2020-05-04" ]
+ - [ 5,"ee",21,34,1.5,2.5,1590738994000,"2020-05-05" ]
+ sql: |
+ SELECT id, c1, c3, sum(c4) OVER w1 as w1_c4_sum, count(c4) OVER w2 as w2_c4_count FROM {0} WINDOW
+ w1 AS (PARTITION BY {0}.c3 ORDER BY {0}.c7 ROWS_RANGE BETWEEN 2s PRECEDING AND CURRENT ROW),
+ w2 AS (PARTITION BY {0}.c3 ORDER BY {0}.c7 ROWS_RANGE BETWEEN 2s PRECEDING AND CURRENT ROW);
+ expect:
+ order: id
+ columns: [ "id int","c1 string","c3 int","w1_c4_sum bigint","w2_c4_count bigint" ]
+ rows:
+ - [ 1,"aa",20,30,1 ]
+ - [ 2,"bb",20,61,2 ]
+ - [ 3,"cc",20,93,3 ]
+ - [ 4,"dd",20,96,3 ]
+ - [ 5,"ee",21,34,1 ]
+ - id: 15
+ desc: multiple windows with different pks
+ inputs:
+ - columns: [ "id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date" ]
+ indexs: [ "index1:c1:c7", "index2:c3:c7" ]
+ rows:
+ - [ 1,"aa",20,30,1.1,2.1,1590738990000,"2020-05-01" ]
+ - [ 2,"aa",20,31,1.2,2.2,1590738991000,"2020-05-02" ]
+ - [ 3,"cc",20,32,1.3,2.3,1590738992000,"2020-05-03" ]
+ - [ 4,"cc",20,33,1.4,2.4,1590738993000,"2020-05-04" ]
+ - [ 5,"ee",21,34,1.5,2.5,1590738994000,"2020-05-05" ]
+ sql: |
+ SELECT id, c1, c3, sum(c4) OVER w1 as w1_c4_sum, count(c4) OVER w2 as w2_c4_count FROM {0} WINDOW
+ w1 AS (PARTITION BY {0}.c3 ORDER BY {0}.c7 ROWS_RANGE BETWEEN 2s PRECEDING AND CURRENT ROW),
+ w2 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS_RANGE BETWEEN 2s PRECEDING AND CURRENT ROW);
+ expect:
+ order: id
+ columns: [ "id int","c1 string","c3 int","w1_c4_sum bigint","w2_c4_count bigint" ]
+ rows:
+ - [ 1,"aa",20,30,1 ]
+ - [ 2,"aa",20,61,2 ]
+ - [ 3,"cc",20,93,1 ]
+ - [ 4,"cc",20,96,2 ]
+ - [ 5,"ee",21,34,1 ]
+ - id: 16-1
+ desc: multiple windows with different ts columns, rows inserted out of time order, expected result for batch mode
+ mode: request-unsupport
+ inputs:
+ - columns: [ "id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date" ]
+ indexs: [ "index1:c3:c7", "index2:c3:c4" ]
+ rows:
+ - [ 1,"aa",20,33,1.1,2.1,1590738990000,"2020-05-01" ]
+ - [ 2,"bb",20,31,1.2,2.2,1590738991000,"2020-05-02" ]
+ - [ 3,"cc",20,30,1.3,2.3,1590738992000,"2020-05-03" ]
+ - [ 4,"dd",20,32,1.4,2.4,1590738993000,"2020-05-04" ]
+ - [ 5,"ee",21,34,1.5,2.5,1590738994000,"2020-05-05" ]
+ sql: |
+ SELECT id, c1, c3, sum(c4) OVER w1 as w1_c4_sum, count(c4) OVER w2 as w2_c4_count FROM {0} WINDOW
+ w1 AS (PARTITION BY {0}.c3 ORDER BY {0}.c7 ROWS_RANGE BETWEEN 2s PRECEDING AND CURRENT ROW),
+ w2 AS (PARTITION BY {0}.c3 ORDER BY {0}.c4 ROWS_RANGE BETWEEN 2 PRECEDING AND CURRENT ROW);
+ expect:
+ order: id
+ columns: [ "id int","c1 string","c3 int","w1_c4_sum bigint","w2_c4_count bigint" ]
+ rows:
+ - [ 1,"aa",20,33,3 ]
+ - [ 2,"bb",20,64,2 ]
+ - [ 3,"cc",20,94,1 ]
+ - [ 4,"dd",20,93,3 ]
+ - [ 5,"ee",21,34,1 ]
+
+ - id: 16-2
+ desc: multiple windows with different ts columns, rows inserted out of time order, expected result for request mode
+ mode: batch-unsupport
+ inputs:
+ - columns: [ "id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date" ]
+ indexs: [ "index1:c3:c7", "index2:c3:c4" ]
+ rows:
+ - [ 1,"aa",20,33,1.1,2.1,1590738990000,"2020-05-01" ]
+ - [ 2,"bb",20,31,1.2,2.2,1590738991000,"2020-05-02" ]
+ - [ 3,"cc",20,30,1.3,2.3,1590738992000,"2020-05-03" ]
+ - [ 4,"dd",20,32,1.4,2.4,1590738993000,"2020-05-04" ]
+ - [ 5,"ee",21,34,1.5,2.5,1590738994000,"2020-05-05" ]
+ sql: |
+ SELECT id, c1, c3, sum(c4) OVER w1 as w1_c4_sum, count(c4) OVER w2 as w2_c4_count FROM {0} WINDOW
+ w1 AS (PARTITION BY {0}.c3 ORDER BY {0}.c7 ROWS_RANGE BETWEEN 2s PRECEDING AND CURRENT ROW),
+ w2 AS (PARTITION BY {0}.c3 ORDER BY {0}.c4 ROWS_RANGE BETWEEN 2 PRECEDING AND CURRENT ROW);
+ expect:
+ order: id
+ columns: [ "id int","c1 string","c3 int","w1_c4_sum bigint","w2_c4_count bigint" ]
+ rows:
+ - [ 1,"aa",20,33,1 ]
+ - [ 2,"bb",20,64,1 ]
+ - [ 3,"cc",20,94,1 ]
+ - [ 4,"dd",20,93,3 ]
+ - [ 5,"ee",21,34,1 ]
+ - id: 16-3
+ desc: multiple windows with different ts columns, rows inserted in time order
+ inputs:
+ - columns: [ "id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date" ]
+ indexs: [ "index1:c3:c7", "index2:c3:c4" ]
+ rows:
+ - [ 1,"aa",20,30,1.1,2.1,1590738990000,"2020-05-01" ]
+ - [ 2,"bb",20,31,1.2,2.2,1590738991000,"2020-05-02" ]
+ - [ 3,"cc",20,32,1.3,2.3,1590738992000,"2020-05-03" ]
+ - [ 4,"dd",20,33,1.4,2.4,1590738993000,"2020-05-04" ]
+ - [ 5,"ee",21,34,1.5,2.5,1590738994000,"2020-05-05" ]
+ sql: |
+ SELECT id, c1, c3, sum(c4) OVER w1 as w1_c4_sum, count(c4) OVER w2 as w2_c4_count FROM {0} WINDOW
+ w1 AS (PARTITION BY {0}.c3 ORDER BY {0}.c7 ROWS_RANGE BETWEEN 2s PRECEDING AND CURRENT ROW),
+ w2 AS (PARTITION BY {0}.c3 ORDER BY {0}.c4 ROWS_RANGE BETWEEN 2 PRECEDING AND CURRENT ROW);
+ expect:
+ order: id
+ columns: [ "id int","c1 string","c3 int","w1_c4_sum bigint","w2_c4_count bigint" ]
+ rows:
+ - [ 1,"aa",20,30,1 ]
+ - [ 2,"bb",20,61,2 ]
+ - [ 3,"cc",20,93,3 ]
+ - [ 4,"dd",20,96,3 ]
+ - [ 5,"ee",21,34,1 ]
+ - id: 17
+ desc: two windows where both pks are index columns
+ inputs:
+ - columns: [ "id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date" ]
+ indexs: [ "index1:c1:c7","index2:c3:c7" ]
+ rows:
+ - [ 1,"aa",20,30,1.1,2.1,1590738990000,"2020-05-01" ]
+ - [ 2,"aa",20,31,1.2,2.2,1590738991000,"2020-05-02" ]
+ - [ 3,"cc",20,32,1.3,2.3,1590738992000,"2020-05-03" ]
+ - [ 4,"cc",20,33,1.4,2.4,1590738993000,"2020-05-04" ]
+ - [ 5,"ee",21,34,1.5,2.5,1590738994000,"2020-05-05" ]
+ sql: |
+ SELECT id, c1, c3, sum(c4) OVER w1 as w1_c4_sum, count(c4) OVER w2 as w2_c4_count FROM {0} WINDOW
+ w1 AS (PARTITION BY {0}.c3 ORDER BY {0}.c7 ROWS_RANGE BETWEEN 2s PRECEDING AND CURRENT ROW),
+ w2 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS_RANGE BETWEEN 2s PRECEDING AND CURRENT ROW);
+ expect:
+ order: id
+ columns: [ "id int","c1 string","c3 int","w1_c4_sum bigint","w2_c4_count bigint" ]
+ rows:
+ - [ 1,"aa",20,30,1 ]
+ - [ 2,"aa",20,61,2 ]
+ - [ 3,"cc",20,93,1 ]
+ - [ 4,"cc",20,96,2 ]
+ - [ 5,"ee",21,34,1 ]
+ - id: 18
+ desc: two windows, one with a single pk and one with two pks
+ inputs:
+ - columns: [ "id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date" ]
+ indexs: [ "index1:c1:c7","index2:c3:c7" ]
+ rows:
+ - [ 1,"aa",20,30,1.1,2.1,1590738990000,"2020-05-01" ]
+ - [ 2,"aa",20,31,1.2,2.2,1590738991000,"2020-05-02" ]
+ - [ 3,"cc",20,32,1.3,2.3,1590738992000,"2020-05-03" ]
+ - [ 4,"cc",20,33,1.4,2.4,1590738993000,"2020-05-04" ]
+ - [ 5,"cc",21,34,1.5,2.5,1590738994000,"2020-05-05" ]
+ sql: |
+ SELECT id, c1, c3, sum(c4) OVER w1 as w1_c4_sum, count(c4) OVER w2 as w2_c4_count FROM {0} WINDOW
+ w1 AS (PARTITION BY {0}.c3 ORDER BY {0}.c7 ROWS_RANGE BETWEEN 2s PRECEDING AND CURRENT ROW),
+ w2 AS (PARTITION BY {0}.c1,{0}.c3 ORDER BY {0}.c7 ROWS_RANGE BETWEEN 2s PRECEDING AND CURRENT ROW);
+ expect:
+ order: id
+ columns: [ "id int","c1 string","c3 int","w1_c4_sum bigint","w2_c4_count bigint" ]
+ rows:
+ - [ 1,"aa",20,30,1 ]
+ - [ 2,"aa",20,61,2 ]
+ - [ 3,"cc",20,93,1 ]
+ - [ 4,"cc",20,96,2 ]
+ - [ 5,"cc",21,34,1 ]
+ - id: 19
+ desc: all window aggregations
+ inputs:
+ - columns: [ "id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date" ]
+ indexs: [ "index1:c3:c7" ]
+ rows:
+ - [ 1,"aa",20,30,1.1,2.1,1590738990000,"2020-05-01" ]
+ - [ 2,"bb",20,31,1.2,2.2,1590738991000,"2020-05-02" ]
+ - [ 3,"cc",20,32,1.3,2.3,1590738992000,"2020-05-03" ]
+ - [ 4,"dd",20,33,1.4,2.4,1590738993000,"2020-05-04" ]
+ - [ 5,"ee",21,34,1.5,2.5,1590738994000,"2020-05-05" ]
+ sql: |
+ SELECT id, count(c3) OVER w1 as w1_c3_count, sum(c4) OVER w1 as w1_c4_sum FROM {0} WINDOW
+ w1 AS (PARTITION BY {0}.c3 ORDER BY {0}.c7 ROWS_RANGE BETWEEN 2s PRECEDING AND CURRENT ROW);
+ expect:
+ columns: [ "id int", "w1_c3_count bigint","w1_c4_sum bigint" ]
+ order: id
+ rows:
+ - [ 1, 1,30 ]
+ - [ 2, 2,61 ]
+ - [ 3, 3,93 ]
+ - [ 4, 3,96 ]
+ - [ 5, 1,34 ]
+ - id: 20
+ tags: [ "TODO", "@zhaowei暂时不要引入LIMIT的case,LIMIT的case需要spark,rtidb分别预期结果" ]
+ mode: request-unsupport
+ desc: combined with LIMIT
+ inputs:
+ - columns: [ "id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date" ]
+ indexs: [ "index1:c3:c7" ]
+ rows:
+ - [ 1,"aa",20,30,1.1,2.1,1590738990000,"2020-05-01" ]
+ - [ 2,"bb",20,31,1.2,2.2,1590738991000,"2020-05-02" ]
+ - [ 3,"cc",20,32,1.3,2.3,1590738992000,"2020-05-03" ]
+ - [ 4,"dd",20,33,1.4,2.4,1590738993000,"2020-05-04" ]
+ - [ 5,"ee",21,34,1.5,2.5,1590738994000,"2020-05-05" ]
+ sql: |
+ SELECT id, c1, c3, sum(c4) OVER w1 as w1_c4_sum FROM {0} WINDOW w1 AS (PARTITION BY {0}.c3 ORDER BY {0}.c7 ROWS_RANGE BETWEEN 2s PRECEDING AND CURRENT ROW) limit 2;
+ expect:
+ order: id
+ columns: [ "id int","c1 string","c3 int","w1_c4_sum bigint" ]
+ rows:
+ - [ 1,"aa",20,30 ]
+ - [ 5,"ee",21,34 ]
+ - id: 22
+ desc: window aggregation result used without an alias
+ mode: cli-unsupport
+ inputs:
+ - columns: [ "id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date" ]
+ indexs: [ "index1:c3:c7" ]
+ rows:
+ - [ 1,"aa",20,30,1.1,2.1,1590738990000,"2020-05-01" ]
+ - [ 2,"bb",20,31,1.2,2.2,1590738991000,"2020-05-02" ]
+ - [ 3,"cc",20,32,1.3,2.3,1590738992000,"2020-05-03" ]
+ - [ 4,"dd",20,33,1.4,2.4,1590738993000,"2020-05-04" ]
+ - [ 5,"ee",21,34,1.5,2.5,1590738994000,"2020-05-05" ]
+ sql: |
+ SELECT id, c1, c3, sum(c4) OVER w1 FROM {0} WINDOW w1 AS (PARTITION BY {0}.c3 ORDER BY {0}.c7 ROWS_RANGE BETWEEN 2s PRECEDING AND CURRENT ROW);
+ expect:
+ order: id
+ columns: [ "id int","c1 string","c3 int","sum(c4)over w1 bigint" ]
+ rows:
+ - [ 1,"aa",20,30 ]
+ - [ 2,"bb",20,61 ]
+ - [ 3,"cc",20,93 ]
+ - [ 4,"dd",20,96 ]
+ - [ 5,"ee",21,34 ]
+ - id: 23-1
+ desc: ROWS_RANGE Window with MaxSize
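+ # MAXSIZE appears to cap how many rows the ROWS_RANGE frame may hold: with MAXSIZE 2 the sum never exceeds 2.0 even when more rows fall inside the 3s range.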
+ inputs:
+ - columns: [ "c1 string","c3 int","c4 double","c7 timestamp" ]
+ indexs: [ "index1:c1:c7" ]
+ rows:
+ - [ "aa",1, 1.0, 1590738990000 ]
+ - [ "aa",2, 1.0, 1590738991000 ]
+ - [ "aa",3, 1.0, 1590738992000 ]
+ - [ "aa",4, 1.0, 1590738993000 ]
+ - [ "aa",5, 1.0, 1590738994000 ]
+ - [ "aa",6, 1.0, 1590738995000 ]
+ - [ "aa",7, 1.0, 1590738999000 ]
+ - [ "aa",8, 1.0, 1590739001000 ]
+ - [ "aa",9, 1.0, 1590739002000 ]
+ sql: |
+ SELECT c1, c3, c7,
+ sum(c4) OVER w1 as w1_c4_sum
+ FROM {0} WINDOW
+ w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS_RANGE BETWEEN 3s PRECEDING AND CURRENT ROW MAXSIZE 2);
+ expect:
+ order: c3
+ columns: [ "c1 string", "c3 int", "c7 timestamp", "w1_c4_sum double" ]
+ rows:
+ - [ "aa", 1, 1590738990000, 1.0 ]
+ - [ "aa", 2, 1590738991000, 2.0 ]
+ - [ "aa", 3, 1590738992000, 2.0 ]
+ - [ "aa", 4, 1590738993000, 2.0 ]
+ - [ "aa", 5, 1590738994000, 2.0 ]
+ - [ "aa", 6, 1590738995000, 2.0 ]
+ - [ "aa", 7, 1590738999000, 1.0 ]
+ - [ "aa", 8, 1590739001000, 2.0 ]
+ - [ "aa", 9, 1590739002000, 2.0 ]
+ - id: 23-2
+ desc: ROWS_RANGE Current History Window with MaxSize 10
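+ # With MAXSIZE 10 larger than the number of rows any 3s range contains here, the cap never applies and the results match a plain 3s ROWS_RANGE window.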
+ inputs:
+ - columns: [ "c1 string","c3 int","c4 double","c7 timestamp" ]
+ indexs: [ "index1:c1:c7" ]
+ rows:
+ - [ "aa",1, 1.0, 1590738990000 ]
+ - [ "aa",2, 1.0, 1590738991000 ]
+ - [ "aa",3, 1.0, 1590738992000 ]
+ - [ "aa",4, 1.0, 1590738993000 ]
+ - [ "aa",5, 1.0, 1590738994000 ]
+ - [ "aa",6, 1.0, 1590738995000 ]
+ - [ "aa",7, 1.0, 1590738999000 ]
+ - [ "aa",8, 1.0, 1590739001000 ]
+ - [ "aa",9, 1.0, 1590739002000 ]
+ sql: |
+ SELECT c1, c3, c7,
+ sum(c4) OVER w1 as w1_c4_sum
+ FROM {0} WINDOW
+ w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS_RANGE BETWEEN 3s PRECEDING AND CURRENT ROW MAXSIZE 10);
+ expect:
+ order: c3
+ columns: [ "c1 string", "c3 int", "c7 timestamp", "w1_c4_sum double" ]
+ rows:
+ - [ "aa", 1, 1590738990000, 1.0 ]
+ - [ "aa", 2, 1590738991000, 2.0 ]
+ - [ "aa", 3, 1590738992000, 3.0 ]
+ - [ "aa", 4, 1590738993000, 4.0 ]
+ - [ "aa", 5, 1590738994000, 4.0 ]
+ - [ "aa", 6, 1590738995000, 4.0 ]
+ - [ "aa", 7, 1590738999000, 1.0 ]
+ - [ "aa", 8, 1590739001000, 2.0 ]
+ - [ "aa", 9, 1590739002000, 3.0 ]
+ - id: 24-1
+ desc: ROWS_RANGE Pure History Window
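+ # The frame 3s PRECEDING AND 1s PRECEDING excludes the current row, so rows with no earlier data inside the range (the first row, and the row at 1590738999000 whose nearest predecessor is 4s older) get NULL.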
+ version: 0.6.0
+ inputs:
+ - columns: [ "c1 string","c3 int","c4 double","c7 timestamp" ]
+ indexs: [ "index1:c1:c7" ]
+ rows:
+ - [ "aa",1, 1.0, 1590738990000 ]
+ - [ "aa",2, 1.0, 1590738991000 ]
+ - [ "aa",3, 1.0, 1590738992000 ]
+ - [ "aa",4, 1.0, 1590738993000 ]
+ - [ "aa",5, 1.0, 1590738994000 ]
+ - [ "aa",6, 1.0, 1590738995000 ]
+ - [ "aa",7, 1.0, 1590738999000 ]
+ - [ "aa",8, 1.0, 1590739001000 ]
+ - [ "aa",9, 1.0, 1590739002000 ]
+ sql: |
+ SELECT c1, c3, c7,
+ sum(c4) OVER w1 as w1_c4_sum
+ FROM {0} WINDOW
+ w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS_RANGE BETWEEN 3s PRECEDING AND 1s PRECEDING);
+ expect:
+ order: c3
+ columns: [ "c1 string", "c3 int", "c7 timestamp", "w1_c4_sum double" ]
+ rows:
+ - [ "aa", 1, 1590738990000, NULL ]
+ - [ "aa", 2, 1590738991000, 1.0 ]
+ - [ "aa", 3, 1590738992000, 2.0 ]
+ - [ "aa", 4, 1590738993000, 3.0 ]
+ - [ "aa", 5, 1590738994000, 3.0 ]
+ - [ "aa", 6, 1590738995000, 3.0 ]
+ - [ "aa", 7, 1590738999000, NULL ]
+ - [ "aa", 8, 1590739001000, 1.0 ]
+ - [ "aa", 9, 1590739002000, 2.0 ]
+ - id: 24-2
+ desc: ROWS_RANGE Pure History Window With MaxSize
+ version: 0.6.0
+ inputs:
+ - columns: [ "c1 string","c3 int","c4 double","c7 timestamp" ]
+ indexs: [ "index1:c1:c7" ]
+ rows:
+ - [ "aa",1, 1.0, 1590738990000 ]
+ - [ "aa",2, 1.0, 1590738991000 ]
+ - [ "aa",3, 1.0, 1590738992000 ]
+ - [ "aa",4, 1.0, 1590738993000 ]
+ - [ "aa",5, 1.0, 1590738994000 ]
+ - [ "aa",6, 1.0, 1590738995000 ]
+ - [ "aa",7, 1.0, 1590738999000 ]
+ - [ "aa",8, 1.0, 1590739001000 ]
+ - [ "aa",9, 1.0, 1590739002000 ]
+ request_plan: |
+ PROJECT(type=Aggregation)
+ REQUEST_UNION(partition_keys=(), orders=(ASC), range=(auto_t0.c7, 3000 PRECEDING, 1000 PRECEDING, maxsize=2), index_keys=(auto_t0.c1))
+ DATA_PROVIDER(request=auto_t0)
+ DATA_PROVIDER(type=Partition, table=auto_t0, index=index1)
+
+ batch_plan: |
+ PROJECT(type=WindowAggregation)
+ +-WINDOW(partition_keys=(), orders=(ASC), range=(auto_t0.c7, 3000 PRECEDING, 1000 PRECEDING, maxsize=2))
+ DATA_PROVIDER(type=Partition, table=auto_t0, index=index1)
+ sql: |
+ SELECT c1, c3, c7,
+ sum(c4) OVER w1 as w1_c4_sum
+ FROM {0} WINDOW
+ w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS_RANGE BETWEEN 3s PRECEDING AND 1s PRECEDING MAXSIZE 2);
+ expect:
+ order: c3
+ columns: [ "c1 string", "c3 int", "c7 timestamp", "w1_c4_sum double" ]
+ rows:
+ - [ "aa", 1, 1590738990000, NULL ]
+ - [ "aa", 2, 1590738991000, 1.0 ]
+ - [ "aa", 3, 1590738992000, 2.0 ]
+ - [ "aa", 4, 1590738993000, 2.0 ]
+ - [ "aa", 5, 1590738994000, 2.0 ]
+ - [ "aa", 6, 1590738995000, 2.0 ]
+ - [ "aa", 7, 1590738999000, NULL ]
+ - [ "aa", 8, 1590739001000, 1.0 ]
+ - [ "aa", 9, 1590739002000, 2.0 ]
+ - id: 25
+ desc: ROWS_RANGE Current History Window with MaxSize Merge
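+ # w1 and w2 share the same partition key, order and MAXSIZE, so the planner appears to merge them into a single window node covering the wider 3s range, as the request_plan/batch_plan below show.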
+ inputs:
+ - columns: [ "c1 string","c3 int","c4 double","c7 timestamp" ]
+ indexs: [ "index1:c1:c7" ]
+ rows:
+ - [ "aa",1, 1.0, 1590738990000 ]
+ - [ "aa",2, 1.0, 1590738991000 ]
+ - [ "aa",3, 1.0, 1590738992000 ]
+ - [ "aa",4, 1.0, 1590738993000 ]
+ - [ "aa",5, 1.0, 1590738994000 ]
+ - [ "aa",6, 1.0, 1590738995000 ]
+ - [ "aa",7, 1.0, 1590738999000 ]
+ - [ "aa",8, 1.0, 1590739001000 ]
+ - [ "aa",9, 1.0, 1590739002000 ]
+ sql: |
+ SELECT c1, c3, c7,
+ sum(c4) OVER w1 as w1_c4_sum,
+ sum(c4) OVER w2 as w2_c4_sum
+ FROM {0} WINDOW
+ w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS_RANGE BETWEEN 3s PRECEDING AND CURRENT ROW MAXSIZE 4),
+ w2 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS_RANGE BETWEEN 2s PRECEDING AND CURRENT ROW MAXSIZE 4);
+ request_plan: |
+ PROJECT(type=Aggregation)
+ REQUEST_UNION(partition_keys=(), orders=(ASC), range=(auto_t0.c7, 3000 PRECEDING, 0 CURRENT, maxsize=4), index_keys=(auto_t0.c1))
+ DATA_PROVIDER(request=auto_t0)
+ DATA_PROVIDER(type=Partition, table=auto_t0, index=index1)
+ batch_plan: |
+ PROJECT(type=WindowAggregation)
+ +-WINDOW(partition_keys=(), orders=(ASC), range=(auto_t0.c7, 3000 PRECEDING, 0 CURRENT, maxsize=4))
+ DATA_PROVIDER(type=Partition, table=auto_t0, index=index1)
+ expect:
+ order: c3
+ columns: [ "c1 string", "c3 int", "c7 timestamp", "w1_c4_sum double", "w2_c4_sum double" ]
+ rows:
+ - [ "aa", 1, 1590738990000, 1.0, 1.0 ]
+ - [ "aa", 2, 1590738991000, 2.0, 2.0 ]
+ - [ "aa", 3, 1590738992000, 3.0, 3.0 ]
+ - [ "aa", 4, 1590738993000, 4.0, 3.0 ]
+ - [ "aa", 5, 1590738994000, 4.0, 3.0 ]
+ - [ "aa", 6, 1590738995000, 4.0, 3.0 ]
+ - [ "aa", 7, 1590738999000, 1.0, 1.0 ]
+ - [ "aa", 8, 1590739001000, 2.0, 2.0 ]
+ - [ "aa", 9, 1590739002000, 3.0, 2.0 ]
+ - id: 26
+ desc: ROWS_RANGE Window with MaxSize Not Merge
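+ # w1 and w2 differ only in MAXSIZE, which apparently prevents merging: the request_plan below keeps two REQUEST_UNION nodes combined by a concat REQUEST_JOIN.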
+ inputs:
+ - columns: [ "c1 string","c3 int","c4 double","c7 timestamp" ]
+ indexs: [ "index1:c1:c7" ]
+ rows:
+ - [ "aa",1, 1.0, 1590738990000 ]
+ - [ "aa",2, 1.0, 1590738991000 ]
+ - [ "aa",3, 1.0, 1590738992000 ]
+ - [ "aa",4, 1.0, 1590738993000 ]
+ - [ "aa",5, 1.0, 1590738994000 ]
+ - [ "aa",6, 1.0, 1590738995000 ]
+ - [ "aa",7, 1.0, 1590738999000 ]
+ - [ "aa",8, 1.0, 1590739001000 ]
+ - [ "aa",9, 1.0, 1590739002000 ]
+ sql: |
+ SELECT c1, c3, c7,
+ sum(c4) OVER w1 as w1_c4_sum,
+ sum(c4) OVER w2 as w2_c4_sum
+ FROM {0} WINDOW
+ w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS_RANGE BETWEEN 3s PRECEDING AND CURRENT ROW),
+ w2 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS_RANGE BETWEEN 3s PRECEDING AND CURRENT ROW MAXSIZE 2);
+ request_plan: |
+ SIMPLE_PROJECT(sources=(c1, c3, c7, w1_c4_sum, w2_c4_sum))
+ REQUEST_JOIN(type=kJoinTypeConcat)
+ PROJECT(type=Aggregation)
+ REQUEST_UNION(partition_keys=(), orders=(ASC), range=(auto_t0.c7, 3000 PRECEDING, 0 CURRENT), index_keys=(auto_t0.c1))
+ DATA_PROVIDER(request=auto_t0)
+ DATA_PROVIDER(type=Partition, table=auto_t0, index=index1)
+ PROJECT(type=Aggregation)
+ REQUEST_UNION(partition_keys=(), orders=(ASC), range=(auto_t0.c7, 3000 PRECEDING, 0 CURRENT, maxsize=2), index_keys=(auto_t0.c1))
+ DATA_PROVIDER(request=auto_t0)
+ DATA_PROVIDER(type=Partition, table=auto_t0, index=index1)
+
+ expect:
+ order: c3
+ columns: [ "c1 string", "c3 int", "c7 timestamp", "w1_c4_sum double", "w2_c4_sum double" ]
+ rows:
+ - [ "aa", 1, 1590738990000, 1.0, 1.0 ]
+ - [ "aa", 2, 1590738991000, 2.0, 2.0 ]
+ - [ "aa", 3, 1590738992000, 3.0, 2.0 ]
+ - [ "aa", 4, 1590738993000, 4.0, 2.0 ]
+ - [ "aa", 5, 1590738994000, 4.0, 2.0 ]
+ - [ "aa", 6, 1590738995000, 4.0, 2.0 ]
+ - [ "aa", 7, 1590738999000, 1.0, 1.0 ]
+ - [ "aa", 8, 1590739001000, 2.0, 2.0 ]
+ - [ "aa", 9, 1590739002000, 3.0, 2.0 ]
+
+ - id: 27-1
+ desc: ROWS and ROWS_RANGE Current History Window with MaxSize Merge
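+ # The ROWS and ROWS_RANGE frames appear to be merged into one window node carrying both a rows= and a range= bound, as the request_plan/batch_plan below show.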
+ inputs:
+ - columns: [ "c1 string","c3 int","c4 double","c7 timestamp" ]
+ indexs: [ "index1:c1:c7" ]
+ rows:
+ - [ "aa",1, 1.0, 1590738990000 ]
+ - [ "aa",2, 1.0, 1590738991000 ]
+ - [ "aa",3, 1.0, 1590738992000 ]
+ - [ "aa",4, 1.0, 1590738993000 ]
+ - [ "aa",5, 1.0, 1590738994000 ]
+ - [ "aa",6, 1.0, 1590738995000 ]
+ - [ "aa",7, 1.0, 1590738999000 ]
+ - [ "aa",8, 1.0, 1590739001000 ]
+ - [ "aa",9, 1.0, 1590739002000 ]
+ sql: |
+ SELECT c1, c3, c7,
+ sum(c4) OVER w1 as w1_c4_sum,
+ sum(c4) OVER w2 as w2_c4_sum
+ FROM {0} WINDOW
+ w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS BETWEEN 3 PRECEDING AND CURRENT ROW),
+ w2 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS_RANGE BETWEEN 6s PRECEDING AND CURRENT ROW MAXSIZE 5);
+ request_plan: |
+ PROJECT(type=Aggregation)
+ REQUEST_UNION(partition_keys=(), orders=(ASC), range=(auto_t0.c7, 6000 PRECEDING, 0 CURRENT, maxsize=5), rows=(auto_t0.c7, 3 PRECEDING, 0 CURRENT), index_keys=(auto_t0.c1))
+ DATA_PROVIDER(request=auto_t0)
+ DATA_PROVIDER(type=Partition, table=auto_t0, index=index1)
+ batch_plan: |
+ PROJECT(type=WindowAggregation)
+ +-WINDOW(partition_keys=(), orders=(ASC), range=(auto_t0.c7, 6000 PRECEDING, 0 CURRENT, maxsize=5), rows=(auto_t0.c7, 3 PRECEDING, 0 CURRENT))
+ DATA_PROVIDER(type=Partition, table=auto_t0, index=index1)
+ expect:
+ order: c3
+ columns: [ "c1 string", "c3 int", "c7 timestamp", "w1_c4_sum double", "w2_c4_sum double" ]
+ rows:
+ - [ "aa", 1, 1590738990000, 1.0, 1.0 ]
+ - [ "aa", 2, 1590738991000, 2.0, 2.0 ]
+ - [ "aa", 3, 1590738992000, 3.0, 3.0 ]
+ - [ "aa", 4, 1590738993000, 4.0, 4.0 ]
+ - [ "aa", 5, 1590738994000, 4.0, 5.0 ]
+ - [ "aa", 6, 1590738995000, 4.0, 5.0 ]
+ - [ "aa", 7, 1590738999000, 4.0, 4.0 ]
+ - [ "aa", 8, 1590739001000, 4.0, 3.0 ]
+ - [ "aa", 9, 1590739002000, 4.0, 3.0 ]
+ - id: 27-2
+ desc: ROWS and ROWS_RANGE Current History Window with MaxSize, MaxSize < ROWS Preceding, Can't Merge Frame
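+ # Because the ROWS frame (7 PRECEDING) can hold more rows than the ROWS_RANGE MAXSIZE (5), the frames apparently cannot be merged and are computed as two separate REQUEST_UNION nodes.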
+ inputs:
+ - columns: [ "c1 string","c3 int","c4 double","c7 timestamp" ]
+ indexs: [ "index1:c1:c7" ]
+ rows:
+ - [ "aa",1, 1.0, 1590738990000 ]
+ - [ "aa",2, 1.0, 1590738991000 ]
+ - [ "aa",3, 1.0, 1590738992000 ]
+ - [ "aa",4, 1.0, 1590738993000 ]
+ - [ "aa",5, 1.0, 1590738994000 ]
+ - [ "aa",6, 1.0, 1590738995000 ]
+ - [ "aa",7, 1.0, 1590738999000 ]
+ - [ "aa",8, 1.0, 1590739001000 ]
+ - [ "aa",9, 1.0, 1590739002000 ]
+ sql: |
+ SELECT c1, c3, c7,
+ sum(c4) OVER w1 as w1_c4_sum,
+ sum(c4) OVER w2 as w2_c4_sum
+ FROM {0} WINDOW
+ w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS BETWEEN 7 PRECEDING AND CURRENT ROW),
+ w2 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS_RANGE BETWEEN 6s PRECEDING AND CURRENT ROW MAXSIZE 5);
+ request_plan: |
+ SIMPLE_PROJECT(sources=(c1, c3, c7, w1_c4_sum, w2_c4_sum))
+ REQUEST_JOIN(type=kJoinTypeConcat)
+ PROJECT(type=Aggregation)
+ REQUEST_UNION(partition_keys=(), orders=(ASC), rows=(auto_t0.c7, 7 PRECEDING, 0 CURRENT), index_keys=(auto_t0.c1))
+ DATA_PROVIDER(request=auto_t0)
+ DATA_PROVIDER(type=Partition, table=auto_t0, index=index1)
+ PROJECT(type=Aggregation)
+ REQUEST_UNION(partition_keys=(), orders=(ASC), range=(auto_t0.c7, 6000 PRECEDING, 0 CURRENT, maxsize=5), index_keys=(auto_t0.c1))
+ DATA_PROVIDER(request=auto_t0)
+ DATA_PROVIDER(type=Partition, table=auto_t0, index=index1)
+ expect:
+ order: c3
+ columns: [ "c1 string", "c3 int", "c7 timestamp", "w1_c4_sum double", "w2_c4_sum double" ]
+ rows:
+ - [ "aa", 1, 1590738990000, 1.0, 1.0 ]
+ - [ "aa", 2, 1590738991000, 2.0, 2.0 ]
+ - [ "aa", 3, 1590738992000, 3.0, 3.0 ]
+ - [ "aa", 4, 1590738993000, 4.0, 4.0 ]
+ - [ "aa", 5, 1590738994000, 5.0, 5.0 ]
+ - [ "aa", 6, 1590738995000, 6.0, 5.0 ]
+ - [ "aa", 7, 1590738999000, 7.0, 4.0 ]
+ - [ "aa", 8, 1590739001000, 8.0, 3.0 ]
+ - [ "aa", 9, 1590739002000, 8.0, 3.0 ]
+
+ - id: 27-3
+ desc: ROWS and ROWS_RANGE Pure History Window Can't Be Merged
+ version: 0.6.0
+ inputs:
+ - columns: [ "c1 string","c3 int","c4 double","c7 timestamp" ]
+ indexs: [ "index1:c1:c7" ]
+ rows:
+ - [ "aa",1, 1.0, 1590738990000 ]
+ - [ "aa",2, 1.0, 1590738991000 ]
+ - [ "aa",3, 1.0, 1590738992000 ]
+ - [ "aa",4, 1.0, 1590738993000 ]
+ - [ "aa",5, 1.0, 1590738994000 ]
+ - [ "aa",6, 1.0, 1590738995000 ]
+ - [ "aa",7, 1.0, 1590738999000 ]
+ - [ "aa",8, 1.0, 1590739001000 ]
+ - [ "aa",9, 1.0, 1590739002000 ]
+ sql: |
+ SELECT c1, c3, c7,
+ sum(c4) OVER w1 as w1_c4_sum,
+ sum(c4) OVER w2 as w2_c4_sum
+ FROM {0} WINDOW
+ w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS BETWEEN 3 PRECEDING AND CURRENT ROW),
+ w2 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS_RANGE BETWEEN 6s PRECEDING AND 2s PRECEDING);
+ request_plan: |
+ SIMPLE_PROJECT(sources=(c1, c3, c7, w1_c4_sum, w2_c4_sum))
+ REQUEST_JOIN(type=kJoinTypeConcat)
+ PROJECT(type=Aggregation)
+ REQUEST_UNION(partition_keys=(), orders=(ASC), rows=(auto_t0.c7, 3 PRECEDING, 0 CURRENT), index_keys=(auto_t0.c1))
+ DATA_PROVIDER(request=auto_t0)
+ DATA_PROVIDER(type=Partition, table=auto_t0, index=index1)
+ PROJECT(type=Aggregation)
+ REQUEST_UNION(partition_keys=(), orders=(ASC), range=(auto_t0.c7, 6000 PRECEDING, 2000 PRECEDING), index_keys=(auto_t0.c1))
+ DATA_PROVIDER(request=auto_t0)
+ DATA_PROVIDER(type=Partition, table=auto_t0, index=index1)
+
+ expect:
+ order: c3
+ columns: [ "c1 string", "c3 int", "c7 timestamp", "w1_c4_sum double", "w2_c4_sum double" ]
+ rows:
+ - [ "aa", 1, 1590738990000, 1.0, NULL ]
+ - [ "aa", 2, 1590738991000, 2.0, NULL ]
+ - [ "aa", 3, 1590738992000, 3.0, 1.0 ]
+ - [ "aa", 4, 1590738993000, 4.0, 2.0 ]
+ - [ "aa", 5, 1590738994000, 4.0, 3.0 ]
+ - [ "aa", 6, 1590738995000, 4.0, 4.0 ]
+ - [ "aa", 7, 1590738999000, 4.0, 3.0 ]
+ - [ "aa", 8, 1590739001000, 4.0, 2.0 ]
+ - [ "aa", 9, 1590739002000, 4.0, 1.0 ]
+ -
+ id: 28
+ desc: anonymous window
+ inputs:
+ -
+ columns: ["c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"]
+ indexs: ["index1:c1:c7"]
+ rows:
+ - ["aa",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - ["aa",21,31,1.2,2.2,1590738991000,"2020-05-02"]
+ - ["aa",22,32,1.3,2.3,1590738992000,"2020-05-03"]
+ - ["aa",23,33,1.4,2.4,1590738993000,"2020-05-04"]
+ - ["bb",24,34,1.5,2.5,1590738994000,"2020-05-05"]
+ sql: |
+ SELECT c1, c3, sum(c4) OVER (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS BETWEEN 2 PRECEDING AND CURRENT ROW) as w1_c4_sum FROM {0};
+ expect:
+ order: c3
+ columns: ["c1 string","c3 int","w1_c4_sum bigint"]
+ rows:
+ - ["aa",20,30]
+ - ["aa",21,61]
+ - ["aa",22,93]
+ - ["aa",23,96]
+ - ["bb",24,34]
+ -
+ id: 29
+ desc: anonymous window without parentheses
+ inputs:
+ -
+ columns: ["c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"]
+ indexs: ["index1:c1:c7"]
+ rows:
+ - ["aa",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - ["aa",21,31,1.2,2.2,1590738991000,"2020-05-02"]
+ - ["aa",22,32,1.3,2.3,1590738992000,"2020-05-03"]
+ - ["aa",23,33,1.4,2.4,1590738993000,"2020-05-04"]
+ - ["bb",24,34,1.5,2.5,1590738994000,"2020-05-05"]
+ sql: |
+ SELECT c1, c3, sum(c4) OVER PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS BETWEEN 2 PRECEDING AND CURRENT ROW as w1_c4_sum FROM {0};
+ expect:
+ success: false
+ -
+ id: 30
+ desc: smallint as partition by
+ inputs:
+ -
+ columns : ["id int","c1 string","c3 smallint","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"]
+ indexs: ["index1:c3:c7"]
+ rows:
+ - [1,"aa",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,31,1.2,2.2,1590738991000,"2020-05-02"]
+ - [3,"cc",20,32,1.3,2.3,1590738992000,"2020-05-03"]
+ - [4,"dd",20,33,1.4,2.4,1590738993000,"2020-05-04"]
+ - [5,"ee",21,34,1.5,2.5,1590738994000,"2020-05-05"]
+ sql: |
+ SELECT id, c1, c3, sum(c4) OVER w1 as w1_c4_sum FROM {0} WINDOW w1 AS (PARTITION BY {0}.c3 ORDER BY {0}.c7 ROWS BETWEEN 2 PRECEDING AND CURRENT ROW);
+ expect:
+ order: id
+ columns: ["id int","c1 string","c3 smallint","w1_c4_sum bigint"]
+ rows:
+ - [1,"aa",20,30]
+ - [2,"bb",20,61]
+ - [3,"cc",20,93]
+ - [4,"dd",20,96]
+ - [5,"ee",21,34]
+ -
+ id: 31
+ desc: bool as partition by
+ inputs:
+ -
+ columns : ["id int","c1 string","c2 bool","c3 smallint","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"]
+ indexs: ["index1:c2:c7"]
+ rows:
+ - [1,"aa",true,20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",true,20,31,1.2,2.2,1590738991000,"2020-05-02"]
+ - [3,"cc",true,20,32,1.3,2.3,1590738992000,"2020-05-03"]
+ - [4,"dd",true,20,33,1.4,2.4,1590738993000,"2020-05-04"]
+ - [5,"ee",false,21,34,1.5,2.5,1590738994000,"2020-05-05"]
+ sql: |
+ SELECT id, c1, c2, sum(c4) OVER w1 as w1_c4_sum FROM {0} WINDOW w1 AS (PARTITION BY {0}.c2 ORDER BY {0}.c7 ROWS BETWEEN 2 PRECEDING AND CURRENT ROW);
+ expect:
+ order: id
+ columns: ["id int","c1 string","c2 bool","w1_c4_sum bigint"]
+ rows:
+ - [1,"aa",true,30]
+ - [2,"bb",true,61]
+ - [3,"cc",true,93]
+ - [4,"dd",true,96]
+ - [5,"ee",false,34]
+ -
+ id: 37
+ desc: no frame
+ inputs:
+ -
+ columns: ["c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"]
+ indexs: ["index1:c1:c7"]
+ rows:
+ - ["aa",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - ["aa",21,31,1.2,2.2,1590738991000,"2020-05-02"]
+ - ["aa",22,32,1.3,2.3,1590738992000,"2020-05-03"]
+ - ["aa",23,33,1.4,2.4,1590738993000,"2020-05-04"]
+ - ["bb",24,34,1.5,2.5,1590738994000,"2020-05-05"]
+ sql: |
+ SELECT c1, c3, sum(c4) OVER w1 as w1_c4_sum FROM {0} WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7);
+ expect:
+ success: false
+ -
+ id: 38
+ desc: bigint as order by - with time unit
+ inputs:
+ -
+ columns : ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"]
+ indexs: ["index1:c3:c4"]
+ rows:
+ - [1,"aa",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,31,1.1,2.1,1590738990000,"2020-05-01"]
+ - [3,"cc",20,32,1.1,2.1,1590738990000,"2020-05-01"]
+ - [4,"dd",20,33,1.1,2.1,1590738990000,"2020-05-01"]
+ - [5,"ee",21,34,1.2,2.2,1590738991000,"2020-05-02"]
+ sql: |
+ SELECT id, c1, c4, count(c4) OVER w1 as w1_c4_count FROM {0} WINDOW w1 AS (PARTITION BY {0}.c3 ORDER BY {0}.c4 ROWS_RANGE BETWEEN 2s PRECEDING AND CURRENT ROW);
+ expect:
+ order: id
+ columns: ["id int","c1 string","c4 bigint","w1_c4_count bigint"]
+ rows:
+ - [1,"aa",30,1]
+ - [2,"bb",31,2]
+ - [3,"cc",32,3]
+ - [4,"dd",33,4]
+ - [5,"ee",34,1]
+ -
+ id: 39
+ desc: timestamp as order by - no time unit
+ inputs:
+ -
+ columns : ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"]
+ indexs: ["index1:c3:c7"]
+ rows:
+ - [1,"aa",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,31,1.1,2.1,1590738990001,"2020-05-01"]
+ - [3,"cc",20,32,1.1,2.1,1590738990002,"2020-05-01"]
+ - [4,"dd",20,33,1.1,2.1,1590738990003,"2020-05-01"]
+ - [5,"ee",21,34,1.2,2.2,1590738991000,"2020-05-02"]
+ sql: |
+ SELECT id, c1, c4, count(c4) OVER w1 as w1_c4_count FROM {0} WINDOW w1 AS (PARTITION BY {0}.c3 ORDER BY {0}.c7 ROWS_RANGE BETWEEN 2 PRECEDING AND CURRENT ROW);
+ expect:
+ order: id
+ columns: ["id int","c1 string","c4 bigint","w1_c4_count bigint"]
+ rows:
+ - [1,"aa",30,1]
+ - [2,"bb",31,2]
+ - [3,"cc",32,3]
+ - [4,"dd",33,3]
+ - [5,"ee",34,1]
+ -
+ id: 40
+ desc: timestamp as order by - with unit m (minutes)
+ inputs:
+ -
+ columns : ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"]
+ indexs: ["index1:c3:c7"]
+ rows:
+ - [1,"aa",20,30,1.1,2.1,1606755660000,"2020-05-01"]
+ - [2,"bb",20,31,1.1,2.1,1606755720000,"2020-05-01"]
+ - [3,"cc",20,32,1.1,2.1,1606755780000,"2020-05-01"]
+ - [4,"dd",20,33,1.1,2.1,1606755840000,"2020-05-01"]
+ - [5,"ee",21,34,1.2,2.2,1606755660000,"2020-05-02"]
+ sql: |
+ SELECT id, c1, c4, count(c4) OVER w1 as w1_c4_count FROM {0} WINDOW w1 AS (PARTITION BY {0}.c3 ORDER BY {0}.c7 ROWS_RANGE BETWEEN 2m PRECEDING AND CURRENT ROW);
+ expect:
+ order: id
+ columns: ["id int","c1 string","c4 bigint","w1_c4_count bigint"]
+ rows:
+ - [1,"aa",30,1]
+ - [2,"bb",31,2]
+ - [3,"cc",32,3]
+ - [4,"dd",33,3]
+ - [5,"ee",34,1]
+ -
+ id: 41
+ desc: timestamp as order by - with time unit (h)
+ inputs:
+ -
+ columns: ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"]
+ indexs: ["index1:c3:c7"]
+ rows:
+ - [1,"aa",20,30,1.1,2.1,1606755600000,"2020-05-01"]
+ - [2,"bb",20,31,1.1,2.1,1606759200000,"2020-05-01"]
+ - [3,"cc",20,32,1.1,2.1,1606762800000,"2020-05-01"]
+ - [4,"dd",20,33,1.1,2.1,1606766400000,"2020-05-01"]
+ - [5,"ee",21,34,1.2,2.2,1606766400000,"2020-05-02"]
+ sql: |
+ SELECT id, c1, c4, count(c4) OVER w1 as w1_c4_count FROM {0} WINDOW w1 AS (PARTITION BY {0}.c3 ORDER BY {0}.c7 ROWS_RANGE BETWEEN 2h PRECEDING AND CURRENT ROW);
+ expect:
+ order: id
+ columns: ["id int","c1 string","c4 bigint","w1_c4_count bigint"]
+ rows:
+ - [1,"aa",30,1]
+ - [2,"bb",31,2]
+ - [3,"cc",32,3]
+ - [4,"dd",33,3]
+ - [5,"ee",34,1]
+ -
+ id: 42
+ desc: timestamp as order by - with time unit (d)
+ inputs:
+ -
+ columns: ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"]
+ indexs: ["index1:c3:c7"]
+ rows:
+ - [1,"aa",20,30,1.1,2.1,1606752000000,"2020-05-01"]
+ - [2,"bb",20,31,1.1,2.1,1606838400000,"2020-05-01"]
+ - [3,"cc",20,32,1.1,2.1,1606924800000,"2020-05-01"]
+ - [4,"dd",20,33,1.1,2.1,1607011200000,"2020-05-01"]
+ - [5,"ee",21,34,1.2,2.2,1606752000000,"2020-05-02"]
+ sql: |
+ SELECT id, c1, c4, count(c4) OVER w1 as w1_c4_count FROM {0} WINDOW w1 AS (PARTITION BY {0}.c3 ORDER BY {0}.c7 ROWS_RANGE BETWEEN 2d PRECEDING AND CURRENT ROW);
+ expect:
+ order: id
+ columns: ["id int","c1 string","c4 bigint","w1_c4_count bigint"]
+ rows:
+ - [1,"aa",30,1]
+ - [2,"bb",31,2]
+ - [3,"cc",32,3]
+ - [4,"dd",33,3]
+ - [5,"ee",34,1]
+ -
+ id: 43
+ desc: bigint as order by - no unit on either bound, 1-2
+ inputs:
+ -
+ columns: ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"]
+ indexs: ["index1:c3:c4"]
+ rows:
+ - [1,"aa",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,31,1.1,2.1,1590738990000,"2020-05-01"]
+ - [3,"cc",20,32,1.1,2.1,1590738990000,"2020-05-01"]
+ - [4,"dd",20,33,1.1,2.1,1590738990000,"2020-05-01"]
+ - [5,"ee",21,34,1.2,2.2,1590738991000,"2020-05-02"]
+ sql: |
+ SELECT id, c1, c4, count(c4) OVER w1 as w1_c4_count FROM {0} WINDOW w1 AS (PARTITION BY {0}.c3 ORDER BY {0}.c4 ROWS_RANGE BETWEEN 2 PRECEDING AND 1 PRECEDING);
+ expect:
+ order: id
+ columns: ["id int","c1 string","c4 bigint","w1_c4_count bigint"]
+ rows:
+ - [1,"aa",30,0]
+ - [2,"bb",31,1]
+ - [3,"cc",32,2]
+ - [4,"dd",33,2]
+ - [5,"ee",34,0]
+ -
+ id: 44
+ desc: bigint as order by - no unit on either bound, 0-2
+ inputs:
+ -
+ columns: ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"]
+ indexs: ["index1:c3:c4"]
+ rows:
+ - [1,"aa",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,31,1.1,2.1,1590738990000,"2020-05-01"]
+ - [3,"cc",20,32,1.1,2.1,1590738990000,"2020-05-01"]
+ - [4,"dd",20,33,1.1,2.1,1590738990000,"2020-05-01"]
+ - [5,"ee",21,34,1.2,2.2,1590738991000,"2020-05-02"]
+ sql: |
+ SELECT id, c1, c4, count(c4) OVER w1 as w1_c4_count FROM {0} WINDOW w1 AS (PARTITION BY {0}.c3 ORDER BY {0}.c4 ROWS_RANGE BETWEEN 2 PRECEDING AND 0 PRECEDING);
+ expect:
+ order: id
+ columns: ["id int","c1 string","c4 bigint","w1_c4_count bigint"]
+ rows:
+ - [1,"aa",30,1]
+ - [2,"bb",31,2]
+ - [3,"cc",32,3]
+ - [4,"dd",33,3]
+ - [5,"ee",34,1]
+ -
+ id: 45
+ desc: bigint as order by - no unit on either bound, -1-2
+ inputs:
+ -
+ columns: ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"]
+ indexs: ["index1:c3:c4"]
+ rows:
+ - [1,"aa",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,31,1.1,2.1,1590738990000,"2020-05-01"]
+ - [3,"cc",20,32,1.1,2.1,1590738990000,"2020-05-01"]
+ - [4,"dd",20,33,1.1,2.1,1590738990000,"2020-05-01"]
+ - [5,"ee",21,34,1.2,2.2,1590738991000,"2020-05-02"]
+ sql: |
+ SELECT id, c1, c4, count(c4) OVER w1 as w1_c4_count FROM {0} WINDOW w1 AS (PARTITION BY {0}.c3 ORDER BY {0}.c4 ROWS_RANGE BETWEEN 2 PRECEDING AND -1 PRECEDING);
+ expect:
+ order: id
+ columns: ["id int","c1 string","c4 bigint","w1_c4_count bigint"]
+ rows:
+ - [1,"aa",30,1]
+ - [2,"bb",31,2]
+ - [3,"cc",32,3]
+ - [4,"dd",33,3]
+ - [5,"ee",34,1]
+ -
+ id: 46
+ desc: timestamp as order by - 2s-1s
+ version: 0.6.0
+ inputs:
+ -
+ columns: ["c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"]
+ indexs: ["index1:c1:c7"]
+ rows:
+ - ["aa",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - ["aa",21,31,1.2,2.2,1590738991000,"2020-05-02"]
+ - ["aa",22,32,1.3,2.3,1590738992000,"2020-05-03"]
+ - ["aa",23,33,1.4,2.4,1590738993000,"2020-05-04"]
+ - ["bb",24,34,1.5,2.5,1590738994000,"2020-05-05"]
+ sql: |
+ SELECT c1, c3, sum(c4) OVER w1 as w1_c4_sum FROM {0} WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS_RANGE BETWEEN 2s PRECEDING AND 1s PRECEDING);
+ expect:
+ order: c3
+ columns: ["c1 string","c3 int","w1_c4_sum bigint"]
+ rows:
+ - ["aa",20,NULL]
+ - ["aa",21,30]
+ - ["aa",22,61]
+ - ["aa",23,63]
+ - ["bb",24,NULL]
+ -
+ id: 47
+ desc: timestamp as order by - 2s-0s
+ inputs:
+ -
+ columns: ["c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"]
+ indexs: ["index1:c1:c7"]
+ rows:
+ - ["aa",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - ["aa",21,31,1.2,2.2,1590738991000,"2020-05-02"]
+ - ["aa",22,32,1.3,2.3,1590738992000,"2020-05-03"]
+ - ["aa",23,33,1.4,2.4,1590738993000,"2020-05-04"]
+ - ["bb",24,34,1.5,2.5,1590738994000,"2020-05-05"]
+ sql: |
+ SELECT c1, c3, sum(c4) OVER w1 as w1_c4_sum FROM {0} WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS_RANGE BETWEEN 2s PRECEDING AND 0s PRECEDING);
+ expect:
+ order: c3
+ columns: ["c1 string","c3 int","w1_c4_sum bigint"]
+ rows:
+ - ["aa",20,30]
+ - ["aa",21,61]
+ - ["aa",22,93]
+ - ["aa",23,96]
+ - ["bb",24,34]
+ -
+ id: 48
+ desc: timestamp as order by - 2s-0
+ inputs:
+ -
+ columns: ["c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"]
+ indexs: ["index1:c1:c7"]
+ rows:
+ - ["aa",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - ["aa",21,31,1.2,2.2,1590738991000,"2020-05-02"]
+ - ["aa",22,32,1.3,2.3,1590738992000,"2020-05-03"]
+ - ["aa",23,33,1.4,2.4,1590738993000,"2020-05-04"]
+ - ["bb",24,34,1.5,2.5,1590738994000,"2020-05-05"]
+ sql: |
+ SELECT c1, c3, sum(c4) OVER w1 as w1_c4_sum FROM {0} WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS_RANGE BETWEEN 2s PRECEDING AND 0 PRECEDING);
+ expect:
+ order: c3
+ columns: ["c1 string","c3 int","w1_c4_sum bigint"]
+ rows:
+ - ["aa",20,30]
+ - ["aa",21,61]
+ - ["aa",22,93]
+ - ["aa",23,96]
+ - ["bb",24,34]
+ -
+ id: 49
+ desc: timestamp as order by - 2s-1
+ version: 0.6.0
+ inputs:
+ -
+ columns: ["c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"]
+ indexs: ["index1:c1:c7"]
+ rows:
+ - ["aa",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - ["aa",21,31,1.2,2.2,1590738990001,"2020-05-02"]
+ - ["aa",22,32,1.3,2.3,1590738990002,"2020-05-03"]
+ - ["aa",23,33,1.4,2.4,1590738990003,"2020-05-04"]
+ - ["bb",24,34,1.5,2.5,1590738990004,"2020-05-05"]
+ sql: |
+ SELECT c1, c3, sum(c4) OVER w1 as w1_c4_sum FROM {0} WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS_RANGE BETWEEN 2s PRECEDING AND 1 PRECEDING);
+ expect:
+ order: c3
+ columns: ["c1 string","c3 int","w1_c4_sum bigint"]
+ rows:
+ - ["aa",20, NULL]
+ - ["aa",21,30]
+ - ["aa",22,61]
+ - ["aa",23,93]
+ - ["bb",24, NULL]
+ -
+ id: 50
+ desc: timestamp as order by - different units for the two bounds
+ version: 0.6.0
+ inputs:
+ -
+ columns: ["c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"]
+ indexs: ["index1:c1:c7"]
+ rows:
+ - ["aa",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - ["aa",21,31,1.2,2.2,1590738991000,"2020-05-02"]
+ - ["aa",22,32,1.3,2.3,1590738992000,"2020-05-03"]
+ - ["aa",23,33,1.4,2.4,1590738993000,"2020-05-04"]
+ - ["bb",24,34,1.5,2.5,1590738994000,"2020-05-05"]
+ sql: |
+ SELECT c1, c3, sum(c4) OVER w1 as w1_c4_sum FROM {0} WINDOW w1 AS (PARTITION BY {0}.c1 ORDER BY {0}.c7 ROWS_RANGE BETWEEN 2m PRECEDING AND 1s PRECEDING);
+ expect:
+ order: c3
+ columns: ["c1 string","c3 int","w1_c4_sum bigint"]
+ rows:
+ - ["aa",20,NULL]
+ - ["aa",21,30]
+ - ["aa",22,61]
+ - ["aa",23,93]
+ - ["bb",24,NULL]
diff --git a/cases/integration_test/window/test_window_union.yaml b/cases/integration_test/window/test_window_union.yaml
new file mode 100644
index 00000000000..42f8843b555
--- /dev/null
+++ b/cases/integration_test/window/test_window_union.yaml
@@ -0,0 +1,1153 @@
+# Copyright 2021 4Paradigm
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+db: test_zw
+debugs: []
+version: 0.5.0
+cases:
+ - id: 0
+ desc: normal union
+ inputs:
+ - columns: ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"]
+ indexs: ["index1:c3:c7"]
+ rows:
+ - [1,"aa",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [4,"dd",20,33,1.4,2.4,1590738993000,"2020-05-04"]
+ - [5,"ee",21,34,1.5,2.5,1590738994000,"2020-05-05"]
+ - columns: ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"]
+ indexs: ["index1:c3:c7"]
+ rows:
+ - [2,"bb",20,31,1.2,2.2,1590738991000,"2020-05-02"]
+ - [3,"cc",20,32,1.3,2.3,1590738992000,"2020-05-03"]
+ sql: |
+ SELECT id, c1, c3, sum(c4) OVER w1 as w1_c4_sum FROM {0} WINDOW w1 AS (UNION {1} PARTITION BY {0}.c3 ORDER BY {0}.c7 ROWS BETWEEN 2 PRECEDING AND CURRENT ROW);
+ expect:
+ order: id
+ columns: ["id int","c1 string","c3 int","w1_c4_sum bigint"]
+ rows:
+ - [1,"aa",20,30]
+ - [4,"dd",20,96]
+ - [5,"ee",21,34]
+ - id: 1
+ desc: the union table has a different number of columns
+ inputs:
+ - columns: ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"]
+ indexs: ["index1:c3:c7"]
+ rows:
+ - [1,"aa",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [4,"dd",20,33,1.4,2.4,1590738993000,"2020-05-04"]
+ - [5,"ee",21,34,1.5,2.5,1590738994000,"2020-05-05"]
+ - columns: ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp"]
+ indexs: ["index1:c3:c7"]
+ rows:
+ - [2,"bb",20,31,1.2,2.2,1590738991000]
+ - [3,"cc",20,32,1.3,2.3,1590738992000]
+ sql: |
+ SELECT id, c1, c3, sum(c4) OVER w1 as w1_c4_sum FROM {0} WINDOW w1 AS (UNION {1} PARTITION BY {0}.c3 ORDER BY {0}.c7 ROWS BETWEEN 2 PRECEDING AND CURRENT ROW);
+ expect:
+ success: false
+ - id: 2
+ desc: column types do not match
+ inputs:
+ - columns: ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"]
+ indexs: ["index1:c3:c7"]
+ rows:
+ - [1,"aa",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [4,"dd",20,33,1.4,2.4,1590738993000,"2020-05-04"]
+ - [5,"ee",21,34,1.5,2.5,1590738994000,"2020-05-05"]
+ - columns: ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 string"]
+ indexs: ["index1:c3:c7"]
+ rows:
+ - [2,"bb",20,31,1.2,2.2,1590738991000,"2020-05-02"]
+ - [3,"cc",20,32,1.3,2.3,1590738992000,"2020-05-03"]
+ sql: |
+ SELECT id, c1, c3, sum(c4) OVER w1 as w1_c4_sum FROM {0} WINDOW w1 AS (UNION {1} PARTITION BY {0}.c3 ORDER BY {0}.c7 ROWS BETWEEN 2 PRECEDING AND CURRENT ROW);
+ expect:
+ success: false
+ - id: 3
+ desc: column names do not match
+ inputs:
+ - columns: ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"]
+ indexs: ["index1:c3:c7"]
+ rows:
+ - [1,"aa",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [4,"dd",20,33,1.4,2.4,1590738993000,"2020-05-04"]
+ - [5,"ee",21,34,1.5,2.5,1590738994000,"2020-05-05"]
+ - columns: ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c9 date"]
+ indexs: ["index1:c3:c7"]
+ rows:
+ - [2,"bb",20,31,1.2,2.2,1590738991000,"2020-05-02"]
+ - [3,"cc",20,32,1.3,2.3,1590738992000,"2020-05-03"]
+ sql: |
+ SELECT id, c1, c3, sum(c4) OVER w1 as w1_c4_sum FROM {0} WINDOW w1 AS (UNION {1} PARTITION BY {0}.c3 ORDER BY {0}.c7 ROWS BETWEEN 2 PRECEDING AND CURRENT ROW);
+ expect:
+ success: false
+ - id: 4
+ desc: schemas match after using column aliases
+ inputs:
+ - columns: ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"]
+ indexs: ["index1:c3:c7"]
+ rows:
+ - [1,"aa",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [4,"dd",20,33,1.4,2.4,1590738993000,"2020-05-04"]
+ - [5,"ee",21,34,1.5,2.5,1590738994000,"2020-05-05"]
+ - columns: ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c9 date"]
+ indexs: ["index1:c3:c7"]
+ rows:
+ - [2,"bb",20,31,1.2,2.2,1590738991000,"2020-05-02"]
+ - [3,"cc",20,32,1.3,2.3,1590738992000,"2020-05-03"]
+ sql: |
+ SELECT id, c1, c3, sum(c4) OVER w1 as w1_c4_sum FROM {0} WINDOW
+ w1 AS (UNION (select id, c1,c3,c4,c5,c6,c7,c9 as c8 from {1})
+ PARTITION BY {0}.c3 ORDER BY {0}.c7 ROWS BETWEEN 2 PRECEDING AND CURRENT ROW);
+ expect:
+ order: id
+ columns: ["id int","c1 string","c3 int","w1_c4_sum bigint"]
+ rows:
+ - [1,"aa",20,30]
+ - [4,"dd",20,96]
+ - [5,"ee",21,34]
+ - id: 5
+ desc: main table uses an index, UNION table misses the index
+ mode: rtidb-unsupport
+ inputs:
+ - columns: ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"]
+ indexs: ["index1:c3:c7"]
+ rows:
+ - [1,"aa",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [4,"dd",20,33,1.4,2.4,1590738993000,"2020-05-04"]
+ - [5,"ee",21,34,1.5,2.5,1590738994000,"2020-05-05"]
+ - columns: ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"]
+ indexs: ["index1:c1:c7"]
+ rows:
+ - [2,"bb",20,31,1.2,2.2,1590738991000,"2020-05-02"]
+ - [3,"cc",20,32,1.3,2.3,1590738992000,"2020-05-03"]
+ sql: |
+ SELECT id, c1, c3, sum(c4) OVER w1 as w1_c4_sum FROM {0} WINDOW w1 AS (UNION {1} PARTITION BY {0}.c3 ORDER BY {0}.c7 ROWS BETWEEN 2 PRECEDING AND CURRENT ROW);
+ expect:
+ order: id
+ columns: ["id int","c1 string","c3 int","w1_c4_sum bigint"]
+ rows:
+ - [1,"aa",20,30]
+ - [4,"dd",20,96]
+ - [5,"ee",21,34]
+ - id: 6
+ desc: UNION table uses an index, main table misses the index
+ mode: rtidb-unsupport
+ inputs:
+ - columns: ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"]
+ indexs: ["index1:c1:c7"]
+ rows:
+ - [1,"aa",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [4,"dd",20,33,1.4,2.4,1590738993000,"2020-05-04"]
+ - [5,"ee",21,34,1.5,2.5,1590738994000,"2020-05-05"]
+ - columns: ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"]
+ indexs: ["index1:c3:c7"]
+ rows:
+ - [2,"bb",20,31,1.2,2.2,1590738991000,"2020-05-02"]
+ - [3,"cc",20,32,1.3,2.3,1590738992000,"2020-05-03"]
+ sql: |
+ SELECT id, c1, c3, sum(c4) OVER w1 as w1_c4_sum FROM {0} WINDOW w1 AS (UNION {1} PARTITION BY {0}.c3 ORDER BY {0}.c7 ROWS BETWEEN 2 PRECEDING AND CURRENT ROW);
+ expect:
+ order: id
+ columns: ["id int","c1 string","c3 int","w1_c4_sum bigint"]
+ rows:
+ - [1,"aa",20,30]
+ - [4,"dd",20,96]
+ - [5,"ee",21,34]
+ - id: 7
+ desc: both the main table and the UNION table use indexes
+ inputs:
+ - columns: ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"]
+ indexs: ["index1:c3:c7"]
+ rows:
+ - [1,"aa",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [4,"dd",20,33,1.4,2.4,1590738993000,"2020-05-04"]
+ - [5,"ee",21,34,1.5,2.5,1590738994000,"2020-05-05"]
+ - columns: ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"]
+ indexs: ["index1:c3:c7"]
+ rows:
+ - [2,"bb",20,31,1.2,2.2,1590738991000,"2020-05-02"]
+ - [3,"cc",20,32,1.3,2.3,1590738992000,"2020-05-03"]
+ sql: |
+ SELECT id, c1, c3, sum(c4) OVER w1 as w1_c4_sum FROM {0} WINDOW w1 AS (UNION {1} PARTITION BY {0}.c3 ORDER BY {0}.c7 ROWS BETWEEN 2 PRECEDING AND CURRENT ROW);
+ expect:
+ order: id
+ columns: ["id int","c1 string","c3 int","w1_c4_sum bigint"]
+ rows:
+ - [1,"aa",20,30]
+ - [4,"dd",20,96]
+ - [5,"ee",21,34]
+ - id: 8
+ desc: union multiple tables
+ inputs:
+ - columns: ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"]
+ indexs: ["index1:c3:c7"]
+ rows:
+ - [1,"aa",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [4,"dd",20,33,1.4,2.4,1590738993000,"2020-05-04"]
+ - [5,"dd",20,33,1.4,2.4,1590738994000,"2020-05-04"]
+ - [6,"ee",21,34,1.5,2.5,1590738995000,"2020-05-05"]
+ - columns: ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"]
+ indexs: ["index1:c3:c7"]
+ rows:
+ - [3,"cc",20,32,1.3,2.3,1590738992000,"2020-05-03"]
+ - columns: ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"]
+ indexs: ["index1:c3:c7"]
+ rows:
+ - [2,"bb",20,31,1.2,2.2,1590738991000,"2020-05-02"]
+ sql: |
+ SELECT id, c1, c3, sum(c4) OVER w1 as w1_c4_sum FROM {0} WINDOW w1 AS (UNION {1},{2} PARTITION BY {0}.c3 ORDER BY {0}.c7 ROWS BETWEEN 3 PRECEDING AND CURRENT ROW);
+ expect:
+ order: id
+ columns: ["id int","c1 string","c3 int","w1_c4_sum bigint"]
+ rows:
+ - [1,"aa",20,30]
+ - [4,"dd",20,126]
+ - [5,"dd",20,129]
+ - [6,"ee",21,34]
+ - id: 9
+ desc: combined with limit
+ mode: request-unsupport
+ tags: ["TODO", "@zhaowei remove limit case here"]
+ inputs:
+ - columns: ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"]
+ indexs: ["index1:c3:c7"]
+ rows:
+ - [1,"aa",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [4,"dd",20,33,1.4,2.4,1590738993000,"2020-05-04"]
+ - [5,"ee",21,34,1.5,2.5,1590738994000,"2020-05-05"]
+ - columns: ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"]
+ indexs: ["index1:c3:c7"]
+ rows:
+ - [2,"bb",20,31,1.2,2.2,1590738991000,"2020-05-02"]
+ - [3,"cc",20,32,1.3,2.3,1590738992000,"2020-05-03"]
+ sql: |
+ SELECT id, c1, c3, sum(c4) OVER w1 as w1_c4_sum FROM {0} WINDOW
+ w1 AS (UNION {1} PARTITION BY {0}.c3 ORDER BY {0}.c7 ROWS BETWEEN 2 PRECEDING AND CURRENT ROW) limit 2;
+ expect:
+ order: id
+ columns: ["id int","c1 string","c3 int","w1_c4_sum bigint"]
+ rows:
+ - [1,"aa",20,30]
+ - [5,"ee",21,34]
+ - id: 10
+ desc: use two partition keys
+ inputs:
+ - columns: ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"]
+ indexs: ["index1:c1|c3:c7"]
+ rows:
+ - [1,"aa",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [4,"aa",20,33,1.4,2.4,1590738993000,"2020-05-04"]
+ - [5,"ee",21,34,1.5,2.5,1590738994000,"2020-05-05"]
+ - [6,"ee",21,33,1.4,2.4,1590738995000,"2020-05-04"]
+ - columns: ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"]
+ indexs: ["index1:c1|c3:c7"]
+ rows:
+ - [2,"aa",20,31,1.2,2.2,1590738991000,"2020-05-02"]
+ - [3,"aa",20,32,1.3,2.3,1590738992000,"2020-05-03"]
+ sql: |
+ SELECT id, c1, c3, sum(c4) OVER w1 as w1_c4_sum FROM {0} WINDOW w1 AS (UNION {1} PARTITION BY {0}.c1,{0}.c3 ORDER BY {0}.c7 ROWS BETWEEN 2 PRECEDING AND CURRENT ROW);
+ expect:
+ order: id
+ columns: ["id int","c1 string","c3 int","w1_c4_sum bigint"]
+ rows:
+ - [1,"aa",20,30]
+ - [4,"aa",20,96]
+ - [5,"ee",21,34]
+ - [6,"ee",21,67]
+ - id: 11
+ desc: both the main table and the union table use subqueries
+ inputs:
+ - columns: ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"]
+ indexs: ["index1:c3:c7"]
+ rows:
+ - [1,"aa",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [4,"dd",20,33,1.4,2.4,1590738993000,"2020-05-04"]
+ - [5,"ee",21,34,1.5,2.5,1590738994000,"2020-05-05"]
+ - columns: ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"]
+ indexs: ["index1:c3:c7"]
+ rows:
+ - [2,"bb",20,31,1.2,2.2,1590738991000,"2020-05-02"]
+ - [3,"cc",20,32,1.3,2.3,1590738992000,"2020-05-03"]
+ sql: |
+ SELECT id, c1, c3, sum(c4) OVER w1 as w1_c4_sum FROM (select * from {0}) WINDOW w1 AS (UNION (select * from {1}) PARTITION BY {0}.c3 ORDER BY {0}.c7 ROWS BETWEEN 2 PRECEDING AND CURRENT ROW);
+ expect:
+ order: id
+ columns: ["id int","c1 string","c3 int","w1_c4_sum bigint"]
+ rows:
+ - [1,"aa",20,30]
+ - [4,"dd",20,96]
+ - [5,"ee",21,34]
+ - id: 12
+ desc: union multiple tables, one of them a subquery
+ inputs:
+ - columns: ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"]
+ indexs: ["index1:c3:c7"]
+ rows:
+ - [1,"aa",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [4,"dd",20,33,1.4,2.4,1590738993000,"2020-05-04"]
+ - [5,"dd",20,33,1.4,2.4,1590738994000,"2020-05-04"]
+ - [6,"ee",21,34,1.5,2.5,1590738995000,"2020-05-05"]
+ - columns: ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"]
+ indexs: ["index1:c3:c7"]
+ rows:
+ - [3,"cc",20,32,1.3,2.3,1590738992000,"2020-05-03"]
+ - columns: ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"]
+ indexs: ["index1:c3:c7"]
+ rows:
+ - [2,"bb",20,31,1.2,2.2,1590738991000,"2020-05-02"]
+ sql: |
+ SELECT id, c1, c3, sum(c4) OVER w1 as w1_c4_sum FROM {0} WINDOW w1 AS (UNION (select * from {1}),{2} PARTITION BY {0}.c3 ORDER BY {0}.c7 ROWS BETWEEN 3 PRECEDING AND CURRENT ROW);
+ expect:
+ order: id
+ columns: ["id int","c1 string","c3 int","w1_c4_sum bigint"]
+ rows:
+ - [1,"aa",20,30]
+ - [4,"dd",20,126]
+ - [5,"dd",20,129]
+ - [6,"ee",21,34]
+ - id: 13
+ desc: main table rows do not enter the window
+ inputs:
+ - columns: ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"]
+ indexs: ["index1:c1:c7"]
+ rows:
+ - [1,"aa",20,30,1.1,2.1,1590738993000,"2020-05-01"]
+ - [4,"dd",20,33,1.4,2.4,1590738994000,"2020-05-04"]
+ - [5,"ee",21,34,1.5,2.5,1590738995000,"2020-05-05"]
+ - columns: ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"]
+ indexs: ["index1:c3:c7"]
+ rows:
+ - [2,"bb",20,31,1.2,2.2,1590738991000,"2020-05-02"]
+ - [3,"cc",20,32,1.3,2.3,1590738992000,"2020-05-03"]
+ sql: |
+ SELECT id, c1, c3, sum(c4) OVER w1 as w1_c4_sum FROM {0} WINDOW
+ w1 AS (UNION {1} PARTITION BY {0}.c3 ORDER BY {0}.c7 ROWS BETWEEN 2 PRECEDING AND CURRENT ROW INSTANCE_NOT_IN_WINDOW);
+ expect:
+ order: id
+ columns: ["id int","c1 string","c3 int","w1_c4_sum bigint"]
+ rows:
+ - [1,"aa",20,93]
+ - [4,"dd",20,96]
+ - [5,"ee",21,34]
+
+ - id: 14-1
+ desc: WINDOW UNION subquery, column cast and const cast in the subquery, string cast as date
+ mode: offline-unsupport
+ inputs:
+ - columns: ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"]
+ indexs: ["index1:c1:c7"]
+ rows:
+ - [1,"aa",20,30,1.1,2.1,1590738993000,"2020-05-01"]
+ - [4,"dd",20,33,1.4,2.4,1590738994000,"2020-05-04"]
+ - [5,"ee",21,34,1.5,2.5,1590738995000,"2020-05-05"]
+ - columns: ["id int","c1 string","c3 int","c4str string","c5 float","c6 double","c7 timestamp"]
+ indexs: ["index1:c3:c7"]
+ rows:
+ - [2, "bb", 20, "31", 1.2, 2.2, 1590738991000]
+ - [3, "cc", 20, "32", 1.3, 2.3, 1590738992000]
+ sql: |
+ SELECT id, c1, c3, c8,
+ distinct_count(c8) OVER w1 as w1_c8_dis_cnt,
+ sum(c4) OVER w1 as w1_c4_sum
+ FROM {0} WINDOW
+ w1 AS (UNION (select id, c1, c3, bigint(c4str) as c4, c5, c6, c7, date("2020-10-01") as c8 from {1})
+ PARTITION BY {0}.c3 ORDER BY {0}.c7 ROWS BETWEEN 2 PRECEDING AND CURRENT ROW INSTANCE_NOT_IN_WINDOW);
+ expect:
+ order: id
+ columns: ["id int", "c1 string", "c3 int", "c8 date", "w1_c8_dis_cnt bigint", "w1_c4_sum bigint"]
+ rows:
+ - [1, "aa", 20, "2020-05-01", 2, 93]
+ - [4, "dd", 20, "2020-05-04", 2, 96]
+ - [5, "ee", 21, "2020-05-05", 1, 34]
+ - id: 14-2
+ desc: WINDOW UNION subquery, column cast and const cast in the subquery, cast column as partition key
+ mode: offline-unsupport
+ inputs:
+ - columns: ["id int","c1 string","c3f float","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"]
+ indexs: ["index1:c1:c7"]
+ rows:
+ - [1,"aa",20.0, 30,1.1,2.1,1590738993000,"2020-05-01"]
+ - [4,"dd",20.1, 33,1.4,2.4,1590738994000,"2020-05-04"]
+ - [5,"ee",21.2, 34,1.5,2.5,1590738995000,"2020-05-05"]
+ - columns: ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp"]
+ indexs: ["index1:c3:c7"]
+ rows:
+ - [2,"bb",20,31,1.2,2.2,1590738991000]
+ - [3,"cc",20,32,1.3,2.3,1590738992000]
+ sql: |
+ SELECT id, c1, c3, c8,
+ distinct_count(c8) OVER w1 as w1_c8_dis_cnt,
+ sum(c4) OVER w1 as w1_c4_sum
+ FROM (select id, c1, int(c3f) as c3, c4, c5, c6, c7, c8 from {0}) WINDOW
+ w1 AS (UNION (select id, c1, c3, c4, c5, c6, c7, date("2020-10-01") as c8 from {1})
+ PARTITION BY c3 ORDER BY c7 ROWS BETWEEN 2 PRECEDING AND CURRENT ROW INSTANCE_NOT_IN_WINDOW);
+ expect:
+ order: id
+ columns: ["id int", "c1 string", "c3 int", "c8 date", "w1_c8_dis_cnt bigint", "w1_c4_sum bigint"]
+ rows:
+ - [1, "aa", 20, "2020-05-01", 2, 93]
+ - [4, "dd", 20, "2020-05-04", 2, 96]
+ - [5, "ee", 21, "2020-05-05", 1, 34]
+ - id: 14-3
+ desc: WINDOW UNION subquery, timestamp(string) as window ts
+ mode: offline-unsupport
+ inputs:
+ - columns: ["id int","c1 string","c3f float","c4 bigint","c5 float","c6 double","c7str string","c8 date"]
+ indexs: ["index1:c1:c4"]
+ rows:
+ - [1,"aa",20.0, 30,1.1,2.1,"2020-05-29 15:56:33","2020-05-01"]
+ - [4,"dd",20.1, 33,1.4,2.4,"2020-05-29 15:56:34","2020-05-04"]
+ - [5,"ee",21.2, 34,1.5,2.5,"2020-05-29 15:56:35","2020-05-05"]
+ - columns: ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp"]
+ indexs: ["index1:c3:c7"]
+ rows:
+ - [2,"bb",20,31,1.2,2.2, 1590738991000]
+ - [3,"cc",20,32,1.3,2.3, 1590738992000]
+ sql: |
+ SELECT id, c1, c3, c7, c8,
+ distinct_count(c8) OVER w1 as w1_c8_dis_cnt,
+ sum(c4) OVER w1 as w1_c4_sum
+ FROM (select id, c1, int(c3f) as c3, c4, c5, c6, timestamp(c7str) as c7, c8 from {0}) WINDOW
+ w1 AS (UNION (select id, c1, c3, c4, c5, c6, c7, date("2020-10-01") as c8 from {1})
+ PARTITION BY c3 ORDER BY c7 ROWS BETWEEN 2 PRECEDING AND CURRENT ROW INSTANCE_NOT_IN_WINDOW);
+ expect:
+ order: id
+ columns: ["id int", "c1 string", "c3 int", "c7 timestamp", "c8 date", "w1_c8_dis_cnt bigint", "w1_c4_sum bigint"]
+ rows:
+ - [1, "aa", 20, 1590738993000, "2020-05-01", 2, 93]
+ - [4, "dd", 20, 1590738994000, "2020-05-04", 2, 96]
+ - [5, "ee", 21, 1590738995000, "2020-05-05", 1, 34]
+ - id: 14-4
+ desc: WINDOW UNION subquery, alternative cast syntax cast(column as timestamp) as window ts
+ mode: offline-unsupport
+ inputs:
+ - columns: ["id int","c1 string","c3f float","c4 bigint","c5 float","c6 double","c7str string","c8 date"]
+ indexs: ["index1:c1:c4"]
+ rows:
+ - [1,"aa",20.0, 30,1.1,2.1,"2020-05-29 15:56:33","2020-05-01"]
+ - [4,"dd",20.1, 33,1.4,2.4,"2020-05-29 15:56:34","2020-05-04"]
+ - [5,"ee",21.2, 34,1.5,2.5,"2020-05-29 15:56:35","2020-05-05"]
+ - columns: ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp"]
+ indexs: ["index1:c3:c7"]
+ rows:
+ - [2,"bb",20,31,1.2,2.2, 1590738991000]
+ - [3,"cc",20,32,1.3,2.3, 1590738992000]
+ sql: |
+ SELECT id, c1, c3, c7, c8,
+ distinct_count(c8) OVER w1 as w1_c8_dis_cnt,
+ sum(c4) OVER w1 as w1_c4_sum
+ FROM (select id, c1, cast(c3f as int) as c3, c4, c5, c6, cast(c7str as timestamp) as c7, c8 from {0}) WINDOW
+ w1 AS (UNION (select id, c1, c3, c4, c5, c6, c7, cast("2020-10-01" as date) as c8 from {1})
+ PARTITION BY c3 ORDER BY c7 ROWS BETWEEN 2 PRECEDING AND CURRENT ROW INSTANCE_NOT_IN_WINDOW);
+ expect:
+ order: id
+ columns: ["id int", "c1 string", "c3 int", "c7 timestamp", "c8 date", "w1_c8_dis_cnt bigint", "w1_c4_sum bigint"]
+ rows:
+ - [1, "aa", 20, 1590738993000, "2020-05-01", 2, 93]
+ - [4, "dd", 20, 1590738994000, "2020-05-04", 2, 96]
+ - [5, "ee", 21, 1590738995000, "2020-05-05", 1, 34]
+ - id: 16
+ desc: no clear error log when INSTANCE_NOT_IN_WINDOW is added to the main table window
+ mode: offline-unsupport
+ db: db_wzx
+ sql: |
+ select
+ c1,
+ min(c1) over table_1_s2_t1 as table_1_c1_9,
+ min(c2) over table_1_s2_t1 as table_1_c2_10,
+ identity(case when lag(d1, 1) != null then distinct_count(d1) else null end) over table_1_s2_t1 as table_1_d1_11,
+ identity(case when lag(d2, 1) != null then distinct_count(d2) else null end) over table_1_s2_t1 as table_1_d2_12,
+ identity(case when lag(s1, 1) != null then distinct_count(s1) else null end) over table_1_s2_t1 as table_1_s1_13
+ from
+ {0} as main
+ window table_1_s2_t1 as (partition by s2 order by t1 rows_range between 1d preceding and 0s preceding INSTANCE_NOT_IN_WINDOW);
+ inputs:
+ - columns: ["label int", "s1 string","s2 string", "t1 timestamp","t2 date","d1 string", "d2 string", "c1 int", "c2 bigint",
+ "ai string", "kn string", "ks string"]
+ indexs: ["index1:s2:t1", "index2:s1:t1", "index3:d1:t1", "index4:d2:t1"]
+ rows:
+ - [1, "1", "2", 1600946381104, "2019-07-18", "xx", "xx", 1, 2 , "3x","4x","kx"]
+ - columns: ["s1 string","s2 string", "t1 timestamp","t2 date","d1 string", "d2 string", "c1 int", "c2 bigint",
+ "ai string", "kn string", "ks string"]
+ indexs: ["index1:s2:t1"]
+ rows:
+ - ["1", "2", 1600946381104, "2019-07-18", "xx", "xx", 1, 2 , "3x","4x","kx"]
+ - columns: ["s1 string","s2 string", "t1 timestamp","t2 date","d1 string", "d2 string", "c1 int", "c2 bigint",
+ "ai string", "kn string", "ks string"]
+ indexs: ["index1:s1:t1"]
+ rows:
+ - ["1", "2", 1600946381104, "2019-07-18", "xx", "xx", 1, 2 , "3x","4x","kx"]
+ - columns: ["s1 string","s2 string", "t1 timestamp","t2 date","d1 string", "d2 string", "c1 int", "c2 bigint",
+ "ai string", "kn string", "ks string"]
+ indexs: ["index1:s1:t1"]
+ rows:
+ - ["1", "2", 1600946381104, "2019-07-18", "xx", "xx", 1, 2 , "3x","4x","kx"]
+ expect:
+ order: c1
+ columns: ["c1 int", "table_1_c1_9 int", "table_1_c2_10 bigint", "table_1_d1_11 bigint", "table_1_d2_12 bigint", "table_1_s1_13 bigint"]
+ rows:
+ - [1, 1, 2, NULL, NULL, NULL]
+
+ - id: 16-2
+ desc: no clear error log when INSTANCE_NOT_IN_WINDOW is added to the main table window, optimized case when form
+ mode: offline-unsupport
+ db: db_wzx
+ sql: |
+ select
+ c1,
+ min(c1) over table_1_s2_t1 as table_1_c1_9,
+ min(c2) over table_1_s2_t1 as table_1_c2_10,
+ case when !isnull(lag(d1, 1) over table_1_s2_t1) then distinct_count(d1) over table_1_s2_t1 else null end as table_1_d1_11,
+ case when !isnull(lag(d2, 1) over table_1_s2_t1) then distinct_count(d2) over table_1_s2_t1 else null end as table_1_d2_12,
+ case when !isnull(lag(s1, 1) over table_1_s2_t1) then distinct_count(s1) over table_1_s2_t1 else null end as table_1_s1_13
+ from
+ {0} as main
+ window table_1_s2_t1 as (partition by s2 order by t1 rows_range between 1d preceding and 0s preceding INSTANCE_NOT_IN_WINDOW);
+ inputs:
+ - columns: ["label int", "s1 string","s2 string", "t1 timestamp","t2 date","d1 string", "d2 string", "c1 int", "c2 bigint",
+ "ai string", "kn string", "ks string"]
+ indexs: ["index1:s2:t1", "index2:s1:t1", "index3:d1:t1", "index4:d2:t1"]
+ rows:
+ - [1, "1", "2", 1600946381104, "2019-07-18", "xx", "xx", 1, 2 , "3x","4x","kx"]
+ - columns: ["s1 string","s2 string", "t1 timestamp","t2 date","d1 string", "d2 string", "c1 int", "c2 bigint",
+ "ai string", "kn string", "ks string"]
+ indexs: ["index1:s2:t1"]
+ rows:
+ - ["1", "2", 1600946381104, "2019-07-18", "xx", "xx", 1, 2 , "3x","4x","kx"]
+ - columns: ["s1 string","s2 string", "t1 timestamp","t2 date","d1 string", "d2 string", "c1 int", "c2 bigint",
+ "ai string", "kn string", "ks string"]
+ indexs: ["index1:s1:t1"]
+ rows:
+ - ["1", "2", 1600946381104, "2019-07-18", "xx", "xx", 1, 2 , "3x","4x","kx"]
+ - columns: ["s1 string","s2 string", "t1 timestamp","t2 date","d1 string", "d2 string", "c1 int", "c2 bigint",
+ "ai string", "kn string", "ks string"]
+ indexs: ["index1:s1:t1"]
+ rows:
+ - ["1", "2", 1600946381104, "2019-07-18", "xx", "xx", 1, 2 , "3x","4x","kx"]
+ expect:
+ order: c1
+ columns: ["c1 int", "table_1_c1_9 int", "table_1_c2_10 bigint", "table_1_d1_11 bigint", "table_1_d2_12 bigint", "table_1_s1_13 bigint"]
+ rows:
+ - [1, 1, 2, NULL, NULL, NULL]
+ - id: 17
+ desc: union of two tables whose indexes differ
+ inputs:
+ - columns: ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"]
+ indexs: ["index1:c3:c7"]
+ rows:
+ - [1,"aa",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [4,"dd",20,33,1.4,2.4,1590738993000,"2020-05-04"]
+ - [5,"ee",21,34,1.5,2.5,1590738994000,"2020-05-05"]
+ - columns: ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"]
+ indexs: ["index1:c3:c7","index2:c1:c7"]
+ rows:
+ - [2,"bb",20,31,1.2,2.2,1590738991000,"2020-05-02"]
+ - [3,"cc",20,32,1.3,2.3,1590738992000,"2020-05-03"]
+ sql: |
+ SELECT id, c1, c3, sum(c4) OVER w1 as w1_c4_sum FROM {0} WINDOW w1 AS (UNION {1} PARTITION BY {0}.c3 ORDER BY {0}.c7 ROWS BETWEEN 2 PRECEDING AND CURRENT ROW);
+ expect:
+ order: id
+ columns: ["id int","c1 string","c3 int","w1_c4_sum bigint"]
+ rows:
+ - [1,"aa",20,30]
+ - [4,"dd",20,96]
+ - [5,"ee",21,34]
+
+ # Test correctness of window union when the union rows and the original rows share the same ts/key.
+ # Refer to https://github.com/4paradigm/OpenMLDB/issues/1776#issuecomment-1121258571 for the specification.
+ # - 18-1 & 18-2 test the simple cases for UNION ROWS_RANGE and UNION ROWS
+ # - 18-3 tests UNION ROWS_RANGE with MAXSIZE
+ # - 18-4 & 18-5 test EXCLUDE CURRENT_TIME for UNION ROWS_RANGE/ROWS
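+ # A hedged illustration of this ordering, derived from the 18-1 data and expected output
+ # below (not an extra assertion): for the id=1 request row (ts=100, val=21) the 1s
+ # ROWS_RANGE window over g=111 holds the current t1 row plus the two t2 rows at ts=100
+ # (233 and 200), with the original row iterated first; hence cnt=3, max=233, min=21,
+ # and lag(val, 1) returns a union-row value (200) rather than another t1 row.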
+ - id: 18-1
+ desc: |
+ when UNION ROWS_RANGE rows share the same key as the original rows, the original rows come first, then the union rows
+ mode: disk-unsupport
+ inputs:
+ - name: t1
+ columns:
+ - id int
+ - ts timestamp
+ - g int
+ - val int
+ indexs:
+ - idx:g:ts
+ data: |
+ 1, 100, 111, 21
+ 2, 100, 111, 400
+ 3, 200, 112, 999
+ - name: t2
+ columns:
+ - id int
+ - ts timestamp
+ - g int
+ - val int
+ indexs:
+ - idx:g:ts
+ data: |
+ 1, 100, 111, 233
+ 1, 100, 111, 200
+ 1, 101, 111, 17
+ 3, 199, 112, 44
+ sql: |
+ select
+ id, count(val) over w as cnt,
+ max(val) over w as mv,
+ min(val) over w as mi,
+ lag(val, 1) over w as l1
+ from t1 window w as(
+ union t2
+ partition by `g` order by `ts`
+ rows_range between 1s preceding and 0s preceding);
+ expect:
+ columns:
+ - id int
+ - cnt int64
+ - mv int
+ - mi int
+ - l1 int
+ order: id
+ data: |
+ 1, 3, 233, 21, 200
+ 2, 4, 400, 21, 21
+ 3, 2, 999, 44, 44
+ - id: 18-2
+ desc: |
+ when UNION ROWS rows share the same key as the original rows, the original rows come first, then the union rows;
+ union rows are dropped first when the max window size limit is reached
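+ # Illustration derived from the data and expected output below: for id=1 the ROWS frame
+ # keeps at most 3 rows (current + 2 preceding); the current t1 row (21) and the two
+ # ts=100 union rows fill it, so the older union row 999@ts=88 is dropped, giving
+ # cnt=3, max=233, min=21.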
+ mode: disk-unsupport
+ inputs:
+ - name: t1
+ columns:
+ - id int
+ - ts timestamp
+ - g int
+ - val int
+ indexs:
+ - idx:g:ts
+ data: |
+ 1, 100, 111, 21
+ 2, 100, 111, 400
+ - name: t2
+ columns:
+ - id int
+ - ts timestamp
+ - g int
+ - val int
+ indexs:
+ - idx:g:ts
+ data: |
+ 1, 88, 111, 999
+ 1, 100, 111, 233
+ 1, 100, 111, 200
+ 1, 101, 111, 17
+ sql: |
+ select
+ id, count(val) over w as cnt,
+ max(val) over w as mv,
+ min(val) over w as mi,
+ lag(val, 1) over w as l1
+ from (select * from t1) window w as(
+ union (select * from t2)
+ partition by `g` order by `ts`
+ ROWS BETWEEN 2 preceding and 0 preceding);
+ expect:
+ columns:
+ - id int
+ - cnt int64
+ - mv int
+ - mi int
+ - l1 int
+ order: id
+ data: |
+ 1, 3, 233, 21, 200
+ 2, 3, 400, 21, 21
+ - id: 18-3
+ mode: disk-unsupport
+ desc: |
+ when UNION ROWS_RANGE with MAXSIZE has rows sharing the same key as the original rows, the original rows come first, then the union rows;
+ union rows are dropped first when the MAXSIZE limit is reached
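+ # Illustration derived from the data and expected output below: for id=1 the 1s
+ # ROWS_RANGE frame would hold 21 (current), 200 and 233, but MAXSIZE 2 keeps only the
+ # first two rows in window order, so cnt=2, max=200, min=21, lag(val, 1)=200.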
+ inputs:
+ - name: t1
+ columns:
+ - id int
+ - ts timestamp
+ - g int
+ - val int
+ indexs:
+ - idx:g:ts
+ data: |
+ 1, 100, 111, 21
+ 2, 100, 111, 0
+ - name: t2
+ columns:
+ - id int
+ - ts timestamp
+ - g int
+ - val int
+ indexs:
+ - idx:g:ts
+ data: |
+ 1, 99, 111, 233
+ 1, 100, 111, 200
+ 1, 101, 111, 17
+ sql: |
+ select
+ id, count(val) over w as cnt,
+ max(val) over w as mv,
+ min(val) over w as mi,
+ lag(val, 1) over w as l1
+ from (select * from t1) window w as(
+ union (select * from t2)
+ partition by `g` order by `ts`
+ rows_range between 1s preceding and 0s preceding MAXSIZE 2);
+ expect:
+ columns:
+ - id int
+ - cnt int64
+ - mv int
+ - mi int
+ - l1 int
+ order: id
+ data: |
+ 1, 2, 200, 21, 200
+ 2, 2, 21, 0, 21
+ - id: 18-4
+ mode: disk-unsupport
+ desc: |
+ when UNION ROWS_RANGE with EXCLUDE CURRENT_TIME has rows sharing the same key as the original rows, the original rows come first, then the union rows;
+ rows other than the current row at the current time are filtered out by EXCLUDE CURRENT_TIME
+ inputs:
+ - name: t1
+ columns:
+ - id int
+ - ts timestamp
+ - g int
+ - val int
+ indexs:
+ - idx:g:ts
+ data: |
+ 0, 0, 111, 19
+ 1, 0, 111, 18
+ 2, 100, 111, 21
+ 3, 100, 111, 5
+ 4, 101, 111, 100
+ - name: t2
+ columns:
+ - id int
+ - ts timestamp
+ - g int
+ - val int
+ indexs:
+ - idx:g:ts
+ data: |
+ 1, 99, 111, 233
+ 1, 100, 111, 200
+ 1, 101, 111, 17
+ # raw union window (before filter)
+ # 0, 0, 111, 19
+ # 1, 0, 111, 18
+ # 1, 99, 111, 233 (t2)
+ # 1, 100, 111, 200 (t2)
+ # 2, 100, 111, 21
+ # 3, 100, 111, 5
+ # 1, 101, 111, 17 (t2)
+ # 4, 101, 111, 100
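+ # e.g. for the id=2 request row (ts=100), EXCLUDE CURRENT_TIME drops the other
+ # ts=100 rows (5 from t1, 200 from t2) but keeps the current row itself, leaving
+ # 21 (current), 233@99, 19@0 and 18@0 -> cnt=4, max=233, min=18, lag(val, 1)=233,
+ # which matches the expected output below.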
+ sql: |
+ select
+ id, count(val) over w as cnt,
+ max(val) over w as mv,
+ min(val) over w as mi,
+ lag(val, 1) over w as l1
+ from (select * from t1) window w as(
+ union (select * from t2)
+ partition by `g` order by `ts`
+ rows_range between 1s preceding and 0s preceding EXCLUDE CURRENT_TIME);
+ expect:
+ columns:
+ - id int
+ - cnt int64
+ - mv int
+ - mi int
+ - l1 int
+ order: id
+ rows:
+ - [0, 1, 19, 19, NULL]
+ - [1, 1, 18, 18, NULL]
+ - [2, 4, 233, 18, 233]
+ - [3, 4, 233, 5, 233]
+ - [4, 7, 233, 5, 5]
+
+ - id: 18-5
+ mode: disk-unsupport
+ desc: |
+ UNION ROWS with EXCLUDE CURRENT_TIME: rows at the current time are filtered out
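+ # Illustration derived from the data and expected output below: for id=1 (ts=100)
+ # EXCLUDE CURRENT_TIME removes the ts=100 union rows, so the 3-row frame holds
+ # 21 (current), 233@99 and 999@88 -> cnt=3, max=999, min=21, lag(val, 1)=233.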
+ inputs:
+ - name: t1
+ columns:
+ - id int
+ - ts timestamp
+ - g int
+ - val int
+ indexs:
+ - idx:g:ts
+ data: |
+ 1, 100, 111, 21
+ 2, 100, 111, 10000
+ - name: t2
+ columns:
+ - id int
+ - ts timestamp
+ - g int
+ - val int
+ indexs:
+ - idx:g:ts
+ data: |
+ 1, 87, 111, 300
+ 1, 88, 111, 999
+ 1, 99, 111, 233
+ 1, 100, 111, 200
+ 1, 101, 111, 17
+ sql: |
+ select
+ id, count(val) over w as cnt,
+ max(val) over w as mv,
+ min(val) over w as mi,
+ lag(val, 1) over w as l1
+ from (select * from t1) window w as(
+ union (select * from t2)
+ partition by `g` order by `ts`
+ ROWS BETWEEN 2 preceding and 0 preceding EXCLUDE CURRENT_TIME);
+ expect:
+ columns:
+ - id int
+ - cnt int64
+ - mv int
+ - mi int
+ - l1 int
+ order: id
+ data: |
+ 1, 3, 999, 21, 233
+ 2, 3, 10000, 233, 233
+
+ # For the case where a window unions multiple tables, the order of rows from those
+ # union tables that share the same ts key is undefined by the specification.
+ # However, the SQL engine explicitly uses the order: master table -> first union table in SQL -> second union table in SQL -> ...
+ #
+ # The 19-* series tests this for the SQL engine only; you should never rely on this behavior anyway.
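+ # As an illustration (derived from the 19-1 data and expected output below): the window
+ # for id=1 (ts=100) holds 6 rows: the t1 row (21), the three t2 rows (999@88, 233, 200)
+ # and the two t3 rows (0, 33); so cnt=6, max=999, min=0, and the lag values (200, 233)
+ # come from t2, which is listed before t3 in the UNION clause.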
+ - id: 19-1
+ mode: disk-unsupport
+ desc: |
+ when a window unions multiple tables, rows from the union tables that share the same ts are ordered exactly as the tables appear in the SQL
+ inputs:
+ - name: t1
+ columns:
+ - id int
+ - ts timestamp
+ - g int
+ - val int
+ indexs:
+ - idx:g:ts
+ data: |
+ 1, 100, 111, 21
+ 2, 100, 111, 10000
+ - name: t2
+ columns:
+ - id int
+ - ts timestamp
+ - g int
+ - val int
+ indexs:
+ - idx:g:ts
+ data: |
+ 1, 88, 111, 999
+ 1, 100, 111, 233
+ 1, 100, 111, 200
+ - name: t3
+ columns:
+ - id int
+ - ts timestamp
+ - g int
+ - val int
+ indexs:
+ - idx:g:ts
+ data: |
+ 1, 100, 111, 0
+ 1, 100, 111, 33
+ sql: |
+ select
+ id, count(val) over w as cnt,
+ max(val) over w as mv,
+ min(val) over w as mi,
+ lag(val, 1) over w as l1,
+ lag(val, 2) over w as l2
+ from t1 window w as(
+ union t2,t3
+ partition by `g` order by `ts`
+ ROWS_RANGE BETWEEN 2s preceding and 0s preceding);
+ expect:
+ columns:
+ - id int
+ - cnt int64
+ - mv int
+ - mi int
+ - l1 int
+ - l2 int
+ order: id
+ data: |
+ 1, 6, 999, 0, 200, 233
+ 2, 7, 10000, 0, 21, 200
+ - id: 19-2
+ mode: disk-unsupport
+ desc: |
+ row order for a pure-history window union
+ inputs:
+ - name: t1
+ columns:
+ - id int
+ - ts timestamp
+ - g int
+ - val int
+ indexs:
+ - idx:g:ts
+ data: |
+ 1, 100, 111, 21
+ 2, 100, 111, 10000
+ - name: t2
+ columns:
+ - id int
+ - ts timestamp
+ - g int
+ - val int
+ indexs:
+ - idx:g:ts
+ data: |
+ 1, 88, 111, 999
+ 1, 100, 111, 233
+ 1, 100, 111, 200
+ - name: t3
+ columns:
+ - id int
+ - ts timestamp
+ - g int
+ - val int
+ indexs:
+ - idx:g:ts
+ data: |
+ 1, 100, 111, 0
+ 1, 100, 111, 33
+ sql: |
+ select
+ id, count(val) over w as cnt,
+ max(val) over w as mv,
+ min(val) over w as mi,
+ lag(val, 1) over w as l1,
+ lag(val, 2) over w as l2,
+ lag(val, 3) over w as l3
+ from t1 window w as(
+ union t2,t3
+ partition by `g` order by `ts`
+ ROWS BETWEEN 3 preceding and 1 preceding INSTANCE_NOT_IN_WINDOW);
+ expect:
+ columns:
+ - id int
+ - cnt int64
+ - mv int
+ - mi int
+ - l1 int
+ - l2 int
+ - l3 int
+ order: id
+ data: |
+ 1, 3, 233, 33, 200, 233, 33
+ 2, 3, 233, 33, 200, 233, 33
+ - id: 18
+ desc: all main-table ts values are greater than the union table's
+ inputs:
+ - columns: ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"]
+ indexs: ["index1:c3:c7"]
+ rows:
+ - [1,"aa",20,30,1.1,2.1,1590738995000,"2020-05-01"]
+ - [4,"dd",20,33,1.4,2.4,1590738993000,"2020-05-04"]
+ - [5,"ee",21,34,1.5,2.5,1590738994000,"2020-05-05"]
+ - columns: ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"]
+ indexs: ["index1:c3:c7"]
+ rows:
+ - [2,"bb",20,31,1.2,2.2,1590738991000,"2020-05-02"]
+ - [3,"cc",20,32,1.3,2.3,1590738992000,"2020-05-03"]
+ sql: |
+ SELECT id, c1, c3, sum(c4) OVER w1 as w1_c4_sum FROM {0} WINDOW w1 AS (UNION {1} PARTITION BY {0}.c3 ORDER BY {0}.c7 ROWS BETWEEN 2 PRECEDING AND CURRENT ROW);
+ expect:
+ order: id
+ columns: ["id int","c1 string","c3 int","w1_c4_sum bigint"]
+ rows:
+ - [1,"aa",20,93]
+ - [4,"dd",20,96]
+ - [5,"ee",21,34]
+ - id: 19
+ desc: all main-table ts values are less than the union table's
+ inputs:
+ - columns: ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"]
+ indexs: ["index1:c3:c7"]
+ rows:
+ - [1,"aa",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [4,"dd",20,33,1.4,2.4,1590738991000,"2020-05-04"]
+ - [5,"ee",21,34,1.5,2.5,1590738992000,"2020-05-05"]
+ - columns: ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"]
+ indexs: ["index1:c3:c7"]
+ rows:
+ - [2,"bb",20,31,1.2,2.2,1590738993000,"2020-05-02"]
+ - [3,"cc",20,32,1.3,2.3,1590738994000,"2020-05-03"]
+ sql: |
+ SELECT id, c1, c3, sum(c4) OVER w1 as w1_c4_sum FROM {0} WINDOW w1 AS (UNION {1} PARTITION BY {0}.c3 ORDER BY {0}.c7 ROWS BETWEEN 2 PRECEDING AND CURRENT ROW);
+ expect:
+ order: id
+ columns: ["id int","c1 string","c3 int","w1_c4_sum bigint"]
+ rows:
+ - [1,"aa",20,30]
+ - [4,"dd",20,63]
+ - [5,"ee",21,34]
+ - id: 20
+ desc: main-table and union-table ts values overlap
+ inputs:
+ - columns: ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"]
+ indexs: ["index1:c3:c7"]
+ rows:
+ - [1,"aa",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [4,"dd",20,33,1.4,2.4,1590738994000,"2020-05-04"]
+ - [5,"ee",21,34,1.5,2.5,1590738995000,"2020-05-05"]
+ - columns: ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"]
+ indexs: ["index1:c3:c7"]
+ rows:
+ - [2,"bb",20,31,1.2,2.2,1590738991000,"2020-05-02"]
+ - [3,"cc",20,32,1.3,2.3,1590738992000,"2020-05-03"]
+ sql: |
+ SELECT id, c1, c3, sum(c4) OVER w1 as w1_c4_sum FROM {0} WINDOW w1 AS (UNION {1} PARTITION BY {0}.c3 ORDER BY {0}.c7 ROWS BETWEEN 2 PRECEDING AND CURRENT ROW);
+ expect:
+ order: id
+ columns: ["id int","c1 string","c3 int","w1_c4_sum bigint"]
+ rows:
+ - [1,"aa",20,30]
+ - [4,"dd",20,96]
+ - [5,"ee",21,34]
+ - id: 21
+ desc: main-table and union-table partitions are on the same node
+ inputs:
+ - columns: ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"]
+ indexs: ["index1:c3:c7"]
+ replicaNum: 3
+ partitionNum: 1
+ distribution:
+ - leader: "{tb_endpoint_1}"
+ followers: [ "{tb_endpoint_0}","{tb_endpoint_2}" ]
+ rows:
+ - [1,"aa",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [4,"dd",20,33,1.4,2.4,1590738994000,"2020-05-04"]
+ - [5,"ee",21,34,1.5,2.5,1590738995000,"2020-05-05"]
+ - columns: ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"]
+ indexs: ["index1:c3:c7"]
+ replicaNum: 3
+ partitionNum: 1
+ distribution:
+ - leader: "{tb_endpoint_1}"
+ followers: [ "{tb_endpoint_0}","{tb_endpoint_2}" ]
+ rows:
+ - [2,"bb",20,31,1.2,2.2,1590738991000,"2020-05-02"]
+ - [3,"cc",20,32,1.3,2.3,1590738992000,"2020-05-03"]
+ sql: |
+ SELECT id, c1, c3, sum(c4) OVER w1 as w1_c4_sum FROM {0} WINDOW w1 AS (UNION {1} PARTITION BY {0}.c3 ORDER BY {0}.c7 ROWS BETWEEN 2 PRECEDING AND CURRENT ROW);
+ expect:
+ order: id
+ columns: ["id int","c1 string","c3 int","w1_c4_sum bigint"]
+ rows:
+ - [1,"aa",20,30]
+ - [4,"dd",20,96]
+ - [5,"ee",21,34]
+ - id: 21-2
+ desc: main-table and union-table partitions are on different nodes
+ inputs:
+ - columns: ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"]
+ indexs: ["index1:c3:c7"]
+ distribution:
+ - leader: "{tb_endpoint_1}"
+ rows:
+ - [1,"aa",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [4,"dd",20,33,1.4,2.4,1590738994000,"2020-05-04"]
+ - [5,"ee",21,34,1.5,2.5,1590738995000,"2020-05-05"]
+ - columns: ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"]
+ indexs: ["index1:c3:c7"]
+ distribution:
+ - leader: "{tb_endpoint_0}"
+ rows:
+ - [2,"bb",20,31,1.2,2.2,1590738991000,"2020-05-02"]
+ - [3,"cc",20,32,1.3,2.3,1590738992000,"2020-05-03"]
+ sql: |
+ SELECT id, c1, c3, sum(c4) OVER w1 as w1_c4_sum FROM {0} WINDOW w1 AS (UNION {1} PARTITION BY {0}.c3 ORDER BY {0}.c7 ROWS BETWEEN 2 PRECEDING AND CURRENT ROW);
+ expect:
+ order: id
+ columns: ["id int","c1 string","c3 int","w1_c4_sum bigint"]
+ rows:
+ - [1,"aa",20,30]
+ - [4,"dd",20,96]
+ - [5,"ee",21,34]
+ - id: 22
+ desc: two union tables, one on the same node as the main table, the other not
+ db: db_wzx
+ sql: |
+ select
+ c1,
+ min(c1) over table_1_s2_t1 as table_1_c1_9,
+ min(c2) over table_1_s2_t1 as table_1_c2_10,
+ case when !isnull(lag(d1, 1) over table_1_s2_t1) then distinct_count(d1) over table_1_s2_t1 else null end as table_1_d1_11,
+ case when !isnull(lag(d2, 1) over table_1_s2_t1) then distinct_count(d2) over table_1_s2_t1 else null end as table_1_d2_12,
+ case when !isnull(lag(s1, 1) over table_1_s2_t1) then distinct_count(s1) over table_1_s2_t1 else null end as table_1_s1_13
+ from
+ {0} as main
+ window table_1_s2_t1 as (partition by s2 order by t1 rows_range between 1d preceding and 0s preceding INSTANCE_NOT_IN_WINDOW);
+ inputs:
+ - columns: ["label int", "s1 string","s2 string", "t1 timestamp","t2 date","d1 string", "d2 string", "c1 int", "c2 bigint",
+ "ai string", "kn string", "ks string"]
+ indexs: ["index1:s2:t1", "index2:s1:t1", "index3:d1:t1", "index4:d2:t1"]
+ distribution:
+ - leader: "{tb_endpoint_1}"
+ rows:
+ - [1, "1", "2", 1600946381104, "2019-07-18", "xx", "xx", 1, 2 , "3x","4x","kx"]
+ - columns: ["s1 string","s2 string", "t1 timestamp","t2 date","d1 string", "d2 string", "c1 int", "c2 bigint",
+ "ai string", "kn string", "ks string"]
+ indexs: ["index1:s2:t1"]
+ distribution:
+ - leader: "{tb_endpoint_1}"
+ rows:
+ - ["1", "2", 1600946381104, "2019-07-18", "xx", "xx", 1, 2 , "3x","4x","kx"]
+ - columns: ["s1 string","s2 string", "t1 timestamp","t2 date","d1 string", "d2 string", "c1 int", "c2 bigint",
+ "ai string", "kn string", "ks string"]
+ indexs: ["index1:s1:t1"]
+ distribution:
+ - leader: "{tb_endpoint_0}"
+ rows:
+ - ["1", "2", 1600946381104, "2019-07-18", "xx", "xx", 1, 2 , "3x","4x","kx"]
+ - columns: ["s1 string","s2 string", "t1 timestamp","t2 date","d1 string", "d2 string", "c1 int", "c2 bigint",
+ "ai string", "kn string", "ks string"]
+ indexs: ["index1:s1:t1"]
+ distribution:
+ - leader: "{tb_endpoint_0}"
+ rows:
+ - ["1", "2", 1600946381104, "2019-07-18", "xx", "xx", 1, 2 , "3x","4x","kx"]
+ expect:
+ order: c1
+ columns: ["c1 int", "table_1_c1_9 int", "table_1_c2_10 bigint", "table_1_d1_11 bigint", "table_1_d2_12 bigint", "table_1_s1_13 bigint"]
+ rows:
+ - [1, 1, 2, NULL, NULL, NULL]
diff --git a/cases/integration_test/window/test_window_union_cluster_thousand.yaml b/cases/integration_test/window/test_window_union_cluster_thousand.yaml
new file mode 100644
index 00000000000..aa12f1b549f
--- /dev/null
+++ b/cases/integration_test/window/test_window_union_cluster_thousand.yaml
@@ -0,0 +1,1044 @@
+# Copyright 2021 4Paradigm
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+
+db: test_zw
+debugs: []
+version: 0.5.0
+cases:
+ - id: 0
+ desc: normal union
+ mode: disk-unsupport
+ inputs:
+ - columns: ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"]
+ indexs: ["index1:c3:c7"]
+ rows:
+ - [1,"aa",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [4,"dd",20,33,1.4,2.4,1590738993000,"2020-05-04"]
+ - [5,"ee",21,34,1.5,2.5,1590738994000,"2020-05-05"]
+ - columns: ["id int","c1 string","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date"]
+ indexs: ["index1:c3:c7"]
+ rows:
+ - [2,"bb",20,31,1.2,2.2,1590738991000,"2020-05-02"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [2,"bb",20,30,1.1,2.1,1590738990000,"2020-05-01"]
+ - [3,"cc",20,32,1.3,2.3,1590738992000,"2020-05-03"]
+ sql: |
+ SELECT id, c1, c3, sum(c4) OVER w1 as w1_c4_sum FROM {0} WINDOW w1 AS (UNION {1} PARTITION BY {0}.c3 ORDER BY {0}.c7 ROWS BETWEEN 2 PRECEDING AND CURRENT ROW);
+ expect:
+ order: id
+ columns: ["id int","c1 string","c3 int","w1_c4_sum bigint"]
+ rows:
+ - [1,"aa",20,90]
+ - [4,"dd",20,96]
+ - [5,"ee",21,34]
\ No newline at end of file
diff --git a/cases/integration_test/window/window_attributes.yaml b/cases/integration_test/window/window_attributes.yaml
new file mode 100644
index 00000000000..c77844b7b00
--- /dev/null
+++ b/cases/integration_test/window/window_attributes.yaml
@@ -0,0 +1,536 @@
+# window query test with OpenMLDB specific window attributes:
+# - EXCLUDE CURRENT_TIME
+# - EXCLUDE CURRENT_ROW
+# - INSTANCE_NOT_IN_WINDOW
+# - MAXSIZE
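+#
+# For illustration only (a hypothetical sketch, not one of the cases below):
+# these attributes are appended after the frame bounds of the window clause,
+# e.g. combining MAXSIZE with EXCLUDE CURRENT_ROW on a table t1(id, ts, g, val):
+#
+#   SELECT id, count(val) OVER w AS cnt
+#   FROM t1 WINDOW w AS (
+#     PARTITION BY g ORDER BY ts
+#     ROWS_RANGE BETWEEN 10s PRECEDING AND CURRENT ROW
+#     MAXSIZE 2 EXCLUDE CURRENT_ROW);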
+
+debugs: []
+version: 0.6.0
+db: test_java
+cases:
+ - id: 0
+ desc: ROWS_RANGE window with exclude_current_row
+ inputs:
+ - name: t1
+ columns:
+ - id int
+ - ts timestamp
+ - g int
+ - val int
+ indexs:
+ - idx:g:ts
+ data: |
+ 0, 0, 111, 0
+ 1, 0, 111, 0
+ 2, 99000, 111, 21
+ 3, 100000, 111, 22
+ 4, 101000, 111, 23
+ 5, 100000, 114, 56
+ sql: |
+ select
+ id,
+ count(val) over w as cnt,
+ max(val) over w as mv,
+ min(val) over w as mi,
+ lag(val, 1) over w as l1
+ from t1 window w as(
+ partition by `g` order by `ts`
+ ROWS_RANGE between 2s PRECEDING and 0s preceding EXCLUDE CURRENT_ROW);
+ batch_plan: |
+ PROJECT(type=WindowAggregation, EXCLUDE_CURRENT_ROW)
+ +-WINDOW(partition_keys=(g), orders=(ts ASC), range=(ts, 2000 PRECEDING, 0 PRECEDING))
+ PROJECT(type=WindowAggregation, NEED_APPEND_INPUT)
+ +-WINDOW(partition_keys=(), orders=(ASC), rows=(ts, 1 PRECEDING, 0 CURRENT))
+ DATA_PROVIDER(type=Partition, table=t1, index=idx)
+ request_plan: |
+ SIMPLE_PROJECT(sources=(id, cnt, mv, mi, l1))
+ REQUEST_JOIN(type=kJoinTypeConcat)
+ PROJECT(type=Aggregation)
+ REQUEST_UNION(EXCLUDE_CURRENT_ROW, partition_keys=(), orders=(ASC), range=(ts, 2000 PRECEDING, 0 PRECEDING), index_keys=(g))
+ DATA_PROVIDER(request=t1)
+ DATA_PROVIDER(type=Partition, table=t1, index=idx)
+ PROJECT(type=Aggregation)
+ REQUEST_UNION(partition_keys=(), orders=(ASC), rows=(ts, 1 PRECEDING, 0 CURRENT), index_keys=(g))
+ DATA_PROVIDER(request=t1)
+ DATA_PROVIDER(type=Partition, table=t1, index=idx)
+ expect:
+ columns:
+ - id int
+ - cnt int64
+ - mv int
+ - mi int
+ - l1 int
+ order: id
+ rows:
+ - [0, 0, NULL, NULL, NULL]
+ - [1, 1, 0, 0, 0]
+ - [2, 0, NULL, NULL, 0]
+ - [3, 1, 21, 21, 21]
+ - [4, 2, 22, 21, 22]
+ - [5, 0, NULL, NULL, NULL]
+ - id: 1
+ desc: |
+      ROWS window with exclude_current_row; '0 PRECEDING EXCLUDE CURRENT_ROW' is effectively the same as '0 OPEN PRECEDING'
+ inputs:
+ - name: t1
+ columns:
+ - id int
+ - ts timestamp
+ - g int
+ - val int
+ indexs:
+ - idx:g:ts
+ data: |
+ 1, 99000, 111, 21
+ 2, 100000, 111, 22
+ 3, 101000, 111, 23
+ 4, 100000, 114, 56
+ sql: |
+ select
+ id,
+ count(val) over w as cnt,
+ max(val) over w as mv,
+ min(val) over w as mi,
+ lag(val, 1) over w as l1
+ from t1 window w as(
+ partition by `g` order by `ts`
+ ROWS between 2 PRECEDING and 0 preceding EXCLUDE CURRENT_ROW);
+ expect:
+ columns:
+ - id int
+ - cnt int64
+ - mv int
+ - mi int
+ - l1 int
+ order: id
+ rows:
+ - [1, 0, NULL, NULL, NULL]
+ - [2, 1, 21, 21, 21]
+ - [3, 2, 22, 21, 22]
+ - [4, 0, NULL, NULL, NULL]
+ - id: 2
+ desc: |
+ ROWS_RANGE pure-history window with exclude_current_row
+      whether EXCLUDE CURRENT_ROW is set does not matter
+ inputs:
+ - name: t1
+ columns:
+ - id int
+ - ts timestamp
+ - g int
+ - val int
+ indexs:
+ - idx:g:ts
+ data: |
+ 1, 99000, 111, 21
+ 2, 100000, 111, 22
+ 3, 101000, 111, 23
+ 4, 100000, 114, 56
+ sql: |
+ select
+ id,
+ count(val) over w as cnt,
+ max(val) over w as mv,
+ min(val) over w as mi,
+ lag(val, 1) over w as l1
+ FROM t1 WINDOW w as(
+ PARTITION by `g` ORDER by `ts`
+ ROWS_RANGE BETWEEN 2s PRECEDING AND 1s PRECEDING EXCLUDE CURRENT_ROW);
+ batch_plan: |
+ PROJECT(type=WindowAggregation)
+ +-WINDOW(partition_keys=(g), orders=(ts ASC), range=(ts, 2000 PRECEDING, 1000 PRECEDING))
+ PROJECT(type=WindowAggregation, NEED_APPEND_INPUT)
+ +-WINDOW(partition_keys=(), orders=(ASC), rows=(ts, 1 PRECEDING, 0 CURRENT))
+ DATA_PROVIDER(type=Partition, table=t1, index=idx)
+ request_plan: |
+ SIMPLE_PROJECT(sources=(id, cnt, mv, mi, l1))
+ REQUEST_JOIN(type=kJoinTypeConcat)
+ PROJECT(type=Aggregation)
+ REQUEST_UNION(partition_keys=(), orders=(ASC), range=(ts, 2000 PRECEDING, 1000 PRECEDING), index_keys=(g))
+ DATA_PROVIDER(request=t1)
+ DATA_PROVIDER(type=Partition, table=t1, index=idx)
+ PROJECT(type=Aggregation)
+ REQUEST_UNION(partition_keys=(), orders=(ASC), rows=(ts, 1 PRECEDING, 0 CURRENT), index_keys=(g))
+ DATA_PROVIDER(request=t1)
+ DATA_PROVIDER(type=Partition, table=t1, index=idx)
+ expect:
+ columns:
+ - id int
+ - cnt int64
+ - mv int
+ - mi int
+ - l1 int
+ order: id
+ rows:
+ - [1, 0, NULL, NULL, NULL]
+ - [2, 1, 21, 21, 21]
+ - [3, 2, 22, 21, 22]
+ - [4, 0, NULL, NULL, NULL]
+ - id: 3
+ desc: |
+ ROWS pure-history window with exclude_current_row
+      whether EXCLUDE CURRENT_ROW is set does not matter
+ inputs:
+ - name: t1
+ columns:
+ - id int
+ - ts timestamp
+ - g int
+ - val int
+ indexs:
+ - idx:g:ts
+ data: |
+ 1, 99000, 111, 21
+ 2, 100000, 111, 22
+ 3, 101000, 111, 23
+ 4, 100000, 114, 56
+ sql: |
+ select
+ id,
+ count(val) over w as cnt,
+ max(val) over w as mv,
+ min(val) over w as mi,
+ lag(val, 1) over w as l1
+ FROM t1 WINDOW w as(
+ PARTITION by `g` ORDER by `ts`
+ ROWS BETWEEN 2 PRECEDING AND 1 PRECEDING EXCLUDE CURRENT_ROW);
+ batch_plan: |
+ PROJECT(type=WindowAggregation)
+ +-WINDOW(partition_keys=(g), orders=(ts ASC), rows=(ts, 2 PRECEDING, 0 OPEN PRECEDING))
+ PROJECT(type=WindowAggregation, NEED_APPEND_INPUT)
+ +-WINDOW(partition_keys=(), orders=(ASC), rows=(ts, 1 PRECEDING, 0 CURRENT))
+ DATA_PROVIDER(type=Partition, table=t1, index=idx)
+ request_plan: |
+ SIMPLE_PROJECT(sources=(id, cnt, mv, mi, l1))
+ REQUEST_JOIN(type=kJoinTypeConcat)
+ PROJECT(type=Aggregation)
+ REQUEST_UNION(partition_keys=(), orders=(ASC), rows=(ts, 2 PRECEDING, 0 OPEN PRECEDING), index_keys=(g))
+ DATA_PROVIDER(request=t1)
+ DATA_PROVIDER(type=Partition, table=t1, index=idx)
+ PROJECT(type=Aggregation)
+ REQUEST_UNION(partition_keys=(), orders=(ASC), rows=(ts, 1 PRECEDING, 0 CURRENT), index_keys=(g))
+ DATA_PROVIDER(request=t1)
+ DATA_PROVIDER(type=Partition, table=t1, index=idx)
+ expect:
+ columns:
+ - id int
+ - cnt int64
+ - mv int
+ - mi int
+ - l1 int
+ order: id
+ rows:
+ - [1, 0, NULL, NULL, NULL]
+ - [2, 1, 21, 21, 21]
+ - [3, 2, 22, 21, 22]
+ - [4, 0, NULL, NULL, NULL]
+
+ - id: 4
+ desc: |
+ rows_range current history window, exclude current_row with maxsize
+ inputs:
+ - name: t1
+ columns:
+ - id int
+ - ts timestamp
+ - g int
+ - val int
+ indexs:
+ - idx:g:ts
+ data: |
+ 1, 99000, 111, 21
+ 2, 100000, 111, 22
+ 3, 101000, 111, 23
+ 4, 102000, 111, 44
+ 5, 100000, 114, 56
+ 6, 102000, 114, 52
+ sql: |
+ select
+ id,
+ count(val) over w as cnt,
+ max(val) over w as mv,
+ min(val) over w as mi,
+ lag(val, 1) over w as l1
+ FROM t1 WINDOW w as(
+ PARTITION by `g` ORDER by `ts`
+ ROWS_RANGE BETWEEN 3s PRECEDING AND 0s PRECEDING MAXSIZE 2 EXCLUDE CURRENT_ROW);
+ expect:
+ columns:
+ - id int
+ - cnt int64
+ - mv int
+ - mi int
+ - l1 int
+ order: id
+ rows:
+ - [1, 0, NULL, NULL, NULL]
+ - [2, 1, 21, 21, 21]
+ - [3, 2, 22, 21, 22]
+ - [4, 2, 23, 22, 23]
+ - [5, 0, NULL, NULL, NULL]
+ - [6, 1, 56, 56, 56]
+
+ - id: 5
+ desc: |
+      ROWS_RANGE window with end frame OPEN; EXCLUDE CURRENT_ROW does not matter
+ inputs:
+ - name: t1
+ columns:
+ - id int
+ - ts timestamp
+ - g int
+ - val int
+ indexs:
+ - idx:g:ts
+ data: |
+ 1, 99000, 111, 21
+ 2, 100000, 111, 22
+ 3, 101000, 111, 23
+ 4, 102000, 111, 44
+ 5, 100000, 114, 56
+ 6, 102000, 114, 52
+ sql: |
+ select
+ id,
+ count(val) over w as cnt,
+ max(val) over w as mv,
+ min(val) over w as mi,
+ lag(val, 1) over w as l1
+ FROM t1 WINDOW w as(
+ PARTITION by `g` ORDER by `ts`
+ ROWS_RANGE BETWEEN 3s PRECEDING AND 0s OPEN PRECEDING MAXSIZE 2 EXCLUDE CURRENT_ROW);
+ batch_plan: |
+ PROJECT(type=WindowAggregation)
+ +-WINDOW(partition_keys=(g), orders=(ts ASC), range=(ts, 3000 PRECEDING, 0 OPEN PRECEDING, maxsize=2))
+ PROJECT(type=WindowAggregation, NEED_APPEND_INPUT)
+ +-WINDOW(partition_keys=(), orders=(ASC), rows=(ts, 1 PRECEDING, 0 CURRENT))
+ DATA_PROVIDER(type=Partition, table=t1, index=idx)
+ request_plan: |
+ SIMPLE_PROJECT(sources=(id, cnt, mv, mi, l1))
+ REQUEST_JOIN(type=kJoinTypeConcat)
+ PROJECT(type=Aggregation)
+ REQUEST_UNION(partition_keys=(), orders=(ASC), range=(ts, 3000 PRECEDING, 0 OPEN PRECEDING, maxsize=2), index_keys=(g))
+ DATA_PROVIDER(request=t1)
+ DATA_PROVIDER(type=Partition, table=t1, index=idx)
+ PROJECT(type=Aggregation)
+ REQUEST_UNION(partition_keys=(), orders=(ASC), rows=(ts, 1 PRECEDING, 0 CURRENT), index_keys=(g))
+ DATA_PROVIDER(request=t1)
+ DATA_PROVIDER(type=Partition, table=t1, index=idx)
+ expect:
+ columns:
+ - id int
+ - cnt int64
+ - mv int
+ - mi int
+ - l1 int
+ order: id
+ rows:
+ - [1, 0, NULL, NULL, NULL]
+ - [2, 1, 21, 21, 21]
+ - [3, 2, 22, 21, 22]
+ - [4, 2, 23, 22, 23]
+ - [5, 0, NULL, NULL, NULL]
+ - [6, 1, 56, 56, 56]
+
+ - id: 6
+ desc: |
+      ROWS window with end frame OPEN; EXCLUDE CURRENT_ROW does not matter
+ inputs:
+ - name: t1
+ columns:
+ - id int
+ - ts timestamp
+ - g int
+ - val int
+ indexs:
+ - idx:g:ts
+ data: |
+ 1, 99000, 111, 21
+ 2, 100000, 111, 22
+ 3, 101000, 111, 23
+ 4, 102000, 111, 44
+ 5, 100000, 114, 56
+ 6, 102000, 114, 52
+ sql: |
+ select
+ id,
+ count(val) over w as cnt,
+ max(val) over w as mv,
+ min(val) over w as mi,
+ lag(val, 1) over w as l1
+ FROM t1 WINDOW w as(
+ PARTITION by `g` ORDER by `ts`
+ ROWS BETWEEN 3 PRECEDING AND 0 OPEN PRECEDING EXCLUDE CURRENT_ROW);
+ batch_plan: |
+ PROJECT(type=WindowAggregation)
+ +-WINDOW(partition_keys=(g), orders=(ts ASC), rows=(ts, 3 PRECEDING, 0 OPEN PRECEDING))
+ PROJECT(type=WindowAggregation, NEED_APPEND_INPUT)
+ +-WINDOW(partition_keys=(), orders=(ASC), rows=(ts, 1 PRECEDING, 0 CURRENT))
+ DATA_PROVIDER(type=Partition, table=t1, index=idx)
+ request_plan: |
+ SIMPLE_PROJECT(sources=(id, cnt, mv, mi, l1))
+ REQUEST_JOIN(type=kJoinTypeConcat)
+ PROJECT(type=Aggregation)
+ REQUEST_UNION(partition_keys=(), orders=(ASC), rows=(ts, 3 PRECEDING, 0 OPEN PRECEDING), index_keys=(g))
+ DATA_PROVIDER(request=t1)
+ DATA_PROVIDER(type=Partition, table=t1, index=idx)
+ PROJECT(type=Aggregation)
+ REQUEST_UNION(partition_keys=(), orders=(ASC), rows=(ts, 1 PRECEDING, 0 CURRENT), index_keys=(g))
+ DATA_PROVIDER(request=t1)
+ DATA_PROVIDER(type=Partition, table=t1, index=idx)
+ expect:
+ columns:
+ - id int
+ - cnt int64
+ - mv int
+ - mi int
+ - l1 int
+ order: id
+ rows:
+ - [1, 0, NULL, NULL, NULL]
+ - [2, 1, 21, 21, 21]
+ - [3, 2, 22, 21, 22]
+ - [4, 3, 23, 21, 23]
+ - [5, 0, NULL, NULL, NULL]
+ - [6, 1, 56, 56, 56]
+
+ - id: 7
+ desc: |
+ ROWS_RANGE window with end frame 'CURRENT_ROW', exclude current_row
+ inputs:
+ - name: t1
+ columns:
+ - id int
+ - ts timestamp
+ - g int
+ - val int
+ indexs:
+ - idx:g:ts
+ data: |
+ 1, 99000, 111, 21
+ 2, 100000, 111, 22
+ 3, 101000, 111, 23
+ 4, 102000, 111, 44
+ 5, 100000, 114, 56
+ 6, 102000, 114, 52
+ sql: |
+ select
+ id,
+ count(val) over w as cnt,
+ max(val) over w as mv,
+ min(val) over w as mi,
+ lag(val, 1) over w as l1
+ FROM t1 WINDOW w as(
+ PARTITION by `g` ORDER by `ts`
+ ROWS_RANGE BETWEEN 3s PRECEDING AND CURRENT ROW MAXSIZE 2 EXCLUDE CURRENT_ROW);
+ expect:
+ columns:
+ - id int
+ - cnt int64
+ - mv int
+ - mi int
+ - l1 int
+ order: id
+ rows:
+ - [1, 0, NULL, NULL, NULL]
+ - [2, 1, 21, 21, 21]
+ - [3, 2, 22, 21, 22]
+ - [4, 2, 23, 22, 23]
+ - [5, 0, NULL, NULL, NULL]
+ - [6, 1, 56, 56, 56]
+
+ - id: 8
+ desc: |
+ ROWS window with end frame 'CURRENT_ROW', exclude current_row
+ inputs:
+ - name: t1
+ columns:
+ - id int
+ - ts timestamp
+ - g int
+ - val int
+ indexs:
+ - idx:g:ts
+ data: |
+ 1, 99000, 111, 21
+ 2, 100000, 111, 22
+ 3, 101000, 111, 23
+ 4, 102000, 111, 44
+ 5, 100000, 114, 56
+ 6, 102000, 114, 52
+ sql: |
+ select
+ id,
+ count(val) over w as cnt,
+ max(val) over w as mv,
+ min(val) over w as mi,
+ lag(val, 1) over w as l1
+ FROM t1 WINDOW w as(
+ PARTITION by `g` ORDER by `ts`
+ ROWS BETWEEN 3 PRECEDING AND CURRENT ROW EXCLUDE CURRENT_ROW);
+ expect:
+ columns:
+ - id int
+ - cnt int64
+ - mv int
+ - mi int
+ - l1 int
+ order: id
+ data: |
+ 1, 0, NULL, NULL, NULL
+ 2, 1, 21, 21, 21
+ 3, 2, 22, 21, 22
+ 4, 3, 23, 21, 23
+ 5, 0, NULL, NULL, NULL
+ 6, 1, 56, 56, 56
+ - id: 9
+ desc: |
+ ROWS Window with exclude current_time and exclude current_row
+ mode: disk-unsupport
+ inputs:
+ - name: t1
+ columns:
+ - id int
+ - ts timestamp
+ - g int
+ - val int
+ indexs:
+ - idx:g:ts
+ data: |
+ 1, 99000, 111, 21
+ 2, 100000, 111, 22
+ 3, 101000, 111, 23
+ 4, 102000, 111, 44
+ 5, 0, 114, 0
+ 6, 0, 114, 99
+ 7, 100000, 114, 56
+ 8, 102000, 114, 52
+ 9, 104000, 114, 33
+ sql: |
+ select
+ id,
+ count(val) over w as cnt,
+ max(val) over w as mv,
+ min(val) over w as mi,
+ lag(val, 1) over w as l1
+ FROM t1 WINDOW w as(
+ PARTITION by `g` ORDER by `ts`
+ ROWS BETWEEN 3 PRECEDING AND CURRENT ROW EXCLUDE CURRENT_TIME EXCLUDE CURRENT_ROW);
+ expect:
+ columns:
+ - id int
+ - cnt int64
+ - mv int
+ - mi int
+ - l1 int
+ order: id
+ data: |
+ 1, 0, NULL, NULL, NULL
+ 2, 1, 21, 21, 21
+ 3, 2, 22, 21, 22
+ 4, 3, 23, 21, 23
+ 5, 0, NULL, NULL, NULL
+ 6, 0, NULL, NULL, NULL
+ 7, 2, 99, 0, 99
+ 8, 3, 99, 0, 56
+ 9, 3, 99, 52, 52
diff --git a/cases/plan/back_quote_identifier.yaml b/cases/plan/back_quote_identifier.yaml
index c01532b9eb8..575aed3a2d9 100644
--- a/cases/plan/back_quote_identifier.yaml
+++ b/cases/plan/back_quote_identifier.yaml
@@ -157,17 +157,19 @@ cases:
+-node[kDistributions]
+-distribution_list[list]:
+-0:
- | +-node[kPartitionMeta]
- | +-endpoint: leader1
- | +-role_type: leader
- +-1:
- | +-node[kPartitionMeta]
- | +-endpoint: fo1
- | +-role_type: follower
- +-2:
- +-node[kPartitionMeta]
- +-endpoint: fo2
- +-role_type: follower
+ +-list[list]:
+ +-0:
+ | +-node[kPartitionMeta]
+ | +-endpoint: leader1
+ | +-role_type: leader
+ +-1:
+ | +-node[kPartitionMeta]
+ | +-endpoint: fo1
+ | +-role_type: follower
+ +-2:
+ +-node[kPartitionMeta]
+ +-endpoint: fo2
+ +-role_type: follower
- id: 23
desc: create index with back quote item name
sql: |
diff --git a/cases/plan/create.yaml b/cases/plan/create.yaml
index 58fd9199212..1a3d1ea0348 100644
--- a/cases/plan/create.yaml
+++ b/cases/plan/create.yaml
@@ -189,17 +189,19 @@ cases:
+-node[kDistributions]
+-distribution_list[list]:
+-0:
- | +-node[kPartitionMeta]
- | +-endpoint: leader1
- | +-role_type: leader
- +-1:
- | +-node[kPartitionMeta]
- | +-endpoint: fo1
- | +-role_type: follower
- +-2:
- +-node[kPartitionMeta]
- +-endpoint: fo2
- +-role_type: follower
+ +-list[list]:
+ +-0:
+ | +-node[kPartitionMeta]
+ | +-endpoint: leader1
+ | +-role_type: leader
+ +-1:
+ | +-node[kPartitionMeta]
+ | +-endpoint: fo1
+ | +-role_type: follower
+ +-2:
+ +-node[kPartitionMeta]
+ +-endpoint: fo2
+ +-role_type: follower
- id: 14
desc: Create table statement (typical 2)
sql: |
@@ -242,17 +244,19 @@ cases:
+-node[kDistributions]
+-distribution_list[list]:
+-0:
- | +-node[kPartitionMeta]
- | +-endpoint: leader1
- | +-role_type: leader
- +-1:
- | +-node[kPartitionMeta]
- | +-endpoint: fo1
- | +-role_type: follower
- +-2:
- +-node[kPartitionMeta]
- +-endpoint: fo2
- +-role_type: follower
+ +-list[list]:
+ +-0:
+ | +-node[kPartitionMeta]
+ | +-endpoint: leader1
+ | +-role_type: leader
+ +-1:
+ | +-node[kPartitionMeta]
+ | +-endpoint: fo1
+ | +-role_type: follower
+ +-2:
+ +-node[kPartitionMeta]
+ +-endpoint: fo2
+ +-role_type: follower
- id: 15
desc: Create table statement (typical 3)
@@ -296,17 +300,19 @@ cases:
+-node[kDistributions]
+-distribution_list[list]:
+-0:
- | +-node[kPartitionMeta]
- | +-endpoint: leader1
- | +-role_type: leader
- +-1:
- | +-node[kPartitionMeta]
- | +-endpoint: fo1
- | +-role_type: follower
- +-2:
- +-node[kPartitionMeta]
- +-endpoint: fo2
- +-role_type: follower
+ +-list[list]:
+ +-0:
+ | +-node[kPartitionMeta]
+ | +-endpoint: leader1
+ | +-role_type: leader
+ +-1:
+ | +-node[kPartitionMeta]
+ | +-endpoint: fo1
+ | +-role_type: follower
+ +-2:
+ +-node[kPartitionMeta]
+ +-endpoint: fo2
+ +-role_type: follower
- id: 16
desc: empty create table statement
@@ -914,4 +920,74 @@ cases:
+-table_option_list[list]:
+-0:
+-node[kStorageMode]
- +-storage_mode: hdd
\ No newline at end of file
+ +-storage_mode: hdd
+
+ - id: 31
+ desc: Create table statement (typical 4)
+ sql: |
+ create table if not exists t3 (a int32, b timestamp, index(key=a, ignored_key='seb', ts=b, ttl=1800,
+ ttl_type=absorlat, version=a ) ) options (replicanum = 4, partitionnum = 5, ignored_option = 'abc',
+ distribution = [ ('leader1', ['fo1', 'fo2']), ('leader2', ['fo1', 'fo2'])])
+ expect:
+ node_tree_str: |
+ +-node[CREATE]
+ +-table: t3
+ +-IF NOT EXIST: 1
+ +-column_desc_list[list]:
+ | +-0:
+ | | +-node[kColumnDesc]
+ | | +-column_name: a
+ | | +-column_type: int32
+ | | +-NOT NULL: 0
+ | +-1:
+ | | +-node[kColumnDesc]
+ | | +-column_name: b
+ | | +-column_type: timestamp
+ | | +-NOT NULL: 0
+ | +-2:
+ | +-node[kColumnIndex]
+ | +-keys: [a]
+ | +-ts_col: b
+ | +-abs_ttl: -2
+ | +-lat_ttl: 1800
+ | +-ttl_type: absorlat
+ | +-version_column: a
+ | +-version_count: 1
+ +-table_option_list[list]:
+ +-0:
+ | +-node[kReplicaNum]
+ | +-replica_num: 4
+ +-1:
+ | +-node[kPartitionNum]
+ | +-partition_num: 5
+ +-2:
+ +-node[kDistributions]
+ +-distribution_list[list]:
+ +-0:
+ | +-list[list]:
+ | +-0:
+ | | +-node[kPartitionMeta]
+ | | +-endpoint: leader1
+ | | +-role_type: leader
+ | +-1:
+ | | +-node[kPartitionMeta]
+ | | +-endpoint: fo1
+ | | +-role_type: follower
+ | +-2:
+ | +-node[kPartitionMeta]
+ | +-endpoint: fo2
+ | +-role_type: follower
+ +-1:
+ +-list[list]:
+ +-0:
+ | +-node[kPartitionMeta]
+ | +-endpoint: leader2
+ | +-role_type: leader
+ +-1:
+ | +-node[kPartitionMeta]
+ | +-endpoint: fo1
+ | +-role_type: follower
+ +-2:
+ +-node[kPartitionMeta]
+ +-endpoint: fo2
+ +-role_type: follower
diff --git a/cases/plan/error_unsupport_sql.yaml b/cases/plan/error_unsupport_sql.yaml
index 8bda40ec851..9681a75cf10 100644
--- a/cases/plan/error_unsupport_sql.yaml
+++ b/cases/plan/error_unsupport_sql.yaml
@@ -96,9 +96,6 @@ cases:
sql: |
SELECT SUM(COL1), SUM(COL) over w1 FROM t1
window as w1(partition by col0 order by col5 rows between 100 preceding and current row);
- - id: delete_table
- sql: |
- delete from t1 where id = 12;
- id: delete_job_2
desc: abc here is not job id but alias
sql: |
diff --git a/cases/query/const_query.yaml b/cases/query/const_query.yaml
index 5591f55e6d3..304f0486073 100644
--- a/cases/query/const_query.yaml
+++ b/cases/query/const_query.yaml
@@ -13,6 +13,7 @@
# limitations under the License.
debugs: []
+version: 0.5.0
cases:
- id: 0
desc: select const number
@@ -21,7 +22,8 @@ cases:
sql: |
select 1 as id, 2 as col1, 3.3 as col2;
expect:
- schema: id:int32, col1:int, col2:double
+# schema: id:int32, col1:int, col2:double
+ columns: ["id int","col1 int","col2 double"]
order: id
rows:
- [1, 2, 3.3]
@@ -32,7 +34,8 @@ cases:
sql: |
select 1 as id, "hello_world" as col1;
expect:
- schema: id:int32, col1:string
+# schema: id:int32, col1:string
+ columns: ["id int","col1 string"]
order: id
rows:
- [1, "hello_world"]
@@ -43,7 +46,8 @@ cases:
sql: |
select 1 as id, substring("hello_world", 3, 6) as col1;
expect:
- schema: id:int32, col1:string
+# schema: id:int32, col1:string
+ columns: ["id int","col1 string"]
order: id
rows:
- [1, "llo_wo"]
@@ -54,7 +58,8 @@ cases:
sql: |
select 1 as id, substring("hello_world", 3) as col1;
expect:
- schema: id:int32, col1:string
+# schema: id:int32, col1:string
+ columns: ["id int","col1 string"]
order: id
rows:
- [1, "llo_world"]
@@ -65,13 +70,14 @@ cases:
sql: |
select 1 as id, concat("hello", "world", "abc") as col1;
expect:
- schema: id:int32, col1:string
+ columns: ["id int","col1 string"]
order: id
rows:
- [1, "helloworldabc"]
- id: 5
desc: cast常量 using CAST operator
mode: request-unsupport
+ db: db1
inputs:
- columns: ["c1 int", "c2 string", "c5 bigint"]
indexs: ["index1:c1:c5"]
@@ -82,10 +88,11 @@ cases:
expect:
columns: ["c1 int", "c2 bigint", "c3 float", "c4 double", "c5 timestamp", "c6 date", "c7 string"]
rows:
- - [10, 10, 10.0, 10.0, 1590115460000, 2020-05-20, "10"]
+ - [10, 10, 10.0, 10.0, 1590115460000, '2020-05-20', "10"]
- id: 6
desc: cast NULL常量 using CAST operator
mode: request-unsupport
+ db: db1
inputs:
- columns: ["c1 int", "c2 string", "c5 bigint"]
indexs: ["index1:c1:c5"]
@@ -100,6 +107,7 @@ cases:
- id: 7
desc: cast常量 using type() function
mode: request-unsupport
+ db: db1
inputs:
- columns: ["c1 int", "c2 string", "c5 bigint"]
indexs: ["index1:c1:c5"]
@@ -110,10 +118,11 @@ cases:
expect:
columns: ["c1 int", "c2 bigint", "c3 float", "c4 double", "c5 timestamp", "c6 date", "c7 string"]
rows:
- - [10, 10, 10.0, 10.0, 1590115460000, 2020-05-20, "10"]
+ - [10, 10, 10.0, 10.0, 1590115460000, '2020-05-20', "10"]
- id: 8
desc: cast NULL常量 using type(NULL) function
mode: request-unsupport
+ db: db1
inputs:
- columns: ["c1 int", "c2 string", "c5 bigint"]
indexs: ["index1:c1:c5"]
@@ -128,6 +137,7 @@ cases:
- id: 9
desc: different const node type
mode: request-unsupport
+ db: db1
sql: |
select true c1, int16(3) c2, 13 c3, 10.0 c4, 'a string' c5, date(timestamp(1590115420000)) c6, timestamp(1590115420000) c7;
expect:
diff --git a/cases/query/last_join_window_query.yaml b/cases/query/last_join_window_query.yaml
index a8b0775ef80..7af728bb2a2 100644
--- a/cases/query/last_join_window_query.yaml
+++ b/cases/query/last_join_window_query.yaml
@@ -164,3 +164,74 @@ cases:
3, 55, 1590115420001, 1590115420001, CCC, 3
4, 55, 1590115420002, 1590115420002, DDDD, 7
5, 55, 1590115420003, 1590115420002, FFFFFF, 12
+
+ - id: 4
+ desc: |
+ window with a last join subquery
+ inputs:
+ - name: actions
+ columns:
+ - userId int
+ - itemId int
+ - actionTime timestamp
+ indexs:
+ - index2:itemId:actionTime
+ data: |
+ 1, 1, 1000
+ 2, 2, 2000
+ 3, 3, 3000
+ 4, 3, 4000
+
+ - name: items
+ columns:
+ - id int
+ - price float
+ - tm timestamp
+ indexs:
+ - idx:id:tm
+ data: |
+ 1, 99, 1000
+ 1, 599, 4000
+ 2, 199, 3000
+ 3, 399, 5000
+ # userid, itemid, actionTime, id, price, tm
+ # 1, 1, 1000, 1, 599, 4000
+ # 2, 2, 2000, 2, 199, 3000
+ # 3, 3, 3000, 3, 399, 5000
+ # 4, 3, 4000, 3, 399, 5000
+ sql: |
+ select
+ userId,
+ itemId,
+ count(itemId) over w1 as count_1,
+ sum(price) over w1 as total,
+ actionTime
+ from (
+ select * from actions
+ last join items order by tm
+ on actions.itemId = items.id and actionTime <= tm
+ ) window w1 as (
+ partition by itemId
+ order by actionTime
+ rows_range between 3000 preceding and current row
+ );
+ request_plan: |
+ PROJECT(type=Aggregation)
+ JOIN(type=LastJoin, right_sort=(ASC), condition=actionTime <= tm, left_keys=(), right_keys=(), index_keys=(actions.itemId))
+ REQUEST_UNION(partition_keys=(), orders=(ASC), range=(actionTime, 3000 PRECEDING, 0 CURRENT), index_keys=(itemId))
+ DATA_PROVIDER(request=actions)
+ DATA_PROVIDER(type=Partition, table=actions, index=index2)
+ DATA_PROVIDER(type=Partition, table=items, index=idx)
+ expect:
+ order: userId
+ columns:
+ - userId int
+ - itemId int
+ - count_1 int64
+ - total float
+ - actionTime timestamp
+ data: |
+ 1, 1, 1, 599, 1000
+ 2, 2, 1, 199, 2000
+ 3, 3, 1, 399, 3000
+ 4, 3, 2, 798, 4000
diff --git a/cases/query/limit.yaml b/cases/query/limit.yaml
new file mode 100644
index 00000000000..1cfe0b2d1f3
--- /dev/null
+++ b/cases/query/limit.yaml
@@ -0,0 +1,461 @@
+# SQL limit clause
+# Syntax:
+# `LIMIT <count>`
+# where <count> is a non-negative integer literal
+#
+# Supported in conjunction with
+# - where clause
+# - window project
+# - last join
+# - group by
+# - having clause
+#
+# the limit clause may be optimized into its producer node during execution, e.g. for
+# - where clause
+# - group by
+#
+# cases:
+# - limit(table) -> 0
+# - limit(filter) -> 1*
+# - limit(window) -> 2*
+# - limit(last join) -> 3*
+# - limit(group by) -> 4*
+# - limit(group by & having) -> 5*
+# - limit query in subquery: not supported
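+#
+# A minimal illustration of the shape under test (names reused from the cases
+# below; this is a sketch, not a definitive plan):
+# select userId, itemId from actions where itemId = 3 limit 2;
+# in this form the limit may be pushed down into the filter scan, so at most
+# two matching rows need to be read before the result is returned.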
+
+cases:
+ - id: 0-0
+ desc: simple limit over select
+ mode: request-unsupport
+ inputs:
+ - columns:
+ - userId int
+ - itemId int
+ - actionTime timestamp
+ indexs:
+ - index2:itemId:actionTime
+ data: |
+ 1, 1, 1000
+ 2, 2, 2000
+ 3, 3, 3000
+ 4, 3, 4000
+ sql: |
+ select * from {0} limit 2
+ expect:
+ order: userId
+ columns:
+ - userId int
+ - itemId int
+ - actionTime timestamp
+ data: |
+ 3, 3, 3000
+ 4, 3, 4000
+
+ - id: 0-1
+ desc: simple limit over select limit 0
+ mode: request-unsupport
+ inputs:
+ - columns:
+ - userId int
+ - itemId int
+ - actionTime timestamp
+ indexs:
+ - index2:itemId:actionTime
+ data: |
+ 1, 1, 1000
+ 2, 2, 2000
+ 3, 3, 3000
+ 4, 3, 4000
+ sql: |
+ select * from {0} limit 0
+ expect:
+ order: userId
+ columns:
+ - userId int
+ - itemId int
+ - actionTime timestamp
+ data: |
+
+ - id: 1-0
+ mode: request-unsupport
+ desc: limit over filter op, without index hit
+ inputs:
+ - name: actions
+ columns:
+ - userId string
+ - itemId int
+ - actionTime timestamp
+ indexs:
+ - index2:itemId:actionTime
+ data: |
+ a, 1, 1000
+ a, 2, 2000
+ b, 3, 3000
+ b, 3, 3000
+ sql: |
+ select * from actions where itemId != 3 limit 1
+ expect:
+ order: itemId
+ columns:
+ - userId string
+ - itemId int
+ - actionTime timestamp
+ data: |
+ a, 1, 1000
+ - id: 1-1
+ mode: request-unsupport
+ desc: limit over filter op, with index hit
+ inputs:
+ - name: actions
+ columns:
+ - userId string
+ - itemId int
+ - actionTime timestamp
+ indexs:
+ - index2:itemId:actionTime
+ data: |
+ a, 1, 1000
+ a, 2, 2000
+ b, 3, 3000
+ b, 3, 4000
+ sql: |
+ select * from actions where itemId = 3 limit 1
+ expect:
+ order: itemId
+ columns:
+ - userId string
+ - itemId int
+ - actionTime timestamp
+ data: |
+ b, 3, 4000
+
+ - id: 1-2
+ mode: request-unsupport
+ desc: limit over filter op, limit 0
+ inputs:
+ - name: actions
+ columns:
+ - userId string
+ - itemId int
+ - actionTime timestamp
+ indexs:
+ - index2:itemId:actionTime
+ data: |
+ a, 1, 1000
+ a, 2, 2000
+ b, 3, 3000
+ b, 3, 4000
+ sql: |
+ select * from actions where itemId = 3 limit 0
+ expect:
+ order: itemId
+ columns:
+ - userId string
+ - itemId int
+ - actionTime timestamp
+ data: |
+
+ - id: 2
+ mode: request-unsupport
+ desc: |
+ limit (window)
+ inputs:
+ - columns:
+ - userId int
+ - itemId int
+ - actionTime timestamp
+ indexs:
+ - index2:itemId:actionTime
+ data: |
+ 1, 1, 1000
+ 2, 2, 2000
+ 3, 3, 3000
+ 4, 3, 4000
+ sql: |
+ select userId, itemId, min(actionTime) over w as ma from {0}
+ window w as (
+ partition by itemId order by actionTime
+ rows_range between 1s preceding and current row)
+ limit 2
+ expect:
+ order: userId
+ columns:
+ - userId int
+ - itemId int
+ - ma timestamp
+ data: |
+ 3, 3, 3000
+ 4, 3, 3000
+ - id: 2-1
+ mode: request-unsupport
+ desc: |
+ limit (window), without index optimization
+ inputs:
+ - columns:
+ - userId int
+ - itemId int
+ - actionTime timestamp
+ indexs:
+ - index2:itemId:actionTime
+ data: |
+ 1, 1, 1000
+ 2, 2, 2000
+ 3, 3, 3000
+ 4, 3, 4000
+ sql: |
+ select userId, itemId, min(actionTime) over w as ma from {0}
+ window w as (
+ partition by userId order by actionTime
+ rows_range between 1s preceding and current row)
+ limit 2
+ expect:
+ order: userId
+ columns:
+ - userId int
+ - itemId int
+ - ma timestamp
+ data: |
+ 3, 3, 3000
+ 4, 3, 4000
+ - id: 2-2
+ mode: request-unsupport
+ desc: |
+ limit (window), limit 0
+ inputs:
+ - columns:
+ - userId int
+ - itemId int
+ - actionTime timestamp
+ indexs:
+ - index2:itemId:actionTime
+ data: |
+ 1, 1, 1000
+ 2, 2, 2000
+ 3, 3, 3000
+ 4, 3, 4000
+ sql: |
+ select userId, itemId, min(actionTime) over w as ma from {0}
+ window w as (
+ partition by userId order by actionTime
+ rows_range between 1s preceding and current row)
+ limit 0
+ expect:
+ order: userId
+ columns:
+ - userId int
+ - itemId int
+ - ma timestamp
+ data: |
+
+ - id: 3-0
+ mode: request-unsupport
+ desc: |
+ limit (last join), with index optimization
+ inputs:
+ - columns:
+ - userId int
+ - itemId int
+ - actionTime timestamp
+ indexs:
+ - index2:itemId:actionTime
+ data: |
+ 1, 1, 1000
+ 2, 2, 2000
+ 3, 3, 3000
+ 4, 3, 4000
+ - columns:
+ - userId int
+ - val string
+ - createTime timestamp
+ indexs:
+ - idx:userId:createTime
+ data: |
+ 1, a, 1000
+ 2, b, 1000
+ 4, c, 1000
+ sql: |
+ select {0}.userId, {0}.itemId, {1}.val from {0}
+ last join {1} on {0}.userId = {1}.userId
+ limit 2
+ expect:
+ order: userId
+ columns:
+ - userId int
+ - itemId int
+ - val string
+ data: |
+ 3, 3, NULL
+ 4, 3, c
+ - id: 3-1
+ mode: request-unsupport
+ desc: |
+ limit (last join), without index optimization
+ inputs:
+ - columns:
+ - userId int
+ - itemId int
+ - actionTime timestamp
+ indexs:
+ - index2:itemId:actionTime
+ data: |
+ 1, 1, 1000
+ 2, 2, 2000
+ 3, 3, 3000
+ 4, 3, 4000
+ - columns:
+ - id int
+ - userId int
+ - val string
+ - createTime timestamp
+ indexs:
+ - idx:id:createTime
+ data: |
+ 1, 1, a, 1000
+ 2, 2, b, 1000
+ 3, 4, c, 1000
+ sql: |
+ select {0}.userId, {0}.itemId, {1}.val from {0}
+ last join {1} on {0}.userId = {1}.userId
+ limit 2
+ expect:
+ order: userId
+ columns:
+ - userId int
+ - itemId int
+ - val string
+ data: |
+ 3, 3, NULL
+ 4, 3, c
+ - id: 4-0
+ mode: request-unsupport
+ desc: |
+ limit (group by), with index optimization
+ inputs:
+ - columns:
+ - userId int
+ - itemId int
+ - actionTime timestamp
+ indexs:
+ - index2:itemId:actionTime
+ data: |
+ 1, 1, 1000
+ 2, 2, 2000
+ 3, 3, 3000
+ 4, 3, 4000
+ sql: |
+ select itemId, count(userId) as cnt from {0} group by itemId
+ limit 1
+ expect:
+ order: itemId
+ columns:
+ - itemId int
+ - cnt int64
+ data: |
+ 3, 2
+ - id: 4-1
+ mode: request-unsupport
+ desc: |
+ limit (group by), without index optimization
+ inputs:
+ - columns:
+ - userId int
+ - itemId int
+ - actionTime timestamp
+ indexs:
+ - index2:itemId:actionTime
+ data: |
+ 1, 1, 1000
+ 2, 2, 2000
+ 3, 3, 3000
+ 4, 3, 4000
+ sql: |
+ select userId, count(userId) as cnt from {0} group by userId
+ limit 2
+ expect:
+ order: userId
+ columns:
+ - userId int
+ - cnt int64
+ data: |
+ 3, 1
+ 4, 1
+ - id: 4-2
+ mode: request-unsupport
+ desc: |
+ limit (group by), limit 0
+ inputs:
+ - columns:
+ - userId int
+ - itemId int
+ - actionTime timestamp
+ indexs:
+ - index2:itemId:actionTime
+ data: |
+ 1, 1, 1000
+ 2, 2, 2000
+ 3, 3, 3000
+ 4, 3, 4000
+ sql: |
+ select userId, count(userId) as cnt from {0} group by userId
+ limit 0
+ expect:
+ order: userId
+ columns:
+ - userId int
+ - cnt int64
+ data: |
+
+ - id: 5-0
+ mode: request-unsupport
+ desc: |
+ limit (group by & having), with optimization
+ inputs:
+ - columns:
+ - userId int
+ - itemId int
+ - actionTime timestamp
+ indexs:
+ - index2:itemId:actionTime
+ data: |
+ 1, 1, 1000
+ 2, 2, 2000
+ 3, 3, 3000
+ 4, 3, 4000
+ sql: |
+ select itemId, count(userId) as cnt from {0} group by itemId
+ having sum(userId) = 2
+ limit 1
+ expect:
+ order: itemId
+ columns:
+ - itemId int
+ - cnt int64
+ data: |
+ 2, 1
+ - id: 5-1
+ mode: request-unsupport
+ desc: |
+ limit (group by & having), without optimization
+ inputs:
+ - columns:
+ - userId int
+ - itemId int
+ - actionTime timestamp
+ indexs:
+ - index2:itemId:actionTime
+ data: |
+ 1, 1, 1000
+ 2, 2, 2000
+ 3, 3, 3000
+ 4, 3, 4000
+ sql: |
+ select userId, count(userId) as cnt from {0} group by userId
+ having sum(itemId) <= 2 and sum(itemId) > 1
+ limit 2
+ expect:
+ order: userId
+ columns:
+ - userId int
+ - cnt int64
+ data: |
+ 2, 1
diff --git a/cases/query/operator_query.yaml b/cases/query/operator_query.yaml
index 62de6f87981..fda13d4e349 100644
--- a/cases/query/operator_query.yaml
+++ b/cases/query/operator_query.yaml
@@ -17,7 +17,6 @@ debugs:
cases:
- id: 0
desc: 逻辑运算AND
- mode: request
db: db1
sql: select col1, col2, col1 >2 AND col2 > 2 as flag from t1;
inputs:
@@ -37,32 +36,8 @@ cases:
2, 3, false
3, 4, true
4, 5, true
- - id: 1
- desc: 逻辑运算&&
- tags: ["TODO", "zetasql-unsupport"]
- mode: request
- db: db1
- sql: select col1, col2, (col1 >2) && (col2 > 2) as flag from t1;
- inputs:
- - name: t1
- schema: col1:int32, col2:int64
- index: index1:col1:col2
- data: |
- 1, 2
- 2, 3
- 3, 4
- 4, 5
- expect:
- schema: col1:int32, col2:int64, flag:bool
- order: col1
- data: |
- 1, 2, false
- 2, 3, false
- 3, 4, true
- 4, 5, true
- id: 2
desc: 逻辑运算OR
- mode: request
db: db1
sql: select col1, col2, (col1 >2) OR (col2 > 2) as flag from t1;
inputs:
@@ -82,32 +57,8 @@ cases:
2, 3, true
3, 4, true
4, 5, true
- - id: 3
- desc: 逻辑运算||
- tags: ["TODO", "zetasql-unsupport"]
- mode: request
- db: db1
- sql: select col1, col2, (col1 >2) || (col2 > 2) as flag from t1;
- inputs:
- - name: t1
- schema: col1:int32, col2:int64
- index: index1:col1:col2
- data: |
- 1, 2
- 2, 3
- 3, 4
- 4, 5
- expect:
- schema: col1:int32, col2:int64, flag:bool
- order: col1
- data: |
- 1, 2, false
- 2, 3, true
- 3, 4, true
- 4, 5, true
- id: 4
desc: 逻辑运算NOT
- mode: request
db: db1
sql: select col1, col2, NOT ((col1 >2) OR (col2 > 2)) as flag from t1;
inputs:
@@ -129,7 +80,6 @@ cases:
4, 5, false
- id: 5
desc: 逻辑运算!
- mode: request
db: db1
sql: select col1, col2, !((col1 >2) OR (col2 > 2)) as flag from t1;
inputs:
@@ -151,7 +101,6 @@ cases:
4, 5, false
- id: 6
desc: 逻辑运算XOR
- mode: request
db: db1
sql: select col1, col2, (col1 > 2) XOR (col2 > 2) as flag from t1;
inputs:
@@ -174,7 +123,6 @@ cases:
- id: 7
desc: 比较运算<>
- mode: request
db: db1
sql: select col1, col2, col1 <> 2 as flag from t1;
inputs:
@@ -196,46 +144,88 @@ cases:
4, 5, true
- id: 8
- desc: 算术运算DIV
- mode: request
+ desc: arithmetic DIV, integer division
db: db1
- sql: select col1, col2, col2 DIV col1 as div21 from t1;
+ sql: |
+ select
+ col1, col2,
+ col2 DIV col1 as div21,
+ col2 DIV NULL as div3
+ from t1;
inputs:
- name: t1
schema: col1:int32, col2:int64
index: index1:col1:col2
data: |
+ 0, 7
1, 2
2, 3
3, 7
4, 13
+ 5, 0
+ 6, NULL
expect:
- schema: col1:int32, col2:int64, div21:int64
+ schema: col1:int32, col2:int64, div21:int64, div3:int64
order: col1
data: |
- 1, 2, 2
- 2, 3, 1
- 3, 7, 2
- 4, 13, 3
+ 0, 7, NULL, NULL
+ 1, 2, 2, NULL
+ 2, 3, 1, NULL
+ 3, 7, 2, NULL
+ 4, 13, 3, NULL
+ 5, 0, 0, NULL
+ 6, NULL, NULL, NULL
- id: 9
desc: 算术运算MOD
- mode: request
db: db1
- sql: select col1, col2, col2 MOD col1 as mod21 from t1;
+ sql: |
+ select
+ col1, col2,
+ col2 MOD col1 as m21,
+ col3 % col1 as m31,
+ col4 MOD col3 as m43
+ from t1;
inputs:
- name: t1
- schema: col1:int32, col2:int64
+ schema: col1:int32, col2:int64, col3:float, col4:double
index: index1:col1:col2
data: |
- 1, 2
- 2, 3
- 3, 7
- 4, 14
+ 0, 4, 2.0, 3.0
+ 1, 0, 2.0, 3.0
+ 2, 4, NULL, 9.0
+ 3, 9, 9.0, 18.0
+ expect:
+ schema: col1:int32, col2:int64, m21:int64, m31:float, m43:double
+ order: col1
+ data: |
+ 0, 4, NULL, NULL, 1.0
+ 1, 0, 0, 0.0, 1.0
+ 2, 4, 0, NULL, NULL
+ 3, 9, 0, 0.0, 0.0
+ - id: 10
+ desc: arithmetic '/', float division
+ db: db1
+ sql: |
+ select
+ col1, col2,
+ col2 / col1 as div21,
+ col3 / col1 as div31,
+ col4 / col3 as div43
+ from t1;
+ inputs:
+ - name: t1
+ schema: col1:int32, col2:int64, col3:float, col4:double
+ index: index1:col1:col2
+ data: |
+ 0, 4, 2.0, 3.0
+ 1, 0, 2.0, 3.0
+ 2, 4, NULL, 9.0
+ 3, 9, 9.0, 18.0
expect:
- schema: col1:int32, col2:int64, mod21:int64
+ schema: col1:int32, col2:int64, div21:double, div31:double, div43:double
order: col1
data: |
- 1, 2, 0
- 2, 3, 1
- 3, 7, 1
- 4, 14, 2
+ 0, 4, NULL, NULL, 1.5
+ 1, 0, 0.0, 2.0, 1.5
+ 2, 4, 2.0, NULL, NULL
+ 3, 9, 3.0, 3.0, 2.0
diff --git a/cases/query/parameterized_query.yaml b/cases/query/parameterized_query.yaml
index 455f31ac619..b3c58fcf710 100644
--- a/cases/query/parameterized_query.yaml
+++ b/cases/query/parameterized_query.yaml
@@ -13,6 +13,7 @@
# limitations under the License.
db: testdb
debugs: []
+version: 0.5.0
cases:
- id: 0
desc: 带参数的Where条件命中索引
diff --git a/cases/query/udaf_query.yaml b/cases/query/udaf_query.yaml
index 2e4a25e6dec..713832ade95 100644
--- a/cases/query/udaf_query.yaml
+++ b/cases/query/udaf_query.yaml
@@ -138,7 +138,7 @@ cases:
sum(f1) over w as sum,
avg(d1) over w as av
from t1
- window w as (partition by `key1` order by `ts` rows_range between 5s open preceding and 0s preceding maxsize 10);
+ window w as (partition by `key1` order by `ts` rows_range between 5s open preceding and 0s preceding maxsize 10)
expect:
columns:
- id int
diff --git a/demo/Dockerfile b/demo/Dockerfile
index cfa1faf1704..221666164ab 100644
--- a/demo/Dockerfile
+++ b/demo/Dockerfile
@@ -14,7 +14,7 @@ COPY talkingdata-adtracking-fraud-detection /work/talkingdata/
ENV LANG=en_US.UTF-8
ENV SPARK_HOME=/work/openmldb/spark-3.0.0-bin-openmldbspark
-ARG OPENMLDB_VERSION=0.5.0
+ARG OPENMLDB_VERSION=0.6.3
COPY setup_openmldb.sh /
RUN /setup_openmldb.sh "${OPENMLDB_VERSION}" && rm /setup_openmldb.sh
diff --git a/demo/JD-recommendation/README.md b/demo/JD-recommendation/README.md
new file mode 100644
index 00000000000..0018cc8cac7
--- /dev/null
+++ b/demo/JD-recommendation/README.md
@@ -0,0 +1,62 @@
+For full instructions, please refer to https://github.com/4paradigm/OpenMLDB/blob/main/docs/zh/use_case/JD_recommendation.md
+
+# Training
+1. Use OpenMLDB for feature extraction:
+## inside the OpenMLDB docker container
+docker exec -it demo bash
+## initialize the OpenMLDB cluster
+./init.sh
+##create data tables
+/work/openmldb/bin/openmldb --zk_cluster=127.0.0.1:2181 --zk_root_path=/openmldb --role=sql_client < /root/project/create_tables.sql
+##load offline data
+/work/openmldb/bin/openmldb --zk_cluster=127.0.0.1:2181 --zk_root_path=/openmldb --role=sql_client < /root/project/load_data.sql
+echo "show jobs;" | /work/openmldb/bin/openmldb --zk_cluster=127.0.0.1:2181 --zk_root_path=/openmldb --role=sql_client
+##select features
+/work/openmldb/bin/openmldb --zk_cluster=127.0.0.1:2181 --zk_root_path=/openmldb --role=sql_client < /root/project/sync_select_out.sql
+
+2. Process the OpenMLDB output data:
+## outside the OpenMLDB docker container
+conda activate oneflow
+
+cd openmldb_process
+## pass in the directory of the OpenMLDB results
+bash process_JD_out_full.sh $demodir/out/1
+## output data is written to $demodir/openmldb_process/out
+## note the reported output information, especially table_size_array
+
+3. Launch OneFlow DeepFM model training:
+cd oneflow_process/
+## modify the directory, sample size, and table_size_array settings in train_deepfm.sh accordingly (see the example below)
+bash train_deepfm.sh $demodir
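+## for example (illustrative values only): if the processing step reports
+## table_size_array = 11,42,1105, train_deepfm.sh should forward the same
+## comma-separated list to the training script, e.g. --table_size_array "11,42,1105"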
+
+
+# Model Serving
+1. Configure OpenMLDB for online feature extraction:
+## inside the OpenMLDB docker container
+docker exec -it demo bash
+##deploy feature extraction
+/work/openmldb/bin/openmldb --zk_cluster=127.0.0.1:2181 --zk_root_path=/openmldb --role=sql_client < /root/project/deploy.sql
+##load online data
+/work/openmldb/bin/openmldb --zk_cluster=127.0.0.1:2181 --zk_root_path=/openmldb --role=sql_client < /root/project/load_online_data.sql
+
+2. Configure OneFlow for model serving
+## check that config.pbtxt, the model files, and the persistent path are correctly set
+
+3. Start the prediction server and OneFlow serving
+cd openmldb_serving/
+## start prediction server
+./start_predict_server.sh 0.0.0.0:9080
+## start oneflow serving
+## replace $demodir with your demo folder path
+docker run --runtime=nvidia --rm --network=host \
+ -v $demodir/oneflow_process/model:/models \
+ -v /home/gtest/work/oneflow_serving/serving/build/libtriton_oneflow.so:/backends/oneflow/libtriton_oneflow.so \
+ -v /home/gtest/work/oneflow_serving/oneflow/build/liboneflow_cpp/lib/:/mylib \
+ -v $demodir/oneflow_process/persistent:/root/demo/persistent \
+ registry.cn-beijing.aliyuncs.com/oneflow/triton-devel \
+ bash -c 'LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/mylib /opt/tritonserver/bin/tritonserver \
+ --model-repository=/models --backend-directory=/backends'
+
+# Test
+## send data for prediction
+python predict.py
diff --git a/demo/JD-recommendation/create_tables.sql b/demo/JD-recommendation/create_tables.sql
new file mode 100644
index 00000000000..ef40632f465
--- /dev/null
+++ b/demo/JD-recommendation/create_tables.sql
@@ -0,0 +1,8 @@
+ CREATE DATABASE IF NOT EXISTS JD_db;
+ USE JD_db;
+ CREATE TABLE IF NOT EXISTS action(reqId string, eventTime timestamp, ingestionTime timestamp, actionValue int);
+ CREATE TABLE IF NOT EXISTS flattenRequest(reqId string, eventTime timestamp, main_id string, pair_id string, user_id string, sku_id string, time bigint, split_id int, time1 string);
+ CREATE TABLE IF NOT EXISTS bo_user(ingestionTime timestamp, user_id string, age string, sex string, user_lv_cd string, user_reg_tm bigint);
+ CREATE TABLE IF NOT EXISTS bo_action(ingestionTime timestamp, pair_id string, time bigint, model_id string, type string, cate string, br string);
+ CREATE TABLE IF NOT EXISTS bo_product(ingestionTime timestamp, sku_id string, a1 string, a2 string, a3 string, cate string, br string);
+ CREATE TABLE IF NOT EXISTS bo_comment(ingestionTime timestamp, dt bigint, sku_id string, comment_num int, has_bad_comment string, bad_comment_rate float);
diff --git a/demo/JD-recommendation/data/JD_data/action/_SUCCESS b/demo/JD-recommendation/data/JD_data/action/_SUCCESS
new file mode 100644
index 00000000000..e69de29bb2d
diff --git a/demo/JD-recommendation/data/JD_data/action/action.parquet b/demo/JD-recommendation/data/JD_data/action/action.parquet
new file mode 100644
index 00000000000..8fb86292ddc
Binary files /dev/null and b/demo/JD-recommendation/data/JD_data/action/action.parquet differ
diff --git a/demo/JD-recommendation/data/JD_data/bo_action/_SUCCESS b/demo/JD-recommendation/data/JD_data/bo_action/_SUCCESS
new file mode 100644
index 00000000000..e69de29bb2d
diff --git a/demo/JD-recommendation/data/JD_data/bo_action/bo_action.parquet b/demo/JD-recommendation/data/JD_data/bo_action/bo_action.parquet
new file mode 100644
index 00000000000..f77881bc834
Binary files /dev/null and b/demo/JD-recommendation/data/JD_data/bo_action/bo_action.parquet differ
diff --git a/demo/JD-recommendation/data/JD_data/bo_comment/_SUCCESS b/demo/JD-recommendation/data/JD_data/bo_comment/_SUCCESS
new file mode 100644
index 00000000000..e69de29bb2d
diff --git a/demo/JD-recommendation/data/JD_data/bo_comment/bo_comment.parquet b/demo/JD-recommendation/data/JD_data/bo_comment/bo_comment.parquet
new file mode 100644
index 00000000000..3ca15c9cfdd
Binary files /dev/null and b/demo/JD-recommendation/data/JD_data/bo_comment/bo_comment.parquet differ
diff --git a/demo/JD-recommendation/data/JD_data/bo_product/_SUCCESS b/demo/JD-recommendation/data/JD_data/bo_product/_SUCCESS
new file mode 100644
index 00000000000..e69de29bb2d
diff --git a/demo/JD-recommendation/data/JD_data/bo_product/bo_product.parquet b/demo/JD-recommendation/data/JD_data/bo_product/bo_product.parquet
new file mode 100644
index 00000000000..6f25290090a
Binary files /dev/null and b/demo/JD-recommendation/data/JD_data/bo_product/bo_product.parquet differ
diff --git a/demo/JD-recommendation/data/JD_data/bo_user/_SUCCESS b/demo/JD-recommendation/data/JD_data/bo_user/_SUCCESS
new file mode 100644
index 00000000000..e69de29bb2d
diff --git a/demo/JD-recommendation/data/JD_data/bo_user/bo_user.parquet b/demo/JD-recommendation/data/JD_data/bo_user/bo_user.parquet
new file mode 100644
index 00000000000..6c4d3d364c0
Binary files /dev/null and b/demo/JD-recommendation/data/JD_data/bo_user/bo_user.parquet differ
diff --git a/demo/JD-recommendation/data/JD_data/flattenRequest_clean/_SUCCESS b/demo/JD-recommendation/data/JD_data/flattenRequest_clean/_SUCCESS
new file mode 100644
index 00000000000..e69de29bb2d
diff --git a/demo/JD-recommendation/data/JD_data/flattenRequest_clean/flattenRequest_clean.parquet b/demo/JD-recommendation/data/JD_data/flattenRequest_clean/flattenRequest_clean.parquet
new file mode 100644
index 00000000000..7a7379f7e42
Binary files /dev/null and b/demo/JD-recommendation/data/JD_data/flattenRequest_clean/flattenRequest_clean.parquet differ
diff --git a/demo/JD-recommendation/data/JD_data/openmldb_sql.txt b/demo/JD-recommendation/data/JD_data/openmldb_sql.txt
new file mode 100644
index 00000000000..e112358e03e
--- /dev/null
+++ b/demo/JD-recommendation/data/JD_data/openmldb_sql.txt
@@ -0,0 +1,92 @@
+select * from
+(
+select
+ `reqId` as reqId_1,
+ `eventTime` as flattenRequest_eventTime_original_0,
+ `reqId` as flattenRequest_reqId_original_1,
+ `pair_id` as flattenRequest_pair_id_original_24,
+ `sku_id` as flattenRequest_sku_id_original_25,
+ `user_id` as flattenRequest_user_id_original_26,
+ distinct_count(`pair_id`) over flattenRequest_user_id_eventTime_0_10_ as flattenRequest_pair_id_window_unique_count_27,
+ fz_top1_ratio(`pair_id`) over flattenRequest_user_id_eventTime_0_10_ as flattenRequest_pair_id_window_top1_ratio_28,
+ fz_top1_ratio(`pair_id`) over flattenRequest_user_id_eventTime_0s_14d_200 as flattenRequest_pair_id_window_top1_ratio_29,
+ `sku_id` as flattenRequest_sku_id_combine_30,
+ `sku_id` as flattenRequest_sku_id_combine_31,
+ distinct_count(`pair_id`) over flattenRequest_user_id_eventTime_0s_14d_200 as flattenRequest_pair_id_window_unique_count_32,
+ `sku_id` as flattenRequest_sku_id_combine_33,
+ `sku_id` as flattenRequest_sku_id_combine_34,
+ case when !isnull(at(`pair_id`, 0)) over flattenRequest_user_id_eventTime_0_10_ then count_where(`pair_id`, `pair_id` = at(`pair_id`, 0)) over flattenRequest_user_id_eventTime_0_10_ else null end as flattenRequest_pair_id_window_count_35,
+ `user_id` as flattenRequest_user_id_combine_40,
+ dayofweek(timestamp(`eventTime`)) as flattenRequest_eventTime_dayofweek_41,
+ `user_id` as flattenRequest_user_id_combine_42,
+ case when 1 < dayofweek(timestamp(`eventTime`)) and dayofweek(timestamp(`eventTime`)) < 7 then 1 else 0 end as flattenRequest_eventTime_isweekday_43,
+ `user_id` as flattenRequest_user_id_combine_44
+from
+ `flattenRequest`
+ window flattenRequest_user_id_eventTime_0_10_ as (partition by `user_id` order by `eventTime` rows between 10 preceding and 0 preceding),
+ flattenRequest_user_id_eventTime_0s_14d_200 as (partition by `user_id` order by `eventTime` rows_range between 14d preceding and 0s preceding MAXSIZE 200))
+as out0
+last join
+(
+select
+ `flattenRequest`.`reqId` as reqId_3,
+ `action_reqId`.`actionValue` as action_actionValue_multi_direct_2,
+ `bo_product_sku_id`.`a1` as bo_product_a1_multi_direct_3,
+ `bo_product_sku_id`.`a2` as bo_product_a2_multi_direct_4,
+ `bo_product_sku_id`.`a3` as bo_product_a3_multi_direct_5,
+ `bo_product_sku_id`.`br` as bo_product_br_multi_direct_6,
+ `bo_product_sku_id`.`cate` as bo_product_cate_multi_direct_7,
+ `bo_product_sku_id`.`ingestionTime` as bo_product_ingestionTime_multi_direct_8,
+ `bo_user_user_id`.`age` as bo_user_age_multi_direct_9,
+ `bo_user_user_id`.`ingestionTime` as bo_user_ingestionTime_multi_direct_10,
+ `bo_user_user_id`.`sex` as bo_user_sex_multi_direct_11,
+ `bo_user_user_id`.`user_lv_cd` as bo_user_user_lv_cd_multi_direct_12
+from
+ `flattenRequest`
+ last join `action` as `action_reqId` on `flattenRequest`.`reqId` = `action_reqId`.`reqId`
+ last join `bo_product` as `bo_product_sku_id` on `flattenRequest`.`sku_id` = `bo_product_sku_id`.`sku_id`
+ last join `bo_user` as `bo_user_user_id` on `flattenRequest`.`user_id` = `bo_user_user_id`.`user_id`)
+as out1
+on out0.reqId_1 = out1.reqId_3
+last join
+(
+select
+ `reqId` as reqId_14,
+ max(`bad_comment_rate`) over bo_comment_sku_id_ingestionTime_0s_64d_100 as bo_comment_bad_comment_rate_multi_max_13,
+ min(`bad_comment_rate`) over bo_comment_sku_id_ingestionTime_0_10_ as bo_comment_bad_comment_rate_multi_min_14,
+ min(`bad_comment_rate`) over bo_comment_sku_id_ingestionTime_0s_64d_100 as bo_comment_bad_comment_rate_multi_min_15,
+ distinct_count(`comment_num`) over bo_comment_sku_id_ingestionTime_0s_64d_100 as bo_comment_comment_num_multi_unique_count_22,
+ distinct_count(`has_bad_comment`) over bo_comment_sku_id_ingestionTime_0s_64d_100 as bo_comment_has_bad_comment_multi_unique_count_23,
+ fz_topn_frequency(`has_bad_comment`, 3) over bo_comment_sku_id_ingestionTime_0s_64d_100 as bo_comment_has_bad_comment_multi_top3frequency_30,
+ fz_topn_frequency(`comment_num`, 3) over bo_comment_sku_id_ingestionTime_0s_64d_100 as bo_comment_comment_num_multi_top3frequency_33
+from
+ (select `eventTime` as `ingestionTime`, bigint(0) as `dt`, `sku_id` as `sku_id`, int(0) as `comment_num`, '' as `has_bad_comment`, float(0) as `bad_comment_rate`, reqId from `flattenRequest`)
+ window bo_comment_sku_id_ingestionTime_0s_64d_100 as (
+UNION (select `ingestionTime`, `dt`, `sku_id`, `comment_num`, `has_bad_comment`, `bad_comment_rate`, '' as reqId from `bo_comment`) partition by `sku_id` order by `ingestionTime` rows_range between 64d preceding and 0s preceding MAXSIZE 100 INSTANCE_NOT_IN_WINDOW),
+ bo_comment_sku_id_ingestionTime_0_10_ as (
+UNION (select `ingestionTime`, `dt`, `sku_id`, `comment_num`, `has_bad_comment`, `bad_comment_rate`, '' as reqId from `bo_comment`) partition by `sku_id` order by `ingestionTime` rows between 10 preceding and 0 preceding INSTANCE_NOT_IN_WINDOW))
+as out2
+on out0.reqId_1 = out2.reqId_14
+last join
+(
+select
+ `reqId` as reqId_17,
+ fz_topn_frequency(`br`, 3) over bo_action_pair_id_ingestionTime_0s_10h_100 as bo_action_br_multi_top3frequency_16,
+ fz_topn_frequency(`cate`, 3) over bo_action_pair_id_ingestionTime_0s_10h_100 as bo_action_cate_multi_top3frequency_17,
+ fz_topn_frequency(`model_id`, 3) over bo_action_pair_id_ingestionTime_0s_7d_100 as bo_action_model_id_multi_top3frequency_18,
+ distinct_count(`model_id`) over bo_action_pair_id_ingestionTime_0s_14d_100 as bo_action_model_id_multi_unique_count_19,
+ distinct_count(`model_id`) over bo_action_pair_id_ingestionTime_0s_7d_100 as bo_action_model_id_multi_unique_count_20,
+ distinct_count(`type`) over bo_action_pair_id_ingestionTime_0s_14d_100 as bo_action_type_multi_unique_count_21,
+ fz_topn_frequency(`type`, 3) over bo_action_pair_id_ingestionTime_0s_7d_100 as bo_action_type_multi_top3frequency_40,
+ fz_topn_frequency(`type`, 3) over bo_action_pair_id_ingestionTime_0s_14d_100 as bo_action_type_multi_top3frequency_42
+from
+ (select `eventTime` as `ingestionTime`, `pair_id` as `pair_id`, bigint(0) as `time`, '' as `model_id`, '' as `type`, '' as `cate`, '' as `br`, reqId from `flattenRequest`)
+ window bo_action_pair_id_ingestionTime_0s_10h_100 as (
+UNION (select `ingestionTime`, `pair_id`, `time`, `model_id`, `type`, `cate`, `br`, '' as reqId from `bo_action`) partition by `pair_id` order by `ingestionTime` rows_range between 10h preceding and 0s preceding MAXSIZE 100 INSTANCE_NOT_IN_WINDOW),
+ bo_action_pair_id_ingestionTime_0s_7d_100 as (
+UNION (select `ingestionTime`, `pair_id`, `time`, `model_id`, `type`, `cate`, `br`, '' as reqId from `bo_action`) partition by `pair_id` order by `ingestionTime` rows_range between 7d preceding and 0s preceding MAXSIZE 100 INSTANCE_NOT_IN_WINDOW),
+ bo_action_pair_id_ingestionTime_0s_14d_100 as (
+UNION (select `ingestionTime`, `pair_id`, `time`, `model_id`, `type`, `cate`, `br`, '' as reqId from `bo_action`) partition by `pair_id` order by `ingestionTime` rows_range between 14d preceding and 0s preceding MAXSIZE 100 INSTANCE_NOT_IN_WINDOW))
+as out3
+on out0.reqId_1 = out3.reqId_17
+
diff --git a/demo/JD-recommendation/data/JD_data/schema.json b/demo/JD-recommendation/data/JD_data/schema.json
new file mode 100644
index 00000000000..6335d003c3a
--- /dev/null
+++ b/demo/JD-recommendation/data/JD_data/schema.json
@@ -0,0 +1,173 @@
+{
+ "tableInfo": {
+ "action": [
+ {
+ "name": "reqId",
+ "type": "string"
+ },
+ {
+ "name": "eventTime",
+ "type": "timestamp"
+ },
+ {
+ "name": "ingestionTime",
+ "type": "timestamp"
+ },
+ {
+ "name": "actionValue",
+ "type": "int"
+ }
+ ],
+ "flattenRequest": [
+ {
+ "name": "reqId",
+ "type": "string"
+ },
+ {
+ "name": "eventTime",
+ "type": "timestamp"
+ },
+ {
+ "name": "main_id",
+ "type": "string"
+ },
+ {
+ "name": "pair_id",
+ "type": "string"
+ },
+ {
+ "name": "user_id",
+ "type": "string"
+ },
+ {
+ "name": "sku_id",
+ "type": "string"
+ },
+ {
+ "name": "time",
+ "type": "bigint"
+ },
+ {
+ "name": "split_id",
+ "type": "int"
+ },
+ {
+ "name": "time1",
+ "type": "string"
+ }
+ ],
+ "bo_user": [
+ {
+ "name": "ingestionTime",
+ "type": "timestamp"
+ },
+ {
+ "name": "user_id",
+ "type": "string"
+ },
+ {
+ "name": "age",
+ "type": "string"
+ },
+ {
+ "name": "sex",
+ "type": "string"
+ },
+ {
+ "name": "user_lv_cd",
+ "type": "string"
+ },
+ {
+ "name": "user_reg_tm",
+ "type": "bigint"
+ }
+ ],
+ "bo_action": [
+ {
+ "name": "ingestionTime",
+ "type": "timestamp"
+ },
+ {
+ "name": "pair_id",
+ "type": "string"
+ },
+ {
+ "name": "time",
+ "type": "bigint"
+ },
+ {
+ "name": "model_id",
+ "type": "string"
+ },
+ {
+ "name": "type",
+ "type": "string"
+ },
+ {
+ "name": "cate",
+ "type": "string"
+ },
+ {
+ "name": "br",
+ "type": "string"
+ }
+ ],
+ "bo_product": [
+ {
+ "name": "ingestionTime",
+ "type": "timestamp"
+ },
+ {
+ "name": "sku_id",
+ "type": "string"
+ },
+ {
+ "name": "a1",
+ "type": "string"
+ },
+ {
+ "name": "a2",
+ "type": "string"
+ },
+ {
+ "name": "a3",
+ "type": "string"
+ },
+ {
+ "name": "cate",
+ "type": "string"
+ },
+ {
+ "name": "br",
+ "type": "string"
+ }
+ ],
+ "bo_comment": [
+ {
+ "name": "ingestionTime",
+ "type": "timestamp"
+ },
+ {
+ "name": "dt",
+ "type": "bigint"
+ },
+ {
+ "name": "sku_id",
+ "type": "string"
+ },
+ {
+ "name": "comment_num",
+ "type": "int"
+ },
+ {
+ "name": "has_bad_comment",
+ "type": "string"
+ },
+ {
+ "name": "bad_comment_rate",
+ "type": "float"
+ }
+ ]
+ }
+}
+
diff --git a/demo/JD-recommendation/deploy.sql b/demo/JD-recommendation/deploy.sql
new file mode 100644
index 00000000000..3bb2586d6e7
--- /dev/null
+++ b/demo/JD-recommendation/deploy.sql
@@ -0,0 +1,85 @@
+USE JD_db;
+deploy demo select * from
+(
+select
+`reqId` as reqId_1,
+`eventTime` as flattenRequest_eventTime_original_0,
+`reqId` as flattenRequest_reqId_original_1,
+`pair_id` as flattenRequest_pair_id_original_24,
+`sku_id` as flattenRequest_sku_id_original_25,
+`user_id` as flattenRequest_user_id_original_26,
+distinct_count(`pair_id`) over flattenRequest_user_id_eventTime_0_10_ as flattenRequest_pair_id_window_unique_count_27,
+fz_top1_ratio(`pair_id`) over flattenRequest_user_id_eventTime_0_10_ as flattenRequest_pair_id_window_top1_ratio_28,
+fz_top1_ratio(`pair_id`) over flattenRequest_user_id_eventTime_0s_14d_200 as flattenRequest_pair_id_window_top1_ratio_29,
+distinct_count(`pair_id`) over flattenRequest_user_id_eventTime_0s_14d_200 as flattenRequest_pair_id_window_unique_count_32,
+case when !isnull(at(`pair_id`, 0)) over flattenRequest_user_id_eventTime_0_10_ then count_where(`pair_id`, `pair_id` = at(`pair_id`, 0)) over flattenRequest_user_id_eventTime_0_10_ else null end as flattenRequest_pair_id_window_count_35,
+dayofweek(timestamp(`eventTime`)) as flattenRequest_eventTime_dayofweek_41,
+case when 1 < dayofweek(timestamp(`eventTime`)) and dayofweek(timestamp(`eventTime`)) < 7 then 1 else 0 end as flattenRequest_eventTime_isweekday_43
+from
+`flattenRequest`
+window flattenRequest_user_id_eventTime_0_10_ as (partition by `user_id` order by `eventTime` rows between 10 preceding and 0 preceding),
+flattenRequest_user_id_eventTime_0s_14d_200 as (partition by `user_id` order by `eventTime` rows_range between 14d preceding and 0s preceding MAXSIZE 200))
+as out0
+last join
+(
+select
+`flattenRequest`.`reqId` as reqId_3,
+`action_reqId`.`actionValue` as action_actionValue_multi_direct_2,
+`bo_product_sku_id`.`a1` as bo_product_a1_multi_direct_3,
+`bo_product_sku_id`.`a2` as bo_product_a2_multi_direct_4,
+`bo_product_sku_id`.`a3` as bo_product_a3_multi_direct_5,
+`bo_product_sku_id`.`br` as bo_product_br_multi_direct_6,
+`bo_product_sku_id`.`cate` as bo_product_cate_multi_direct_7,
+`bo_product_sku_id`.`ingestionTime` as bo_product_ingestionTime_multi_direct_8,
+`bo_user_user_id`.`age` as bo_user_age_multi_direct_9,
+`bo_user_user_id`.`ingestionTime` as bo_user_ingestionTime_multi_direct_10,
+`bo_user_user_id`.`sex` as bo_user_sex_multi_direct_11,
+`bo_user_user_id`.`user_lv_cd` as bo_user_user_lv_cd_multi_direct_12
+from
+`flattenRequest`
+last join `action` as `action_reqId` on `flattenRequest`.`reqId` = `action_reqId`.`reqId`
+last join `bo_product` as `bo_product_sku_id` on `flattenRequest`.`sku_id` = `bo_product_sku_id`.`sku_id`
+last join `bo_user` as `bo_user_user_id` on `flattenRequest`.`user_id` = `bo_user_user_id`.`user_id`)
+as out1
+on out0.reqId_1 = out1.reqId_3
+last join
+(
+select
+`reqId` as reqId_14,
+max(`bad_comment_rate`) over bo_comment_sku_id_ingestionTime_0s_64d_100 as bo_comment_bad_comment_rate_multi_max_13,
+min(`bad_comment_rate`) over bo_comment_sku_id_ingestionTime_0_10_ as bo_comment_bad_comment_rate_multi_min_14,
+min(`bad_comment_rate`) over bo_comment_sku_id_ingestionTime_0s_64d_100 as bo_comment_bad_comment_rate_multi_min_15,
+distinct_count(`comment_num`) over bo_comment_sku_id_ingestionTime_0s_64d_100 as bo_comment_comment_num_multi_unique_count_22,
+distinct_count(`has_bad_comment`) over bo_comment_sku_id_ingestionTime_0s_64d_100 as bo_comment_has_bad_comment_multi_unique_count_23,
+fz_topn_frequency(`has_bad_comment`, 3) over bo_comment_sku_id_ingestionTime_0s_64d_100 as bo_comment_has_bad_comment_multi_top3frequency_30,
+fz_topn_frequency(`comment_num`, 3) over bo_comment_sku_id_ingestionTime_0s_64d_100 as bo_comment_comment_num_multi_top3frequency_33
+from
+(select `eventTime` as `ingestionTime`, bigint(0) as `dt`, `sku_id` as `sku_id`, int(0) as `comment_num`, '' as `has_bad_comment`, float(0) as `bad_comment_rate`, reqId from `flattenRequest`)
+window bo_comment_sku_id_ingestionTime_0s_64d_100 as (
+UNION (select `ingestionTime`, `dt`, `sku_id`, `comment_num`, `has_bad_comment`, `bad_comment_rate`, '' as reqId from `bo_comment`) partition by `sku_id` order by `ingestionTime` rows_range between 64d preceding and 0s preceding MAXSIZE 100 INSTANCE_NOT_IN_WINDOW),
+bo_comment_sku_id_ingestionTime_0_10_ as (
+UNION (select `ingestionTime`, `dt`, `sku_id`, `comment_num`, `has_bad_comment`, `bad_comment_rate`, '' as reqId from `bo_comment`) partition by `sku_id` order by `ingestionTime` rows between 10 preceding and 0 preceding INSTANCE_NOT_IN_WINDOW))
+as out2
+on out0.reqId_1 = out2.reqId_14
+last join
+(
+select
+`reqId` as reqId_17,
+fz_topn_frequency(`br`, 3) over bo_action_pair_id_ingestionTime_0s_10h_100 as bo_action_br_multi_top3frequency_16,
+fz_topn_frequency(`cate`, 3) over bo_action_pair_id_ingestionTime_0s_10h_100 as bo_action_cate_multi_top3frequency_17,
+fz_topn_frequency(`model_id`, 3) over bo_action_pair_id_ingestionTime_0s_7d_100 as bo_action_model_id_multi_top3frequency_18,
+distinct_count(`model_id`) over bo_action_pair_id_ingestionTime_0s_14d_100 as bo_action_model_id_multi_unique_count_19,
+distinct_count(`model_id`) over bo_action_pair_id_ingestionTime_0s_7d_100 as bo_action_model_id_multi_unique_count_20,
+distinct_count(`type`) over bo_action_pair_id_ingestionTime_0s_14d_100 as bo_action_type_multi_unique_count_21,
+fz_topn_frequency(`type`, 3) over bo_action_pair_id_ingestionTime_0s_7d_100 as bo_action_type_multi_top3frequency_40,
+fz_topn_frequency(`type`, 3) over bo_action_pair_id_ingestionTime_0s_14d_100 as bo_action_type_multi_top3frequency_42
+from
+(select `eventTime` as `ingestionTime`, `pair_id` as `pair_id`, bigint(0) as `time`, '' as `model_id`, '' as `type`, '' as `cate`, '' as `br`, reqId from `flattenRequest`)
+window bo_action_pair_id_ingestionTime_0s_10h_100 as (
+UNION (select `ingestionTime`, `pair_id`, `time`, `model_id`, `type`, `cate`, `br`, '' as reqId from `bo_action`) partition by `pair_id` order by `ingestionTime` rows_range between 10h preceding and 0s preceding MAXSIZE 100 INSTANCE_NOT_IN_WINDOW),
+bo_action_pair_id_ingestionTime_0s_7d_100 as (
+UNION (select `ingestionTime`, `pair_id`, `time`, `model_id`, `type`, `cate`, `br`, '' as reqId from `bo_action`) partition by `pair_id` order by `ingestionTime` rows_range between 7d preceding and 0s preceding MAXSIZE 100 INSTANCE_NOT_IN_WINDOW),
+bo_action_pair_id_ingestionTime_0s_14d_100 as (
+UNION (select `ingestionTime`, `pair_id`, `time`, `model_id`, `type`, `cate`, `br`, '' as reqId from `bo_action`) partition by `pair_id` order by `ingestionTime` rows_range between 14d preceding and 0s preceding MAXSIZE 100 INSTANCE_NOT_IN_WINDOW))
+as out3
+on out0.reqId_1 = out3.reqId_17;
diff --git a/demo/JD-recommendation/load_data.sql b/demo/JD-recommendation/load_data.sql
new file mode 100644
index 00000000000..ad55e04c19d
--- /dev/null
+++ b/demo/JD-recommendation/load_data.sql
@@ -0,0 +1,9 @@
+USE JD_db;
+SET @@job_timeout=600000;
+SET @@execute_mode='offline';
+LOAD DATA INFILE '/root/project/data/JD_data/action/*.parquet' INTO TABLE action options(format='parquet', header=true, mode='overwrite');
+LOAD DATA INFILE '/root/project/data/JD_data/flattenRequest_clean/*.parquet' INTO TABLE flattenRequest options(format='parquet', header=true, mode='overwrite');
+LOAD DATA INFILE '/root/project/data/JD_data/bo_user/*.parquet' INTO TABLE bo_user options(format='parquet', header=true, mode='overwrite');
+LOAD DATA INFILE '/root/project/data/JD_data/bo_action/*.parquet' INTO TABLE bo_action options(format='parquet', header=true, mode='overwrite');
+LOAD DATA INFILE '/root/project/data/JD_data/bo_product/*.parquet' INTO TABLE bo_product options(format='parquet', header=true, mode='overwrite');
+LOAD DATA INFILE '/root/project/data/JD_data/bo_comment/*.parquet' INTO TABLE bo_comment options(format='parquet', header=true, mode='overwrite');
diff --git a/demo/JD-recommendation/load_online_data.sql b/demo/JD-recommendation/load_online_data.sql
new file mode 100644
index 00000000000..ec4f905e664
--- /dev/null
+++ b/demo/JD-recommendation/load_online_data.sql
@@ -0,0 +1,9 @@
+USE JD_db;
+SET @@job_timeout=600000;
+SET @@execute_mode='online';
+LOAD DATA INFILE '/root/project/data/JD_data/action/*.parquet' INTO TABLE action options(format='parquet', header=true, mode='append');
+LOAD DATA INFILE '/root/project/data/JD_data/flattenRequest_clean/*.parquet' INTO TABLE flattenRequest options(format='parquet', header=true, mode='append');
+LOAD DATA INFILE '/root/project/data/JD_data/bo_user/*.parquet' INTO TABLE bo_user options(format='parquet', header=true, mode='append');
+LOAD DATA INFILE '/root/project/data/JD_data/bo_action/*.parquet' INTO TABLE bo_action options(format='parquet', header=true, mode='append');
+LOAD DATA INFILE '/root/project/data/JD_data/bo_product/*.parquet' INTO TABLE bo_product options(format='parquet', header=true, mode='append');
+LOAD DATA INFILE '/root/project/data/JD_data/bo_comment/*.parquet' INTO TABLE bo_comment options(format='parquet', header=true, mode='append');
diff --git a/demo/JD-recommendation/oneflow_process/deepfm_train_eval_JD.py b/demo/JD-recommendation/oneflow_process/deepfm_train_eval_JD.py
new file mode 100644
index 00000000000..466a881c8c9
--- /dev/null
+++ b/demo/JD-recommendation/oneflow_process/deepfm_train_eval_JD.py
@@ -0,0 +1,718 @@
+import argparse
+import os
+import sys
+import glob
+import time
+import math
+import numpy as np
+import psutil
+import oneflow as flow
+import oneflow.nn as nn
+from petastorm.reader import make_batch_reader
+
+sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), os.path.pardir)))
+
+
+def get_args(print_args=True):
+ def int_list(x):
+ return list(map(int, x.split(",")))
+
+ def str_list(x):
+ return list(map(str, x.split(",")))
+
+ parser = argparse.ArgumentParser()
+
+ parser.add_argument("--data_dir", type=str, required=True)
+ parser.add_argument(
+ "--num_train_samples", type=int, required=True, help="the number of train samples",
+ )
+ parser.add_argument(
+ "--num_val_samples", type=int, required=True, help="the number of validation samples",
+ )
+ parser.add_argument(
+ "--num_test_samples", type=int, required=True, help="the number of test samples"
+ )
+
+ parser.add_argument("--model_load_dir", type=str, default=None, help="model loading directory")
+ parser.add_argument("--model_save_dir", type=str, default=None, help="model saving directory")
+ parser.add_argument(
+ "--save_initial_model", action="store_true", help="save initial model parameters or not",
+ )
+ parser.add_argument(
+ "--save_model_after_each_eval",
+ action="store_true",
+ help="save model after each eval or not",
+ )
+
+ parser.add_argument("--embedding_vec_size", type=int, default=16, help="embedding vector size")
+ parser.add_argument(
+ "--dnn", type=int_list, default="1000,1000,1000,1000,1000", help="dnn hidden units number",
+ )
+ parser.add_argument("--net_dropout", type=float, default=0.2, help="net dropout rate")
+ parser.add_argument("--disable_fusedmlp", action="store_true", help="disable fused MLP or not")
+
+ parser.add_argument("--lr_factor", type=float, default=0.1)
+ parser.add_argument("--min_lr", type=float, default=1.0e-6)
+ parser.add_argument("--learning_rate", type=float, default=0.001, help="learning rate")
+
+ parser.add_argument(
+ "--batch_size", type=int, default=10000, help="training/evaluation batch size"
+ )
+ parser.add_argument(
+ "--train_batches", type=int, default=75000, help="the maximum number of training batches",
+ )
+ parser.add_argument("--loss_print_interval", type=int, default=100, help="")
+
+ parser.add_argument(
+ "--patience",
+ type=int,
+ default=2,
+ help="number of epochs with no improvement after which learning rate will be reduced",
+ )
+ parser.add_argument(
+ "--min_delta",
+ type=float,
+ default=1.0e-6,
+ help="threshold for measuring the new optimum, to only focus on significant changes",
+ )
+
+ parser.add_argument(
+ "--table_size_array",
+ type=int_list,
+ help="embedding table size array for sparse fields",
+ required=True,
+ )
+ parser.add_argument(
+ "--persistent_path", type=str, required=True, help="path for persistent kv store",
+ )
+ parser.add_argument(
+ "--store_type",
+ type=str,
+ default="cached_host_mem",
+ help="OneEmbeddig persistent kv store type: device_mem, cached_host_mem, cached_ssd",
+ )
+ parser.add_argument(
+ "--cache_memory_budget_mb",
+ type=int,
+ default=1024,
+ help="size of cache memory budget on each device in megabytes when store_type is cached_host_mem or cached_ssd",
+ )
+
+ parser.add_argument(
+ "--amp", action="store_true", help="enable Automatic Mixed Precision(AMP) training or not",
+ )
+ parser.add_argument("--loss_scale_policy", type=str, default="static", help="static or dynamic")
+
+ parser.add_argument(
+ "--disable_early_stop", action="store_true", help="enable early stop or not"
+ )
+ parser.add_argument("--save_best_model", action="store_true", help="save best model or not")
+ parser.add_argument(
+ "--save_graph_for_serving",
+ action="store_true",
+ help="Save Graph and OneEmbedding for serving. ",
+ )
+ parser.add_argument(
+ "--model_serving_path", type=str, required=True, help="Graph object path for model serving",
+ )
+ args = parser.parse_args()
+
+ if print_args and flow.env.get_rank() == 0:
+ _print_args(args)
+ return args
+
+
+def _print_args(args):
+ """Print arguments."""
+ print("------------------------ arguments ------------------------", flush=True)
+ str_list = []
+ for arg in vars(args):
+ dots = "." * (48 - len(arg))
+ str_list.append(" {} {} {}".format(arg, dots, getattr(args, arg)))
+ for arg in sorted(str_list, key=lambda x: x.lower()):
+ print(arg, flush=True)
+ print("-------------------- end of arguments ---------------------", flush=True)
+
+
+num_dense_fields = 13
+num_sparse_fields = 28
+
+
+class DeepFMDataReader(object):
+ """A context manager that manages the creation and termination of a
+ :class:`petastorm.Reader`.
+ """
+
+ def __init__(
+ self,
+ parquet_file_url_list,
+ batch_size,
+ num_epochs=1,
+ shuffle_row_groups=True,
+ shard_seed=2019,
+ shard_count=1,
+ cur_shard=0,
+ ):
+ self.parquet_file_url_list = parquet_file_url_list
+ self.batch_size = batch_size
+ self.num_epochs = num_epochs
+ self.shuffle_row_groups = shuffle_row_groups
+ self.shard_seed = shard_seed
+ self.shard_count = shard_count
+ self.cur_shard = cur_shard
+
+ fields = ["Label"]
+ fields += [f"I{i+1}" for i in range(num_dense_fields)]
+ fields += [f"C{i+1}" for i in range(num_sparse_fields)]
+ self.fields = fields
+ self.num_fields = len(fields)
+
+ def __enter__(self):
+ self.reader = make_batch_reader(
+ self.parquet_file_url_list,
+ workers_count=2,
+ shuffle_row_groups=self.shuffle_row_groups,
+ num_epochs=self.num_epochs,
+ shard_seed=self.shard_seed,
+ shard_count=self.shard_count,
+ cur_shard=self.cur_shard,
+ )
+ self.loader = self.get_batches(self.reader)
+ return self.loader
+
+ def __exit__(self, exc_type, exc_value, exc_traceback):
+ self.reader.stop()
+ self.reader.join()
+
+ def get_batches(self, reader, batch_size=None):
+ if batch_size is None:
+ batch_size = self.batch_size
+
+ tail = None
+
+ for rg in reader:
+ rgdict = rg._asdict()
+ rglist = [rgdict[field] for field in self.fields]
+ pos = 0
+ if tail is not None:
+ pos = batch_size - len(tail[0])
+ tail = list(
+ [
+ np.concatenate((tail[i], rglist[i][0 : (batch_size - len(tail[i]))]))
+ for i in range(self.num_fields)
+ ]
+ )
+ if len(tail[0]) == batch_size:
+ label = tail[0]
+ features = tail[1 : self.num_fields]
+ tail = None
+ yield label, np.stack(features, axis=-1)
+ else:
+ pos = 0
+ continue
+ while (pos + batch_size) <= len(rglist[0]):
+ label = rglist[0][pos : pos + batch_size]
+ features = [rglist[j][pos : pos + batch_size] for j in range(1, self.num_fields)]
+ pos += batch_size
+ yield label, np.stack(features, axis=-1)
+ if pos != len(rglist[0]):
+ tail = [rglist[i][pos:] for i in range(self.num_fields)]
+
+
+def make_criteo_dataloader(data_path, batch_size, shuffle=True):
+ """Make a Criteo Parquet DataLoader.
+    :return: a context manager; when the context manager exits, the reader will be closed.
+ """
+ files = ["file://" + name for name in glob.glob(f"{data_path}/*.parquet")]
+ files.sort()
+
+ world_size = flow.env.get_world_size()
+ batch_size_per_proc = batch_size // world_size
+
+ return DeepFMDataReader(
+ files,
+ batch_size_per_proc,
+ None, # TODO: iterate over all eval dataset
+ shuffle_row_groups=shuffle,
+ shard_seed=2019,
+ shard_count=world_size,
+ cur_shard=flow.env.get_rank(),
+ )
+
+
+class OneEmbedding(nn.Module):
+ def __init__(
+ self,
+ table_name,
+ embedding_vec_size,
+ persistent_path,
+ table_size_array,
+ store_type,
+ cache_memory_budget_mb,
+ size_factor,
+ ):
+ assert table_size_array is not None
+ vocab_size = sum(table_size_array)
+
+ tables = [
+ flow.one_embedding.make_table_options(
+ [
+ flow.one_embedding.make_column_options(
+ flow.one_embedding.make_normal_initializer(mean=0, std=1e-4)
+ ),
+ flow.one_embedding.make_column_options(
+ flow.one_embedding.make_normal_initializer(mean=0, std=1e-4)
+ ),
+ ]
+ )
+ for _ in range(len(table_size_array))
+ ]
+
+ if store_type == "device_mem":
+ store_options = flow.one_embedding.make_device_mem_store_options(
+ persistent_path=persistent_path, capacity=vocab_size, size_factor=size_factor,
+ )
+ elif store_type == "cached_host_mem":
+ assert cache_memory_budget_mb > 0
+ store_options = flow.one_embedding.make_cached_host_mem_store_options(
+ cache_budget_mb=cache_memory_budget_mb,
+ persistent_path=persistent_path,
+ capacity=vocab_size,
+ size_factor=size_factor,
+ )
+ elif store_type == "cached_ssd":
+ assert cache_memory_budget_mb > 0
+ store_options = flow.one_embedding.make_cached_ssd_store_options(
+ cache_budget_mb=cache_memory_budget_mb,
+ persistent_path=persistent_path,
+ capacity=vocab_size,
+ size_factor=size_factor,
+ )
+ else:
+            raise NotImplementedError(f"unsupported store_type: {store_type}")
+
+ super(OneEmbedding, self).__init__()
+ self.one_embedding = flow.one_embedding.MultiTableMultiColumnEmbedding(
+ name=table_name,
+ embedding_dim=embedding_vec_size,
+ dtype=flow.float,
+ key_type=flow.int64,
+ tables=tables,
+ store_options=store_options,
+ )
+
+ def forward(self, ids):
+ return self.one_embedding.forward(ids)
+
+
+class DNN(nn.Module):
+ def __init__(
+ self,
+ in_features,
+ hidden_units,
+ out_features,
+ skip_final_activation=False,
+ dropout=0.0,
+ fused=True,
+ ) -> None:
+ super(DNN, self).__init__()
+ if fused:
+ self.dropout_rates = [dropout] * len(hidden_units)
+ self.linear_layers = nn.FusedMLP(
+ in_features,
+ hidden_units,
+ out_features,
+ self.dropout_rates,
+ 0.0,
+ skip_final_activation,
+ )
+ else:
+ denses = []
+ dropout_rates = [dropout] * len(hidden_units) + [0.0]
+ use_relu = [True] * len(hidden_units) + [not skip_final_activation]
+ hidden_units = [in_features] + hidden_units + [out_features]
+ for idx in range(len(hidden_units) - 1):
+ denses.append(nn.Linear(hidden_units[idx], hidden_units[idx + 1], bias=True))
+ if use_relu[idx]:
+ denses.append(nn.ReLU())
+ if dropout_rates[idx] > 0:
+ denses.append(nn.Dropout(p=dropout_rates[idx]))
+ self.linear_layers = nn.Sequential(*denses)
+
+ for name, param in self.linear_layers.named_parameters():
+ if "weight" in name:
+ nn.init.xavier_normal_(param)
+ elif "bias" in name:
+ param.data.fill_(0.0)
+
+ def forward(self, x: flow.Tensor) -> flow.Tensor:
+ return self.linear_layers(x)
+
+
+def interaction(embedded_x: flow.Tensor) -> flow.Tensor:
+ return flow._C.fused_dot_feature_interaction([embedded_x], pooling="sum")
+
+
+class DeepFMModule(nn.Module):
+ def __init__(
+ self,
+ embedding_vec_size=128,
+ dnn=[1024, 1024, 512, 256],
+ use_fusedmlp=True,
+ persistent_path=None,
+ table_size_array=None,
+ one_embedding_store_type="cached_host_mem",
+ cache_memory_budget_mb=8192,
+ dropout=0.2,
+ ):
+ super(DeepFMModule, self).__init__()
+
+ self.embedding_vec_size = embedding_vec_size
+
+ self.embedding_layer = OneEmbedding(
+ table_name="sparse_embedding",
+ embedding_vec_size=[embedding_vec_size, 1],
+ persistent_path=persistent_path,
+ table_size_array=table_size_array,
+ store_type=one_embedding_store_type,
+ cache_memory_budget_mb=cache_memory_budget_mb,
+ size_factor=3,
+ )
+
+ self.dnn_layer = DNN(
+ in_features=embedding_vec_size * (num_dense_fields + num_sparse_fields),
+ hidden_units=dnn,
+ out_features=1,
+ skip_final_activation=True,
+ dropout=dropout,
+ fused=use_fusedmlp,
+ )
+
+ def forward(self, inputs) -> flow.Tensor:
+ multi_embedded_x = self.embedding_layer(inputs)
+ embedded_x = multi_embedded_x[:, :, 0 : self.embedding_vec_size]
+ lr_embedded_x = multi_embedded_x[:, :, -1]
+
+ # FM
+ lr_out = flow.sum(lr_embedded_x, dim=1, keepdim=True)
+ dot_sum = interaction(embedded_x)
+ fm_pred = lr_out + dot_sum
+
+ # DNN
+ dnn_pred = self.dnn_layer(embedded_x.flatten(start_dim=1))
+
+ return fm_pred + dnn_pred
+
+
+def make_deepfm_module(args):
+ model = DeepFMModule(
+ embedding_vec_size=args.embedding_vec_size,
+ dnn=args.dnn,
+ use_fusedmlp=not args.disable_fusedmlp,
+ persistent_path=args.persistent_path,
+ table_size_array=args.table_size_array,
+ one_embedding_store_type=args.store_type,
+ cache_memory_budget_mb=args.cache_memory_budget_mb,
+ dropout=args.net_dropout,
+ )
+ return model
+
+
+class DeepFMValGraph(flow.nn.Graph):
+ def __init__(self, deepfm_module, amp=False):
+ super(DeepFMValGraph, self).__init__()
+ self.module = deepfm_module
+ if amp:
+ self.config.enable_amp(True)
+
+ def build(self, features):
+ predicts = self.module(features.to("cuda"))
+ return predicts.sigmoid()
+
+
+class DeepFMTrainGraph(flow.nn.Graph):
+ def __init__(
+ self, deepfm_module, loss, optimizer, grad_scaler=None, amp=False, lr_scheduler=None,
+ ):
+ super(DeepFMTrainGraph, self).__init__()
+ self.module = deepfm_module
+ self.loss = loss
+ self.add_optimizer(optimizer, lr_sch=lr_scheduler)
+ self.config.allow_fuse_model_update_ops(True)
+ self.config.allow_fuse_add_to_output(True)
+ self.config.allow_fuse_cast_scale(True)
+ if amp:
+ self.config.enable_amp(True)
+ self.set_grad_scaler(grad_scaler)
+
+ def build(self, labels, features):
+ logits = self.module(features.to("cuda"))
+ loss = self.loss(logits, labels.to("cuda"))
+ loss.backward()
+ return loss.to("cpu")
+
+
+def make_lr_scheduler(args, optimizer):
+ batches_per_epoch = math.ceil(args.num_train_samples / args.batch_size)
+ milestones = [
+ batches_per_epoch * (i + 1)
+ for i in range(math.floor(math.log(args.min_lr / args.learning_rate, args.lr_factor)))
+ ]
+ multistep_lr = flow.optim.lr_scheduler.MultiStepLR(
+ optimizer=optimizer, milestones=milestones, gamma=args.lr_factor,
+ )
+
+ return multistep_lr
+
+
+def get_metrics(logs):
+ kv = {"auc": 1, "logloss": -1}
+ monitor_value = 0
+ for k, v in kv.items():
+ monitor_value += logs.get(k, 0) * v
+ return monitor_value
+
+
+def early_stop(epoch, monitor_value, best_metric, stopping_steps, patience=2, min_delta=1e-6):
+ rank = flow.env.get_rank()
+ stop_training = False
+ save_best = False
+ if monitor_value < best_metric + min_delta:
+ stopping_steps += 1
+ if rank == 0:
+ print("Monitor(max) STOP: {:.6f}!".format(monitor_value))
+ else:
+ stopping_steps = 0
+ best_metric = monitor_value
+ save_best = True
+ if stopping_steps >= patience:
+ stop_training = True
+ if rank == 0:
+ print(f"Early stopping at epoch={epoch}!")
+ return stop_training, best_metric, stopping_steps, save_best
+
+
+def train(args):
+ rank = flow.env.get_rank()
+
+ deepfm_module = make_deepfm_module(args)
+ deepfm_module.to_global(flow.env.all_device_placement("cuda"), flow.sbp.broadcast)
+
+ def load_model(dir):
+ if rank == 0:
+ print(f"Loading model from {dir}")
+ if os.path.exists(dir):
+ state_dict = flow.load(dir, global_src_rank=0)
+ deepfm_module.load_state_dict(state_dict, strict=False)
+ else:
+ if rank == 0:
+ print(f"Loading model from {dir} failed: invalid path")
+
+ if args.model_load_dir:
+ load_model(args.model_load_dir)
+
+ def save_model(subdir):
+ if not args.model_save_dir:
+ return
+ save_path = os.path.join(args.model_save_dir, subdir)
+ if rank == 0:
+ print(f"Saving model to {save_path}")
+ state_dict = deepfm_module.state_dict()
+ flow.save(state_dict, save_path, global_dst_rank=0)
+
+ if args.save_initial_model:
+ save_model("initial_checkpoint")
+
+ # TODO: clip gradient norm
+ opt = flow.optim.Adam(deepfm_module.parameters(), lr=args.learning_rate)
+ lr_scheduler = make_lr_scheduler(args, opt)
+ loss = flow.nn.BCEWithLogitsLoss(reduction="mean").to("cuda")
+
+ if args.loss_scale_policy == "static":
+ grad_scaler = flow.amp.StaticGradScaler(1024)
+ else:
+ grad_scaler = flow.amp.GradScaler(
+ init_scale=1073741824, growth_factor=2.0, backoff_factor=0.5, growth_interval=2000,
+ )
+
+ eval_graph = DeepFMValGraph(deepfm_module, args.amp)
+ train_graph = DeepFMTrainGraph(
+ deepfm_module, loss, opt, grad_scaler, args.amp, lr_scheduler=lr_scheduler
+ )
+
+ batches_per_epoch = math.ceil(args.num_train_samples / args.batch_size)
+
+ best_metric = -np.inf
+ stopping_steps = 0
+ save_best = False
+ stop_training = False
+
+ cached_eval_batches = prefetch_eval_batches(
+ f"{args.data_dir}/val", args.batch_size, math.ceil(args.num_val_samples / args.batch_size),
+ )
+
+ deepfm_module.train()
+ epoch = 0
+ with make_criteo_dataloader(f"{args.data_dir}/train", args.batch_size) as loader:
+ step, last_step, last_time = -1, 0, time.time()
+ for step in range(1, args.train_batches + 1):
+ labels, features = batch_to_global(*next(loader))
+ loss = train_graph(labels, features)
+ if step % args.loss_print_interval == 0:
+ loss = loss.numpy()
+ if rank == 0:
+ latency = (time.time() - last_time) / (step - last_step)
+ throughput = args.batch_size / latency
+ last_step, last_time = step, time.time()
+ strtime = time.strftime("%Y-%m-%d %H:%M:%S")
+ print(
+ f"Rank[{rank}], Step {step}, Loss {loss:0.4f}, "
+ + f"Latency {(latency * 1000):0.3f} ms, Throughput {throughput:0.1f}, {strtime}"
+ )
+
+ if step % batches_per_epoch == 0:
+ epoch += 1
+ auc, logloss = eval(
+ args,
+ eval_graph,
+ tag="val",
+ cur_step=step,
+ epoch=epoch,
+ cached_eval_batches=cached_eval_batches,
+ )
+ if args.save_model_after_each_eval:
+ save_model(f"step_{step}_val_auc_{auc:0.5f}")
+
+ monitor_value = get_metrics(logs={"auc": auc, "logloss": logloss})
+
+ stop_training, best_metric, stopping_steps, save_best = early_stop(
+ epoch,
+ monitor_value,
+ best_metric=best_metric,
+ stopping_steps=stopping_steps,
+ patience=args.patience,
+ min_delta=args.min_delta,
+ )
+
+ if args.save_best_model and save_best:
+ if rank == 0:
+ print(f"Save best model: monitor(max): {best_metric:.6f}")
+ save_model("best_checkpoint")
+
+ if not args.disable_early_stop and stop_training:
+ break
+
+ deepfm_module.train()
+ last_time = time.time()
+
+ if args.save_best_model:
+ load_model(f"{args.model_save_dir}/best_checkpoint")
+ if rank == 0:
+ print("================ Test Evaluation ================")
+ eval(args, eval_graph, tag="test", cur_step=step, epoch=epoch)
+
+ if args.save_graph_for_serving:
+ del eval_graph
+ recompiled_eval_graph = compile_eval_graph(args, deepfm_module, tag="test")
+ eval_state_dict = recompiled_eval_graph.state_dict()
+ flow.save(recompiled_eval_graph, args.model_serving_path)
+
+
+def np_to_global(np):
+ t = flow.from_numpy(np)
+ return t.to_global(placement=flow.env.all_device_placement("cpu"), sbp=flow.sbp.broadcast)
+
+
+def batch_to_global(np_label, np_features, is_train=True):
+ labels = np_to_global(np_label.reshape(-1, 1)) if is_train else np_label.reshape(-1, 1)
+ features = np_to_global(np_features)
+ return labels, features
+
+
+def prefetch_eval_batches(data_dir, batch_size, num_batches):
+ cached_eval_batches = []
+ with make_criteo_dataloader(data_dir, batch_size, shuffle=False) as loader:
+ for _ in range(num_batches):
+ label, features = batch_to_global(*next(loader), is_train=False)
+ cached_eval_batches.append((label, features))
+ return cached_eval_batches
+
+
+def eval(args, eval_graph, tag="val", cur_step=0, epoch=0, cached_eval_batches=None):
+ if tag == "val":
+ batches_per_epoch = math.ceil(args.num_val_samples / args.batch_size)
+ else:
+ batches_per_epoch = math.ceil(args.num_test_samples / args.batch_size)
+
+ eval_graph.module.eval()
+ labels, preds = [], []
+ eval_start_time = time.time()
+
+    if cached_eval_batches is None:
+ with make_criteo_dataloader(
+ f"{args.data_dir}/{tag}", args.batch_size, shuffle=False
+ ) as loader:
+ eval_start_time = time.time()
+ for i in range(batches_per_epoch):
+ label, features = batch_to_global(*next(loader), is_train=False)
+ pred = eval_graph(features)
+ labels.append(label)
+ preds.append(pred.to_local())
+ else:
+ for i in range(batches_per_epoch):
+ label, features = cached_eval_batches[i]
+ pred = eval_graph(features)
+ labels.append(label)
+ preds.append(pred.to_local())
+
+ labels = (
+ np_to_global(np.concatenate(labels, axis=0)).to_global(sbp=flow.sbp.broadcast()).to_local()
+ )
+ preds = (
+ flow.cat(preds, dim=0)
+ .to_global(placement=flow.env.all_device_placement("cpu"), sbp=flow.sbp.split(0))
+ .to_global(sbp=flow.sbp.broadcast())
+ .to_local()
+ )
+
+ flow.comm.barrier()
+ eval_time = time.time() - eval_start_time
+
+ rank = flow.env.get_rank()
+
+ metrics_start_time = time.time()
+ auc = flow.roc_auc_score(labels, preds).numpy()[0]
+ logloss = flow._C.binary_cross_entropy_loss(preds, labels, weight=None, reduction="mean")
+ metrics_time = time.time() - metrics_start_time
+
+ if rank == 0:
+ host_mem_mb = psutil.Process().memory_info().rss // (1024 * 1024)
+ stream = os.popen("nvidia-smi --query-gpu=memory.used --format=csv")
+ device_mem_str = stream.read().split("\n")[rank + 1]
+
+ strtime = time.strftime("%Y-%m-%d %H:%M:%S")
+ print(
+ f"Rank[{rank}], Epoch {epoch}, Step {cur_step}, AUC {auc:0.6f}, LogLoss {logloss:0.6f}, "
+ + f"Eval_time {eval_time:0.2f} s, Metrics_time {metrics_time:0.2f} s, Eval_samples {labels.shape[0]}, "
+ + f"GPU_Memory {device_mem_str}, Host_Memory {host_mem_mb} MiB, {strtime}"
+ )
+
+ return auc, logloss
+
+
+def compile_eval_graph(args, deepfm_module, tag="val"):
+ eval_graph = DeepFMValGraph(deepfm_module, args.amp)
+ eval_graph.module.eval()
+ with make_criteo_dataloader(f"{args.data_dir}/{tag}", args.batch_size, shuffle=False) as loader:
+ label, features = batch_to_global(*next(loader), is_train=False)
+        # We run inference on GPU, so move the input tensor to the CUDA device explicitly
+ features = features.to("cuda")
+ pred = eval_graph(features)
+ return eval_graph
+
+
+if __name__ == "__main__":
+ os.system(sys.executable + " -m oneflow --doctor")
+ flow.boxing.nccl.enable_all_to_all(True)
+ args = get_args()
+ train(args)
diff --git a/demo/JD-recommendation/oneflow_process/model/embedding/config.pbtxt b/demo/JD-recommendation/oneflow_process/model/embedding/config.pbtxt
new file mode 100644
index 00000000000..877a7b260dc
--- /dev/null
+++ b/demo/JD-recommendation/oneflow_process/model/embedding/config.pbtxt
@@ -0,0 +1,34 @@
+name: "embedding"
+backend: "oneflow"
+max_batch_size: 10000
+
+input [
+ {
+ name: "INPUT_0"
+ data_type: TYPE_INT64
+ dims: [ 41 ]
+ }
+]
+
+output [
+ {
+ name: "OUTPUT_0"
+ data_type: TYPE_FP32
+ dims: [ 1 ]
+ }
+]
+
+instance_group [
+ {
+ count: 1
+ kind: KIND_GPU
+ gpus: [ 0 ]
+ }
+]
+
+parameters {
+ key: "one_embedding_persistent_table_path"
+ value: {
+ string_value: "/root/demo/persistent/0-1",
+ }
+}
diff --git a/demo/JD-recommendation/oneflow_process/train_deepfm.sh b/demo/JD-recommendation/oneflow_process/train_deepfm.sh
new file mode 100644
index 00000000000..6a17e1ffb00
--- /dev/null
+++ b/demo/JD-recommendation/oneflow_process/train_deepfm.sh
@@ -0,0 +1,35 @@
+#!/bin/bash
+DEVICE_NUM_PER_NODE=1
+DEMODIR="$1"
+DATA_DIR="$DEMODIR"/openmldb_process/out
+PERSISTENT_PATH="$DEMODIR"/oneflow_process/persistent
+MODEL_SAVE_DIR="$DEMODIR"/oneflow_process/model_out
+MODEL_SERVING_PATH="$DEMODIR"/oneflow_process/model/embedding/1/model
+
+python3 -m oneflow.distributed.launch \
+ --nproc_per_node $DEVICE_NUM_PER_NODE \
+ --nnodes 1 \
+ --node_rank 0 \
+ --master_addr 127.0.0.1 \
+ deepfm_train_eval_JD.py \
+ --disable_fusedmlp \
+ --data_dir "$DATA_DIR" \
+ --persistent_path "$PERSISTENT_PATH" \
+ --table_size_array "11,42,1105,200,11,1295,1,1,5,3,23,23,7,5042381,3127923,5042381,3649642,28350,105180,7,2,5042381,5,4,4,41,2,2,8,3456,4,5,5042381,10,60,5042381,843,17,1276,101,100" \
+ --store_type 'cached_host_mem' \
+ --cache_memory_budget_mb 1024 \
+ --batch_size 10000 \
+ --train_batches 75000 \
+ --loss_print_interval 100 \
+ --dnn "1000,1000,1000,1000,1000" \
+ --net_dropout 0.2 \
+ --learning_rate 0.001 \
+ --embedding_vec_size 16 \
+ --num_train_samples 4007924 \
+ --num_val_samples 504398 \
+ --num_test_samples 530059 \
+ --model_save_dir "$MODEL_SAVE_DIR" \
+ --save_best_model \
+ --save_graph_for_serving \
+ --model_serving_path "$MODEL_SERVING_PATH" \
+ --save_model_after_each_eval
diff --git a/demo/JD-recommendation/openmldb_process/cal_table_array_size.py b/demo/JD-recommendation/openmldb_process/cal_table_array_size.py
new file mode 100644
index 00000000000..68b8e04c95c
--- /dev/null
+++ b/demo/JD-recommendation/openmldb_process/cal_table_array_size.py
@@ -0,0 +1,15 @@
+import pandas as pd
+import sys
+
+path = sys.argv[1]
+
+train_data = pd.read_parquet(path+"/train/train.parquet")
+val_data = pd.read_parquet(path+"/val/val.parquet")
+test_data = pd.read_parquet(path+"/test/test.parquet")
+total = pd.concat([train_data,val_data], ignore_index=True)
+total = pd.concat([total,test_data], ignore_index=True)
+del total['Label']
+table_size = total.apply(lambda x: x.nunique(), axis = 0)
+
+print("table size array: ")
+print(*table_size.array, sep=',')
diff --git a/demo/JD-recommendation/openmldb_process/combine_convert.py b/demo/JD-recommendation/openmldb_process/combine_convert.py
new file mode 100644
index 00000000000..573ee235dcf
--- /dev/null
+++ b/demo/JD-recommendation/openmldb_process/combine_convert.py
@@ -0,0 +1,49 @@
+import os,sys
+import glob
+import pandas as pd
+import xxhash
+import numpy as np
+
+dataset = sys.argv[1]
+dstdir = sys.argv[2]
+
+extension = 'csv'
+all_filenames = [i for i in glob.glob('*.{}'.format(extension))]
+
+combined_csv = pd.concat([pd.read_csv(f) for f in all_filenames ])
+#export to csv
+
+combined_csv.rename(columns={'reqId_1': 'C1','flattenRequest_eventTime_original_0': 'C2','flattenRequest_reqId_original_1': 'C3','flattenRequest_pair_id_original_24': 'C4','flattenRequest_sku_id_original_25': 'C5','flattenRequest_user_id_original_26': 'C6','flattenRequest_pair_id_window_unique_count_27': 'I1','flattenRequest_pair_id_window_top1_ratio_28': 'I2','flattenRequest_pair_id_window_top1_ratio_29': 'I3','flattenRequest_pair_id_window_unique_count_32': 'I4','flattenRequest_pair_id_window_count_35': 'I5','flattenRequest_eventTime_dayofweek_41': 'C7','flattenRequest_eventTime_isweekday_43': 'C8','reqId_3': 'C9','action_actionValue_multi_direct_2': 'Label','bo_product_a1_multi_direct_3': 'C10','bo_product_a2_multi_direct_4': 'C11','bo_product_a3_multi_direct_5': 'C12','bo_product_br_multi_direct_6': 'C13','bo_product_cate_multi_direct_7':'C14','bo_product_ingestionTime_multi_direct_8':'C15','bo_user_age_multi_direct_9': 'C16','bo_user_ingestionTime_multi_direct_10':'C17','bo_user_sex_multi_direct_11':'C18','bo_user_user_lv_cd_multi_direct_12':'C19','reqId_14':'C20','bo_comment_bad_comment_rate_multi_max_13': 'I6','bo_comment_bad_comment_rate_multi_min_14': 'I7','bo_comment_bad_comment_rate_multi_min_15': 'I8','bo_comment_comment_num_multi_unique_count_22': 'I9','bo_comment_has_bad_comment_multi_unique_count_23': 'I10','bo_comment_has_bad_comment_multi_top3frequency_30': 'C21','bo_comment_comment_num_multi_top3frequency_33': 'C22','reqId_17': 'C23','bo_action_br_multi_top3frequency_16': 'C24','bo_action_cate_multi_top3frequency_17': 'C25','bo_action_model_id_multi_top3frequency_18': 'C26','bo_action_model_id_multi_unique_count_19': 'I11','bo_action_model_id_multi_unique_count_20': 'I12','bo_action_type_multi_unique_count_21': 'I13','bo_action_type_multi_top3frequency_40': 'C27','bo_action_type_multi_top3frequency_42': 'C28'}, inplace=True)
+
+#combined_csv.to_csv( "combined_csv.csv", index=False)
+
+def generate_hash(val):
+ res = []
+ if val.name == 'Label':
+ return val
+ for i in val:
+ test = xxhash.xxh64(str(i), seed = 10)
+ res.append(test.intdigest())
+ return res
+
+
+cols = ['Label',
+ 'I1', 'I2', 'I3', 'I4', 'I5', 'I6', 'I7', 'I8', 'I9', 'I10', 'I11', 'I12', 'I13',
+ 'C1', 'C2', 'C3', 'C4', 'C5', 'C6', 'C7','C8', 'C9', 'C10', 'C11', 'C12', 'C13', 'C14',
+ 'C15', 'C16', 'C17', 'C18', 'C19', 'C20', 'C21', 'C22','C23', 'C24', 'C25', 'C26', 'C27', 'C28']
+
+
+df = combined_csv[cols]
+df= df.apply(lambda x: generate_hash(x), axis = 0)
+
+
+for col in df.columns:
+ if col == 'Label':
+ df[col] = df[col].astype('float32')
+ else:
+ df[col] = df[col].astype('int64')
+df.to_parquet(dstdir+dataset+'.parquet', engine='pyarrow', index=False)
+
+sample_size = df['Label'].size
+print(dataset + " samples = " + str(sample_size))
+
diff --git a/demo/JD-recommendation/openmldb_process/process_JD_out_full.sh b/demo/JD-recommendation/openmldb_process/process_JD_out_full.sh
new file mode 100644
index 00000000000..f6abe3b1342
--- /dev/null
+++ b/demo/JD-recommendation/openmldb_process/process_JD_out_full.sh
@@ -0,0 +1,44 @@
+#!/bin/bash
+mkdir -p data_processed/train
+mkdir -p data_processed/test
+mkdir -p data_processed/valid
+
+number="$( find "$1"/*.csv | wc -l )"
+echo "total $number files"
+
+split1=$(( 8*number/10 ))
+split2=$(( 9*number/10 ))
+
+n=0
+echo "$split1 $split2"
+for f in "$1"/*.csv
+do
+ n=$(( n+1 ))
+ if [ "$n" -lt "$split1" ]
+ then
+ cp "$f" data_processed/train/.
+ elif [ "$n" -lt "$split2" ]
+ then
+ cp "$f" data_processed/valid/.
+ else
+ cp "$f" data_processed/test/.
+ fi
+ echo "processing $f ..."
+done
+
+cd data_processed/train || exit
+python3 ../../combine_convert.py train ../../out/train/
+cd ../.. || exit
+
+cd data_processed/valid || exit
+python3 ../../combine_convert.py val ../../out/val/
+cd ../.. || exit
+
+cd data_processed/test || exit
+python3 ../../combine_convert.py test ../../out/test/
+cd ../.. || exit
+
+python3 cal_table_array_size.py ./out/
+
+
+rm -rf data_processed
diff --git a/demo/JD-recommendation/serving/client.py b/demo/JD-recommendation/serving/client.py
new file mode 100644
index 00000000000..b67fb4fc437
--- /dev/null
+++ b/demo/JD-recommendation/serving/client.py
@@ -0,0 +1,37 @@
+"""
+Copyright 2020 The OneFlow Authors. All rights reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+"""
+
+import time
+import numpy as np
+import tritonclient.http as httpclient
+
+
+if __name__ == '__main__':
+ triton_client = httpclient.InferenceServerClient(url='127.0.0.1:8000')
+
+ data = np.ones((1,41)).astype(np.int64)
+
+ inputs = []
+ inputs.append(httpclient.InferInput('INPUT_0', data.shape, "INT64"))
+ inputs[0].set_data_from_numpy(data, binary_data=True)
+ outputs = []
+ outputs.append(httpclient.InferRequestedOutput('OUTPUT_0', binary_data=True, class_count=1))
+ now = time.time()
+ results = triton_client.infer("embedding", inputs=inputs, outputs=outputs)
+ print(time.time() - now)
+ output_data0 = results.as_numpy('OUTPUT_0')
+ print(output_data0.shape)
+ print(output_data0)
diff --git a/demo/JD-recommendation/serving/openmldb_serving/predict_server.py b/demo/JD-recommendation/serving/openmldb_serving/predict_server.py
new file mode 100644
index 00000000000..4d4d1d240d6
--- /dev/null
+++ b/demo/JD-recommendation/serving/openmldb_serving/predict_server.py
@@ -0,0 +1,103 @@
+#!/usr/bin/env python3
+# -*- coding: utf-8 -*-
+# Copyright 2021 4Paradigm
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Module of predict server"""
+import numpy as np
+import tornado.web
+import tornado.ioloop
+import json
+import requests
+import argparse
+from process_res import process_infer
+
+#bst = None
+
+table_schema = [
+ ("reqId", "string"),
+ ("eventTime", "timestamp"),
+ ("main_id", "string"),
+ ("pair_id", "string"),
+ ("user_id", "string"),
+ ("sku_id", "string"),
+ ("time", "bigint"),
+ ("split_id", "int"),
+ ("time1", "string"),
+]
+
+url = ""
+
+def get_schema():
+ dict_schema_tmp = {}
+ for i in table_schema:
+ dict_schema_tmp[i[0]] = i[1]
+ return dict_schema_tmp
+
+dict_schema = get_schema()
+json_schema = json.dumps(dict_schema)
+
+
+class SchemaHandler(tornado.web.RequestHandler):
+ def get(self):
+ self.write(json_schema)
+
+
+class PredictHandler(tornado.web.RequestHandler):
+ """Class of PredictHandler docstring."""
+ def post(self):
+ row = json.loads(self.request.body)
+ data = {}
+ data["input"] = []
+ row_data = []
+ for i in table_schema:
+ if i[1] == "string":
+ row_data.append(row.get(i[0], ""))
+ elif i[1] == "int" or i[1] == "double" or i[1] == "timestamp" or i[1] == "bigint":
+ row_data.append(row.get(i[0], 0))
+ else:
+ row_data.append(None)
+
+ data["input"].append(row_data)
+ rs = requests.post(url, json=data)
+ result = json.loads(rs.text)
+ for r in result["data"]["data"]:
+ res = np.array(r)
+ self.write("----------------ins---------------\n")
+ self.write(str(res) + "\n")
+ pred = process_infer(res)
+ self.write("---------------predict change of purchase -------------\n")
+ self.write(f"{str(pred)}")
+
+class MainHandler(tornado.web.RequestHandler):
+ def get(self):
+ self.write("real time execute sparksql demo")
+
+
+def make_app():
+ return tornado.web.Application([
+ (r"/", MainHandler),
+ (r"/schema", SchemaHandler),
+ (r"/predict", PredictHandler),
+ ])
+
+
+if __name__ == "__main__":
+ parser = argparse.ArgumentParser()
+ parser.add_argument("endpoint", help="specify the endpoint of apiserver")
+ args = parser.parse_args()
+ url = f"http://{args.endpoint}/dbs/JD_db/deployments/demo"
+ app = make_app()
+ app.listen(8887)
+ tornado.ioloop.IOLoop.current().start()
diff --git a/demo/JD-recommendation/serving/openmldb_serving/process_res.py b/demo/JD-recommendation/serving/openmldb_serving/process_res.py
new file mode 100644
index 00000000000..3a974eb6b06
--- /dev/null
+++ b/demo/JD-recommendation/serving/openmldb_serving/process_res.py
@@ -0,0 +1,84 @@
+import pandas as pd
+import xxhash
+import numpy as np
+import tritonclient.http as httpclient
+
+cols = ['C1','C2','C3','C4','C5','C6','I1','I2','I3','I4','I5','C7','C8','C9','Label','C10','C11','C12','C13','C14','C15','C16','C17','C18','C19','C20','I6','I7','I8','I9','I10','C21','C22','C23','C24','C25','C26','I11','I12','I13','C27','C28']
+
+res_cols = ['Label',
+ 'I1', 'I2', 'I3', 'I4', 'I5', 'I6', 'I7', 'I8', 'I9', 'I10', 'I11', 'I12', 'I13',
+ 'C1', 'C2', 'C3', 'C4', 'C5', 'C6', 'C7','C8', 'C9', 'C10', 'C11', 'C12', 'C13', 'C14',
+ 'C15', 'C16', 'C17', 'C18', 'C19', 'C20', 'C21', 'C22','C23', 'C24', 'C25', 'C26', 'C27', 'C28']
+
+def get_schema():
+ dict_schema_tmp = {}
+ for i in table_schema:
+ dict_schema_tmp[i[0]] = i[1]
+ return dict_schema_tmp
+
+def generate_hash(val):
+ res = []
+ if val.name == 'Label':
+ return val
+ for i in val:
+ test = xxhash.xxh64(str(i), seed = 10)
+ res.append(test.intdigest())
+ return res
+
+def rearrange_and_mod(indata):
+    # check that the input sample has the expected number of fields
+    if len(cols) != len(indata):
+        print("Sample length not equal, please check")
+        return None
+
+    df = pd.DataFrame(columns=cols)
+    df.loc[0] = indata
+    df = df[res_cols]
+
+    df = df.apply(lambda x: generate_hash(x), axis=0)
+
+    for col in df.columns:
+        if col == 'Label':
+            df[col] = df[col].astype('float32')
+        else:
+            df[col] = df[col].astype('int64')
+    return df
+
+
+def process_res(datadir):
+    data = pd.read_csv(datadir)
+    res = rearrange_and_mod(data)
+    print(res)
+
+def oneflow_infer(data):
+ triton_client = httpclient.InferenceServerClient(url='127.0.0.1:8000')
+ inputs = []
+ inputs.append(httpclient.InferInput('INPUT_0', data.shape, "INT64"))
+ inputs[0].set_data_from_numpy(data, binary_data=True)
+ outputs = []
+ outputs.append(httpclient.InferRequestedOutput('OUTPUT_0', binary_data=True, class_count=1))
+ results = triton_client.infer("embedding", inputs=inputs, outputs=outputs)
+ output_data = results.as_numpy('OUTPUT_0')
+ return output_data
+
+
+def process_infer(data):
+
+ df = pd.DataFrame(columns=cols)
+ df.loc[0]=data
+ df = df[res_cols]
+
+ df= df.apply(lambda x: generate_hash(x), axis = 0)
+
+ for col in df.columns:
+ if col == 'Label':
+ df[col] = df[col].astype('float32')
+ else:
+ df[col] = df[col].astype('int64')
+
+ label = df['Label']
+ del df['Label']
+ data = df.values
+ res = oneflow_infer(data)
+ return res
diff --git a/steps/integration_test.sh b/demo/JD-recommendation/serving/openmldb_serving/start_predict_server.sh
old mode 100644
new mode 100755
similarity index 67%
rename from steps/integration_test.sh
rename to demo/JD-recommendation/serving/openmldb_serving/start_predict_server.sh
index 0a20391b0c8..aaec265c530
--- a/steps/integration_test.sh
+++ b/demo/JD-recommendation/serving/openmldb_serving/start_predict_server.sh
@@ -1,3 +1,5 @@
+#! /bin/bash
+#
# Copyright 2021 4Paradigm
#
# Licensed under the Apache License, Version 2.0 (the "License");
@@ -12,12 +14,8 @@
# See the License for the specific language governing permissions and
# limitations under the License.
-# $1 should be 1(multi dimension) or 0(one dimension)
-# $2 should be regex for filter testcases
-ulimit -c unlimited
-if [ -f "test-common/integrationtest/setup.sh" ]
-then
- export runlist=$2
- export norunlist=$3
- sh test-common/integrationtest/runall.sh $1 $2 $3
-fi
+# start_predict_server.sh
+
+echo "start predict server"
+nohup python3 predict_server.py "$1" >/tmp/p.log 2>&1 &
+sleep 1
diff --git a/demo/JD-recommendation/serving/predict.py b/demo/JD-recommendation/serving/predict.py
new file mode 100644
index 00000000000..2b17f9268e1
--- /dev/null
+++ b/demo/JD-recommendation/serving/predict.py
@@ -0,0 +1,33 @@
+#!/usr/bin/env python3
+# -*- coding: utf-8 -*-
+# Copyright 2021 4Paradigm
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""Module of request predict in script"""
+import requests
+
+url = "http://127.0.0.1:8887/predict"
+
+req = {"reqId": "200080_5505_2016-03-15 20:43:04",
+ "eventTime": 1458045784000,
+ "main_id": "681271",
+ "pair_id": "200080_5505",
+ "user_id": "200080",
+ "sku_id": "5505",
+ "time": 1458045784000,
+ "split_id": 1,
+ "time1":"2016-03-15 20:43:04"}
+
+res = requests.post(url, json=req)
+print(res.text)
+
diff --git a/demo/JD-recommendation/sync_select_out.sql b/demo/JD-recommendation/sync_select_out.sql
new file mode 100644
index 00000000000..de9d667b655
--- /dev/null
+++ b/demo/JD-recommendation/sync_select_out.sql
@@ -0,0 +1,88 @@
+USE JD_db;
+set @@sync_job='true';
+set @@job_timeout='600000';
+select * from
+(
+select
+ `reqId` as reqId_1,
+ `eventTime` as flattenRequest_eventTime_original_0,
+ `reqId` as flattenRequest_reqId_original_1,
+ `pair_id` as flattenRequest_pair_id_original_24,
+ `sku_id` as flattenRequest_sku_id_original_25,
+ `user_id` as flattenRequest_user_id_original_26,
+ distinct_count(`pair_id`) over flattenRequest_user_id_eventTime_0_10_ as flattenRequest_pair_id_window_unique_count_27,
+ fz_top1_ratio(`pair_id`) over flattenRequest_user_id_eventTime_0_10_ as flattenRequest_pair_id_window_top1_ratio_28,
+ fz_top1_ratio(`pair_id`) over flattenRequest_user_id_eventTime_0s_14d_200 as flattenRequest_pair_id_window_top1_ratio_29,
+ distinct_count(`pair_id`) over flattenRequest_user_id_eventTime_0s_14d_200 as flattenRequest_pair_id_window_unique_count_32,
+ case when !isnull(at(`pair_id`, 0)) over flattenRequest_user_id_eventTime_0_10_ then count_where(`pair_id`, `pair_id` = at(`pair_id`, 0)) over flattenRequest_user_id_eventTime_0_10_ else null end as flattenRequest_pair_id_window_count_35,
+ dayofweek(timestamp(`eventTime`)) as flattenRequest_eventTime_dayofweek_41,
+ case when 1 < dayofweek(timestamp(`eventTime`)) and dayofweek(timestamp(`eventTime`)) < 7 then 1 else 0 end as flattenRequest_eventTime_isweekday_43
+from
+ `flattenRequest`
+ window flattenRequest_user_id_eventTime_0_10_ as (partition by `user_id` order by `eventTime` rows between 10 preceding and 0 preceding),
+ flattenRequest_user_id_eventTime_0s_14d_200 as (partition by `user_id` order by `eventTime` rows_range between 14d preceding and 0s preceding MAXSIZE 200))
+as out0
+last join
+(
+select
+ `flattenRequest`.`reqId` as reqId_3,
+ `action_reqId`.`actionValue` as action_actionValue_multi_direct_2,
+ `bo_product_sku_id`.`a1` as bo_product_a1_multi_direct_3,
+ `bo_product_sku_id`.`a2` as bo_product_a2_multi_direct_4,
+ `bo_product_sku_id`.`a3` as bo_product_a3_multi_direct_5,
+ `bo_product_sku_id`.`br` as bo_product_br_multi_direct_6,
+ `bo_product_sku_id`.`cate` as bo_product_cate_multi_direct_7,
+ `bo_product_sku_id`.`ingestionTime` as bo_product_ingestionTime_multi_direct_8,
+ `bo_user_user_id`.`age` as bo_user_age_multi_direct_9,
+ `bo_user_user_id`.`ingestionTime` as bo_user_ingestionTime_multi_direct_10,
+ `bo_user_user_id`.`sex` as bo_user_sex_multi_direct_11,
+ `bo_user_user_id`.`user_lv_cd` as bo_user_user_lv_cd_multi_direct_12
+from
+ `flattenRequest`
+ last join `action` as `action_reqId` on `flattenRequest`.`reqId` = `action_reqId`.`reqId`
+ last join `bo_product` as `bo_product_sku_id` on `flattenRequest`.`sku_id` = `bo_product_sku_id`.`sku_id`
+ last join `bo_user` as `bo_user_user_id` on `flattenRequest`.`user_id` = `bo_user_user_id`.`user_id`)
+as out1
+on out0.reqId_1 = out1.reqId_3
+last join
+(
+select
+ `reqId` as reqId_14,
+ max(`bad_comment_rate`) over bo_comment_sku_id_ingestionTime_0s_64d_100 as bo_comment_bad_comment_rate_multi_max_13,
+ min(`bad_comment_rate`) over bo_comment_sku_id_ingestionTime_0_10_ as bo_comment_bad_comment_rate_multi_min_14,
+ min(`bad_comment_rate`) over bo_comment_sku_id_ingestionTime_0s_64d_100 as bo_comment_bad_comment_rate_multi_min_15,
+ distinct_count(`comment_num`) over bo_comment_sku_id_ingestionTime_0s_64d_100 as bo_comment_comment_num_multi_unique_count_22,
+ distinct_count(`has_bad_comment`) over bo_comment_sku_id_ingestionTime_0s_64d_100 as bo_comment_has_bad_comment_multi_unique_count_23,
+ fz_topn_frequency(`has_bad_comment`, 3) over bo_comment_sku_id_ingestionTime_0s_64d_100 as bo_comment_has_bad_comment_multi_top3frequency_30,
+ fz_topn_frequency(`comment_num`, 3) over bo_comment_sku_id_ingestionTime_0s_64d_100 as bo_comment_comment_num_multi_top3frequency_33
+from
+ (select `eventTime` as `ingestionTime`, bigint(0) as `dt`, `sku_id` as `sku_id`, int(0) as `comment_num`, '' as `has_bad_comment`, float(0) as `bad_comment_rate`, reqId from `flattenRequest`)
+ window bo_comment_sku_id_ingestionTime_0s_64d_100 as (
+UNION (select `ingestionTime`, `dt`, `sku_id`, `comment_num`, `has_bad_comment`, `bad_comment_rate`, '' as reqId from `bo_comment`) partition by `sku_id` order by `ingestionTime` rows_range between 64d preceding and 0s preceding MAXSIZE 100 INSTANCE_NOT_IN_WINDOW),
+ bo_comment_sku_id_ingestionTime_0_10_ as (
+UNION (select `ingestionTime`, `dt`, `sku_id`, `comment_num`, `has_bad_comment`, `bad_comment_rate`, '' as reqId from `bo_comment`) partition by `sku_id` order by `ingestionTime` rows between 10 preceding and 0 preceding INSTANCE_NOT_IN_WINDOW))
+as out2
+on out0.reqId_1 = out2.reqId_14
+last join
+(
+select
+ `reqId` as reqId_17,
+ fz_topn_frequency(`br`, 3) over bo_action_pair_id_ingestionTime_0s_10h_100 as bo_action_br_multi_top3frequency_16,
+ fz_topn_frequency(`cate`, 3) over bo_action_pair_id_ingestionTime_0s_10h_100 as bo_action_cate_multi_top3frequency_17,
+ fz_topn_frequency(`model_id`, 3) over bo_action_pair_id_ingestionTime_0s_7d_100 as bo_action_model_id_multi_top3frequency_18,
+ distinct_count(`model_id`) over bo_action_pair_id_ingestionTime_0s_14d_100 as bo_action_model_id_multi_unique_count_19,
+ distinct_count(`model_id`) over bo_action_pair_id_ingestionTime_0s_7d_100 as bo_action_model_id_multi_unique_count_20,
+ distinct_count(`type`) over bo_action_pair_id_ingestionTime_0s_14d_100 as bo_action_type_multi_unique_count_21,
+ fz_topn_frequency(`type`, 3) over bo_action_pair_id_ingestionTime_0s_7d_100 as bo_action_type_multi_top3frequency_40,
+ fz_topn_frequency(`type`, 3) over bo_action_pair_id_ingestionTime_0s_14d_100 as bo_action_type_multi_top3frequency_42
+from
+ (select `eventTime` as `ingestionTime`, `pair_id` as `pair_id`, bigint(0) as `time`, '' as `model_id`, '' as `type`, '' as `cate`, '' as `br`, reqId from `flattenRequest`)
+ window bo_action_pair_id_ingestionTime_0s_10h_100 as (
+UNION (select `ingestionTime`, `pair_id`, `time`, `model_id`, `type`, `cate`, `br`, '' as reqId from `bo_action`) partition by `pair_id` order by `ingestionTime` rows_range between 10h preceding and 0s preceding MAXSIZE 100 INSTANCE_NOT_IN_WINDOW),
+ bo_action_pair_id_ingestionTime_0s_7d_100 as (
+UNION (select `ingestionTime`, `pair_id`, `time`, `model_id`, `type`, `cate`, `br`, '' as reqId from `bo_action`) partition by `pair_id` order by `ingestionTime` rows_range between 7d preceding and 0s preceding MAXSIZE 100 INSTANCE_NOT_IN_WINDOW),
+ bo_action_pair_id_ingestionTime_0s_14d_100 as (
+UNION (select `ingestionTime`, `pair_id`, `time`, `model_id`, `type`, `cate`, `br`, '' as reqId from `bo_action`) partition by `pair_id` order by `ingestionTime` rows_range between 14d preceding and 0s preceding MAXSIZE 100 INSTANCE_NOT_IN_WINDOW))
+as out3
+on out0.reqId_1 = out3.reqId_17
+INTO OUTFILE '/root/project/out/1' OPTIONS(mode='overwrite');
diff --git a/demo/init.sh b/demo/init.sh
index 2fc90d15de5..074950515ae 100755
--- a/demo/init.sh
+++ b/demo/init.sh
@@ -29,6 +29,7 @@ set -e
rm -rf /tmp/openmldb_offline_storage/*
rm -rf /work/openmldb/logs*
rm -rf /work/openmldb/db*
+rm -rf /work/openmldb/taskmanager/bin/logs
sleep 2
echo "Starting openmldb in $MODE mode..."
if [[ "$MODE" = "standalone" ]]; then
diff --git a/demo/predict-taxi-trip-duration/README.md b/demo/predict-taxi-trip-duration/README.md
index dfc0cca7cd8..0143c9c7aee 100644
--- a/demo/predict-taxi-trip-duration/README.md
+++ b/demo/predict-taxi-trip-duration/README.md
@@ -28,7 +28,7 @@ w2 as (PARTITION BY passenger_count ORDER BY pickup_datetime ROWS_RANGE BETWEEN
**Start docker**
```
-docker run -it 4pdosc/openmldb:0.5.2 bash
+docker run -it 4pdosc/openmldb:0.6.3 bash
```
**Initialize environment**
```bash
@@ -138,7 +138,7 @@ python3 predict.py
**Start docker**
```bash
-docker run -it 4pdosc/openmldb:0.5.2 bash
+docker run -it 4pdosc/openmldb:0.6.3 bash
```
**Initialize environment**
diff --git a/demo/setup_openmldb.sh b/demo/setup_openmldb.sh
index 90548142985..3c10dfeea10 100755
--- a/demo/setup_openmldb.sh
+++ b/demo/setup_openmldb.sh
@@ -24,7 +24,7 @@ echo "version: ${VERSION}"
curl -SLo zookeeper.tar.gz https://archive.apache.org/dist/zookeeper/zookeeper-3.4.14/zookeeper-3.4.14.tar.gz
curl -SLo openmldb.tar.gz "https://github.com/4paradigm/OpenMLDB/releases/download/v${VERSION}/openmldb-${VERSION}-linux.tar.gz"
-curl -SLo spark-3.0.0-bin-openmldbspark.tgz "https://github.com/4paradigm/spark/releases/download/v3.0.0-openmldb${VERSION}/spark-3.0.0-bin-openmldbspark.tgz"
+curl -SLo spark-3.2.1-bin-openmldbspark.tgz "https://github.com/4paradigm/spark/releases/download/v3.2.1-openmldb${VERSION}/spark-3.2.1-bin-openmldbspark.tgz"
WORKDIR=/work
@@ -40,9 +40,9 @@ tar xzf openmldb.tar.gz -C "${WORKDIR}/openmldb" --strip-components 1
# remove symbols and sections
strip -s "${WORKDIR}/openmldb/bin/openmldb"
-mkdir -p "${WORKDIR}/openmldb/spark-3.0.0-bin-openmldbspark"
-tar xzf spark-3.0.0-bin-openmldbspark.tgz -C "${WORKDIR}/openmldb/spark-3.0.0-bin-openmldbspark" --strip-components 1
-rm -rf "${WORKDIR}/openmldb/spark-3.0.0-bin-openmldbspark/python"
+mkdir -p "${WORKDIR}/openmldb/spark-3.2.1-bin-openmldbspark"
+tar xzf spark-3.2.1-bin-openmldbspark.tgz -C "${WORKDIR}/openmldb/spark-3.2.1-bin-openmldbspark" --strip-components 1
+
rm -f ./*.tar.gz
rm -f ./*.tgz
diff --git a/demo/talkingdata-adtracking-fraud-detection/README.md b/demo/talkingdata-adtracking-fraud-detection/README.md
index 135f738c4ad..9f1339c3979 100644
--- a/demo/talkingdata-adtracking-fraud-detection/README.md
+++ b/demo/talkingdata-adtracking-fraud-detection/README.md
@@ -15,7 +15,7 @@ We recommend you to use docker to run the demo. OpenMLDB and dependencies have b
**Start docker**
```
-docker run -it 4pdosc/openmldb:0.5.2 bash
+docker run -it 4pdosc/openmldb:0.6.3 bash
```
#### Run locally
@@ -83,6 +83,22 @@ see [train_and_serve.py](https://github.com/4paradigm/OpenMLDB/blob/main/demo/ta
5. load data to online storage
6. update model to predict server
+#### The Jupyter Way
+
+You can use the Jupyter notebook `train_and_serve.ipynb`, which does the same as `train_and_serve.py`.
+
+Steps:
+1. `docker ... -p 8888:8888 ...`, 8888 is the jupyter server default port (see the example command after this list).
+1. start openmldb and the predict server
+1. `pip3 install notebook`
+1. run jupyter with `jupyter notebook --ip 0.0.0.0 --allow-root`. You can set a password beforehand with `jupyter notebook password`.
+1. run `train_and_serve.ipynb` in the jupyter notebook web UI.
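+
+For example, a possible way to publish the notebook port when starting the demo container (a sketch based on the image version used in this demo; adjust the tag and other options to your setup):
+```bash
+docker run -it -p 8888:8888 4pdosc/openmldb:0.6.3 bash
+```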
+
+
+```{tip}
+Use `jupyter nbconvert --ClearOutputPreprocessor.enabled=True --ClearMetadataPreprocessor.enabled=True --ClearMetadataPreprocessor.preserve_cell_metadata_mask tags --to=notebook --log-level=ERROR --inplace train_and_serve.ipynb` to make notebook clean.
+```
+
### Predict
Predict once, send a post request to predict server `:/predict`. Or you can run the python script below.
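+
+For illustration, a minimal sketch of such a POST request with `curl` (hypothetical field values; the field names follow the `talkingdata` table schema, and the endpoint assumes the predict server listens on port 8881 as in this tutorial):
+```bash
+curl -X POST http://localhost:8881/predict -H "Content-Type: application/json" \
+  -d '{"ip": 1, "app": 2, "device": 1, "os": 13, "channel": 497, "click_time": 1512579892000, "is_attributed": 0, "click_id": 0, "hour": 18, "day": 6}'
+```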
diff --git a/demo/talkingdata-adtracking-fraud-detection/predict_server.py b/demo/talkingdata-adtracking-fraud-detection/predict_server.py
index 418754a8ae0..1074876e578 100644
--- a/demo/talkingdata-adtracking-fraud-detection/predict_server.py
+++ b/demo/talkingdata-adtracking-fraud-detection/predict_server.py
@@ -84,12 +84,13 @@ def post(self):
# result is a list, even we just do a single request
for res in get_result(response):
ins = build_feature(res)
+ logging.info(f"feature: {res}")
self.write("real-time feature:\n" + str(res) + "\n")
prediction = bst.predict(ins)
self.write(
"---------------predict whether is attributed -------------\n")
self.write(f"{str(prediction[0])}")
- logging.info("feature: %s, prediction: %s", res, prediction)
+ logging.info(f"prediction: {prediction}")
class MainHandler(tornado.web.RequestHandler):
diff --git a/demo/talkingdata-adtracking-fraud-detection/train_and_serve.ipynb b/demo/talkingdata-adtracking-fraud-detection/train_and_serve.ipynb
new file mode 100644
index 00000000000..7657b92ba66
--- /dev/null
+++ b/demo/talkingdata-adtracking-fraud-detection/train_and_serve.ipynb
@@ -0,0 +1,243 @@
+{
+ "cells": [
+ {
+ "cell_type": "markdown",
+ "id": "8cc1eec0",
+ "metadata": {},
+ "source": [
+ "OpenMLDB sdk init: connect to cluster, and register for sql magic\n",
+ "Plz do `/work/init.sh` to create the OpenMLDB cluster, and `python3 /work/talkingdata/predict_server.py --no-init > predict.log 2>&1 &` to start the simple predict server(receive the deployed sql and model, request it to do online feature extraction)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "e7dbf87e",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "import openmldb\n",
+ "db = openmldb.dbapi.connect(database='demo_db',zk='127.0.0.1:2181',zkPath='/openmldb')\n",
+ "openmldb.sql_magic.register(db)"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "1829fe58",
+ "metadata": {},
+ "source": [
+ "The database and table name, which will be used later"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "04bc2d08",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "db_name=\"demo_db\"\n",
+ "table_name=\"talkingdata\""
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "691bbd5b",
+ "metadata": {},
+ "source": [
+ "You can use variables like:"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "1221a2fe",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "var='1'\n",
+ "%sql SELECT {var};\n",
+ "%sql SELECT $var;"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "513d4aa6",
+ "metadata": {},
+ "source": [
+ "Create database and table(talking data schema)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "3f9e500a",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "%sql create database if not exists $db_name;\n",
+ "%sql use $db_name;\n",
+ "%sql create table if not exists $table_name (ip int, app int, device int, os int, channel int, click_time timestamp, is_attributed int, click_id int, hour int, day int);"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "9add0289",
+ "metadata": {},
+ "source": [
+ "Offline load data and extract feature"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "e8c4e708",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "%sql set @@execute_mode='offline';\n",
+ "%sql set @@sync_job=true;\n",
+ "%sql set @@job_timeout=600000;"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "672b3e84",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "%sql load data infile 'file:///work/talkingdata/train_sample.csv' into table $table_name options(mode='overwrite');"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "1185ab81",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "sql_part = f\"\"\"\n",
+ "select is_attributed, app, device, os, channel, hour(click_time) as hour, day(click_time) as day, \n",
+ "count(channel) over w1 as qty, \n",
+ "count(channel) over w2 as ip_app_count, \n",
+ "count(channel) over w3 as ip_app_os_count \n",
+ "from {table_name} \n",
+ "window \n",
+ "w1 as (partition by ip order by click_time ROWS_RANGE BETWEEN 1h PRECEDING AND CURRENT ROW), \n",
+ "w2 as(partition by ip, app order by click_time ROWS_RANGE BETWEEN UNBOUNDED PRECEDING AND CURRENT ROW),\n",
+ "w3 as(partition by ip, app, os order by click_time ROWS_RANGE BETWEEN UNBOUNDED PRECEDING AND CURRENT ROW)\n",
+ "\"\"\"\n",
+ "\n",
+ "train_feature_dir='/tmp/train_feature'"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "ee0cb1cc",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "%sql {sql_part} INTO OUTFILE '{train_feature_dir}' OPTIONS(mode='overwrite');"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "078bb8fb",
+ "metadata": {},
+ "source": [
+ "Train: we use a simple train script to do it, and save the model to 'model_path'"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "d1bc87b8",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "import xgboost_train_sample\n",
+ "model_path='/tmp/model.json'\n",
+ "xgboost_train_sample.train(f'{train_feature_dir}/*.csv', model_path)"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "f47ed227",
+ "metadata": {},
+ "source": [
+ "Deploy sql & model, and load data in online mode"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "95e54d38",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "%sql SET @@execute_mode='online';"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "af8f5b18",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "deploy_name='d1'\n",
+ "%sql DEPLOY $deploy_name $sql_part;"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "ccf50448",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "%sql load data infile 'file:///work/talkingdata/train_sample.csv' into table $table_name options(mode='append');"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "ad607b01",
+ "metadata": {},
+ "source": [
+ "Let the predict server know the sql and model"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "1fab5dbc",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "import requests\n",
+ "predict_server='localhost:8881'\n",
+ "infos = {'database': db_name, 'deployment': deploy_name, 'model_path': model_path}\n",
+ "res = requests.post('http://' + predict_server + '/update', json=infos)\n",
+ "res.text"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "96323097",
+ "metadata": {},
+ "source": [
+ "Then you can request the predict server to test\n",
+ "`python3 predict.py`"
+ ]
+ }
+ ],
+ "metadata": {
+ "language_info": {
+ "name": "python"
+ }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 5
+}
diff --git a/demo/talkingdata-adtracking-fraud-detection/xgboost_train_sample.py b/demo/talkingdata-adtracking-fraud-detection/xgboost_train_sample.py
new file mode 100644
index 00000000000..5037db91a41
--- /dev/null
+++ b/demo/talkingdata-adtracking-fraud-detection/xgboost_train_sample.py
@@ -0,0 +1,76 @@
+# Copyright 2021 4Paradigm
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import glob
+import os
+
+import pandas as pd
+from sklearn.metrics import accuracy_score
+from sklearn.metrics import classification_report
+from sklearn.model_selection import train_test_split
+from xgboost.sklearn import XGBClassifier
+
+
+def read_dataset(train_feature_path):
+ if train_feature_path.startswith("/"):
+ # local file
+ if '*' in train_feature_path:
+ return pd.concat(map(pd.read_csv, glob.glob(os.path.join('', train_feature_path))))
+ else:
+ return pd.read_csv(train_feature_path)
+ else:
+ raise Exception("remote files is unsupported")
+
+
+# assume that the first column is the label
+def prepare_dataset(train_df, seed, test_size):
+ # drop column label
+ X_data = train_df.drop('is_attributed', axis=1)
+ y = train_df.is_attributed
+
+ # Split the dataset into train and Test
+ return train_test_split(
+ X_data, y, test_size=test_size, random_state=seed
+ )
+
+
+def xgboost_train(X_train, X_test, y_train, y_test, model_path):
+ print('Training by xgb')
+ # default is binary:logistic
+ train_model = XGBClassifier(use_label_encoder=False).fit(X_train, y_train)
+ pred = train_model.predict(X_test)
+ print('Classification report:\n', classification_report(y_test, pred))
+ auc = accuracy_score(y_test, pred) * 100
+ print(f'Accuracy score: {auc}')
+
+ print('Save model to ', model_path)
+ train_model.save_model(model_path)
+ return auc
+
+
+# only csv now
+def train(train_feature_path, model_path, seed=7, test_size=0.25):
+ train_df = read_dataset(train_feature_path)
+ X_train, X_test, y_train, y_test = prepare_dataset(train_df, seed, test_size)
+ return xgboost_train(X_train, X_test, y_train, y_test, model_path)
+
+
+def train_task(*op_args, **op_kwargs):
+ return train(op_args[0], op_args[1])
+
+
+if __name__ == '__main__':
+ print(glob.glob(os.path.join('', '/tmp/feature_data/*.csv')))
+ train('/tmp/feature_data/*.csv', '/tmp/model.json')
+
diff --git a/docker/Dockerfile b/docker/Dockerfile
index 6e721dfa544..ac1b4eb4d6b 100644
--- a/docker/Dockerfile
+++ b/docker/Dockerfile
@@ -15,8 +15,8 @@
FROM centos:7
-ARG ZETASQL_VERSION=0.2.11
-ARG THIRDPARTY_VERSION=0.5.0
+ARG ZETASQL_VERSION=0.2.12
+ARG THIRDPARTY_VERSION=0.5.2
ARG TARGETARCH
LABEL org.opencontainers.image.source https://github.com/4paradigm/OpenMLDB
diff --git a/docs/.gitignore b/docs/.gitignore
new file mode 100644
index 00000000000..4951f80d7f9
--- /dev/null
+++ b/docs/.gitignore
@@ -0,0 +1 @@
+./build/
diff --git a/docs/Makefile b/docs/Makefile
new file mode 100644
index 00000000000..ae647f73ba2
--- /dev/null
+++ b/docs/Makefile
@@ -0,0 +1,33 @@
+.PHONY: all en zh en-local zh-local clean init all-local
+
+MAKEFILE_PATH := $(abspath $(lastword $(MAKEFILE_LIST)))
+MAKEFILE_DIR := $(dir $(MAKEFILE_PATH))
+
+POETRY_PRG ?= $(shell (command -v poetry || echo poetry))
+OUT_DIR ?= $(MAKEFILE_DIR)/build
+
+
+all: en zh
+
+en: init
+ $(POETRY_PRG) run sphinx-multiversion "$(MAKEFILE_DIR)/en" "$(OUT_DIR)/en"
+ echo ' ' > $(OUT_DIR)/en/index.html
+
+zh: init
+ $(POETRY_PRG) run sphinx-multiversion "$(MAKEFILE_DIR)/zh" "$(OUT_DIR)/zh"
+ echo ' ' > $(OUT_DIR)/zh/index.html
+
+# for local builds, you may only need to compile the current branch; use the three jobs below
+all-local: en-local zh-local
+
+en-local: init
+ $(POETRY_PRG) run sphinx-build "$(MAKEFILE_DIR)/en" "$(OUT_DIR)/en-local"
+
+zh-local: init
+ $(POETRY_PRG) run sphinx-build "$(MAKEFILE_DIR)/zh" "$(OUT_DIR)/zh-local"
+
+init:
+ $(POETRY_PRG) install
+
+clean:
+ rm -rvf "$(OUT_DIR)"
diff --git a/docs/README.md b/docs/README.md
index 516b512b040..2e715cdac66 100644
--- a/docs/README.md
+++ b/docs/README.md
@@ -35,7 +35,7 @@ If you fail to create conda envs by `environment.yml`, try following commands.
4. pip3 install sphinx-multiversion
5. pip3 install myst-parser
6. pip3 install sphinx-book-theme
- 7. pip3 install sphinx_copybutton
+ 7. pip3 install sphinx-copybutton
8. pip3 install myst-parser[linkify]
```
## Tips
diff --git a/docs/en/about/release_notes.md b/docs/en/about/release_notes.md
index 4e8d74c4610..e61e84e69d9 100644
--- a/docs/en/about/release_notes.md
+++ b/docs/en/about/release_notes.md
@@ -1,5 +1,114 @@
# Release Notes
+## v0.6.3 Release Notes
+
+### Features
+- Support setting the configuration of `glog` for clients (#2482 @vagetablechicken)
+- Add the checksum of SHA256 for release packages (#2560 @team-317)
+- Support the new built-in function `unhex` (#2431 @aucker)
+- Support the readable date and time format in CLI (#2568 @dl239)
+- Support the `LAST JOIN` with a subquery as a producer of window node in the request mode (#2569 @aceforeverd)
+- Upgrade the Spark version to 3.2.1 (#2566 @tobegit3hub, #2635 @dl239)
+- Support setting the SQL cache size in SDKs (#2605 @vagetablechicken)
+- Add a new interface of `ValidateSQL` to validate the syntax of SQL (#2626 @vagetablechicken)
+- Improve the documents (#2405 #2492 #2562 #2496 #2495 #2436 #2487 #2623 @michelle-qinqin, #2543 @linjing-lab, #2584 @JourneyGo, #2567 #2583 @vagetablechicken, #2643 @dl239)
+- Other minor features (#2504 #2572 #2498 #2598 @aceforeverd, #2555 #2641 @tobegit3hub, #2550 @zhanghaohit, #2595 @Elliezza, #2592 @vagetablechicken)
+
+### Bug Fixes
+- After a nameserver restarts, deployments may not recover. (#2533 @dl239)
+- If the type of the first column is `bool`, it fails to resolve the function `count_where`. (#2570 @aceforeverd)
+- Other minor bug fixes (#2540 #2577 #2625 #2655 @dl239, #2585 @snehalsenapati23, #2539 @vagetablechicken)
+
+### Code Refactoring
+#2516 #2520 #2522 #2521 #2542 #2531 #2581 @haseeb-xd, #2525 #2526 #2527 #2528 @kstrifonoff, #2523 @ighmaZ, #2546 #2549 @NevilleMthw, #2559 @marandabui, #2554 @gokullan, #2580 @team-317, #2599 @lbartyczak, #2594 @shivamgupta-sg, #2571 @Jake-00
+
+## v0.6.2 Release Notes
+
+### Features
+- Support independently executing the OpenMLDB offline engine without the OpenMLDB deployment (#2423 @tobegit3hub)
+- Support the log setting of ZooKeeper and disable ZooKeeper logs in the diagnostic tool (#2451 @vagetablechicken)
+- Support query parameters of the SQL query APIs (#2277 @qsliu2017)
+- Improve the documents (#2406 @aceforeverd, #2408 #2414 @vagetablechicken, #2410 #2402 #2356 #2374 #2396 #2376 #2419 @michelle-qinqin, #2424 #2418 @dl239, #2455 @lumianph, #2458 @tobegit3hub)
+- Other minor features (#2420 @aceforeverd, #2411 @wuyou10206, #2446 #2452 @vagetablechicken, #2475 @tobegit3hub)
+
+### Bug Fixes
+- Table creation succeeds even if `partitionnum` is set to 0, which should report an error. (#2220 @dl239)
+- There are thread races in aggregators if there are concurrent `puts`. (#2472 @zhanghaohit)
+- The `limit` clause does not work if it is used with the `where` and `group by` clauses. (#2447 @aceforeverd)
+- The `TaskManager` process will terminate if ZooKeeper disconnects. (#2494 @tobegit3hub)
+- The replica cluster does not create the database if a database is created in the leader cluster. (#2488 @dl239)
+- When there is data in base tables, a deployment with long windows can still be executed (which should report an error). (#2501 @zhanghaohit)
+- Other minor bug fixes (#2415 @aceforeverd, #2417 #2434 #2435 #2473 @dl239, #2466 @vagetablechicken)
+
+### Code Refactoring
+#2413 @dl239, #2470 #2467 #2468 @vagetablechicken
+
+## v0.6.1 Release Notes
+
+### Features
+- Support new built-in functions `last_day` and `regexp_like` (#2262 @HeZean, #2187 @jiang1997)
+- Support Jupyter Notebook for the TalkingData use case (#2354 @vagetablechicken)
+- Add a new API to disable Spark logs of the batch engine (#2359 @tobegit3hub)
+- Add the use case of precision marketing based on OneFlow (#2267 @Elliezza @vagetablechicken @siqi)
+- Support the RPC request timeout in CLI and Python SDK (#2371 @vagetablechicken)
+- Improve the documents (#2021 @liuceyim, #2348 #2316 #2324 #2361 #2315 #2323 #2355 #2328 #2360 #2378 #2319 #2350 #2395 #2398 @michelle-qinqin, #2373 @njzyfr, #2370 @tobegit3hub, #2367 #2382 #2375 #2401 @vagetablechicken, #2387 #2394 @dl239, #2379 @aceforeverd, #2403 @lumianph, #2400 gitpod-for-oss @aceforeverd)
+- Other minor features (#2363 @aceforeverd, #2185 @qsliu2017)
+
+### Bug Fixes
+- `APIServer` will core dump if no `rs` in `QueryResp`. (#2346 @vagetablechicken)
+- Data has not been deleted from `pre-aggr` tables if there are delete operations in a main table. (#2300 @zhanghaohit)
+- Task jobs will core dump when enabling `UnsafeRowOpt` with multiple threads in the Yarn cluster. (#2352 #2364 @tobegit3hub)
+- Other minor bug fixes (#2336 @dl239, #2337 @dl239, #2385 #2372 @aceforeverd, #2383 #2384 @vagetablechicken)
+
+### Code Refactoring
+#2310 @hv789, #2306 #2305 @yeya24, #2311 @Mattt47, #2368 @TBCCC, #2391 @PrajwalBorkar, #2392 @zahyaah, #2405 @wang-jiahua
+
+## v0.6.0 Release Notes
+
+### Highlights
+
+- Add a new toolkit of managing OpenMLDB, currently including a diagnostic tool and a log collector (#2299 #2326 @dl239 @vagetablechicken)
+- Support aggregate functions with suffix `_where` using pre-aggregation (#1821 #1841 #2321 #2255 #2321 @aceforeverd @nautaa @zhanghaohit)
+- Support a new SQL syntax of `EXCLUDE CURRENT_ROW` (#2053 #2165 #2278 @aceforeverd)
+- Add new OpenMLDB ecosystem plugins for DolphinScheduler (#1921 #1955 @vagetablechicken) and Airflow (#2215 @vagetablechicken)
+
+### Other Features
+
+- Support SQL syntax of `DELETE` in SQL and Kafka Connector (#2183 #2257 @dl239)
+- Support customized order in the `insert` statement (#2075 @vagetablechicken)
+- Add a new use case of TalkingData AdTracking Fraud Detection (#2008 @vagetablechicken)
+- Improve the startup script to remove `mon` (#2050 @dl239)
+- Improve the performance of offline batch SQL engine (#1882 #1943 #1973 #2142 #2273 #1773 @tobegit3hub)
+- Support returning version numbers from TaskManager (#2102 @tobegit3hub)
+- Improve the CICD workflow and release procedure (#1873 #2025 #2028 @mangoGoForward)
+- Support GitHub Codespaces (#1922 @nautaa)
+- Support new built-in functions `char(int)`, `char_length`, `character_length`, `radians`, `hex`, `median` (#1896 #1895 #1897 #2159 #2030 @wuxiaobai24 @HGZ-20 @Ivyee17)
+- Support returning result set for a new query API (#2189 @qsliu2017)
+- Improve the documents (#1796 #1817 #1818 #2254 #1948 #2227 #2254 #1824 #1829 #1832 #1840 #1842 #1844 #1845 #1848 #1849 #1851 #1858 #1875 #1923 #1925 #1939 #1942 #1945 #1957 #2031 #2054 #2140 #2195 #2304 #2264 #2260 #2257 #2254 #2247 #2240 #2227 #2115 #2126 #2116 #2154 #2152 #2178 #2147 #2146 #2184 #2138 #2145 #2160 #2197 #2198 #2133 #2224 #2223 #2222 #2209 #2248 #2244 #2242 #2241 #2226 #2225 #2221 #2219 #2201 #2291 # 2231 #2196 #2297 #2206 #2238 #2270 #2296 #2317 #2065 #2048 #2088 #2331 #1831 #1945 #2118 @ZtXavier @pearfl @PrajwalBorkar @tobegit3hub @ZtXavier @zhouxh19 @dl239 @vagetablechicken @tobegit3hub @aceforeverd @jmoldyvan @lumianph @bxiiiiii @michelle-qinqin @yclchuxue @redundan3y)
+
+### Bug Fixes
+
+- The SQL engine may produce incorrect results under certain circumstances. (#1950 #1997 #2024 @aceforeverd)
+- The `genDDL` function generates incorrect DDL if the SQL is partitioned by multiple columns. (#1956 @dl239)
+- The snapshot recovery may fail for disk tables. (#2174 @zhanghaohit)
+- `enable_trace` does not work for some SQL queries. (#2292 @aceforeverd)
+- Tablets cannot save `ttl` when updating the `ttl` of index. (#1935 @dl239)
+- MakeResultSet uses a wrong schema in projection. (#2049 @dl239)
+- A table does not exist when deploying SQL by the APIServer. (#2205 @vagetablechicken)
+- The cleanup for ZooKeeper does not work properly. (#2191 @mangoGoForward)
+
+Other minor bug fixes (#2052 #1959 #2253 #2273 #2288 #1964 #2175 #1938 #1963 #1956 #2171 #2036 #2170 #2236 #1867 #1869 #1900 #2162 #2161 #2173 #2190 #2084 #2085 #2034 #1972 #1408 #1863 #1862 #1919 #2093 #2167 #2073 #1803 #1998 #2000 #2012 #2055 #2174 #2036 @Xeonacid @CuriousCorrelation @Shigm1026 @jiang1997 @Harshvardhantomar @nautaa @Ivyee17 @frazie @PrajwalBorkar @dl239 @aceforeverd @tobegit3hub @dl239 @vagetablechicken @zhanghaohit @mangoGoForward @SaumyaBhushan @BrokenArrow1404 @harshlancer)
+
+### Code Refactoring
+
+#1884 #1917 #1953 #1965 #2017 #2033 #2044 @mangoGoForward; #2131 #2130 #2112 #2113 #2104 #2107 #2094 #2068 #2071 #2070 #1982 #1878 @PrajwalBorkar; #2158 #2051 #2037 #2015 #1886 #1857 @frazie; #2100 #2096 @KikiDotPy; #2089 @ayushclashroyale; #1994 @fpetrakov; #2079 @kayverly; #2062 @WUBBBB; #1843 @1korenn; #2092 @HeZean; #1984 @0sirusD3m0n; #1976 @Jaguar16; #2086 @marc-marcos; #1999 @Albert-Debbarma;
+
+## v0.5.3 Release Notes
+
+### Bug Fixes
+- The SQL file cannot be successfully loaded in the Yarn-Client mode. (#2151 @tobegit3hub)
+- The SQL file cannot be successfully loaded in the Yarn-Cluster mode. (#1993 @tobegit3hub)
+
## v0.5.2 Release Notes
### Features
diff --git a/docs/en/conf.py b/docs/en/conf.py
index 51653bfcc0a..ea10bd8f84e 100644
--- a/docs/en/conf.py
+++ b/docs/en/conf.py
@@ -35,8 +35,13 @@
'myst_parser',
'sphinx_multiversion',
'sphinx_copybutton',
+'sphinx.ext.autosectionlabel',
]
+autosectionlabel_prefix_document = True
+
+myst_heading_anchors = 6
+
myst_enable_extensions = [
"amsmath",
"colon_fence",
@@ -119,3 +124,19 @@
html_static_path = []
html_logo = "about/images/openmldb_logo.png"
+
+
+# ================================== #
+# sphinx multiversion configuration #
+# ================================== #
+
+# Whitelist pattern for tags (set to None to ignore all tags)
+# no tags included
+smv_tag_whitelist = None
+
+# Whitelist pattern for branches (set to None to ignore all branches)
+# include branch that is main or v{X}.{Y}
+smv_branch_whitelist = r"^(main|v\d+\.\d+)$"
+
+# allow remote origin or upstream
+smv_remote_whitelist = r"^(origin|upstream)$"
diff --git a/docs/en/deploy/compile.md b/docs/en/deploy/compile.md
index 772c6047dd3..e44372ae602 100644
--- a/docs/en/deploy/compile.md
+++ b/docs/en/deploy/compile.md
@@ -7,25 +7,25 @@
This section describes the steps to compile and use OpenMLDB inside its official docker image [hybridsql](https://hub.docker.com/r/4pdosc/hybridsql).
The docker image has packed required tools and dependencies, so there is no need to set them up separately. To compile without the official docker image, refer to the section [Detailed Instructions for Build](#detailed-instructions-for-build) below.
-Keep in mind that you should always use the same version of both compile image and [OpenMLDB version](https://github.com/4paradigm/OpenMLDB/releases). This section demonstrates compiling for [OpenMLDB v0.5.0](https://github.com/4paradigm/OpenMLDB/releases/tag/v0.5.0) under `hybridsql:0.5.0` ,If you prefer to compile on the latest code in `main` branch, pull `hybridsql:latest` image instead.
+Keep in mind that you should always use the same version of both compile image and [OpenMLDB version](https://github.com/4paradigm/OpenMLDB/releases). This section demonstrates compiling for [OpenMLDB v0.6.3](https://github.com/4paradigm/OpenMLDB/releases/tag/v0.6.3) under `hybridsql:0.6.3`. If you prefer to compile the latest code on the `main` branch, pull the `hybridsql:latest` image instead.
1. Pull the docker image
```bash
- docker pull 4pdosc/hybridsql:0.5
+ docker pull 4pdosc/hybridsql:0.6
```
2. Create a docker container with the hybridsql docker image
```bash
- docker run -it 4pdosc/hybridsql:0.5 bash
+ docker run -it 4pdosc/hybridsql:0.6 bash
```
-3. Download the OpenMLDB source code inside the docker container, and setting the branch into v0.5.0
+3. Download the OpenMLDB source code inside the docker container, and set the branch to v0.6.3
```bash
cd ~
- git clone -b v0.5.0 https://github.com/4paradigm/OpenMLDB.git
+ git clone -b v0.6.3 https://github.com/4paradigm/OpenMLDB.git
```
4. Compile OpenMLDB
@@ -142,7 +142,7 @@ make CMAKE_BUILD_TYPE=Debug
1. Downloading the pre-built OpenMLDB Spark distribution:
```bash
-wget https://github.com/4paradigm/spark/releases/download/v3.0.0-openmldb0.2.3/spark-3.0.0-bin-openmldbspark.tgz
+wget https://github.com/4paradigm/spark/releases/download/v3.2.1-openmldb0.6.3/spark-3.2.1-bin-openmldbspark.tgz
```
Alternatively, you can also download the source code and compile from scratch:
@@ -156,8 +156,8 @@ cd ./spark/
2. Setting up the environment variable `SPARK_HOME` to make the OpenMLDB Spark distribution for OpenMLDB or other Spark applications
```bash
-tar xzvf ./spark-3.0.0-bin-openmldbspark.tgz
-cd spark-3.0.0-bin-openmldbspark/
+tar xzvf ./spark-3.2.1-bin-openmldbspark.tgz
+cd spark-3.2.1-bin-openmldbspark/
export SPARK_HOME=`pwd`
```
diff --git a/docs/en/deploy/conf.md b/docs/en/deploy/conf.md
index 2ab87ba91ab..bcec8ddc94e 100644
--- a/docs/en/deploy/conf.md
+++ b/docs/en/deploy/conf.md
@@ -20,15 +20,15 @@
#--thread_pool_size=16
# Configure the number of retry attempts, the default is 3
#--request_max_retry=3
-# Configure the request timeout, the default is 12 seconds
+# Configure the request timeout in milliseconds, the default is 12 seconds
#--request_timeout_ms=12000
-# Configure the retry interval when the request is unreachable, generally do not need to be modified
+# Configure the retry interval (in milliseconds) when the request is unreachable; generally this does not need to be modified
#--request_sleep_time=1000
# Configure the zookeeper session timeout in milliseconds
--zk_session_timeout=10000
# Configure the zookeeper health check interval, the unit is milliseconds, generally do not need to be modified
#--zk_keep_alive_check_interval=15000
-# Configure the timeout period for tablet heartbeat detection, the default is 1 minute. If the tablet is still unreachable after this time, the nameserver considers that the tablet is unavailable and will perform the operation of offline the node
+# Configure the timeout period for tablet heartbeat detection in milliseconds, the default is 1 minute. If the tablet is still unreachable after this time, the nameserver considers the tablet unavailable and will take the node offline
--tablet_heartbeat_timeout=60000
# Configure the tablet health check interval, in milliseconds
#--tablet_offline_check_interval=1000
@@ -39,13 +39,13 @@
#--name_server_task_concurrency=2
# The maximum number of concurrent execution of high-availability tasks
#--name_server_task_max_concurrency=8
-# Check the waiting time of the task when executing the task
+# The wait time (in milliseconds) when checking the task status during task execution
#--name_server_task_wait_time=1000
-# The maximum time to execute the task, if it exceeds, it will log
+# The maximum time (in milliseconds) to execute a task; if exceeded, a log entry will be written
#--name_server_op_execute_timeout=7200000
-# The time interval of receiving the status of the next task
+# The time interval (in milliseconds) for fetching the status of the next task
#--get_task_status_interval=2000
-# The time interval of receiving the status of the next table
+# The time interval (in milliseconds) for fetching the status of the next table
#--get_table_status_interval=2000
# Check the minimum difference of binlog synchronization progress, if the master-slave offset is less than this value, the task has been successfully synchronized
#--check_binlog_sync_progress_delta=100000
@@ -88,9 +88,9 @@
--openmldb_log_dir=./logs
# binlog conf
-# Binlog wait time when no new data is added
+# Binlog wait time when no new data is added, in milliseconds
#--binlog_coffee_time=1000
-# Master-slave matching offset waiting time
+# Master-slave matching offset waiting time, in milliseconds
#--binlog_match_logoffset_interval=1000
# Whether to notify the follower to synchronize immediately when data is written
--binlog_notify_on_put=true
@@ -121,9 +121,9 @@
# snapshot conf
# Configure the time to do snapshots, the time of day. For example, 23 means taking a snapshot at 23 o'clock every day.
--make_snapshot_time=23
-# Check interval for snapshots
+# Check interval for snapshots, in milliseconds
#--make_snapshot_check_interval=600000
-# Set the offset threshold of the snapshot, if the offset difference from the last snapshot is less than this value, no new snapshot will be generated
+# Set the offset threshold of the snapshot: if the offset difference from the last snapshot is less than this value, no new snapshot will be generated
#--make_snapshot_threshold_offset=100000
# snapshot thread pool size
#--snapshot_pool_size=1
diff --git a/docs/en/deploy/install_deploy.md b/docs/en/deploy/install_deploy.md
index a4033a45c74..4578cfe8cf2 100644
--- a/docs/en/deploy/install_deploy.md
+++ b/docs/en/deploy/install_deploy.md
@@ -9,7 +9,7 @@
* The number of cores is recommended to be no less than 4 cores. If the CPU does not support the AVX2 instruction set in the Linux environment, the deployment package needs to be recompiled from the source code.
## Deployment Package
-The precompiled OpenMLDB deployment package is used by default in this documentation ([Linux](https://github.com/4paradigm/OpenMLDB/releases/download/v0.5.2/openmldb-0.5.2-linux.tar.gz) , [macOS](https://github.com/4paradigm/OpenMLDB/releases/download/v0.5.2/openmldb-0.5.2-darwin.tar.gz)), the supported operating system requirements are: CentOS 7, Ubuntu 20.04, macOS >= 10.15. If the user wishes to compile by himself (for example, for OpenMLDB source code development, the operating system or CPU architecture is not in the support list of the precompiled deployment package, etc.), the user can choose to compile and use in the docker container or compile from the source code. For details, please refer to our [compile documentation](compile.md).
+The precompiled OpenMLDB deployment package is used by default in this documentation ([Linux](https://github.com/4paradigm/OpenMLDB/releases/download/v0.6.3/openmldb-0.6.3-linux.tar.gz), [macOS](https://github.com/4paradigm/OpenMLDB/releases/download/v0.6.3/openmldb-0.6.3-darwin.tar.gz)). The supported operating systems are: CentOS 7, Ubuntu 20.04, and macOS >= 10.15. If users wish to compile OpenMLDB themselves (for example, for OpenMLDB source code development, or when the operating system or CPU architecture is not in the supported list of the precompiled deployment package), they can choose to compile and use it in the docker container or compile from the source code. For details, please refer to our [compile documentation](compile.md).
## Configure Environment (Linux)
@@ -78,10 +78,10 @@ OpenMLDB standalone version needs to deploy a nameserver and a tablet. The names
#### 1. Download the OpenMLDB Deployment Package
```
-wget https://github.com/4paradigm/OpenMLDB/releases/download/v0.5.2/openmldb-0.5.2-linux.tar.gz
-tar -zxvf openmldb-0.5.2-linux.tar.gz
-mv openmldb-0.5.2-linux openmldb-tablet-0.5.2
-cd openmldb-tablet-0.5.2
+wget https://github.com/4paradigm/OpenMLDB/releases/download/v0.6.3/openmldb-0.6.3-linux.tar.gz
+tar -zxvf openmldb-0.6.3-linux.tar.gz
+mv openmldb-0.6.3-linux openmldb-tablet-0.6.3
+cd openmldb-tablet-0.6.3
```
#### 2. Modify the Configuration File: conf/standalone_tablet.flags
@@ -100,7 +100,7 @@ cd openmldb-tablet-0.5.2
#### 3. Start the Service
```
-sh bin/start.sh start standalone_tablet
+bash bin/start.sh start standalone_tablet
```
**Notice**: After the service is started, the standalone_tablet.pid file will be generated in the bin directory, and the process number at startup will be saved in it. If the pid inside the file is running, the startup will fail.
@@ -110,10 +110,10 @@ sh bin/start.sh start standalone_tablet
#### 1. Download the OpenMLDB Deployment Package
```
-wget https://github.com/4paradigm/OpenMLDB/releases/download/v0.5.2/openmldb-0.5.2-linux.tar.gz
-tar -zxvf openmldb-0.5.2-linux.tar.gz
-mv openmldb-0.5.2-linux openmldb-ns-0.5.2
-cd openmldb-ns-0.5.2
+wget https://github.com/4paradigm/OpenMLDB/releases/download/v0.6.3/openmldb-0.6.3-linux.tar.gz
+tar -zxvf openmldb-0.6.3-linux.tar.gz
+mv openmldb-0.6.3-linux openmldb-ns-0.6.3
+cd openmldb-ns-0.6.3
```
#### 2. Modify the Configuration File: conf/standalone_nameserver.flags
@@ -131,7 +131,7 @@ cd openmldb-ns-0.5.2
#### 3. Start the Service
```
-sh bin/start.sh start standalone_nameserver
+bash bin/start.sh start standalone_nameserver
```
#### 4. Verify the Running Status of the Service
@@ -153,10 +153,10 @@ Before starting the APIServer, make sure that the OpenMLDB cluster has been star
#### 1. Download the OpenMLDB Deployment Package
```
-wget https://github.com/4paradigm/OpenMLDB/releases/download/v0.5.2/openmldb-0.5.2-linux.tar.gz
-tar -zxvf openmldb-0.5.2-linux.tar.gz
-mv openmldb-0.5.2-linux openmldb-apiserver-0.5.2
-cd openmldb-apiserver-0.5.2
+wget https://github.com/4paradigm/OpenMLDB/releases/download/v0.6.3/openmldb-0.6.3-linux.tar.gz
+tar -zxvf openmldb-0.6.3-linux.tar.gz
+mv openmldb-0.6.3-linux openmldb-apiserver-0.6.3
+cd openmldb-apiserver-0.6.3
```
#### 2. Modify the Configuration File: conf/standalone_apiserver.flags
@@ -176,7 +176,7 @@ cd openmldb-apiserver-0.5.2
#### 3. Start the Service
```
-sh bin/start.sh start standalone_apiserver
+bash bin/start.sh start standalone_apiserver
```
## Deploy Cluster Version
@@ -193,6 +193,7 @@ It is recommended to deploy version 3.4.14. If there is an available zookeeper c
```
wget https://archive.apache.org/dist/zookeeper/zookeeper-3.4.14/zookeeper-3.4.14.tar.gz
+tar -zxvf zookeeper-3.4.14.tar.gz
cd zookeeper-3.4.14
cp conf/zoo_sample.cfg conf/zoo.cfg
```
@@ -209,7 +210,7 @@ clientPort=7181
#### 3. Start Zookeeper
```
-sh bin/zkServer.sh start
+bash bin/zkServer.sh start
```
Deploy the Zookeeper cluster [refer to here](https://zookeeper.apache.org/doc/r3.4.14/zookeeperStarted.html#sc_RunningReplicatedZooKeeper).
@@ -219,10 +220,10 @@ Deploy the Zookeeper cluster [refer to here](https://zookeeper.apache.org/doc/r3
#### 1. Download the OpenMLDB Deployment Package
```
-wget https://github.com/4paradigm/OpenMLDB/releases/download/v0.5.2/openmldb-0.5.2-linux.tar.gz
-tar -zxvf openmldb-0.5.2-linux.tar.gz
-mv openmldb-0.5.2-linux openmldb-tablet-0.5.2
-cd openmldb-tablet-0.5.2
+wget https://github.com/4paradigm/OpenMLDB/releases/download/v0.6.3/openmldb-0.6.3-linux.tar.gz
+tar -zxvf openmldb-0.6.3-linux.tar.gz
+mv openmldb-0.6.3-linux openmldb-tablet-0.6.3
+cd openmldb-tablet-0.6.3
```
#### 2. Modify the Configuration File: conf/tablet.flags
@@ -249,7 +250,7 @@ cd openmldb-tablet-0.5.2
#### 3. Start the Service
```
-sh bin/start.sh start tablet
+bash bin/start.sh start tablet
```
Repeat the above steps to deploy multiple tablets.
@@ -265,10 +266,10 @@ Repeat the above steps to deploy multiple tablets.
#### 1. Download the OpenMLDB Deployment Package
```
-wget https://github.com/4paradigm/OpenMLDB/releases/download/v0.5.2/openmldb-0.5.2-linux.tar.gz
-tar -zxvf openmldb-0.5.2-linux.tar.gz
-mv openmldb-0.5.2-linux openmldb-ns-0.5.2
-cd openmldb-ns-0.5.2
+wget https://github.com/4paradigm/OpenMLDB/releases/download/v0.6.3/openmldb-0.6.3-linux.tar.gz
+tar -zxvf openmldb-0.6.3-linux.tar.gz
+mv openmldb-0.6.3-linux openmldb-ns-0.6.3
+cd openmldb-ns-0.6.3
```
#### 2. Modify the Configuration File: conf/nameserver.flags
@@ -281,7 +282,6 @@ cd openmldb-ns-0.5.2
--endpoint=172.27.128.31:6527
--zk_cluster=172.27.128.33:7181,172.27.128.32:7181,172.27.128.31:7181
--zk_root_path=/openmldb_cluster
---enable_distsql=true
```
**Notice:** The endpoint cannot use 0.0.0.0 and 127.0.0.1.
@@ -289,7 +289,7 @@ cd openmldb-ns-0.5.2
#### 3. Start the Service
```
-sh bin/start.sh start nameserver
+bash bin/start.sh start nameserver
```
Repeat the above steps to deploy multiple nameservers.
@@ -312,10 +312,10 @@ Before running, make sure that the OpenMLDB cluster has been started, otherwise
#### 1. Download the OpenMLDB Deployment Package
```
-wget https://github.com/4paradigm/OpenMLDB/releases/download/v0.5.2/openmldb-0.5.2-linux.tar.gz
-tar -zxvf openmldb-0.5.2-linux.tar.gz
-mv openmldb-0.5.2-linux openmldb-apiserver-0.5.2
-cd openmldb-apiserver-0.5.2
+wget https://github.com/4paradigm/OpenMLDB/releases/download/v0.6.3/openmldb-0.6.3-linux.tar.gz
+tar -zxvf openmldb-0.6.3-linux.tar.gz
+mv openmldb-0.6.3-linux openmldb-apiserver-0.6.3
+cd openmldb-apiserver-0.6.3
```
#### 2. Modify the Configuration File: conf/apiserver.flags
@@ -339,7 +339,7 @@ cd openmldb-apiserver-0.5.2
#### 3. Start the Service
```
-sh bin/start.sh start apiserver
+bash bin/start.sh start apiserver
```
**Notice:** If the program crashes when starting the nameserver/tablet/apiserver using the OpenMLDB release package, it is very likely that the instruction set is incompatible, and you need to compile OpenMLDB through the source code. For source code compilation, please refer to [here](./compile.md), you need to use method 3 to compile the complete source code.
@@ -349,12 +349,12 @@ sh bin/start.sh start apiserver
#### 1. Download the OpenMLDB Spark Distribution that is Optimized for Feature Engineering
```
-wget https://github.com/4paradigm/spark/releases/download/v3.0.0-openmldb0.5.2/spark-3.0.0-bin-openmldbspark.tgz
-tar -zxvf spark-3.0.0-bin-openmldbspark.tgz
-wget https://github.com/4paradigm/OpenMLDB/releases/download/v0.5.2/openmldb-0.5.2-linux.tar.gz
-tar -zxvf openmldb-0.5.2-linux.tar.gz
-mv openmldb-0.5.2-linux openmldb-taskmanager-0.5.2
-cd openmldb-taskmanager-0.5.2
+wget https://github.com/4paradigm/spark/releases/download/v3.2.1-openmldb0.6.3/spark-3.2.1-bin-openmldbspark.tgz
+tar -zxvf spark-3.2.1-bin-openmldbspark.tgz
+wget https://github.com/4paradigm/OpenMLDB/releases/download/v0.6.3/openmldb-0.6.3-linux.tar.gz
+tar -zxvf openmldb-0.6.3-linux.tar.gz
+mv openmldb-0.6.3-linux openmldb-taskmanager-0.6.3
+cd openmldb-taskmanager-0.6.3
```
#### 2. Modify the Configuration File conf/taskmanager.properties
@@ -382,7 +382,7 @@ spark.home=
#### 3. Start the Service
```bash
-bin/start.sh start taskmanager
+bash bin/start.sh start taskmanager
```
#### 4. Verify the Running Status of the Service
diff --git a/docs/en/maintain/diagnose.md b/docs/en/maintain/diagnose.md
new file mode 100644
index 00000000000..15e73543868
--- /dev/null
+++ b/docs/en/maintain/diagnose.md
@@ -0,0 +1,84 @@
+# Diagnostic Tool
+
+## Overview
+
+OpenMLDB provides a diagnostic tool to diagnose problems conveniently for users. It can check items as below:
+
+- Version
+- Configuration
+- Log
+- Run test SQL
+
+## Usage
+
+1. Download diagnostic tool
+```bash
+pip install openmldb-tool
+```
+
+2. Config cluster distribution
+
+standalone yaml conf
+```yaml
+mode: standalone
+nameserver:
+ -
+ endpoint: 127.0.0.1:6527
+ path: /work/openmldb
+tablet:
+ -
+ endpoint: 127.0.0.1:9527
+ path: /work/openmldb
+```
+
+cluster yaml conf
+```yaml
+mode: cluster
+zookeeper:
+ zk_cluster: 127.0.0.1:2181
+ zk_root_path: /openmldb
+nameserver:
+ -
+ endpoint: 127.0.0.1:6527
+ path: /work/ns1
+tablet:
+ -
+ endpoint: 127.0.0.1:9527
+ path: /work/tablet1
+ -
+ endpoint: 127.0.0.1:9528
+ path: /work/tablet2
+taskmanager:
+ -
+ endpoint: 127.0.0.1:9902
+ path: /work/taskmanager1
+```
+
+3. Setup SSH Passwordless Login
+
+As the diagnostic tool will pull conf and log files from remote nodes when checking a cluster, SSH passwordless login should be set up. If you do not know how to set it up, you can refer to [here](https://www.itzgeek.com/how-tos/linux/centos-how-tos/ssh-passwordless-login-centos-7-rhel-7.html).
+
+4. Run diagnostic tool
+
+```bash
+openmldb_tool --dist_conf=/tmp/standalone_dist.yml
+```
+
+There are some advanced options that can be specified as below:
+
+- --dist_conf: The configuration file that describes the cluster distribution
+- --data_dir: The data dir to store the conf and log files pulled from remote. The default value is `/tmp/diagnose_tool_data`
+- --check: The item to check. The default value is `ALL`. It can be specified as `CONF/LOG/SQL/VERSION`
+- --exclude: The item not to check. Only works when the `check` option is `ALL`. It can be specified as `CONF/LOG/SQL/VERSION`
+- --log_level: The default value is `info`. It can be specified as `debug/warn/info`
+- --log_dir: Specify the output dir. It will print to stdout if not set
+- --env: If the cluster is started with the `start-all.sh` script, `onebox` should be set.
+
+For instance, we can check `conf` only and print the output to a local dir as below:
+```bash
+openmldb_tool --dist_conf=/tmp/cluster_dist.yml --check=conf --log_dir=./
+```
+
+**Note**: If you want to diagnose a standalone-mode OpenMLDB, you need to run the diagnostic tool on the OpenMLDB node.
+
+You can use `openmldb_tool --helpfull` to check all options, e.g. `--sdk_log` can print the SDK logs (zk, glog) for debugging.
\ No newline at end of file
diff --git a/docs/en/maintain/faq.md b/docs/en/maintain/faq.md
index 87e5bb08b80..ce4c704aed5 100644
--- a/docs/en/maintain/faq.md
+++ b/docs/en/maintain/faq.md
@@ -55,10 +55,10 @@ This happens easily when using synchronized offline commands. you can use
```
To adjust the timeout time of rpc, use 'ms' units.
#### normal request
-If it is a simple query or insert, there will be a timeout, and the general `request_timeout` configuration needs to be changed.
-1. CLI: cannot be changed at this time
+If a simple query or insert still times out, the general `request_timeout` configuration needs to be changed.
+1. CLI: set `--request_timeout` before running (see the example after this list)
2. JAVA: SDK direct connection, adjust `SdkOption.requestTimeout`; JDBC, adjust the parameter `requestTimeout` in url
-3. Python: cannot be changed at this time
+3. Python: SDK direct connection (DBAPI), adjust the `connect()` arg `request_timeout`; SQLAlchemy, adjust the parameter `requestTimeout` in the url
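+
+For the CLI case (item 1 above), a minimal sketch of such an invocation is shown below; the binary path, ZooKeeper address, and timeout value are placeholder assumptions and should be adjusted to your deployment:
+
+```bash
+# assumed paths and addresses; --request_timeout is in ms
+./openmldb/bin/openmldb --zk_cluster=127.0.0.1:2181 --zk_root_path=/openmldb \
+    --role=sql_client --request_timeout=60000
+```
+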
### 2. Why am I getting the warning log of Got EOF of Socket?
```
@@ -67,3 +67,41 @@ rpc_client.h:xxx] request error. [E1014]Got EOF of Socket{id=x fd=x addr=xxx} (x
This is because the `addr` side actively disconnected, and the address of `addr` is most likely taskmanager. This does not mean that the taskmanager is abnormal, but that the taskmanager side thinks that the connection is inactive and has exceeded the keepAliveTime, and actively disconnects the communication channel.
In version 0.5.0 and later, the taskmanager's `server.channel_keep_alive_time` can be increased to increase the tolerance of inactive channels. The default value is 1800s (0.5h), especially when using synchronous offline commands, this value may need to be adjusted appropriately.
In versions before 0.5.0, this configuration cannot be changed, please upgrade the taskmanager version.
+
+### 3. Why do we get unrecognizable results from offline queries?
+
+When we run offline queries, results that contain Chinese may be printed as unrecognizable characters. This is related to the default system encoding and the encoding configuration of Spark jobs.
+
+If we get such unrecognizable characters, we can set the configurations `spark.driver.extraJavaOptions=-Dfile.encoding=utf-8` and `spark.executor.extraJavaOptions=-Dfile.encoding=utf-8` for Spark jobs.
+
+The way to configure the client is described in [Spark Client Config](../reference/client_config/client_spark_config.md), and we can add this configuration to the TaskManager properties file as well.
+
+```
+spark.default.conf=spark.driver.extraJavaOptions=-Dfile.encoding=utf-8;spark.executor.extraJavaOptions=-Dfile.encoding=utf-8
+```
+
+### 4. How to config TaskManager to access Kerberos-enabled Yarn cluster?
+
+If the Yarn cluster enables Kerberos authentication, we can add the following configuration to access the Kerberos-enabled Yarn cluster. Notice that we need to update the actual keytab file path and principal account.
+
+```
+spark.default.conf=spark.yarn.keytab=/tmp/test.keytab;spark.yarn.principal=test@EXAMPLE.COM
+```
+
+### 5. How to configure the cxx log in clients
+
+The cxx log consists of the zk log and the sdk log (glog).
+
+zk log:
+1. CLI: set before running, `--zk_log_level` (int) to set the zk log level, `--zk_log_file` to set the log file (a file, not a dir)
+2. JAVA/Python SDK: in option or url, set `zkLogLevel` and `zkLogFile`
+
+- `zk_log_level`(int, default=3, which is INFO):
+Log messages at or **below** this level. 0-disable all zk log, 1-error, 2-warn, 3-info, 4-debug.
+
+sdk log(glog):
+1. CLI: set before running, `--glog_level` (int) to set the glog level, `--glog_dir` to set the glog dir (a path, not a file); see the example at the end of this section
+2. JAVA/Python SDK: in option or url, set `glogLevel` and `glogDir`
+
+- `glog_level`(int, default=0, which is INFO):
+Log messages at or **above** this level. The numbers of severity levels INFO, WARNING, ERROR, and FATAL are 0, 1, 2, and 3, respectively.
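+
+As an illustration, a CLI invocation that sets both the zk log and the glog options might look like the sketch below; the binary path, ZooKeeper address, and log destinations are placeholder assumptions:
+
+```bash
+# assumed paths and addresses; adjust to your deployment
+./openmldb/bin/openmldb --zk_cluster=127.0.0.1:2181 --zk_root_path=/openmldb --role=sql_client \
+    --zk_log_level=2 --zk_log_file=/tmp/zk.log \
+    --glog_level=1 --glog_dir=/tmp/glog_dir/
+```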
diff --git a/docs/en/maintain/index.rst b/docs/en/maintain/index.rst
index 010835bf95d..4e9920ccc2a 100644
--- a/docs/en/maintain/index.rst
+++ b/docs/en/maintain/index.rst
@@ -10,4 +10,5 @@ Maintenance
monitoring
cli
faq
- scale
\ No newline at end of file
+ scale
+ diagnose
\ No newline at end of file
diff --git a/docs/en/maintain/scale.md b/docs/en/maintain/scale.md
index 7c8eca42b05..8323b1d7337 100644
--- a/docs/en/maintain/scale.md
+++ b/docs/en/maintain/scale.md
@@ -12,7 +12,7 @@ You need to first start a new tablet node as following steps, please refer to th
- Modify the configuration file: conf/tablet.flags
- Start a new tablet
```bash
- sh bin/start.sh start tablet
+ bash bin/start.sh start tablet
```
After startup, you need to check whether the new node has joined the cluster. If the `showtablet` command is executed and the new node endpoint is listed, it means that it has joined the cluster
@@ -74,10 +74,10 @@ Scaling in your cluster is to reduce the number of nodes in the cluster.
### Step 3. Making the targeted node offline
- Execute `stop` command
```bash
-sh bin/start.sh stop tablet
+bash bin/start.sh stop tablet
```
- If nameserver is deployed on the node, you need to disable the nameserver.
```bash
-sh bin/start.sh stop nameserver
+bash bin/start.sh stop nameserver
```
Note that, at least two Nameserver nodes are required to maintain high availability
diff --git a/docs/en/maintain/upgrade.md b/docs/en/maintain/upgrade.md
index 8ae0e130d16..0ef9d6e2d4e 100644
--- a/docs/en/maintain/upgrade.md
+++ b/docs/en/maintain/upgrade.md
@@ -8,14 +8,14 @@ Here is the impact when upgrading OpenMLDB:
* Stop nameserver
```bash
- sh bin/start.sh stop nameserver
+ bash bin/start.sh stop nameserver
```
* Backup the old versions directories `bin` and `conf`
* Download new version bin and conf
* Compare the configuration file diff and modify the necessary configuration, such as endpoint, zk_cluster, etc
* Start nameserver
```bash
- sh bin/start.sh start nameserver
+ bash bin/start.sh start nameserver
```
* Repeat the above steps for the remaining nameservers
@@ -25,14 +25,14 @@ Here is the impact when upgrading OpenMLDB:
* Stop tablet
```bash
- sh bin/start.sh stop tablet
+ bash bin/start.sh stop tablet
```
* Backup the old versions directories `bin` and `conf`
* Download new version bin and conf
* Compare the configuration file diff and modify the necessary configuration, such as endpoint, zk_cluster, etc
* Start nameserver
```bash
- sh bin/start.sh start tablet
+ bash bin/start.sh start tablet
```
* If auto_failover is closed, you must connect to the ns client and perform the following operations to restore data. **The endpoint after the command is the endpoint of the restarted node**
* offlineendpoint endpoint
diff --git a/docs/en/quickstart/java_sdk.md b/docs/en/quickstart/java_sdk.md
index 047f37bd5e6..d1dedea1090 100644
--- a/docs/en/quickstart/java_sdk.md
+++ b/docs/en/quickstart/java_sdk.md
@@ -9,12 +9,12 @@ Configure maven pom
com.4paradigm.openmldb
openmldb-jdbc
- 0.5.2
+ 0.6.3
com.4paradigm.openmldb
openmldb-native
- 0.5.2
+ 0.6.3
```
### Package Installation on Mac
@@ -24,15 +24,15 @@ Configure maven pom
com.4paradigm.openmldb
openmldb-jdbc
- 0.5.2
+ 0.6.3
com.4paradigm.openmldb
openmldb-native
- 0.5.2-macos
+ 0.6.3-macos
```
-Note that since `openmldb-native` contains the C++ static library compiled by OpenMLDB, by default it is a Linux's static library. On macOS, the version of the above openmldb-native needs to be changed to `0.5.2-macos`, and the version of openmldb-jdbc remains unchanged .
+Note that since `openmldb-native` contains the C++ static library compiled by OpenMLDB, by default it is a Linux static library. On macOS, the version of the above openmldb-native needs to be changed to `0.6.3-macos`, and the version of openmldb-jdbc remains unchanged.
## 2. Quickstart
@@ -135,6 +135,41 @@ try {
}
```
+#### 2.4.2 Use Placeholder to Execute Batch Insert
+
+1. Using the `SqlClusterExecutor::getInsertPreparedStmt(db, insertSqlWithPlaceHolder)` interface to get the `InsertPrepareStatement`.
+2. Calling the `PreparedStatement::setType(index, value)` interface to fill data into `InsertPrepareStatement`.
+3. Using the `PreparedStatement::addBatch()` interface to build current row.
+4. Using the `PreparedStatement::setType(index, value)` and `PreparedStatement::addBatch()` to add new rows.
+5. Using the `PreparedStatement::executeBatch()` to execute batch insert.
+
+```java
+String insertSqlWithPlaceHolder = "insert into trans values(\"aa\", ?, 33, ?, 2.4, 1590738993000, \"2020-05-04\");";
+PreparedStatement pstmt = null;
+try {
+ pstmt = sqlExecutor.getInsertPreparedStmt(db, insertSqlWithPlaceHolder);
+ pstmt.setInt(1, 24);
+    pstmt.setFloat(2, 1.5f);
+ pstmt.addBatch();
+ pstmt.setInt(1, 25);
+    pstmt.setFloat(2, 1.6f);
+ pstmt.addBatch();
+ pstmt.executeBatch();
+} catch (SQLException e) {
+ e.printStackTrace();
+ Assert.fail();
+} finally {
+ if (pstmt != null) {
+ try {
+ // PrepareStatement must be closed after it is used up
+ pstmt.close();
+ } catch (SQLException throwables) {
+ throwables.printStackTrace();
+ }
+ }
+}
+```
+
### 2.5 Execute SQL Batch Query
1. Using the `SqlClusterExecutor::executeSQL(selectSql)` interface to execute SQL batch query statements:
@@ -247,6 +282,35 @@ You should use the `SqlClusterExecutor::dropDB(db)` interface to drop a specifie
sqlExecutor.dropDB(db);
```
+### 2.9 Delete all data under one key in a specific index
+
+There are two methods to delete data, as shown below:
+
+- use a DELETE SQL statement
+- use a delete PreparedStatement
+
+```java
+java.sql.Statement state = router.getStatement();
+try {
+ String sql = "DELETE FROM t1 WHERE col2 = 'key1';";
+ state.execute(sql);
+ sql = "DELETE FROM t1 WHERE col2 = ?;";
+ java.sql.PreparedStatement p1 = router.getDeletePreparedStmt("test", sql);
+ p1.setString(1, "key2");
+ p1.executeUpdate();
+ p1.close();
+} catch (Exception e) {
+ e.printStackTrace();
+ Assert.fail();
+} finally {
+ try {
+ state.close();
+ } catch (Exception e) {
+ e.printStackTrace();
+ }
+}
+```
+
## 3. A Complete Example
```java
@@ -491,7 +555,6 @@ public class Demo {
}
}
-
private void setData(PreparedStatement pstmt, ResultSetMetaData metaData) throws SQLException {
for (int i = 0; i < metaData.getColumnCount(); i++) {
int columnType = metaData.getColumnType(i + 1);
diff --git a/docs/en/quickstart/openmldb_quickstart.md b/docs/en/quickstart/openmldb_quickstart.md
index c9909a598bc..bbaad4db459 100644
--- a/docs/en/quickstart/openmldb_quickstart.md
+++ b/docs/en/quickstart/openmldb_quickstart.md
@@ -17,7 +17,7 @@ If you wan to compile and install it by yourself, you can refer to our [installa
Pull the image (image download size is about 1GB, after decompression is about 1.7 GB) and start the docker container:
```bash
-docker run -it 4pdosc/openmldb:0.5.2 bash
+docker run -it 4pdosc/openmldb:0.6.3 bash
```
```{important}
@@ -328,22 +328,22 @@ SELECT c1, c2, sum(c3) OVER w1 AS w1_c3_sum FROM demo_table1 WINDOW w1 AS (PARTI
The computation is logically done as follows :
1. According to the request line and the `PARTITION BY` in window clause, filter out the lines whose `c1` is "aaa", and sort them according to `c6` from small to large. So theoretically, the intermediate data table after partition sorting is shown in the following table. Among them, the first row after the request behavior is sorted.
```
- ----- ---- ---- ---------- ----------- --------------- - -------------
- c1 c2 c3 c4 c5 c6 c7
- ----- ---- ---- ---------- ----------- --------------- - -------------
- aaa 11 22 1.2 1.3 1635247427000 2021-05-20
- aaa 12 22 2.200000 12.300000 1636097890000 1970-01-01
- aaa 11 22 1.200000 11.300000 1636097290000 1970-01-01
- ----- ---- ---- ---------- ----------- --------------- - -------------
+ ----- ---- ---- ---------- ----------- --------------- ------------
+ c1 c2 c3 c4 c5 c6 c7
+ ----- ---- ---- ---------- ----------- --------------- ------------
+ aaa 11 22 1.2 1.3 1635247427000 2021-05-20
+ aaa 11 22 1.200000 11.300000 1636097290000 1970-01-01
+ aaa 12 22 2.200000 12.300000 1636097890000 1970-01-01
+ ----- ---- ---- ---------- ----------- --------------- ------------
```
2. The window range is `2 PRECEDING AND CURRENT ROW`, so we cut out the real window in the above table, the request row is the smallest row, the previous 2 rows do not exist, but the window contains the current row, so the window has only one row (the request row).
3. Window aggregation is performed, to sum `c3` of the data in the window (only one row), and we have the result 22.
The output is:
```
- ----- ---- -------------
- c1 c2 w1_c3_sum
- ----- ---- -------------
- aaa 11 22
- ----- ---- -------------
+ ----- ---- -----------
+ c1 c2 w1_c3_sum
+ ----- ---- -----------
+ aaa 11 22
+ ----- ---- -----------
```
diff --git a/docs/en/quickstart/python_sdk.md b/docs/en/quickstart/python_sdk.md
index da63d62752b..66bd0a7b920 100644
--- a/docs/en/quickstart/python_sdk.md
+++ b/docs/en/quickstart/python_sdk.md
@@ -17,7 +17,7 @@ When creating the connection, the database name is not required to exist. If it
````python
import openmldb.dbapi
-db = openmldb.dbapi.connect("db1", "$zkcluster", "$zkpath")
+db = openmldb.dbapi.connect(database="db1", zk="$zkcluster", zkPath="$zkpath")
cursor = db.cursor()
````
@@ -178,7 +178,7 @@ OpenMLDB Python SDK supports Notebook magic function extension, you can use the
````python
import openmldb
-db = openmldb.dbapi.connect('demo_db','0.0.0.0:2181','/openmldb')
+db = openmldb.dbapi.connect(database='demo_db',zk='0.0.0.0:2181',zkPath='/openmldb')
openmldb.sql_magic.register(db)
````
diff --git a/docs/en/quickstart/rest_api.md b/docs/en/quickstart/rest_api.md
index 5ec5f9b30d9..d2583ed9ffc 100644
--- a/docs/en/quickstart/rest_api.md
+++ b/docs/en/quickstart/rest_api.md
@@ -1,5 +1,10 @@
# REST APIs
+## Important Information
+
+- As REST APIs interact with the OpenMLDB servers via APIServer, the APIServer must be deployed. The APIServer is an optional module, please refer to [this document](../deploy/install_deploy.md#Deploy-APIServer) for the deployment.
+- Currently, the APIServer is mainly designed for function development and testing, so it is not recommended for performance benchmarking or production deployment. The APIServer has no high availability, and it also introduces the overhead of networking and encoding/decoding.
+
## Data Insertion
The request URL: http://ip:port/dbs/{db_name}/tables/{table_name}
@@ -57,7 +62,7 @@ The request body:
```bash
curl http://127.0.0.1:8080/dbs/demo_db/deployments/demo_data_service -X POST -d'{
- "input": [["aaa", 11, 22, 1.2, 1.3, 1635247427000, "2021-05-20"]],
+ "input": [["aaa", 11, 22, 1.2, 1.3, 1635247427000, "2021-05-20"]]
}'
```
@@ -71,4 +76,188 @@ The response:
"data":[["aaa",11,22]]
}
}
-```
\ No newline at end of file
+```
+
+## Query
+
+The request URL: http://ip:port/dbs/{db_name}
+
+HTTP method: POST
+
+**Request Body Example**
+
+The query without parameter:
+
+```json
+{
+ "mode": "online",
+ "sql": "select 1"
+}
+```
+
+The available values of `mode` are: "offsync", "offasync", "online".
+
+The response:
+
+```json
+{
+ "code":0,
+ "msg":"ok"
+}
+```
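+
+Such a query can be sent to the APIServer with `curl`, for example as sketched below; the address `127.0.0.1:8080` and the database name `demo_db` are placeholder assumptions:
+
+```bash
+curl http://127.0.0.1:8080/dbs/demo_db -X POST -d'{
+    "mode": "online",
+    "sql": "select 1"
+}'
+```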
+
+The query with parameters:
+
+```json
+{
+ "mode": "online",
+ "sql": "SELECT c1, c2, c3 FROM demo WHERE c1 = ? AND c2 = ?",
+ "input": {
+ "schema": ["Int32", "String"],
+ "data": [1, "aaa"]
+ }
+}
+```
+
+All supported types (case-insensitive):
+`Bool`, `Int16`, `Int32`, `Int64`, `Float`, `Double`, `String`, `Date` and `Timestamp`.
+
+The response:
+
+```json
+{
+ "code":0,
+ "msg":"ok",
+ "data": {
+ "schema": ["Int32", "String", "Float"],
+ "data": [[1, "aaa", 1.2], [1, "aaa", 3.4]]
+ }
+}
+```
+
+## Get Deployment Info
+
+
+The request URL: http://ip:port/dbs/{db_name}/deployments/{deployment_name}
+
+HTTP method: GET
+
+The response:
+
+```json
+{
+ "code": 0,
+ "msg": "ok",
+ "data": {
+ "name": "",
+ "procedure": "",
+ "input_schema": [
+
+ ],
+ "input_common_cols": [
+
+ ],
+ "output_schema": [
+
+ ],
+ "output_common_cols": [
+
+ ],
+ "dbs": [
+
+ ],
+ "tables": [
+
+ ]
+ }
+}
+```
+
+
+## List Database
+
+The request URL: http://ip:port/dbs
+
+HTTP method: GET
+
+The response:
+
+```json
+{
+ "code": 0,
+ "msg": "ok",
+ "dbs": [
+
+ ]
+}
+```
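+
+A minimal sketch of calling this endpoint with `curl` (the address `127.0.0.1:8080` is an assumed APIServer endpoint):
+
+```bash
+curl http://127.0.0.1:8080/dbs
+```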
+
+## List Table
+
+The request URL: http://ip:port/dbs/{db}/tables
+
+HTTP method: GET
+
+The response:
+
+```json
+{
+ "code": 0,
+ "msg": "ok",
+ "tables": [
+ {
+ "name": "",
+ "table_partition_size": 8,
+ "tid": ,
+ "partition_num": 8,
+ "replica_num": 2,
+ "column_desc": [
+ {
+ "name": "",
+ "data_type": "",
+ "not_null": false
+ }
+ ],
+ "column_key": [
+ {
+ "index_name": "",
+ "col_name": [
+
+ ],
+ "ttl": {
+
+ }
+ }
+ ],
+ "added_column_desc": [
+
+ ],
+ "format_version": 1,
+ "db": "",
+ "partition_key": [
+
+ ],
+ "schema_versions": [
+
+ ]
+ }
+ ]
+}
+```
+
+## Refresh
+
+The request URL: http://ip:port/refresh
+
+HTTP method: POST
+
+Empty request body.
+
+The response:
+
+```json
+{
+ "code":0,
+ "msg":"ok"
+}
+```
diff --git a/docs/en/reference/client_config/client_spark_config.md b/docs/en/reference/client_config/client_spark_config.md
new file mode 100644
index 00000000000..6e512737ab8
--- /dev/null
+++ b/docs/en/reference/client_config/client_spark_config.md
@@ -0,0 +1,29 @@
+# Spark Client Configuration
+
+## Set Spark Parameters For CLI
+
+The offline jobs of OpenMLDB are submitted as Spark jobs. Users can set default Spark parameters in TaskManager or set Spark parameters for each submission, and refer to [Spark Configuration](https://spark.apache.org/docs/latest/configuration.html) for more detailed configurations.
+
+If we want to set Spark parameters in the SQL CLI, we can create an ini configuration file like the following.
+
+```
+[Spark]
+spark.driver.extraJavaOptions=-Dfile.encoding=utf-8
+spark.executor.extraJavaOptions=-Dfile.encoding=utf-8
+spark.driver.cores=1
+spark.default.parallelism=1
+spark.driver.memory=4g
+spark.driver.memoryOverhead=384
+spark.driver.memoryOverheadFactor=0.10
+spark.shuffle.compress=true
+spark.files.maxPartitionBytes=134217728
+spark.sql.shuffle.partitions=200
+```
+
+For example, if we save the configuration file as `/work/openmldb/bin/spark.conf`, we can start the SQL CLI with the parameter `--spark_conf` as follows.
+
+```
+./openmldb/bin/openmldb --zk_cluster=127.0.0.1:2181 --zk_root_path=/openmldb --role=sql_client --spark_conf=/work/openmldb/bin/spark.conf
+```
+
+If the configuration file does not exist or is incorrect, we will get errors when submitting the offline jobs.
\ No newline at end of file
diff --git a/docs/en/reference/client_config/index.rst b/docs/en/reference/client_config/index.rst
new file mode 100644
index 00000000000..65b0f6c9073
--- /dev/null
+++ b/docs/en/reference/client_config/index.rst
@@ -0,0 +1,9 @@
+=============================
+Client Configuration
+=============================
+
+
+.. toctree::
+ :maxdepth: 1
+
+ client_spark_config
\ No newline at end of file
diff --git a/docs/en/reference/index.rst b/docs/en/reference/index.rst
index e8a76710d99..41e948f68e0 100644
--- a/docs/en/reference/index.rst
+++ b/docs/en/reference/index.rst
@@ -10,3 +10,4 @@ References
arch/index
sql/index
ip_tips
+ client_config/index
diff --git a/docs/en/reference/ip_tips.md b/docs/en/reference/ip_tips.md
index 857990b5c54..dcd8890167f 100644
--- a/docs/en/reference/ip_tips.md
+++ b/docs/en/reference/ip_tips.md
@@ -38,12 +38,12 @@ Expose the port through `-p` when starting the container, and the client can acc
The stand-alone version needs to expose the ports of three components (nameserver, tabletserver, apiserver):
```
-docker run -p 6527:6527 -p 9921:9921 -p 8080:8080 -it 4pdosc/openmldb:0.5.0 bash
+docker run -p 6527:6527 -p 9921:9921 -p 8080:8080 -it 4pdosc/openmldb:0.6.3 bash
```
The cluster version needs to expose the zk port and the ports of all components:
```
-docker run -p 2181:2181 -p 7527:7527 -p 10921:10921 -p 10922:10922 -p 8080:8080 -p 9902:9902 -it 4pdosc/openmldb:0.5.0 bash
+docker run -p 2181:2181 -p 7527:7527 -p 10921:10921 -p 10922:10922 -p 8080:8080 -p 9902:9902 -it 4pdosc/openmldb:0.6.3 bash
```
```{tip}
@@ -57,7 +57,7 @@ If the OpenMLDB service process is distributed, the "port number is occupied" ap
#### Host Network
Or more conveniently, use host networking without port isolation, for example:
```
-docker run --network host -it 4pdosc/openmldb:0.5.0 bash
+docker run --network host -it 4pdosc/openmldb:0.6.3 bash
```
But in this case, it is easy to find that the port is occupied by other processes in the host. If occupancy occurs, change the port number carefully.
diff --git a/docs/en/reference/sql/data_types/date_and_time_types.md b/docs/en/reference/sql/data_types/date_and_time_types.md
index 57a5de47be1..7c35f0a94d1 100644
--- a/docs/en/reference/sql/data_types/date_and_time_types.md
+++ b/docs/en/reference/sql/data_types/date_and_time_types.md
@@ -1,13 +1,13 @@
# Date and Time Type
-OpenMLDB supports date type `DATE` and timestamp `TIMESTAMP`
+OpenMLDB supports date type `DATE` and timestamp `TIMESTAMP`.
-Each time type has a valid range of values and a NULL value. The NULL value is used when specifying an invalid value that cannot be represented。
+Each time type has a valid range of values and a NULL value. The NULL value is used when specifying an invalid value that cannot be represented.
| Type | Size (bytes) | Scope | Format | Use |
| :-------- | :----------- | :----------------------------------------------------------- | :-------------- | :----------------------- |
| DATE | 4 | 1900-01-01 ~ | YYYY-MM-DD | Date Value |
-| TIMESTAMP | 8 | End Time is 1970-01-01 00:00:00/2038 **2147483647** Second,Beijing time **2038-1-19 11:14:07**,GMT January 19, 2038 Early Morning 03:14:07 | YYYYMMDD HHMMSS | Mixed Date and Time Value, Timestamp |
+| TIMESTAMP | 8 | ~ INT64_MAX | online: int64, offline `LOAD DATA`: int64 or 'yyyy-MM-dd'T'HH:mm:ss[.SSS][XXX]' | Mixed Date and Time Value, Timestamp |
## Time Zone Handling
diff --git a/docs/en/reference/sql/ddl/CREATE_DATABASE_STATEMENT.md b/docs/en/reference/sql/ddl/CREATE_DATABASE_STATEMENT.md
index fa82ffb536a..26d25b8d81c 100644
--- a/docs/en/reference/sql/ddl/CREATE_DATABASE_STATEMENT.md
+++ b/docs/en/reference/sql/ddl/CREATE_DATABASE_STATEMENT.md
@@ -12,22 +12,22 @@ DBName ::=
**Description**
-The `CREATE DATABASE` statement is used to create a new database on OpenMLDB. The database name must be unique. If a database with the same name is created repeatedly, an error will occur.
+The `CREATE DATABASE` statement is used to create a new database on OpenMLDB. The database name must be unique. If a database with the same name already exists, an error will occur.
## **Example**
-Create a database named `db1`. If a database with the same name already exists, an error will be thrown.
+The following SQL command creates a database named `db1`. If a database with the same name already exists, an error will be thrown.
```sql
CREATE DATABASE db1;
--- SUCCEED: Create database successfully
+-- SUCCEED
```
-After creating a database named `db2`:
+Then create a database named `db2`:
```sql
CREATE DATABASES db2;
--- SUCCEED: Create database successfully
+-- SUCCEED
```
Show database list:
@@ -61,4 +61,4 @@ CREATE DATABASE db1;
[DROP DATABASE](./DROP_DATABASE_STATEMENT.md)
-[SHOW DATABASES](../ddl/SHOW_STATEMENT.md#show-databases)
\ No newline at end of file
+[SHOW DATABASES](./SHOW_DATABASES_STATEMENT.md)
diff --git a/docs/en/reference/sql/ddl/CREATE_INDEX_STATEMENT.md b/docs/en/reference/sql/ddl/CREATE_INDEX_STATEMENT.md
new file mode 100644
index 00000000000..6222b98942b
--- /dev/null
+++ b/docs/en/reference/sql/ddl/CREATE_INDEX_STATEMENT.md
@@ -0,0 +1,62 @@
+# CREATE INDEX
+
+The `CREATE INDEX` statement is used to create a new index on an existing table. If there is data in the table, the data will be loaded asynchronously.
+The job status can be checked through the `showopstatus` command of `ns_client`, see [Operations in CLI](../../../maintain/cli.md#showopstatus).
+
+## Syntax
+
+```sql
+CreateIndexstmt ::=
+ 'CREATE' 'INDEX' IndexName ON TableName IndexColumn OptOptionsList
+
+IndexName ::= Identifier
+
+TableName ::=
+ Identifier ('.' Identifier)?
+
+
+IndexColumn ::=
+ IndexColumnPrefix ")"
+
+IndexColumnPrefix ::=
+ "(" ColumnExpression
+ | IndexColumnPrefix "," ColumnExpression
+
+ColumnExpression ::=
+ Identifier
+
+OptOptionsList ::=
+ "OPTIONS" OptionList
+
+OptionList ::=
+ OptionsListPrefix ")"
+
+OptionsListPrefix ::=
+ "(" OptionEntry
+ | OptionsListPrefix "," OptionEntry
+
+OptionEntry ::=
+ Identifier "=" Identifier
+
+```
+
+
+
+## **Example**
+```SQL
+CREATE INDEX index2 ON t5 (col2);
+-- SUCCEED
+```
+```{note}
+If `OPTIONS` is not provided, the SQL with the created index cannot be deployed online, since the index doesn't have TS (timestamp).
+```
+We can also set the `TS` column as below:
+```SQL
+CREATE INDEX index3 ON t5 (col3) OPTIONS (ts=ts1, ttl_type=absolute, ttl=30d);
+-- SUCCEED
+```
+Please refer to [here](./CREATE_TABLE_STATEMENT.md) for more details about `TTL` and `TTL_TYPE`.
+
+## Related SQL
+
+[DROP INDEX](./DROP_INDEX_STATEMENT.md)
\ No newline at end of file
diff --git a/docs/en/reference/sql/ddl/CREATE_TABLE_STATEMENT.md b/docs/en/reference/sql/ddl/CREATE_TABLE_STATEMENT.md
index 54e575f894a..a3a3b3919ae 100644
--- a/docs/en/reference/sql/ddl/CREATE_TABLE_STATEMENT.md
+++ b/docs/en/reference/sql/ddl/CREATE_TABLE_STATEMENT.md
@@ -1,4 +1,5 @@
# CREATE TABLE
+ The `CREATE TABLE` statement is used to create a table. The table name must be unique in one database.
## Syntax
@@ -6,35 +7,28 @@
CreateTableStmt ::=
'CREATE' 'TABLE' IfNotExists TableName (
TableElementList CreateTableSelectOpt | LikeTableWithOrWithoutParen ) OnCommitOpt
-
IfNotExists ::=
('IF' 'NOT' 'EXISTS')?
-
TableName ::=
Identifier ('.' Identifier)?
TableElementList ::=
TableElement ( ',' TableElement )*
-
TableElement ::=
- ColumnDef
-| ColumnIndex
+ ColumnDef | ColumnIndex
```
- The `CREATE TABLE` statement is used to create a table. The table name must be unique if it's in the same database. If the table with the same name is created repeatedly, an error will occur.
-The `table_element` list needs to be defined in the table creation statement. `table_element` is divided into column description `ColumnDef` and `Constraint`. OpenMLDB requires at least one ColumnDef in the `table_element` list.
+The `TableElementList` needs to be defined in the `CREATE TABLE` statement. `TableElementList` consists of `ColumnDef` (column definition) and `ColumnIndex`. OpenMLDB requires at least one `ColumnDef` in the `TableElementList`.
-### Related Syntax Elements
-#### Column Description ColumnDef (required)
+### ColumnDef (required)
```SQL
ColumnDef ::=
ColumnName ( ColumnType ) [ColumnOptionList]
-
-ColumnName
- ::= Identifier ( '.' Identifier ( '.' Identifier )? )?
+ColumnName ::=
+ Identifier ( '.' Identifier ( '.' Identifier )? )?
ColumnType ::=
'INT' | 'INT32'
@@ -44,47 +38,51 @@ ColumnType ::=
|'DOUBLE'
|'TIMESTAMP'
|'DATE'
+ |'BOOL'
|'STRING' | 'VARCHAR'
-ColumnOptionList
- ::= ColumnOption*
-ColumnOption
- ::= ['DEFAULT' DefaultValueExpr ] ['NOT' 'NULL']
+ColumnOptionList ::=
+ ColumnOption*
+ColumnOption ::=
+ ['DEFAULT' DefaultValueExpr ] ['NOT' 'NULL']
-DefaultValueExpr
- ::= int_literal | float_literal | double_literal | string_literal
+DefaultValueExpr ::=
+ int_literal | float_literal | double_literal | string_literal
```
-A table contains one or more columns. The column description `ColumnDef` for each column describes the column name, column type, and class configuration.
+A table contains one or more columns. The column description `ColumnDef` for each column describes the column name, column type, and options.
+
+- `ColumnName`: The name of the column in the table. Column names within the same table must be unique.
+- `ColumnType`: The data type of the column. To learn about the data types supported by OpenMLDB, please refer to [Data Types](../data_types/reference.md).
+- `ColumnOptionList`:
+ - `NOT NULL`: The column does not allow null values.
+  - `DEFAULT`: The default value of this column. It is recommended to configure the default value if `NOT NULL` is configured. In this case, when inserting data, if the value of the column is not defined, the default value will be inserted. If the `NOT NULL` attribute is configured but the `DEFAULT` value is not configured, OpenMLDB will throw an error when the column value is not defined in the INSERT statement.
-- Column Name: The name of the column in the table. Column names within the same table must be unique.
-- Column Type: The type of the column. To learn about the data types supported by OpenMLDB, please refer to [Data Types](../data_types/reference.md).
-- Column Constraint Configuration:
- - `NOT NULL`: The configuration column does not allow null values.
- - `DEFAULT`: Configure column default values. The attribute of `NOT NULL` will also configure the default value of `DEFAULT`. In this case, when the data is checked, if the value of the column is not defined, the default value will be inserted. If the `NOT NULL` attribute is configured and the `DEFAULT` value is not configured, OpenMLDB will throw an error when the change column value is not defined in the insert statement.创建一张表
+#### Example
-##### Example: Create a Table
+**Example 1: Create a Table**
-Set the current database to `db1`, create a table `t1` in the current database, including the column `col0`, the column type is STRING
+The following SQL commands set the current database to `db1` and create a table `t1` in the current database, including the column named `col0`. The data type of `col0` is `STRING`.
```sql
CREATE DATABASE db1;
--- SUCCEED: Create database successfully
-
+-- SUCCEED
USE db1;
-- SUCCEED: Database changed
-
CREATE TABLE t1(col0 STRING);
--- SUCCEED: Create successfully
+-- SUCCEED
```
-
-Specifies to create a table `t1` in the database `db1`, including the column `col0`, the column type is STRING
-
+The following SQL command shows how to create a table in a database other than the one currently in use.
```sql
-CREATE TABLE db1.t1 (col0 STRING, col1 int);
--- SUCCEED: Create successfully
-desc t1;
+CREATE TABLE db1.t2 (col0 STRING, col1 int);
+-- SUCCEED
+```
+Switch to database `db1` to see the details of the table just created.
+```sql
+USE db1;
+-- SUCCEED: Database changed
+desc t2;
--- ------- --------- ------ ---------
# Field Type Null Default
--- ------- --------- ------ ---------
@@ -96,131 +94,133 @@ desc t1;
--- -------------------- ------ ---- ------ ---------------
1 INDEX_0_1639524201 col0 - 0min kAbsoluteTime
--- -------------------- ------ ---- ------ ---------------
+ --------------
+ storage_mode
+ --------------
+ Memory
+ --------------
```
-##### Example: Create A Table, Configuration Columns Are Not Allowed To Be Empty NOT NULL
+**Example 2: Create a Duplicate Table**
+The following SQL commands demonstrate the error that occurs when creating a table whose name is the same as that of an existing table in the database.
```sql
-USE db1;
CREATE TABLE t1 (col0 STRING NOT NULL, col1 int);
--- SUCCEED: Create successfully
-```
-
-```sql
-desc t1;
- --- ------- --------- ------ ---------
- # Field Type Null Default
- --- ------- --------- ------ ---------
- 1 col0 Varchar NO
- 2 col1 Int YES
- --- ------- --------- ------ ---------
- --- -------------------- ------ ---- ------ ---------------
- # name keys ts ttl ttl_type
- --- -------------------- ------ ---- ------ ---------------
- 1 INDEX_0_1639523978 col0 - 0min kAbsoluteTime
- --- -------------------- ------ ---- ------ ---------------
+-- SUCCEED
+CREATE TABLE t1 (col0 STRING NOT NULL, col1 int);
+-- Error: table already exists
+CREATE TABLE t1 (col0 STRING NOT NULL, col1 string);
+-- Error: table already exists
```
-##### Example: Create A Table, Configurion Column Default Value
+**Example 3: Create a Table with NOT NULL on Certain Columns**
```sql
USE db1;
-CREATE TABLE t1 (col0 STRING DEFAULT "NA", col1 int);
--- SUCCEED: Create successfully
+-- SUCCEED: Database changed
+CREATE TABLE t3 (col0 STRING NOT NULL, col1 int);
+-- SUCCEED
```
```sql
-desc t1;
---- ------- --------- ------ ---------
- # Field Type Null Default
---- ------- --------- ------ ---------
- 1 col0 Varchar NO NA
- 2 col1 Int YES
---- ------- --------- ------ ---------
---- -------------------- ------ ---- ------ ---------------
- # name keys ts ttl ttl_type
---- -------------------- ------ ---- ------ ---------------
- 1 INDEX_0_1639524344 col0 - 0min kAbsoluteTime
---- -------------------- ------ ---- ------ ---------------
+desc t3;
+ --- ------- --------- ------ ---------
+ # Field Type Null Default
+ --- ------- --------- ------ ---------
+ 1 col0 Varchar NO
+ 2 col1 Int YES
+ --- ------- --------- ------ ---------
+ --- -------------------- ------ ---- ------ ---------------
+ # name keys ts ttl ttl_type
+ --- -------------------- ------ ---- ------ ---------------
+ 1 INDEX_0_1657327434 col0 - 0min kAbsoluteTime
+ --- -------------------- ------ ---- ------ ---------------
+ --------------
+ storage_mode
+ --------------
+ Memory
+ --------------
```
-##### Example: Create A Table With The Same Name Repeatedly In The Same Database
+**Example 4: Create a Table with Default Value**
```sql
USE db1;
-CREATE TABLE t1 (col0 STRING NOT NULL, col1 int);
--- SUCCEED: Create successfully
-CREATE TABLE t1 (col1 STRING NOT NULL, col1 int);
--- SUCCEED: Create successfully
+-- SUCCEED: Database changed
+CREATE TABLE t4 (col0 STRING DEFAULT "NA" NOT NULL, col1 int);
+-- SUCCEED
+```
+
+```sql
+desc t4;
+ --- ------- --------- ------ ---------
+  #   Field   Type      Null   Default
+ --- ------- --------- ------ ---------
+  1   col0    Varchar   NO     NA
+ 2 col1 Int YES
+ --- ------- --------- ------ ---------
+ --- -------------------- ------ ---- ------ ---------------
+ # name keys ts ttl ttl_type
+ --- -------------------- ------ ---- ------ ---------------
+ 1 INDEX_0_1657327434 col0 - 0min kAbsoluteTime
+ --- -------------------- ------ ---- ------ ---------------
+ --------------
+ storage_mode
+ --------------
+ Memory
+ --------------
```
-#### ColumnIndex (optional)
+
+
+
+### ColumnIndex (optional)
```sql
-ColumnIndex
- ::= 'INDEX' IndexName '(' IndexOptionList ')'
+ColumnIndex ::=
+ 'INDEX' '(' IndexOptionList ')'
-IndexOptionList
- ::= IndexOption ( ',' IndexOption )*
-IndexOption
- ::= 'KEY' '=' ColumnNameList
- | 'TS' '=' ColumnName
- |
- | 'TTL' = int_literal
- | 'REPLICANUM' = int_literal
-
--- IndexKeyOption
-IndexKeyOption
- ::= 'KEY' '=' ColumnNameList
-ColumnNameList
- :: = '(' ColumnName (',' ColumnName)* ')'
--- IndexTsOption
-IndexTsOption
- ::= 'TS' '=' ColumnName
--- IndexTtlTypeOption
-IndexTtlTypeOption
- ::= 'TTL_TYPE' '=' TTLType
-TTLType ::=
- 'ABSOLUTE'
- | 'LATEST'
- | 'ABSORLAT'
- | 'ABSANDLAT'
+IndexOptionList ::=
+ IndexOption ( ',' IndexOption )*
--- IndexTtlOption
-IndexTtlOption
- ::= 'TTL' '=' int_literal|interval_literal
+IndexOption ::=
+ IndexOptionName '=' expr
+```
-interval_literal ::= int_literal 'S'|'D'|'M'|'H'
+Indexes can be used by database search engines to speed up data retrieval. Simply put, an index is a pointer to the data in a table. Configuring a column index generally requires configuring the index key (`KEY`), index time column (`TS`), `TTL` and `TTL_TYPE`.
+The index key must be configured, and other configuration items are optional. The following table introduces these configuration items in detail.
-```
+| Configuration Item | Note | Expression | Example |
+|------------|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|-------------------------------------------------------------------------------------------------|----------------------------------------------------------------------------------------------------------------------------|
+| `KEY`      | It defines the index column (required). OpenMLDB supports single-column indexes as well as joint indexes. When `KEY`=one column, a single-column index is configured. When `KEY`=multiple columns, a joint index on these columns is configured: the column values are concatenated in order into a new string that serves as the index key. | Single-column index: `ColumnName`
Joint index:
`(ColumnName (, ColumnName)* ) ` | Single-column index: `INDEX(KEY=col1)`
Joint index: `INDEX(KEY=(col1, col2))` |
+| `TS`       | It defines the index time column (optional). Data on the same index will be sorted by the index time column. When `TS` is not explicitly configured, the timestamp of data insertion is used as the index time. | `ColumnName` | `INDEX(KEY=col1, TS=std_time)`. The index column is col1, and the data rows with the same col1 value are sorted by std_time. |
+| `TTL_TYPE` | It defines the elimination rules (optional). There are four types. When `TTL_TYPE` is not explicitly configured, the `ABSOLUTE` expiration configuration is used by default. | Supported expr: `ABSOLUTE`
`LATEST`
`ABSORLAT`
`ABSANDLAT`. | For specific usage, please refer to **Configuration Rules for TTL and TTL_TYPE** below. |
+| `TTL`      | It defines the maximum survival time/number of records. Different `TTL_TYPE`s require different `TTL` formats. When `TTL` is not explicitly configured, `TTL=0`, which means OpenMLDB will not evict records. | Supported expr: `int_literal`
`interval_literal`
`( interval_literal , int_literal )` | For specific usage, please refer to **Configuration Rules for TTL and TTL_TYPE** below. |
+
-Indexes can be used by database search engines to speed up data retrieval. Simply put, an index is a pointer to the data in a table. Configuring a column index generally requires configuring the index key, index time column, TTL and TTL_TYPE. The index key must be configured, and other configuration items are optional. The following table lists the column index configuration items:
+**Configuration details of TTL and TTL_TYPE**:
-| configuration item | describe | Usage example |
-| ---------- | ------------------------------------------------------------ | ------------------------------------------------------------ |
-| `KEY` | Index column (required). OpenMLDB supports single-column indexes as well as joint indexes. When `KEY`=one column, a single-column index is configured. When `KEY`=multiple columns, the joint index of these columns is configured, specifically, several columns are spliced into a string as an index in order. | Single-column index: `INDEX(KEY=col1)`
Joint index: `INDEX(KEY=(col1, col2))` |
-| `TS` | Index time column (optional). Data on the same index will be sorted by the time index column. When `TS` is not explicitly configured, the timestamp of data insertion is used as the index time. | `INDEX(KEY=col1, TS=std_time)`. The index column is col1, and the data rows with the same col1 are sorted by std_time. |
-| `TTL_TYPE` | Elimination rules (optional). Including: `ABSOLUTE`, `LATEST`, `ABSORLAT`, `ABSANDLAT` these four types. When `TTL_TYPE` is not explicitly configured, the `ABSOLUTE` expiration configuration is used by default. | For specific usage, please refer to "Configuration Rules for TTL and TTL_TYPE" |
-| `TTL` | Maximum survival time/number of bars () is optional. Different TTL_TYPEs have different configuration methods. When `TTL` is not explicitly configured, `TTL=0`. A `TTL` of 0 means no eviction rule is set, and OpenMLDB will not evict records.
- | |
+| TTL_TYPE | TTL | Note | Example |
+| ----------- |---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|--------------------------------------------------------------------------------------------------------|---------------------------------------------------------------------------------------------------------------------------------------------------------------|
+| `ABSOLUTE`  | The value of TTL represents the expiration time. The configuration value is a time period such as `100m, 12h, 1d, 365d`. The maximum configurable expiration time is `15768000m` (i.e. 30 years). | When a record expires, it is eliminated. | `INDEX(KEY=col1, TS=std_time, TTL_TYPE=absolute, TTL=100m)`
OpenMLDB will delete data older than 100 minutes. |
+| `LATEST`    | The value of TTL represents the maximum number of surviving records, that is, the maximum number of records allowed under the same index. At most 1000 can be configured. | When the number of records exceeds the maximum, the oldest records are eliminated. | `INDEX(KEY=col1, TS=std_time, TTL_TYPE=LATEST, TTL=10)`. OpenMLDB will only keep the last 10 records and delete the previous records. |
+| `ABSORLAT`  | It defines the expiration time and the maximum number of live records. The configuration value is a 2-tuple of the form `(100m, 10), (1d, 1)`. The maximum configurable value is `(15768000m, 1000)`. | A record is eliminated when it expires **OR** the number of records exceeds the maximum. | `INDEX(key=c1, ts=c6, ttl=(120min, 100), ttl_type=absorlat)`. When the number of records exceeds 100, **OR** when the records expire, they will be eliminated. |
+| `ABSANDLAT` | It defines the expiration time and the maximum number of live records. The configuration value is a 2-tuple of the form `(100m, 10), (1d, 1)`. The maximum configurable value is `(15768000m, 1000)`. | A record is eliminated only when it expires **AND** the number of records exceeds the maximum. | `INDEX(key=c1, ts=c6, ttl=(120min, 100), ttl_type=absandlat)`. When there are more than 100 records **AND** the records have expired, they will be eliminated. |
-Configuration details of TTL and TTL_TYPE:
-| TTL_TYPE | TTL | describe | Usage example |
-| ----------- | ------------------------------------------------------------ | ---------------------------------------------------- | ------------------------------------------------------------ |
-| `ABSOLUTE` | The value of TTL represents the expiration time. The configuration value is a time period such as `100m, 12h, 1d, 365d`. The maximum configurable expiration time is `15768000m` (ie 30 years) | When a record expires, it is eliminated. | `INDEX(KEY=col1, TS=std_time, TTL_TYPE=absolute, TTL=100m)`
OpenMLDB will delete data older than 100 minutes. |
-| `LATEST` | The value of TTL represents the maximum number of surviving entries. That is, under the same index, the maximum number of data items allowed exists. Up to 1000 can be configured | When the record exceeds the maximum number, it will be eliminated. | `INDEX(KEY=col1, TS=std_time, TTL_TYPE=LATEST, TTL=10)`. OpenMLDB will only keep the last 10 records and delete the previous records. |
-| `ABSORLAT` | Configure the expiration time and the maximum number of live records. The configuration value is a 2-tuple of the form `(100m, 10), (1d, 1)`. The maximum can be configured `(15768000m, 1000)`. | Eliminates if and only if the record expires** or if the record exceeds the maximum number of records. | `INDEX(key=c1, ts=c6, ttl=(120min, 100), ttl_type=absorlat)`. When the record exceeds 100, **OR** when the record expires, it will be eliminated |
-| `ABSANDLAT` |Configure the expiration time and the maximum number of live records. The configuration value is a 2-tuple of the form `(100m, 10), (1d, 1)`. The maximum can be configured `(15768000m, 1000)`. | When records expire **AND** records exceed the maximum number of records, records will be eliminated. | `INDEX(key=c1, ts=c6, ttl=(120min, 100), ttl_type=absandlat)`. When there are more than 100 records, **AND** the records expire, they will also be eliminated. |
+#### Example
-##### Example: Create A Table With A Single-Column Index
+
+**Example 1**
+
+The following SQL example creates a table with a single-column index.
```sql
USE db1;
+--SUCCEED: Database changed
CREATE TABLE t1 (col0 STRING, col1 int, std_time TIMESTAMP, INDEX(KEY=col1));
--- SUCCEED: Create successfully
-
+-- SUCCEED
desc t1;
--- ---------- ----------- ------ ---------
# Field Type Null Default
@@ -236,14 +236,15 @@ desc t1;
--- -------------------- ------ ---- ------ ---------------
```
-##### Example: Create A Table With A Union Column Index
+**Example 2**
+
+The following SQL example creates a table with a joint index.
```sql
USE db1;
-
+--SUCCEED: Database changed
CREATE TABLE t1 (col0 STRING, col1 int, std_time TIMESTAMP, INDEX(KEY=(col0, col1)));
--- SUCCEED: Create successfully
-
+-- SUCCEED
desc t1;
--- ---------- ----------- ------ ---------
# Field Type Null Default
@@ -257,17 +258,18 @@ desc t1;
--- -------------------- ----------- ---- ------ ---------------
1 INDEX_0_1639524576 col0|col1 - 0min kAbsoluteTime
--- -------------------- ----------- ---- ------ ---------------
-
```
-##### Example: Create A Table With A Single Column Index + Time Column
+**Example 3**
+
+The following SQL example creates a table with a single-column index and specifies the time column (`TS`).
+
```sql
USE db1;
-
+--SUCCEED: Database changed
CREATE TABLE t1 (col0 STRING, col1 int, std_time TIMESTAMP, INDEX(KEY=col1, TS=std_time));
--- SUCCEED: Create successfully
-
+-- SUCCEED
desc t1;
--- ---------- ----------- ------ ---------
# Field Type Null Default
@@ -283,14 +285,15 @@ desc t1;
--- -------------------- ------ ---------- ------ ---------------
```
-##### Example: Create A Table With A Single Column Index + Time Column With A TTL Type Of Abusolute, And Configure The TTL To 30 Days
+**Example 4**
+
+The following SQL example creates a table with a single-column index, specifying the time column, `TTL_TYPE` and `TTL`.
```sql
USE db1;
-
+--SUCCEED: Database changed
CREATE TABLE t1 (col0 STRING, col1 int, std_time TIMESTAMP, INDEX(KEY=col1, TS=std_time, TTL_TYPE=absolute, TTL=30d));
--- SUCCEED: Create successfully
-
+-- SUCCEED
desc t1;
--- ---------- ----------- ------ ---------
# Field Type Null Default
@@ -306,14 +309,15 @@ desc t1;
--- -------------------- ------ ---------- ---------- ---------------
```
-##### Example: Create A Table With Latest TTL Type, With A Single Column Index + Time Column, And Configure The TTL To 1
+**Example 5**
+
+The following SQL commands create a table with a single-column index, setting `TTL_TYPE=latest` and `TTL=1`.
```sql
USE db1;
-
+--SUCCEED: Database changed
CREATE TABLE t1 (col0 STRING, col1 int, std_time TIMESTAMP, INDEX(KEY=col1, TS=std_time, TTL_TYPE=latest, TTL=1));
--- SUCCEED: Create successfully
-
+-- SUCCEED
desc t1;
--- ---------- ----------- ------ ---------
# Field Type Null Default
@@ -329,15 +333,17 @@ desc t1;
--- -------------------- ------ ---------- ----- -------------
```
-##### Example: Create A Table With A Single-Column Index + Time Column Whose TTL Type Is absANDlat, And Configure The Expiration Time To Be 30 Days And The Maximum Number Of Retained Records As 10
+
+**Example 6**
+
+The following SQL commands create a table with a single-column index, setting `TTL_TYPE=absandlat`, the expiration time as 30 days, and the maximum number of retained records as 10.
```sql
USE db1;
-
+--SUCCEED: Database changed
CREATE TABLE t1 (col0 STRING, col1 int, std_time TIMESTAMP, INDEX(KEY=col1, TS=std_time, TTL_TYPE=absandlat, TTL=(30d,10)));
--- SUCCEED: Create successfully
-
+-- SUCCEED
desc t1;
--- ---------- ----------- ------ ---------
# Field Type Null Default
@@ -351,17 +357,16 @@ desc t1;
--- -------------------- ------ ---------- -------------- ------------
1 INDEX_0_1639525038 col1 std_time 43200min&&10 kAbsAndLat
--- -------------------- ------ ---------- -------------- ------------
-
```
-##### Example: Create A Table With A Single-Column Index + Time Column Whose TTL Type Is absORlat, And Configure The Expiration Time To Be 30 Days And The Maximum Number Of Retained Records As 10
+**Example 7**
+The following SQL commands create a table with a single-column index, setting `TTL_TYPE=absorlat`, the expiration time as 30 days, and the maximum number of retained records as 10.
```sql
USE db1;
-
+--SUCCEED: Database changed
CREATE TABLE t1 (col0 STRING, col1 int, std_time TIMESTAMP, INDEX(KEY=col1, TS=std_time, TTL_TYPE=absorlat, TTL=(30d,10)));
---SUCCEED: Create successfully
-
+--SUCCEED
desc t1;
--- ---------- ----------- ------ ---------
# Field Type Null Default
@@ -377,13 +382,15 @@ desc t1;
--- -------------------- ------ ---------- -------------- -----------
```
-##### Example: Create A Multi-Index Table
+**Example 8**
+
+The following SQL commands create a multi-index table.
+
```sql
USE db1;
-
+--SUCCEED: Database changed
CREATE TABLE t1 (col0 STRING, col1 int, std_time TIMESTAMP, INDEX(KEY=col0, TS=std_time), INDEX(KEY=col1, TS=std_time));
---SUCCEED: Create successfully
-
+--SUCCEED
desc t1;
--- ---------- ----------- ------ ---------
# Field Type Null Default
@@ -400,26 +407,22 @@ desc t1;
--- -------------------- ------ ---------- ------ ---------------
```
-#### Table Property TableOptions (optional)
+### Table Property TableOptions (optional)
```sql
TableOptions
::= 'OPTIONS' '(' TableOptionItem (',' TableOptionItem)* ')'
-
TableOptionItem
::= PartitionNumOption
| ReplicaNumOption
| DistributeOption
| StorageModeOption
--- PartitionNum
PartitionNumOption
::= 'PARTITIONNUM' '=' int_literal
--- ReplicaNumOption
ReplicaNumOption
::= 'REPLICANUM' '=' int_literal
--- DistributeOption
DistributeOption
::= 'DISTRIBUTION' '=' DistributionList
DistributionList
@@ -432,11 +435,8 @@ FollowerEndpointList
::= '[' Endpoint (',' Endpoint)* ']'
Endpoint
::= string_literals
-
--- StorageModeOption
StorageModeOption
::= 'STORAGE_MODE' '=' StorageMode
-
StorageMode
::= 'Memory'
| 'HDD'
@@ -445,28 +445,31 @@ StorageMode
-| configuration item | describe |
-Usage example |
-|----------------|------------------------------------------------------------------------------------------------------------------------------------------------------------------|-------------------------------------------------------------------------------|
-| `PARTITIONNUM` | Configure the number of partitions for the table. OpenMLDB divides the table into different partition blocks for storage. A partition is the basic unit of storage, replica, and failover related operations in OpenMLDB. When not explicitly configured, `PARTITIONNUM` defaults to 8. | `OPTIONS (PARTITIONNUM=8)` |
-| `REPLICANUM` | Configure the number of replicas for the table. Note that the number of replicas is only configurable in Cluster OpenMLDB. | `OPTIONS (REPLICANUM=3)` |
-| `DISTRIBUTION` | Configure the distributed node endpoint configuration. Generally, it contains a Leader node and several follower nodes. `(leader, [follower1, follower2, ..])`. Without explicit configuration, OpenMLDB will automatically configure `DISTRIBUTION` according to the environment and node. | `DISTRIBUTION = [ ('127.0.0.1:6527', [ '127.0.0.1:6528','127.0.0.1:6529' ])]` |
-| `STORAGE_MODE` | The storage mode of the table. The supported modes are `Memory`, `HDD` or `SSD`. When not explicitly configured, it defaults to `Memory`.
If you need to support a storage mode other than `Memory` mode, `tablet` requires additional configuration options. For details, please refer to [tablet configuration file conf/tablet.flags](../../../deploy/ conf.md). | `OPTIONS (STORAGE_MODE='HDD')` |
-##### Disk Table(`STORAGE_MODE` == `HDD`|`SSD`)With Memory Table(`STORAGE_MODE` == `Memory`)The Difference
-- Currently disk tables do not support GC operations
-- When inserting data into a disk table, if (`key`, `ts`) are the same under the same index, the old data will be overwritten; a new piece of data will be inserted into the memory table
-- Disk tables do not support `addindex` and `deleteindex` operations, so you need to define all required indexes when creating a disk table
-(The `deploy` command will automatically add the required indexes, so for a disk table, if the corresponding index is missing when it is created, `deploy` will fail)
+| Configuration Item | Note | Example |
+|--------------------|-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|-------------------------------------------------------------------------------|
+| `PARTITIONNUM` | It defines the number of partitions for the table. OpenMLDB divides the table into different partition blocks for storage. A partition is the basic unit of storage, replica, and fail-over related operations in OpenMLDB. When not explicitly configured, `PARTITIONNUM` defaults to 8. | `OPTIONS (PARTITIONNUM=8)` |
+| `REPLICANUM`       | It defines the number of replicas for the table. Note that the number of replicas is only configurable in the cluster version. | `OPTIONS (REPLICANUM=3)` |
+| `DISTRIBUTION` | It defines the distributed node endpoint configuration. Generally, it contains a Leader node and several followers. `(leader, [follower1, follower2, ..])`. Without explicit configuration, OpenMLDB will automatically configure `DISTRIBUTION` according to the environment and nodes. | `DISTRIBUTION = [ ('127.0.0.1:6527', [ '127.0.0.1:6528','127.0.0.1:6529' ])]` |
+| `STORAGE_MODE` | It defines the storage mode of the table. The supported modes are `Memory`, `HDD` and `SSD`. When not explicitly configured, it defaults to `Memory`.
If you need to support a storage mode other than `Memory` mode, `tablet` requires additional configuration options. For details, please refer to [tablet configuration file **conf/tablet.flags**](../../../deploy/conf.md#the-configuration-file-for-apiserver:-conf/tablet.flags). | `OPTIONS (STORAGE_MODE='HDD')` |
+
+
+#### The Difference between Disk Table and Memory Table
+- If the value of `STORAGE_MODE` is `HDD` or `SSD`, the table is a **disk table**. If `STORAGE_MODE` is `Memory`, the table is a **memory table**.
+- Currently, disk tables do not support GC operations.
+- When inserting data into a disk table, if the (`key`, `ts`) pair already exists under the same index, the old data will be overwritten; a memory table, in contrast, will insert a new record.
+- Disk tables do not support `addindex` or `deleteindex` operations, so you need to define all required indexes when creating a disk table. The `deploy` command will automatically add the required indexes, so for a disk table, if the corresponding index is missing when it is created, `deploy` will fail.
-##### Example: Create A Band Table, Configure The Number Of Partions As 8, The Number Of Replicas As 3, And The Storage Mode As HDD
+
+
+#### Example
+The following SQL commands create a table and configure the number of partitions as 8, the number of replicas as 3, and the storage mode as HDD.
```sql
USE db1;
-
+--SUCCEED: Database changed
CREATE TABLE t1 (col0 STRING, col1 int, std_time TIMESTAMP, INDEX(KEY=col1, TS=std_time)) OPTIONS(partitionnum=8, replicanum=3, storage_mode='HDD');
---SUCCEED: Create successfully
-
+--SUCCEED
DESC t1;
--- ---------- ----------- ------ ----------
# Field Type Null Default
@@ -486,6 +489,11 @@ DESC t1;
HDD
--------------
```
+The following SQL command creates a table with a specified distribution.
+```sql
+create table t1 (col0 string, col1 int) options (DISTRIBUTION=[('127.0.0.1:30921', ['127.0.0.1:30922', '127.0.0.1:30923']), ('127.0.0.1:30922', ['127.0.0.1:30921', '127.0.0.1:30923'])]);
+--SUCCEED
+```
## Related SQL
diff --git a/docs/en/reference/sql/ddl/DESC_STATEMENT.md b/docs/en/reference/sql/ddl/DESC_STATEMENT.md
index 355d0241b50..8179c952c56 100644
--- a/docs/en/reference/sql/ddl/DESC_STATEMENT.md
+++ b/docs/en/reference/sql/ddl/DESC_STATEMENT.md
@@ -10,7 +10,7 @@ TableName ::=
Identifier ('.' Identifier)?
```
-The `DESC` statement can display table details to the user.
+The `DESC` statement can display table details.
## SQL Statement Template
@@ -20,14 +20,14 @@ DESC table_name;
## Example:
-create a database`db1`:
+Create a database `db1`:
```sql
CREATE DATABASE db1;
--- SUCCEED: Create database successfully
+-- SUCCEED
CREATE DATABASE db2;
--- SUCCEED: Create database successfully
+-- SUCCEED
```
Then select `db1` as the current database:
@@ -41,21 +41,26 @@ Create two tables:
```sql
CREATE TABLE t1 (col0 STRING, col1 int, std_time TIMESTAMP, INDEX(KEY=col1, TS=std_time, TTL_TYPE=absolute, TTL=30d));
---SUCCEED: Create successfully
+--SUCCEED
desc t1;
- --- ---------- ----------- ------ ---------
- # Field Type Null Default
- --- ---------- ----------- ------ ---------
- 1 col0 Varchar YES
- 2 col1 Int YES
- 3 std_time Timestamp YES
- --- ---------- ----------- ------ ---------
- --- -------------------- ------ ---------- ---------- ---------------
- # name keys ts ttl ttl_type
- --- -------------------- ------ ---------- ---------- ---------------
- 1 INDEX_0_1639524729 col1 std_time 43200min kAbsoluteTime
- --- -------------------- ------ ---------- ---------- ---------------
+ --- ---------- ----------- ------ ---------
+ # Field Type Null Default
+ --- ---------- ----------- ------ ---------
+ 1 col0 Varchar YES
+ 2 col1 Int YES
+ 3 std_time Timestamp YES
+ --- ---------- ----------- ------ ---------
+ --- -------------------- ------ ---------- ---------- ---------------
+ # name keys ts ttl ttl_type
+ --- -------------------- ------ ---------- ---------- ---------------
+ 1 INDEX_0_1658136511 col1 std_time 43200min kAbsoluteTime
+ --- -------------------- ------ ---------- ---------- ---------------
+ --------------
+ storage_mode
+ --------------
+ Memory
+ --------------
```
@@ -65,7 +70,7 @@ desc t1;
[DROP DATABASE](./DROP_DATABASE_STATEMENT.md)
-[SHOW DATABASES](./SHOW_STATEMENT.md#show-databases)
+[SHOW DATABASES](./SHOW_DATABASES_STATEMENT.md)
-[SHOW TABLES](../ddl/SHOW_STATEMENT.md)
+[SHOW TABLES](./SHOW_TABLES_STATEMENT.md)
diff --git a/docs/en/reference/sql/ddl/DROP_DATABASE_STATEMENT.md b/docs/en/reference/sql/ddl/DROP_DATABASE_STATEMENT.md
index 102dd3ae689..ba04cf95ee8 100644
--- a/docs/en/reference/sql/ddl/DROP_DATABASE_STATEMENT.md
+++ b/docs/en/reference/sql/ddl/DROP_DATABASE_STATEMENT.md
@@ -10,19 +10,15 @@ The `DROP DATABASE` statement is used to drop a database.
## **Example**
-Create a database and set it as the current database:
+The following SQL commands create two databases and view all databases.
```sql
CREATE DATABASE db1;
--- SUCCEED: Create database successfully
+-- SUCCEED
CREATE DATABASE db2;
--- SUCCEED: Create database successfully
-```
-
-Check out the database list:
+-- SUCCEED
-```sql
SHOW DATABASES;
-----------
Databases
@@ -31,22 +27,19 @@ SHOW DATABASES;
db2
-----------
```
-
-drop database `db1`
+The following SQL command deletes the database `db1` and lists the remaining databases.
```sql
DROP DATABASE db1;
-```
-
-Check out the database list again:
-```sql
SHOW DATABASES;
- -----------
- Databases
- -----------
- db2
- -----------
+ -----------
+ Databases
+ -----------
+ db2
+ -----------
+
+1 rows in set
```
## Related Terms
@@ -55,5 +48,6 @@ SHOW DATABASES;
[CREATE DATABASE](./CREATE_DATABASE_STATEMENT.md)
-[SHOW DATABASES](../ddl/SHOW_STATEMENT.md#show-databases)
+[SHOW DATABASES](./SHOW_DATABASES_STATEMENT.md)
+
diff --git a/docs/en/reference/sql/ddl/DROP_INDEX_STATEMENT.md b/docs/en/reference/sql/ddl/DROP_INDEX_STATEMENT.md
new file mode 100644
index 00000000000..b85e90b0727
--- /dev/null
+++ b/docs/en/reference/sql/ddl/DROP_INDEX_STATEMENT.md
@@ -0,0 +1,22 @@
+# DROP INDEX
+The `DROP INDEX` statement is used to drop an index of a specific table.
+
+## Syntax
+
+```sql
+DropIndexStmt ::=
+    'DROP' 'INDEX' TableName.IndexName
+```
+
+
+
+
+## **Example**
+```SQL
+DROP INDEX t5.index2;
+-- SUCCEED
+```
+
+## Related SQL
+
+[CREATE INDEX](./CREATE_INDEX_STATEMENT.md)
\ No newline at end of file
diff --git a/docs/en/reference/sql/ddl/DROP_TABLE_STATEMENT.md b/docs/en/reference/sql/ddl/DROP_TABLE_STATEMENT.md
index 32e2d61c0be..531923a6b5a 100644
--- a/docs/en/reference/sql/ddl/DROP_TABLE_STATEMENT.md
+++ b/docs/en/reference/sql/ddl/DROP_TABLE_STATEMENT.md
@@ -8,11 +8,11 @@ The `DROP TABLE` statement is used to drop a specified table.
## Example: Delete a Table in the Current Database
-Create a database and set it as the current database:
+Create database `db1` and set it as the current database:
```sql
CREATE DATABASE db1;
--- SUCCEED: Create database successfully
+-- SUCCEED
USE db1;
-- SUCCEED: Database changed
@@ -22,13 +22,13 @@ Create two tables `t1` and `t2` in the database:
```sql
CREATE TABLE t1(col0 STRING);
--- SUCCEED: Create successfully
+-- SUCCEED
CREATE TABLE t2(col0 STRING);
--- SUCCEED: Create successfully
+-- SUCCEED
```
-View the tables under the database:
+View the tables of the current database:
```sql
SHOW TABLES;
@@ -51,7 +51,7 @@ DROP TABLE t1;
-- SUCCEED: Drop successfully
```
-Look at the tables under the database again:
+Look at the tables of `db1` again:
```sql
SHOW TABLES;
diff --git a/docs/en/reference/sql/ddl/SET_STATEMENT.md b/docs/en/reference/sql/ddl/SET_STATEMENT.md
index 4fa258f295a..fecf10b918d 100644
--- a/docs/en/reference/sql/ddl/SET_STATEMENT.md
+++ b/docs/en/reference/sql/ddl/SET_STATEMENT.md
@@ -1,4 +1,5 @@
# SET STATEMENT
+The `SET` statement is used to set system variables of OpenMLDB. At present, the system variables of OpenMLDB include session system variables and global system variables. Modifications to session variables will only affect the current session (that is, the current database connection). Modifications to global variables take effect for all sessions.
## Syntax
@@ -7,33 +8,32 @@ SetStatement ::=
'SET' variableName '=' value
variableName ::=
- | sessionVariableName
+ sessionVariableName
sessionVariableName ::= '@@'Identifier | '@@session.'Identifier | '@@global.'Identifier
```
-in the following way
+The following format is also equivalent.
```sql
'SET' [ GLOBAL | SESSION ] '='
```
-**Description**
-The `SET` statement is used to set system variables on OpenMLDB. At present, the system variables of OpenMLDB include session system variables and global system variables. Modifications to session variables will only affect the current session (that is, the current database connection). Modifications to global variables take effect for all sessions.
-- Session system variables are usually prefixed with `@session`, such as SET @@session.execute_mode = "offline". `Note⚠️: Session system variables can also be optionally prefixed with `@@` directly, that is, `SET @@execute_mode = "offline"` is equivalent to the previous configuration statement. Variable names are case-insensitive.
-- Global system variables are prefixed with `@global`, such as SET @@global.enable_trace = true;
-- OpenMLDB's SET statement can only be used to set/modify existing (built-in) system variables.
+- Session system variables are usually prefixed with `@@session.`, such as `SET @@session.execute_mode = "offline"`. Session system variables can also be prefixed with `@@` directly, that is, `SET @@execute_mode = "offline"` is equivalent to the previous statement, as shown in the example below.
+- Global system variables are prefixed with `@@global.`, such as `SET @@global.enable_trace = true;`
+- `SET STATEMENT` can only be used to set/modify existing (built-in) system variables.
+- Variable names are case-insensitive.
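+
+For instance, the following statements show two equivalent ways of setting the session execution mode, followed by a statement that sets a global variable (the values are only for illustration):
+```sql
+SET @@session.execute_mode = "offline";
+-- Equivalent to the statement above:
+SET @@execute_mode = "offline";
+-- Set a global system variable:
+SET @@global.enable_trace = true;
+```
+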
## Currently Supported System Variables
### SESSION System Variable
-| SESSION System Variable | Variable Description | Variable Value | Default Value |
-| -------------------------------------- | ------------------------------------------------------------ | --------------------- | --------- |
-| @@session.execute_mode|@@execute_mode | The execution mode of OpenMDLB in the current session. Currently supports "offline" and "online" two modes.
In offline execution mode, only offline data will be imported/inserted and queried.
In online execution mode, only online data will be imported/inserted and queried. | "offline" \| "online" | "offline" |
-| @@session.enable_trace|@@enable_trace | Console error message trace switch.
When the switch is on (`SET @@enable_trace = "true"`), an error message stack is printed when the SQL statement has a syntax error or an error occurs during the plan generation process.
When the switch is off (`SET @@enable_trace = "false"`), the SQL statement has a syntax error or an error occurs during the plan generation process, only the basic error message is printed. | "true" \| "false" | "false" |
-| @@session.sync_job|@@sync_job | ...开关。
When the switch is on (`SET @@sync_job = "true"`), the offline command will become synchronous, waiting for the final result of the execution.
When the switch is closed (`SET @@sync_job = "false"`), the offline command returns immediately, and you need to check the command execution through `SHOW JOB`. | "true" \| "false" | "false" |
-| @@session.sync_timeout|@@sync_timeout | ...
When offline command synchronization is enabled, you can configure the waiting time for synchronization commands. The timeout will return immediately. After the timeout returns, you can still view the command execution through `SHOW JOB`. | Int | "20000" |
+| SESSION System Variable | Note | Variable Value | Default Value |
+| -------------------------------------- |---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|-----------------------------| ----- |
+| @@session.execute_mode|@@execute_mode  | The execution mode of OpenMLDB in the current session. Currently, two modes are supported: `offline` and `online`.
In offline execution mode, only offline data will be imported/inserted and queried.
In online execution mode, only online data will be imported/inserted and queried. | `offline`,
`online` | `offline` |
+| @@session.enable_trace|@@enable_trace | When the value is `true`, an error message stack will be printed when the SQL statement has a syntax error or an error occurs during the plan generation process.
When the value is `false`, only the basic error message will be printed if there is a SQL syntax error or an error occurs during the plan generation process. | `true`,
`false` | `false` |
+| @@session.sync_job|@@sync_job | When the value is `true`, the offline command will be executed synchronously, waiting for the final result of the execution.
When the value is `false`, the offline command returns immediately. If you need to check the execution, please use `SHOW JOB` command. | `true`,
`false` | `false` |
+| @@session.sync_timeout|@@sync_timeout  | When `sync_job=true`, it configures the waiting time (in milliseconds) for synchronous offline commands. If the execution exceeds the timeout, the command returns immediately; after it returns, you can still check the execution through `SHOW JOB`. | Int | 20000 |
## Example
@@ -52,6 +52,7 @@ The `SET` statement is used to set system variables on OpenMLDB. At present, the
4 rows in set
> SET @@session.execute_mode = "online";
+-- SUCCEED
> SHOW VARIABLES;
--------------- ---------
Variable_name Value
@@ -64,6 +65,7 @@ The `SET` statement is used to set system variables on OpenMLDB. At present, the
4 rows in set
> SET @@session.enable_trace = "true";
+ -- SUCCEED
> SHOW VARIABLES;
--------------- ---------
Variable_name Value
@@ -76,7 +78,9 @@ The `SET` statement is used to set system variables on OpenMLDB. At present, the
4 rows in set
```
-### Set and Display Session System Variables
+
+
+### Set and Display Global System Variables
```sql
> SHOW GLOBAL VARIABLES;
--------------- ----------------
@@ -90,6 +94,7 @@ The `SET` statement is used to set system variables on OpenMLDB. At present, the
4 rows in set
> SET @@global.enable_trace = "true";
+-- SUCCEED
> SHOW GLOBAL VARIABLES;
--------------- ----------------
Variable_name Variable_value
@@ -103,32 +108,33 @@ The `SET` statement is used to set system variables on OpenMLDB. At present, the
4 rows in set
```
-### Configure enable_trace
+### Configure `enable_trace`
-- Create a database `db1` and create table t1
+- Create a database `db1` and create table `t1`.
```sql
CREATE DATABASE db1;
--- SUCCEED: Create database successfully
+-- SUCCEED
USE db1;
-- SUCCEED: Database changed
CREATE TABLE t1 (col0 STRING, col1 int, std_time TIMESTAMP, INDEX(KEY=col1, TS=std_time, TTL_TYPE=absolute, TTL=30d));
---SUCCEED: Create successfully
-
+--SUCCEED
```
-- When enable_trace is turned off, the wrong SQL is executed:
+- When `enable_trace` is `false`, executing an invalid SQL statement generates the following information.
```sql
> set @@enable_trace = "false";
+-- SUCCEED
> select sum(col1) over w1 from t1 window w1 as (partition by col1 order by col0 rows_range between 10d preceding and current row);
-- ERROR: Invalid Order column type : kVarchar
```
-- When enable_trace is turned on, the wrong SQL is executed:
+- When `enable_trace` is `true`, executing an invalid SQL statement generates the following information.
```sql
> set @@enable_trace = "true";
+-- SUCCEED
> select sum(col1) over w1 from t1 window w1 as (partition by col1 order by col0 rows_range between 10d preceding and current row);
-- ERROR: Invalid Order column type : kVarchar
(At /Users/chenjing/work/chenjing/OpenMLDB/hybridse/src/vm/sql_compiler.cc:263)
@@ -141,16 +147,16 @@ CREATE TABLE t1 (col0 STRING, col1 int, std_time TIMESTAMP, INDEX(KEY=col1, TS=s
(At /Users/chenjing/work/chenjing/OpenMLDB/hybridse/src/vm/transform.cc:1997)
```
-### Configure Offline Command Synchronous Execution
+### Configure Synchronous Execution for Offline Commands
-- Set offline command synchronous execution:
+- Set the synchronous execution for offline commands:
```sql
> SET @@sync_job = "true";
```
-- Set the wait time for synchronization commands (in milliseconds):
+- Set the waiting time for synchronization commands (in milliseconds):
```sql
> SET @@job_timeout = "600000";
```
diff --git a/docs/en/reference/sql/ddl/SHOW_COMPONENTS.md b/docs/en/reference/sql/ddl/SHOW_COMPONENTS.md
new file mode 100644
index 00000000000..f94db653706
--- /dev/null
+++ b/docs/en/reference/sql/ddl/SHOW_COMPONENTS.md
@@ -0,0 +1,40 @@
+# SHOW COMPONENTS
+`SHOW COMPONENTS` is used to show the information of components.
+
+```sql
+SHOW COMPONENTS;
+```
+
+## Output Information
+
+| Column | Note |
+| ------------ |---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
+| Endpoint | It shows the endpoint of the component by providing the IP and the port, which is the same as the `--endpoint` flag in configuration files. |
+| Role | It indicates the role of the component, which is the same as the `--role` flag in configuration files.
There are four types of roles: `tablet`, `nameserver`, `taskmanager` and `apiserver`. |
+| Connect_time | It shows the timestamp (in milliseconds) of connection establishment of the component. |
+| Status | It shows the status of the component. There are three kinds of status: `online`, `offline` and `NULL`. |
+| Ns_role      | It shows the role of the Nameserver: `master` or `standby`. For other components, Ns_role is `NULL`. |
+
+
+```{note}
+Currently, there are certain limitations of `SHOW COMPONENTS`:
+- It does not include the information of the APIServer.
+- It can only show the information of the leader TaskManager; the followers are not shown.
+- The `Connect_time` of the nameserver in the standalone version is inaccurate.
+```
+## Example
+
+```sql
+SHOW COMPONENTS;
+ ---------------- ------------ --------------- -------- ---------
+ Endpoint Role Connect_time Status Ns_role
+ ---------------- ------------ --------------- -------- ---------
+ 127.0.0.1:9520 tablet 1654759517890 online NULL
+ 127.0.0.1:9521 tablet 1654759517942 online NULL
+ 127.0.0.1:9522 tablet 1654759517919 online NULL
+ 127.0.0.1:9622 nameserver 1654759519015 online master
+ 127.0.0.1:9623 nameserver 1654759521016 online standby
+ 127.0.0.1:9624 nameserver 1654759523030 online standby
+ ---------------- ------------ --------------- -------- ---------
+```
+
diff --git a/docs/en/reference/sql/ddl/SHOW_TABLES_STATEMENT.md b/docs/en/reference/sql/ddl/SHOW_TABLES_STATEMENT.md
index fa7aa602c0e..b0614b8a0ad 100644
--- a/docs/en/reference/sql/ddl/SHOW_TABLES_STATEMENT.md
+++ b/docs/en/reference/sql/ddl/SHOW_TABLES_STATEMENT.md
@@ -10,16 +10,16 @@ The `SHOW TABLES` statement is used to display the tables that the user has acce
```sql
CREATE DATABASE db1;
---SUCCEED: Create database successfully
+--SUCCEED
USE db1;
--SUCCEED: Database changed
CREATE TABLE t1(col0 STRING);
--- SUCCEED: Create successfully
+-- SUCCEED
CREATE TABLE t2(col0 STRING);
--- SUCCEED: Create successfully
+-- SUCCEED
SHOW TABLES;
--------
diff --git a/docs/en/reference/sql/ddl/SHOW_TABLE_STATUS.md b/docs/en/reference/sql/ddl/SHOW_TABLE_STATUS.md
new file mode 100644
index 00000000000..fe31b60ef27
--- /dev/null
+++ b/docs/en/reference/sql/ddl/SHOW_TABLE_STATUS.md
@@ -0,0 +1,43 @@
+# SHOW TABLE STATUS
+
+`SHOW TABLE STATUS` is used to show information about tables in a given database or all databases, excluding hidden databases.
+If no database is used, `SHOW TABLE STATUS` will display information about all tables in all databases, excluding hidden databases.
+If a database is specified, the statement will only display information about the tables in the given database.
+
+```sql
+SHOW TABLE STATUS;
+```
+
+
+## Output Information
+
+| Column | Note |
+| ----------------- |----------------------------------------------------------------------------------------------------------------------------------------|
+| Table_id | It shows the unique id of the table. |
+| Table_name | It shows the name of the table. |
+| Database_name     | It shows the name of the database to which the table belongs. |
+| Storage_type      | It shows the storage type of the table. There are three possible values: `memory`, `ssd` and `hdd`. |
+| Rows | It shows the number of rows in this table. |
+| Memory_data_size | It shows the memory usage of the table in bytes. |
+| Disk_data_size | It shows the disk usage of the table in bytes. |
+| Partition         | It shows the number of partitions of the table. |
+| Partition_unalive | It shows the number of the unalive partitions of the table. |
+| Replica | It shows the number of replicas of the table. |
+| Offline_path | It shows the path of the offline data for this table and is valid only for offline tables. The `NULL` value means the path is not set. |
+| Offline_format | It shows the offline data format of the table and is valid only for offline tables. The `NULL` value means it is not set. |
+| Offline_deep_copy | It indicates whether deep copy is used on the table and is valid only for offline tables. The `NULL` value means it is not set. |
+
+
+
+## Example
+
+```sql
+> USE db;
+> SHOW TABLE STATUS;
+ ---------- ------------ --------------- -------------- ------ ------------------ ---------------- ----------- ------------------- --------- -------------- ---------------- -------------------
+ Table_id Table_name Database_name Storage_type Rows Memory_data_size Disk_data_size Partition Partition_unalive Replica Offline_path Offline_format Offline_deep_copy
+ ---------- ------------ --------------- -------------- ------ ------------------ ---------------- ----------- ------------------- --------- -------------- ---------------- -------------------
+ 6 t1 db memory 2 479 0 8 0 3 NULL NULL NULL
+ ---------- ------------ --------------- -------------- ------ ------------------ ---------------- ----------- ------------------- --------- -------------- ---------------- -------------------
+```
+
diff --git a/docs/en/reference/sql/ddl/SHOW_VARIABLES_STATEMENT.md b/docs/en/reference/sql/ddl/SHOW_VARIABLES_STATEMENT.md
index b5fe7898e46..d86ad926de5 100644
--- a/docs/en/reference/sql/ddl/SHOW_VARIABLES_STATEMENT.md
+++ b/docs/en/reference/sql/ddl/SHOW_VARIABLES_STATEMENT.md
@@ -1,51 +1,95 @@
# SHOW VARIABLES
+`SHOW VARIABLES` is used to view system variables.
+- The `SHOW SESSION VARIABLES` or `SHOW VARIABLES` statement can display system variables of the **current session**.
+- `SHOW GLOBAL VARIABLES` is used to display the **global** system variables.
+
+Currently, OpenMLDB only supports session system variables and global system variables but doesn't support user variables. Modifications to session variables will only affect the current session (that is, the current database connection). Therefore, when you close the connection (or exit the console) and then reconnect (or log in to the console again), the previous configuration and modification of session variables will be reset.
+
+## Syntax
```sql
ShowVariablesStmt ::=
- ShowSessionVariablesStmt
+ ShowSessionVariablesStmt | ShowGlobalVariablesStmt
ShowSessionVariablesStmt ::=
- 'SHOW' 'VARIABLES'
- |'SHOW' 'SESSION' 'VARIABLES'
-
+ 'SHOW' 'VARIABLES'
+ |'SHOW' 'SESSION' 'VARIABLES'
+ShowGlobalVariablesStmt ::=
+ 'SHOW' 'GLOBAL' 'VARIABLES'
```
-The `SHOW SESSION VARIABLES` or `SHOW VARIABLES` statement is used to display system variables for the current session.
-Currently OpenMLDB only supports session system variables. Modifications to session variables will only affect the current session (that is, the current database connection). Therefore, when you close the database connection (or exit the console), and then reconnect (or log in to the console again), the previous configuration and modification of session variables will be reset.
## Example
```sql
> SHOW SESSION VARIABLES;
- --------------- --------
+ --------------- ---------
Variable_name Value
- --------------- --------
+ --------------- ---------
enable_trace false
- execute_mode online
- --------------- --------
+ execute_mode offline
+ job_timeout 20000
+ sync_job false
+ --------------- ---------
+
+4 rows in set
+
> SET @@enable_trace = "true"
-
+ --SUCCEED
> SHOW VARIABLES;
- --------------- --------
+ --------------- ---------
Variable_name Value
- --------------- --------
+ --------------- ---------
enable_trace true
- execute_mode online
- --------------- --------
+ execute_mode offline
+ job_timeout 20000
+ sync_job false
+ --------------- ---------
+
+4 rows in set
+
+
+> SHOW GLOBAL VARIABLES;
+ --------------- ----------------
+ Variable_name Variable_value
+ --------------- ----------------
+ enable_trace false
+ sync_job false
+ job_timeout 20000
+ execute_mode offline
+ --------------- ----------------
+
+4 rows in set
```
-After exiting the console, log back into the console
+After exiting the console, log in to the console again and check the variables.
```sql
> SHOW SESSION VARIABLES;
- --------------- --------
+ --------------- ---------
Variable_name Value
- --------------- --------
+ --------------- ---------
enable_trace false
- execute_mode online
- --------------- --------
+ execute_mode offline
+ job_timeout 20000
+ sync_job false
+ --------------- ---------
+
+4 rows in set
+
+
+> SHOW GLOBAL VARIABLES;
+ --------------- ----------------
+ Variable_name Variable_value
+ --------------- ----------------
+ enable_trace false
+ sync_job false
+ job_timeout 20000
+ execute_mode offline
+ --------------- ----------------
+
+4 rows in set
```
diff --git a/docs/en/reference/sql/ddl/USE_DATABASE_STATEMENT.md b/docs/en/reference/sql/ddl/USE_DATABASE_STATEMENT.md
index fb9efd06231..770bc868c9e 100644
--- a/docs/en/reference/sql/ddl/USE_DATABASE_STATEMENT.md
+++ b/docs/en/reference/sql/ddl/USE_DATABASE_STATEMENT.md
@@ -24,10 +24,10 @@ Create a database `db1`:
```sql
CREATE DATABASE db1;
--- SUCCEED: Create database successfully
+-- SUCCEED
CREATE DATABASE db2;
--- SUCCEED: Create database successfully
+-- SUCCEED
```
Then select `db1` as the current database:
@@ -41,21 +41,23 @@ Create two tables:
```sql
CREATE TABLE t1(col0 string);
--- SUCCEED: Create successfully
+-- SUCCEED
-CREATE TABLE t1(col0 string);
--- SUCCEED: Create successfully
+CREATE TABLE t2(col0 string);
+-- SUCCEED
SHOW TABLES;
- --------
- Tables
- --------
- t1
- t2
- --------
+ --------
+ Tables
+ --------
+ t1
+ t2
+ --------
+
+2 rows in set
```
-Then select `db2` as the current database and view the tables under the current library:
+Then select `db2` as the current database and view the tables in `db2`:
```sql
USE db2;
@@ -72,6 +74,6 @@ SHOW TABLES;
[DROP DATABASE](./DROP_DATABASE_STATEMENT.md)
-[SHOW DATABASES](./SHOW_STATEMENT.md#show-databases)
+[SHOW DATABASES](./SHOW_DATABASES_STATEMENT.md)
-[SHOW TABLES](./SHOW_STATEMENT.md#show-tables)
\ No newline at end of file
+[SHOW TABLES](./SHOW_TABLES_STATEMENT.md)
\ No newline at end of file
diff --git a/docs/en/reference/sql/ddl/index.rst b/docs/en/reference/sql/ddl/index.rst
index 803d12ac60d..1868cbb0fb8 100644
--- a/docs/en/reference/sql/ddl/index.rst
+++ b/docs/en/reference/sql/ddl/index.rst
@@ -13,6 +13,10 @@ Data Definition Statement (DDL)
DESC_STATEMENT
CREATE_TABLE_STATEMENT
DROP_TABLE_STATEMENT
+ SHOW_COMPONENTS
SHOW_TABLES_STATEMENT
SHOW_VARIABLES_STATEMENT
+ SHOW_TABLE_STATUS
SET_STATEMENT
+ CREATE_INDEX_STATEMENT
+ DROP_INDEX_STATEMENT
diff --git a/docs/en/reference/sql/deployment_manage/DEPLOY_STATEMENT.md b/docs/en/reference/sql/deployment_manage/DEPLOY_STATEMENT.md
index 220c598f6f9..15df5134807 100644
--- a/docs/en/reference/sql/deployment_manage/DEPLOY_STATEMENT.md
+++ b/docs/en/reference/sql/deployment_manage/DEPLOY_STATEMENT.md
@@ -1,84 +1,99 @@
-# 创建 DEPLOYMENT
+# DEPLOY
## Syntax
```sql
CreateDeploymentStmt
- ::= 'DEPLOY' [DeployOptions] DeploymentName SelectStmt
-
-DeployOptions(可选)
- ::= 'OPTIONS' '(' DeployOptionItem (',' DeployOptionItem)* ')'
-
+ ::= 'DEPLOY' [DeployOptionList] DeploymentName SelectStmt
+
+DeployOptionList
+ ::= DeployOption*
+
+DeployOption
+ ::= 'OPTIONS' '(' DeployOptionItem (',' DeployOptionItem)* ')'
+
DeploymentName
- ::= identifier
+ ::= identifier
```
-`DeployOptions`的定义详见[DEPLOYMENT属性DeployOptions(可选)](#DEPLOYMENT属性DeployOptions(可选)).
-`DEPLOY`语句可以将SQL部署到线上。OpenMLDB仅支持部署[Select查询语句](../dql/SELECT_STATEMENT.md),并且需要满足[OpenMLDB SQL上线规范和要求](../deployment_manage/ONLINE_SERVING_REQUIREMENTS.md)
+Please refer to [DEPLOYMENT Property DeployOptions (optional)](#deployoptions-optional) for the definition of `DeployOptions`.
+Please refer to [Select Statement](../dql/SELECT_STATEMENT.md) for the definition of `SelectStmt`.
-```SQL
-DEPLOY deployment_name SELECT clause
-```
-### Example: 部署一个SQL到online serving
+The `DEPLOY` statement is used to deploy SQL online. OpenMLDB supports deploying [Select Statement](../dql/SELECT_STATEMENT.md), and the SQL script should meet the requirements in [OpenMLDB SQL Requirement](../deployment_manage/ONLINE_SERVING_REQUIREMENTS.md).
+
+
+
+**Example**
+
-```sqlite
+The following commands deploy a SQL script online under the Online Request mode of the cluster version.
+```sql
CREATE DATABASE db1;
--- SUCCEED: Create database successfully
+-- SUCCEED
USE db1;
-- SUCCEED: Database changed
-CREATE TABLE t1(col0 STRING);
+CREATE TABLE demo_table1(c1 string, c2 int, c3 bigint, c4 float, c5 double, c6 timestamp, c7 date);
-- SUCCEED: Create successfully
-DEPLOY demo_deploy select col0 from t1;
--- SUCCEED: deploy successfully
+DEPLOY demo_deploy SELECT c1, c2, sum(c3) OVER w1 AS w1_c3_sum FROM demo_table1 WINDOW w1 AS (PARTITION BY demo_table1.c1 ORDER BY demo_table1.c6 ROWS BETWEEN 2 PRECEDING AND CURRENT ROW);
+
+-- SUCCEED
```
-查看部署详情:
+We can use the `SHOW DEPLOYMENT demo_deploy` command to see the details of a specific deployment.
```sql
-
-SHOW DEPLOYMENT demo_deploy;
- ----- -------------
- DB Deployment
- ----- -------------
- db1 demo_deploy
- ----- -------------
- 1 row in set
-
- ----------------------------------------------------------------------------------
- SQL
- ----------------------------------------------------------------------------------
- CREATE PROCEDURE deme_deploy (col0 varchar) BEGIN SELECT
- col0
+ --------- -------------------
+ DB Deployment
+ --------- -------------------
+ demo_db demo_deploy
+ --------- -------------------
+1 row in set
+ -----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
+ SQL
+ -----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
+ DEPLOY demo_data_service SELECT
+ c1,
+ c2,
+ sum(c3) OVER (w1) AS w1_c3_sum
FROM
- t1
-; END;
- ----------------------------------------------------------------------------------
+ demo_table1
+WINDOW w1 AS (PARTITION BY demo_table1.c1
+ ORDER BY demo_table1.c6 ROWS BETWEEN 2 PRECEDING AND CURRENT ROW)
+;
+ -----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
1 row in set
-
# Input Schema
- --- ------- ---------- ------------
- # Field Type IsConstant
- --- ------- ---------- ------------
- 1 col0 kVarchar NO
- --- ------- ---------- ------------
+ --- ------- ------------ ------------
+ # Field Type IsConstant
+ --- ------- ------------ ------------
+ 1 c1 Varchar NO
+ 2 c2 Int32 NO
+ 3 c3 Int64 NO
+ 4 c4 Float NO
+ 5 c5 Double NO
+ 6 c6 Timestamp NO
+ 7 c7 Date NO
+ --- ------- ------------ ------------
# Output Schema
- --- ------- ---------- ------------
- # Field Type IsConstant
- --- ------- ---------- ------------
- 1 col0 kVarchar NO
- --- ------- ---------- ------------
+ --- ----------- ---------- ------------
+ # Field Type IsConstant
+ --- ----------- ---------- ------------
+ 1 c1 Varchar NO
+ 2 c2 Int32 NO
+ 3 w1_c3_sum Int64 NO
+ --- ----------- ---------- ------------
```
-### DEPLOYMENT Property DeployOptions (optional)
+### DeployOptions (optional)
```sql
-DeployOptions
+DeployOption
::= 'OPTIONS' '(' DeployOptionItem (',' DeployOptionItem)* ')'
DeployOptionItem
@@ -87,40 +102,58 @@ DeployOptionItem
LongWindowOption
::= 'LONG_WINDOWS' '=' LongWindowDefinitions
```
-Currently only the optimization option for long windows `LONG_WINDOWS` is supported.
+Currently, only the optimization option for long windows, `LONG_WINDOWS`, is supported.
#### Long Window Optimization
-##### Long Window Optimization Options Format
```sql
LongWindowDefinitions
- ::= 'LongWindowDefinition (, LongWindowDefinition)*'
+ ::= 'LongWindowDefinition (, LongWindowDefinition)*'
LongWindowDefinition
- ::= 'WindowName[:BucketSize]'
+ ::= WindowName':'[BucketSize]
WindowName
- ::= string_literal
+ ::= string_literal
-BucketSize (optional, defaults to)
- ::= int_literal | interval_literal
+BucketSize
+ ::= int_literal | interval_literal
-interval_literal ::= int_literal 's'|'m'|'h'|'d' (representing seconds, minutes, hours, days)
+interval_literal ::= int_literal 's'|'m'|'h'|'d'
```
-Among them, `BucketSize` is a performance optimization option. It will use `BucketSize` as the granularity to pre-aggregate the data in the table. The default value is `1d`.
-An example is as follows:
-```sqlite
-DEPLOY demo_deploy OPTIONS(long_windows="w1:1d") SELECT col0, sum(col1) OVER w1 FROM t1
- WINDOW w1 AS (PARTITION BY col0 ORDER BY col2 ROWS_RANGE BETWEEN 5d PRECEDING AND CURRENT ROW);
--- SUCCEED: deploy successfully
-```
+`BucketSize` is a performance optimization option. Data will be pre-aggregated according to `BucketSize`. The default value is `1d`.
+
-##### Limitation Factor
+
+##### Limitation
The current long window optimization has the following limitations:
-- Only supports `SelectStmt` involving only one physical table, i.e. `SelectStmt` containing `join` or `union` is not supported
-- Only supported aggregation operations: `sum`, `avg`, `count`, `min`, `max`
-- Do not allow data in the table when executing the `deploy` command
+- Only `SelectStmt` involving one physical table is supported, i.e. `SelectStmt` containing `join` or `union` is not supported.
+
+- Supported aggregation operations include: `sum`, `avg`, `count`, `min`, `max`, `count_where`, `min_where`, `max_where`, `sum_where`, `avg_where`.
+
+- The table should be empty when executing the `deploy` command.
+
+- For the aggregation functions with a `where` condition, i.e. `count_where`, `min_where`, `max_where`, `sum_where` and `avg_where`, there are extra limitations:
+
+  1. The main table should be a memory table (`storage_mode = 'Memory'`).
+
+  2. The type of `BucketSize` should be a range type, that is, its value should be an `interval_literal`. For example, `long_windows='w1:1d'` is supported, whereas `long_windows='w1:100'` is not.
+
+  3. The expression in the `where` condition should be in the format of `<column> op <const_value>` or `<const_value> op <column>`.
+
+     - Supported `where` ops: `>`, `<`, `>=`, `<=`, `=`, `!=`.
+
+     - The `<column>` should not be of `date` or `timestamp` type.
+
+**Example**
+
+```sql
+DEPLOY demo_deploy OPTIONS(long_windows="w1:1d") SELECT c1, sum(c2) OVER w1 FROM demo_table1
+    WINDOW w1 AS (PARTITION BY c1 ORDER BY c6 ROWS_RANGE BETWEEN 5d PRECEDING AND CURRENT ROW);
+-- SUCCEED
+```
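+
+For the `*_where` aggregations listed above, a deployment might look like the following sketch. This is only an illustration under the stated limitations, assuming `demo_table1` is an empty memory table with the schema created earlier; the deployment name `demo_deploy_cw` is hypothetical.
+```sql
+-- a sketch: long window optimization for an aggregation with a where condition
+DEPLOY demo_deploy_cw OPTIONS(long_windows="w1:1d") SELECT c1, count_where(c3, c2 > 100) OVER w1 AS w1_c3_count FROM demo_table1
+    WINDOW w1 AS (PARTITION BY c1 ORDER BY c6 ROWS_RANGE BETWEEN 10d PRECEDING AND CURRENT ROW);
+-- SUCCEED
+```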
+
## Relevant SQL
diff --git a/docs/en/reference/sql/deployment_manage/DROP_DEPLOYMENT_STATEMENT.md b/docs/en/reference/sql/deployment_manage/DROP_DEPLOYMENT_STATEMENT.md
index f5f77954277..5cc6b4ade72 100644
--- a/docs/en/reference/sql/deployment_manage/DROP_DEPLOYMENT_STATEMENT.md
+++ b/docs/en/reference/sql/deployment_manage/DROP_DEPLOYMENT_STATEMENT.md
@@ -1,10 +1,10 @@
# Delete DEPLOYMENT
+The `DROP DEPLOYMENT` statement is used to drop a deployment under Online Request mode.
```SQL
DROP DEPLOYMENT deployment_name
```
-The `DROP DEPLOYMENT` statement is used to drop an OnlineServing deployment.
## Example:
@@ -12,24 +12,23 @@ Create a database and set it as the current database:
```sql
CREATE DATABASE db1;
--- SUCCEED: Create database successfully
+-- SUCCEED
USE db1;
-- SUCCEED: Database changed
```
Create a table `t1`:
-```
+```sql
CREATE TABLE t1(col0 STRING);
--- SUCCEED: Create successfully
-
+-- SUCCEED
```
-Deploy the query statement of table t1 to OnlineServing:
+Deploy the query statement of table t1 under Online Request mode:
```sql
> DEPLOY demo_deploy select col0 from t1;
-SUCCEED: deploy successfully
+SUCCEED
```
View all deployments in the current database:
@@ -51,7 +50,7 @@ Delete the specified deployment:
DROP DEPLOYMENT demo_deploy;
-- Drop deployment demo_deploy? yes/no
-- yes
--- SUCCEED: Drop successfully
+-- SUCCEED
```
@@ -59,7 +58,11 @@ After deletion, check the deployments under the database again, it should be an
```sql
SHOW DEPLOYMENTS;
-Empty set
+ ---- ------------
+ DB Deployment
+ ---- ------------
+
+0 rows in set
```
diff --git a/docs/en/reference/sql/deployment_manage/ONLINE_REQUEST_REQUIREMENTS.md b/docs/en/reference/sql/deployment_manage/ONLINE_REQUEST_REQUIREMENTS.md
new file mode 100644
index 00000000000..44bf5a858fa
--- /dev/null
+++ b/docs/en/reference/sql/deployment_manage/ONLINE_REQUEST_REQUIREMENTS.md
@@ -0,0 +1,101 @@
+# Online Specifications and Requirements for SQL
+
+OpenMLDB can provide real-time feature extraction services under *online request* mode. The [DEPLOY](../deployment_manage/DEPLOY_STATEMENT.md) command deploys a SQL script online so that it can perform feature extraction on request samples. If the deployment is successful, users can perform real-time feature extraction through the RESTful API or the JDBC API. Note that not all SQL statements can be deployed to provide services online. To deploy a SQL statement, please follow the specifications below.
+
+## Supported Statements under Online Request Mode
+
+Online request mode only supports [SELECT query statement](../dql/SELECT_STATEMENT.md).
+
+## Supported `SELECT` Clause by Online Request Mode
+
+It is worth noting that not all SELECT query statements can be deployed online; see [SELECT Statement](../dql/SELECT_STATEMENT.md#select-statement) for details.
+
+The following table shows the `SELECT` clause supported under online request mode.
+
+| SELECT Clause | Note |
+|:------------------------------------------------|:------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
+| Simple calculation on single table | The so-called simple single-table query is to process the column of a table, or use operation expressions, single-row processing function (Scalar Function) and their combined expressions on the table. You need to follow the [specifications of Single-table query under Online Request mode](#specifications-of-single-table-query-under-online-request-mode) |
+| [`JOIN` Clause](../dql/JOIN_CLAUSE.md) | OpenMLDB currently only supports **LAST JOIN**. For Online Request mode, please follow [the specifications of LAST JOIN under Online Request mode](#specifications-of-last-join-under-online-request-mode) |
+| [`WINDOW` Clause](../dql/WINDOW_CLAUSE.md) | The window clause is used to define one or several windows. Windows can be named or anonymous. Aggregate functions can be called on the window to perform some analytical computations. For Online Request mode, please follow the [specifications of WINDOW under Online Request mode](#specifications-of-window-under-online-request-mode) |
+| [`LIMIT` Clause](../dql/LIMIT_CLAUSE.md) | The LIMIT clause is used to limit the number of results. OpenMLDB currently only supports one parameter to limit the maximum number of rows of returned data. |
+
+## Specifications of `SELECT` Clause Supported by Online Request Mode
+
+### Specifications of Single-table Query under Online Request Mode
+
+- Only column computations, expressions, and single-row processing functions (Scalar Function) and their combined expressions are supported.
+- Single table query does not contain [GROUP BY clause](../dql/GROUP_BY_CLAUSE.md), [WHERE clause](../dql/WHERE_CLAUSE.md), [HAVING clause](../dql/HAVING_CLAUSE.md) or [WINDOW clause](../dql/WINDOW_CLAUSE.md).
+- Single table query only involves the computation of a single table, and does not involve the computation of multiple [joined](../dql/JOIN_CLAUSE.md) tables.
+
+**Example**
+
+```sql
+-- desc: SELECT all columns
+SELECT * FROM t1;
+
+-- desc: rename expression 1
+SELECT COL1 as c1 FROM t1;
+
+-- desc: rename expression 2
+SELECT COL1 c1 FROM t1;
+
+-- desc: SELECT on column expression
+SELECT COL1 FROM t1;
+SELECT t1.COL1 FROM t1;
+
+-- desc: unary expression
+SELECT -COL2 as COL2_NEG FROM t1;
+
+-- desc: binary expression
+SELECT COL1 + COL2 as COL12_ADD FROM t1;
+
+-- desc: type cast
+SELECT CAST(COL1 as BIGINT) as COL_BIGINT FROM t1;
+
+-- desc: function expression
+SELECT substr(COL7, 3, 6) FROM t1;
+```
+
+### Specifications of LAST JOIN under Online Request Mode
+
+- Only `LAST JOIN` is supported.
+- At least one JOIN condition is an EQUAL condition like `left_table.column=right_table.column`, and the `right_table.column` needs to be indexed as a `KEY` of the right table.
+- In the case of LAST JOIN with sorting, `ORDER BY` only supports column expressions, and the column needs to be indexed as a timestamp (TS) of the right table.
+
+**Example**
+
+```sql
+CREATE DATABASE db1;
+-- SUCCEED
+
+USE db1;
+-- SUCCEED: Database changed
+
+CREATE TABLE t1 (col0 STRING, col1 int, std_time TIMESTAMP, INDEX(KEY=col1, TS=std_time, TTL_TYPE=absolute, TTL=30d));
+-- SUCCEED
+
+CREATE TABLE t2 (col0 STRING, col1 int, std_time TIMESTAMP, INDEX(KEY=col1, TS=std_time, TTL_TYPE=absolute, TTL=30d));
+-- SUCCEED
+
+desc t1;
+ --- ---------- ----------- ------ ---------
+ # Field Type Null Default
+ --- ---------- ----------- ------ ---------
+ 1 col0 Varchar YES
+ 2 col1 Int YES
+ 3 std_time Timestamp YES
+ --- ---------- ----------- ------ ---------
+ --- -------------------- ------ ---------- ---------- ---------------
+ # name keys ts ttl ttl_type
+ --- -------------------- ------ ---------- ---------- ---------------
+ 1 INDEX_0_1639524729 col1 std_time 43200min kAbsoluteTime
+ --- -------------------- ------ ---------- ---------- ---------------
+```
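+Given the two tables above, the following sketch shows a `LAST JOIN` query that satisfies these specifications: the equal condition `t1.col1 = t2.col1` hits the `KEY` of the index on `t2`, and `ORDER BY t2.std_time` hits its `TS` column. The column alias `t2_time` is only used for illustration.
+```sql
+-- a sketch: LAST JOIN that follows the specifications above
+SELECT t1.col0, t1.col1, t2.std_time AS t2_time
+    FROM t1 LAST JOIN t2 ORDER BY t2.std_time ON t1.col1 = t2.col1;
+```
+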
+### Specifications of WINDOW under Online Request Mode
+
+- Window boundary: only `PRECEDING` and `CURRENT ROW` are supported.
+- Window type: only `ROWS` and `ROWS_RANGE` are supported.
+- `PARTITION BY` only supports column expressions, and the column needs to be indexed as a `KEY`.
+- `ORDER BY` only supports column expressions, and the column needs to be indexed as a timestamp (`TS`).
+- Other supported keywords: `EXCLUDE CURRENT_ROW`, `EXCLUDE CURRENT_TIME`, `MAXSIZE` and `INSTANCE_NOT_IN_WINDOW`. See [WindowSpec elements specifically designed by OpenMLDB](../dql/WINDOW_CLAUSE.md#windowspec-elements-specifically-designed-by-openmldb) for detail.
+
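+For example, the following sketch shows a window definition that follows these specifications, reusing the table `t1` created in the LAST JOIN example above: `PARTITION BY col1` hits the `KEY` of the index and `ORDER BY std_time` hits its `TS` column. The alias `w1_col1_sum` is only used for illustration.
+```sql
+-- a sketch: a window that follows the specifications above
+SELECT col0, col1, sum(col1) OVER w1 AS w1_col1_sum FROM t1
+    WINDOW w1 AS (PARTITION BY col1 ORDER BY std_time ROWS BETWEEN 10 PRECEDING AND CURRENT ROW);
+```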
diff --git a/docs/en/reference/sql/deployment_manage/ONLINE_SERVING_REQUIREMENTS.md b/docs/en/reference/sql/deployment_manage/ONLINE_SERVING_REQUIREMENTS.md
deleted file mode 100644
index 8c46e6ea442..00000000000
--- a/docs/en/reference/sql/deployment_manage/ONLINE_SERVING_REQUIREMENTS.md
+++ /dev/null
@@ -1,95 +0,0 @@
-# SQL On-Line Specifications and Requirements
-
-OpenMLDB Online Serving provides real-time feature extraction services. The [DEPLOY](../deployment_manage/DEPLOY_STATEMENT.md) command of OpenMLDB deploys a piece of SQL text to the wire. After the deployment is successful, users can perform feature extraction calculations on the request samples in real time through the Restful API or JDBC API. Not all SQL can be deployed to provide services online. OpenMLDB has a set of specifications for online statements and OP.
-
-## Online Serving Statement
-
-OpenMLDB only supports online [SELECT query statement](../dql/SELECT_STATEMENT.md).
-
-## Online Serving Op List
-
-It is worth noting that not all SELECT query statements can be online. In OpenMLDB, only `SELECT`, `WINDOW`, `LAST JOIN` OP can be online, other OP (including `WHERE`, `GROUP`, `HAVING`, `LIMIT`) are all unable to go online.
-
-This section will list the OPs that support Online Serving, and elaborate on the online usage specifications of these OPs.
-
-| SELECT Statement | description |
-| :----------------------------------------- | :----------------------------------------------------------- |
-| Single sheet simple expression calculation | During Online Serving, **simple single-table query** is supported. The so-called simple single-table query is to calculate the column, operation expression, single-row processing function (Scalar Function) and their combined expressions of a table. You need to follow the [Usage Specifications for Online Serving Order Form Query] (#online-serving Order Form Query Usage Specification) |
-| [`JOIN` Clause](../dql/JOIN_CLAUSE.md) | OpenMLDB currently only supports **LAST JOIN**. In Online Serving, you need to follow [The usage specification of LAST JOIN under Online Serving] (#online-serving usage specification of last-join) |
-| [`WINDOW` Clause](../dql/WINDOW_CLAUSE.md) | The window clause is used to define one or several windows. Windows can be named or anonymous. Users can call aggregate functions on the window to perform some analytical calculations (```sql agg_func() over window_name```). In Online Serving, you need to follow the [Usage Specifications of Window under Online Serving] (#the usage specification of window under online-serving) |
-
-## OP's usage specification under Online Serving
-
-### Online Serving Order Form Query Usage Specifications
-
-- Only supports column, expression, and single-row processing functions (Scalar Function) and their combined expression operations
-- Single table query does not contain [GROUP BY clause](../dql/JOIN_CLAUSE.md), [WHERE clause](../dql/WHERE_CLAUSE.md), [HAVING clause](../dql/ HAVING_CLAUSE.md) and [WINDOW clause](../dql/WINDOW_CLAUSE.md).
-- Single table query only involves the calculation of a single table, and does not design the calculation of multiple tables [JOIN](../dql/JOIN_CLAUSE.md).
-
-#### Example: Example of Simple SELECT Query Statement that Supports Online
-
-```sql
--- desc: SELECT all columns
-SELECT * FROM t1;
-
--- desc: SELECT expression renamed
-SELECT COL1 as c1 FROM t1;
-
--- desc: SELECT expression rename 2
-SELECT COL1 c1 FROM t1;
-
--- desc: SELECT column expression
-SELECT COL1 FROM t1;
-SELECT t1.COL1 FROM t1;
-
--- desc: SELECT unary expression
-SELECT -COL2 as COL2_NEG FROM t1;
-
--- desc: SELECT binary expression
-SELECT COL1 + COL2 as COL12_ADD FROM t1;
-
--- desc: SELECT type cast
-SELECT CAST(COL1 as BIGINT) as COL_BIGINT FROM t1;
-
--- desc: SELECT function expression
-SELECT substr(COL7, 3, 6) FROM t1;
-```
-
-### The Usage Specification of LAST JOIN Under Online Serving
-
-- Join type only supports `LAST JOIN` type
-- At least one JOIN condition is an EQUAL condition of the form `left_table.column=right_table.column`, and the `rgith_table.column` column needs to hit the index of the right table
-- In the case of LAST JOIN with sorting, `ORDER BY` can only support column expressions, and the column needs to hit the time column of the right table index
-
-#### Example: Example of Simple SELECT Query Statement that Supports Online
-
-
-
-```sql
-CREATE DATABASE db1;
-
-USE db1;
-CREATE TABLE t1 (col0 STRING, col1 int, std_time TIMESTAMP, INDEX(KEY=col1, TS=std_time, TTL_TYPE=absolute, TTL=30d));
--- SUCCEED: Create successfully
-
-desc t1;
- --- ---------- ----------- ------ ---------
- # Field Type Null Default
- --- ---------- ----------- ------ ---------
- 1 col0 Varchar YES
- 2 col1 Int YES
- 3 std_time Timestamp YES
- --- ---------- ----------- ------ ---------
- --- -------------------- ------ ---------- ---------- ---------------
- # name keys ts ttl ttl_type
- --- -------------------- ------ ---------- ---------- ---------------
- 1 INDEX_0_1639524729 col1 std_time 43200min kAbsoluteTime
- --- -------------------- ------ ---------- ---------- ---------------
-```
-### Window Usage Specification Under Online Serving
-
-- Window borders only support `PRECEDING` and `CURRENT ROW`
-- Window types only support `ROWS` and `ROWS_RANGE`
-- The window `PARTITION BY` can only support column expressions, and the column needs to hit the index
-- The window `ORDER BY` can only support column expressions, and the column needs to hit the time column of the index
-
diff --git a/docs/en/reference/sql/deployment_manage/SHOW_DEPLOYMENT.md b/docs/en/reference/sql/deployment_manage/SHOW_DEPLOYMENT.md
index 6a3278c59ba..e79f1047781 100644
--- a/docs/en/reference/sql/deployment_manage/SHOW_DEPLOYMENT.md
+++ b/docs/en/reference/sql/deployment_manage/SHOW_DEPLOYMENT.md
@@ -1,10 +1,12 @@
# View DEPLOYMENT Details
+The `SHOW DEPLOYMENT` statement is used to display the details of a specific task that has been deployed under Online Request mode.
+
+
```SQL
SHOW DEPLOYMENT deployment_name;
```
-The `SHOW DEPLOYMENT` statement is used to display the details of an OnlineServing.
## Example
@@ -12,7 +14,7 @@ Create a database and set it as the current database:
```sql
CREATE DATABASE db1;
--- SUCCEED: Create database successfully
+-- SUCCEED
USE db1;
-- SUCCEED: Database changed
@@ -24,24 +26,22 @@ Create a table `t1`:
```sql
CREATE TABLE t1(col0 STRING);
--- SUCCEED: Create successfully
+-- SUCCEED
```
-Deploy the query statement of table t1 to OnlineServing:
+Deploy the query statement of table t1:
```sql
DEPLOY demo_deploy select col0 from t1;
--- SUCCEED: deploy successfully
+-- SUCCEED
```
Check out the newly deployed deployment:
```sql
SHOW DEPLOYMENT demo_deploy;
-```
-```
----- -------------
DB Deployment
----- -------------
@@ -64,16 +64,15 @@ FROM
--- ------- ---------- ------------
# Field Type IsConstant
--- ------- ---------- ------------
- 1 col0 kVarchar NO
+ 1 col0 Varchar NO
--- ------- ---------- ------------
# Output Schema
--- ------- ---------- ------------
# Field Type IsConstant
--- ------- ---------- ------------
- 1 col0 kVarchar NO
- --- ------- ---------- ------------
-
+ 1 col0 Varchar NO
+ --- ------- ---------- ------------
```
## Related Statements
diff --git a/docs/en/reference/sql/deployment_manage/SHOW_DEPLOYMENTS.md b/docs/en/reference/sql/deployment_manage/SHOW_DEPLOYMENTS.md
index 36bea5030b4..f3b14ba7fa6 100644
--- a/docs/en/reference/sql/deployment_manage/SHOW_DEPLOYMENTS.md
+++ b/docs/en/reference/sql/deployment_manage/SHOW_DEPLOYMENTS.md
@@ -1,10 +1,12 @@
# View DEPLOYMENTS List
+The `SHOW DEPLOYMENTS` statement displays the tasks that have been deployed in the current database under Online Request mode.
+
+
```SQL
SHOW DEPLOYMENTS;
```
-The `SHOW DEPLOYMENTS` statement displays the online serving list that has been deployed under the current database.
## Example
diff --git a/docs/en/reference/sql/deployment_manage/index.rst b/docs/en/reference/sql/deployment_manage/index.rst
index 4b6d313bd84..32836f69c11 100644
--- a/docs/en/reference/sql/deployment_manage/index.rst
+++ b/docs/en/reference/sql/deployment_manage/index.rst
@@ -10,4 +10,4 @@ DEPLOYMENT Management
DROP_DEPLOYMENT_STATEMENT
SHOW_DEPLOYMENTS
SHOW_DEPLOYMENT
- ONLINE_SERVING_REQUIREMENTS
+ ONLINE_REQUEST_REQUIREMENTS
diff --git a/docs/en/reference/sql/dml/DELETE_STATEMENT.md b/docs/en/reference/sql/dml/DELETE_STATEMENT.md
new file mode 100644
index 00000000000..253b781b65c
--- /dev/null
+++ b/docs/en/reference/sql/dml/DELETE_STATEMENT.md
@@ -0,0 +1,24 @@
+# DELETE
+
+## Syntax
+
+```sql
+DeleteStmt ::=
+ DELETE FROM TableName WHERE where_condition
+
+TableName ::=
+ Identifier ('.' Identifier)?
+```
+
+**Description**
+
+The `DELETE` statement deletes all data that matches the specified column value on the first index of the table.
+
+
+## Examples
+
+```SQL
+DELETE FROM t1 WHERE col1 = 'aaaa';
+
+DELETE FROM t1 WHERE col1 = 'aaaa' and col2 = 'bbbb';
+```
\ No newline at end of file
diff --git a/docs/en/reference/sql/dml/INSERT_STATEMENT.md b/docs/en/reference/sql/dml/INSERT_STATEMENT.md
index 4d3234322dc..a23fb53cd99 100644
--- a/docs/en/reference/sql/dml/INSERT_STATEMENT.md
+++ b/docs/en/reference/sql/dml/INSERT_STATEMENT.md
@@ -1,6 +1,6 @@
# INSERT
-OpenMLDB supports single-row and multi-row insert statements
+OpenMLDB supports single-row and multi-row insert statements.
## Syntax
@@ -21,12 +21,12 @@ value_list:
INSERT INTO t1 values(1, 2, 3.0, 4.0, "hello");
-- insert a row into table with given columns's values
-INSERT INTO t1(COL1, COL2, COL5) values(1, 2, "hello")
+INSERT INTO t1(COL1, COL2, COL5) values(1, 2, "hello");
-- insert multiple rows into table with all columns
-INSERT INTO t1 values(1, 2, 3.0, 4.0, "hello"), (10, 20, 30.0, 40.0, "world"), ;
+INSERT INTO t1 values(1, 2, 3.0, 4.0, "hello"), (10, 20, 30.0, 40.0, "world");
-- insert multiple rows into table with given columns's values
-INSERT INTO t1(COL1, COL2, COL5) values(1, 2, "hello"), (10, 20, "world")
+INSERT INTO t1(COL1, COL2, COL5) values(1, 2, "hello"), (10, 20, "world");
```
diff --git a/docs/en/reference/sql/dml/LOAD_DATA_STATEMENT.md b/docs/en/reference/sql/dml/LOAD_DATA_STATEMENT.md
index aa1fc5f085b..63f98993052 100644
--- a/docs/en/reference/sql/dml/LOAD_DATA_STATEMENT.md
+++ b/docs/en/reference/sql/dml/LOAD_DATA_STATEMENT.md
@@ -1,77 +1,80 @@
# LOAD DATA INFILE
+The `LOAD DATA INFILE` statement loads data efficiently from a file into a table. `LOAD DATA INFILE` is complementary to `SELECT ... INTO OUTFILE`. To export data from a table to a file, use [SELECT...INTO OUTFILE](../dql/SELECT_INTO_STATEMENT.md).
+
## Syntax
```sql
LoadDataInfileStmt
- ::= 'LOAD' 'DATA' 'INFILE' filePath LoadDataInfileOptionsList
-filePath ::= string_literal
+ ::= 'LOAD' 'DATA' 'INFILE' filePath 'INTO' 'TABLE' tableName LoadDataInfileOptionsList
+filePath
+ ::= string_literal
+
+tableName
+ ::= string_literal
LoadDataInfileOptionsList
- ::= 'OPTIONS' '(' LoadDataInfileOptionItem (',' LoadDataInfileOptionItem)* ')'
-
+ ::= 'OPTIONS' '(' LoadDataInfileOptionItem (',' LoadDataInfileOptionItem)* ')'
LoadDataInfileOptionItem
- ::= 'DELIMITER' '=' string_literal
- |'HEADER' '=' bool_literal
- |'NULL_VALUE' '=' string_literal
- |'FORMAT' '=' string_literal
+ ::= 'DELIMITER' '=' string_literal
+ |'HEADER' '=' bool_literal
+ |'NULL_VALUE' '=' string_literal
+ |'FORMAT' '=' string_literal
```
-The `LOAD DATA INFILE` statement reads lines quickly from a file to a table. `LOAD DATA INFILE` is complementary to `SELECT ... INTO OUTFILE`. To write data from a table to a file, use [SELECT...INTO OUTFILE](../dql/SELECT_INTO_STATEMENT.md)). To read the file back into the table, use `LOAD DATA INFILE`. Most of the configuration items of the two statements are the same, including:
+The following table introduces the parameters of `LOAD DATA INFILE`.
+| Parameter | Type | Default Value | Note |
+|--------------------|---------|-------------------|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
+| delimiter | String | , | It defines the column separator, the default value is `,`. |
+| header             | Boolean | true              | It indicates whether the file to import has a header. If the value is `true`, the file has a header. |
+| null_value         | String  | null              | It defines the string that will be converted to `NULL` when loading data. For example, if `null_value='NA'`, the string `NA` in the file will be loaded as `NULL`. |
+| format | String | csv | It defines the format of the input file.
`csv` is the default format.
`parquet` format is supported in the cluster version. |
+| quote              | String  | ""                | It defines the string surrounding the input data. The string length should be <= 1. The default is "", which means that the string surrounding the input data is empty. When the surrounding string is configured, the content surrounded by a pair of the quote characters will be parsed as a whole. For example, if the surrounding string is `"#"`, the original data like `1, 1.0, #This is a string, with comma#` will be converted to three fields: the first field is an integer 1, the second is a float 1.0 and the third field is a string. |
+| mode | String | "error_if_exists" | It defines the input mode.
`error_if_exists` is the default mode which indicates that an error will be thrown out if the offline table already has data. This input mode is only supported by the offline execution mode.
`overwrite` indicates that if the file already exists, the data will overwrite the contents of the original file. This input mode is only supported by the offline execution mode.
`append` indicates that if the table already exists, the data will be appended to the original table. Both offline and online execution modes support this input mode. |
+| deep_copy          | Boolean | true              | It defines whether `deep_copy` is used. Only offline load supports `deep_copy=false`; in that case, you can specify the `INFILE` path as the offline storage address of the table to avoid a hard copy. |
-| configuration item | type | Defaults | describe |
-| ---------- | ------- | ------ | ------------------------------------------------------------ |
-| delimiter | String | , | defaul for column separator, is `,` |
-| header | Boolean | true | default to include the header, is
-`true` |
-| null_value | String | null | NULL value,default population is `"null"`. When loading, strings that encounter null_value will be converted to NULL and inserted into the table. |
-| format | String | csv | default format of the loaded file is `csv`. Please add other optional formats. |
-| quote | String | "" | A surrounding string of input data. String length <= 1. The default is "", which means parsing the data without special handling of the surrounding strings. After configuring the bracketing characters, the content surrounded by the bracketing characters will be parsed as a whole. For example, when the configuration surrounding string is "#", `1, 1.0, #This is a string field, even there is a comma#` will be parsed as three. The first is integer 1, the second is a float 1.0, and the third is a string. |
-| mode | String | "error_if_exists" | Import mode:
`error_if_exists`: Only available in offline mode. If the offline table already has data, an error will be reported.
`overwrite`: Only available in offline mode, data will overwrite offline table data.
`append`: Available both offline and online, if the file already exists, the data will be appended to the original file. |
-| deep_copy | Boolean | true | `deep_copy=false` only supports offline load, you can specify `INFILE` Path as the offline storage address of the table, so no hard copy is required.
```{note}
-In the cluster version, the `LOAD DATA INFILE` statement determines whether to import data to online or offline storage according to the current execution mode (execute_mode). There is no storage difference in the stand-alone version, and the `deep_copy` option is not supported.
+- In the cluster version, the specified execution mode (defined by `execute_mode`) determines whether to import data to online or offline storage when the `LOAD DATA INFILE` statement is executed. For the standalone version, there is no difference in storage mode and the `deep_copy` option is not supported.
-Online import can only use append mode.
+- As mentioned in the above table, the online execution mode only supports the `append` input mode.
-After the offline soft copy is imported, OpenMLDB should not modify the data in the soft link. Therefore, if the current offline data is a soft link, append import is no longer supported. Moreover, in the case of the current soft connection, using the hard copy in the overwrite mode will not delete the data of the soft connection.
+- When `deep_copy=false`, OpenMLDB does not support modifying the data in the soft link. Therefore, if the current offline data comes from a soft link, `append` import is no longer supported. Moreover, if the current offline data is a soft link, a hard copy with `overwrite` will not delete the soft-linked data.
```
```{warning} INFILE Path
:class: warning
-The reading of the `INFILE` path is done by batchjob. If it is a relative path, it needs a relative path that can be accessed by batchjob.
+The `INFILE` path is read by a batch job. If it is a relative path, it needs to be accessible by the batch job. However, in a production environment, the execution of batch jobs is usually scheduled by a YARN cluster, so it is not deterministic which node will actually perform the task. In a testing environment with a multi-machine deployment, it is also hard to determine where the batch job is running.
-In a production environment, the execution of batchjobs is usually scheduled by the yarn cluster, it's not certain what executes them. In a test environment, if it's multi-machine deployment, it becomes difficult to determine where the batchjob is running.
-
-Please try to use absolute paths. In the stand-alone test, the local file starts with `file://`; in the production environment, it is recommended to use a file system such as hdfs.
+Therefore, you are advised to use absolute paths. In the standalone version, a local file path starts with `file://`. In a production environment, it is recommended to use a distributed file system such as *HDFS*.
```
## SQL Statement Template
```sql
-LOAD DATA INFILE 'file_name' OPTIONS (key = value, ...)
+LOAD DATA INFILE 'file_name' INTO TABLE 'table_name' OPTIONS (key = value, ...);
```
-## Examples:
+## Example
-Read data from file `data.csv` into table `t1` online storage. Use `,` as column separator
+The following SQL example imports data from the file `data.csv` into the table `t1` using online storage. `data.csv` uses `,` as the column separator.
```sql
set @@execute_mode='online';
-LOAD DATA INFILE 'data.csv' INTO TABLE t1 ( delimit = ',' );
+LOAD DATA INFILE 'data.csv' INTO TABLE t1 OPTIONS( delimiter = ',' );
```
-Read data from file `data.csv` into table `t1`. Use `,` as column delimiter. The string "NA" will be replaced with NULL.
+The following SQL example imports data from the file `data.csv` into the table `t1`. `data.csv` uses `,` as the column delimiter. The string `NA` in the file will be converted to NULL when loading.
```sql
-LOAD DATA INFILE 'data.csv' INTO TABLE t1 ( delimit = ',', nullptr_value='NA');
+LOAD DATA INFILE 'data.csv' INTO TABLE t1 OPTIONS( delimiter = ',', null_value='NA');
```
-Soft copy `data_path` to table `t1` as offline data.
+The following SQL example shows a soft copy: with `deep_copy=false`, the path `data_path` is used as the offline storage address of `t1`, so no hard copy is performed.
```sql
set @@execute_mode='offline';
-LOAD DATA INFILE 'data_path' INTO TABLE t1 ( deep_copy=true );
+LOAD DATA INFILE 'data_path' INTO TABLE t1 OPTIONS(deep_copy=false);
```
+
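+The following sketch shows how the `quote` option described above might be used; it assumes that string fields in `data.csv` which contain the column separator are surrounded by `#`.
+```sql
+-- a sketch: parse fields surrounded by '#' while loading
+LOAD DATA INFILE 'data.csv' INTO TABLE t1 OPTIONS( delimiter = ',', quote = '#' );
+```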
diff --git a/docs/en/reference/sql/dml/index.rst b/docs/en/reference/sql/dml/index.rst
index fa1cc1ee99e..44843520b24 100644
--- a/docs/en/reference/sql/dml/index.rst
+++ b/docs/en/reference/sql/dml/index.rst
@@ -8,3 +8,4 @@ Data Manipulation Statement(DML)
INSERT_STATEMENT
LOAD_DATA_STATEMENT
+ DELETE_STATEMENT
diff --git a/docs/en/reference/sql/dql/GROUP_BY_CLAUSE.md b/docs/en/reference/sql/dql/GROUP_BY_CLAUSE.md
index 8fdb38fa862..341bd570e0d 100644
--- a/docs/en/reference/sql/dql/GROUP_BY_CLAUSE.md
+++ b/docs/en/reference/sql/dql/GROUP_BY_CLAUSE.md
@@ -1,7 +1,5 @@
# GROUP BY Clause
-All -group by- currently only has supports in batch mode (that is, console debugging SQL support, offline mode is still under development)
-
## Syntax
```SQL
@@ -15,27 +13,25 @@ GroupByClause
SELECT select_expr [,select_expr...] FROM ... GROUP BY ...
```
-## Boundary Description
-
-| SELECT statement elements | state | directions |
-| :-------------- | ------------- | :----------------------------------------------------------- |
-| GROUP BY Clause | Online not supported | Group By clause is used to group the query result set. Grouping expression lists only support simple columns. |
+## Description
+For the standalone version, `GROUP BY` is supported in all conditions. For the cluster version, the execution modes which support this clause are shown below.
+| `SELECT` Statement Elements | Offline Mode | Online Preview Mode | Online Request Mode | Note |
+|:-----------------------------------------------------------|--------------|---------------------|---------------------|:------------------------------------------------------------------------------------------------------------------------|
+| GROUP BY Clause | **``✓``** | | | The GROUP BY clause is used to group the query results. The grouping conditions only support grouping on simple columns. |
## Example
-### 1. Aggregate After Grouping By Column
+**1. Aggregate After Grouping By One Column**
```SQL
--- desc: simple SELECT grouping KEY
- SELECT COL1, SUM(COL2), AVG(COL2) FROM t1 group by COL1;
+SELECT COL1, SUM(COL2), AVG(COL2) FROM t1 group by COL1;
```
-### 2. Aggregate After Grouping By Two Columns
+**2. Aggregate After Grouping By Two Columns**
```SQL
--- desc: simple SELECT grouping KEY
- SELECT COL1, SUM(COL2), AVG(COL2) FROM t1 group by COL1, COL0;
+SELECT COL1, SUM(COL2), AVG(COL2) FROM t1 group by COL1, COL0;
```
diff --git a/docs/en/reference/sql/dql/HAVING_CLAUSE.md b/docs/en/reference/sql/dql/HAVING_CLAUSE.md
index 2a01d9b6e05..a109cf5d406 100644
--- a/docs/en/reference/sql/dql/HAVING_CLAUSE.md
+++ b/docs/en/reference/sql/dql/HAVING_CLAUSE.md
@@ -1,6 +1,6 @@
# Having Clause
-Having clause is similar to the Where clause. The Having clause allows you to filter various data after GroupBy, and the Where clause is used to filter records before aggregation.
+Having clause is similar to the Where clause. The Having clause filters data after GroupBy, and the Where clause is used to filter records before aggregation.
## Syntax
@@ -15,32 +15,30 @@ HavingClause
SELECT select_expr [,select_expr...] FROM ... GROUP BY ... HAVING having_condition
```
-## Boundary Description
+## Description
+For the standalone version, `HAVING` is supported in all conditions. For the cluster version, the execution modes which support this clause are shown below.
-| SELECT statement elements | state | directions |
-| :------------- | ------------- | :----------------------------------------------------------- |
-| HAVING Clause | Online not supported | Having clause is similar to the Where clause. The Having clause allows you to filter various data after GroupBy, and the Where clause is used to filter records before aggregation. |
+| `SELECT` Statement Elements | Offline Mode | Online Preview Mode | Online Request Mode | Note |
+|:-----------------------------------------------------------|--------------|---------------------|---------------------|:-------------------------------------------------------------------------------------------------------------------------------------------------------------------|
+| HAVING Clause | **``✓``** | | | The HAVING clause is similar to the WHERE clause. The HAVING clause filters data after GROUP BY, while the WHERE clause filters records before aggregation. |
## Example
-### 1. Filter By Aggregation Results After Grouping
+ **1. Filter By Aggregation Results After Grouping**
```SQL
--- desc: aggregate filtering after grouping
SELECT COL1, SUM(COL2), AVG(COL2) FROM t1 group by COL1 HAVING SUM(COL2) > 1000;
```
-### 2. Filter By Aggregation Result After Grouping By Two Columns
+ **2. Filter By Aggregation Result After Grouping By Two Columns**
```sql
--- desc: aggregate filtering after grouping
SELECT COL1, SUM(COL2), AVG(COL2) FROM t1 group by COL1, COL0 HAVING SUM(COL2) > 1000;
```
-### 3. Filter By Grouping Column After Grouping
+ **3. Filter By Grouping Column After Grouping**
```sql
--- desc: aggregate filtering after grouping
-SELECT COL1, SUM(COL2), AVG(COL2) FROM t1 group by COL1 HAVING COL2 > 1000;
+SELECT COL1, SUM(COL2), AVG(COL2) FROM t1 group by COL1 HAVING COL1 ='a';
```
diff --git a/docs/en/reference/sql/dql/JOIN_CLAUSE.md b/docs/en/reference/sql/dql/JOIN_CLAUSE.md
index 9f600e9fd2f..322c8f0745a 100644
--- a/docs/en/reference/sql/dql/JOIN_CLAUSE.md
+++ b/docs/en/reference/sql/dql/JOIN_CLAUSE.md
@@ -1,12 +1,13 @@
# JOIN Clause
-OpenMLDB currently supports only one **JoinType** of `LAST JOIN`.
+OpenMLDB currently only supports `LAST JOIN`.
-LAST JOIN can be seen as a special kind of LEFT JOIN. On the premise that the JOIN condition is met, each row of the left table is spelled with a last row that meets the condition. LAST JOIN is divided into unsorted splicing and sorted splicing.
+`LAST JOIN` can be seen as a special kind of `LEFT JOIN`. On the premise that the JOIN condition is met, each row of the left table is joined with the last row of the right table that meets the condition. There are two types of `LAST JOIN`: unsorted join and sorted join.
-- Unsorted splicing refers to the direct splicing without sorting the right table.
-- Sorting and splicing refers to sorting the right table first, and then splicing.
+- The unsorted join will join two tables directly without sorting the right table.
+- The sorted join will sort the right table first, and then join two tables.
+Like `LEFT JOIN`, `LAST JOIN` returns all rows in the left table, even if there are no matched rows in the right table.
## Syntax
```
@@ -18,51 +19,227 @@ JoinType ::= 'LAST'
## SQL Statement Template
```sql
-SELECT ... FROM table_ref LAST JOIN table_ref;
+SELECT ... FROM table_ref LAST JOIN table_ref ON expression;
```
-## Boundary Description
+## Description
-| SELECT statement elements | state | direction |
-| :------------- | --------------- | :----------------------------------------------------------- |
-| JOIN Clause | Only LAST JOIN is supported | Indicates that the data source multiple tables JOIN. OpenMLDB currently only supports LAST JOIN. During Online Serving, you need to follow [The usage specification of LAST JOIN under Online Serving](../deployment_manage/ONLINE_SERVING_REQUIREMENTS.md#online-serving usage specification of last-join) |
+| `SELECT` Statement Elements | Offline Mode | Online Preview Mode | Online Request Mode | Note |
+|:-----------------------------------------------------------|--------------|---------------------|---------------------|:-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
+| JOIN Clause | **``✓``** | **``✓``** | **``✓``** | The JOIN clause indicates that the data source comes from multiple joined tables. OpenMLDB currently only supports LAST JOIN. For Online Request mode, please follow [the specifications of LAST JOIN under Online Request mode](../deployment_manage/ONLINE_REQUEST_REQUIREMENTS.md#specifications-of-last-join-under-online-request-mode). |
-### LAST JOIN without ORDER BY
-#### Example: **LAST JOIN Unsorted Concatenation**
-```sql
--- desc: simple spelling query without ORDER BY
-SELECT t1.col1 as t1_col1, t2.col1 as t2_col2 from t1 LAST JOIN t2 ON t1.col1 = t2.col1
-```
+### LAST JOIN without ORDER BY
-When `LAST JOIN` is spliced without sorting, the first hit data row is spliced
+#### Example of the Computation Logic
-![Figure 7: last join without order](../dql/images/last_join_without_order.png)
+The unsorted `LAST JOIN` concatenates every row of the left table with the last matched row of the right table.
+![Figure 7: last join without order](../dql/images/last_join_without_order.png)
-Take the second row of the left table as an example, the right table that meets the conditions is unordered, there are 2 hit conditions, select the last one `5, b, 2020-05-20 10:11:12`
+Take the second row of the left table as an example. The right table is unordered, and there are 2 matched rows. The last one, `5, b, 2020-05-20 10:11:12`, will be joined with the second row of the left table.
+The final result is shown in the figure below.
![Figure 8: last join without order result](../dql/images/last_join_without_order2.png)
-The final result is shown in the figure above.
+```{note}
+To realize the above JOIN result, please follow [the specifications of LAST JOIN under Online Request mode](../deployment_manage/ONLINE_REQUEST_REQUIREMENTS.md#specifications-of-last-join-under-online-request-mode) like the SQL example below, even if you are using offline mode.
+Otherwise, you may not obtain exactly the result above because the underlying storage order is not deterministic, although such a result is still correct.
+```
-### LAST JOIN with ORDER BY
+#### SQL Example
-#### Example: LAST JOIN Sorting And Splicing
+The following SQL commands create the left table t1 shown above and insert the corresponding data.
+To check the results conveniently, it is recommended to create an index on `col1` with `std_ts` as the timestamp. It does not matter if you create t1 without an index, since this does not affect the join result in this case.
+```sql
+>CREATE TABLE t1 (id INT, col1 STRING,std_ts TIMESTAMP,INDEX(KEY=col1,ts=std_ts));
+SUCCEED
+>INSERT INTO t1 values(1,'a',20200520101112);
+SUCCEED
+>INSERT INTO t1 values(2,'b',20200520101114);
+SUCCEED
+>INSERT INTO t1 values(3,'c',20200520101116);
+SUCCEED
+>SELECT * from t1;
+ ---- ------ ----------------
+ id col1 std_ts
+ ---- ------ ----------------
+ 1 a 20200520101112
+ 2 b 20200520101114
+ 3 c 20200520101116
+ ---- ------ ----------------
+
+3 rows in set
+```
+The following SQL commands create the right table t2 shown above and insert the corresponding data.
+
+```{note}
+The storage order of data rows is not necessarily the same as their insertion order, and the storage order influences the matching order during JOIN.
+In this example, we want the storage order of t2 to be the same as displayed in the figure above, which makes the result easy to check.
+To guarantee the storage order of t2, please create the following index, do not set `ts`, and insert the data one by one in order.
+A detailed explanation can be found in [ColumnIndex](../ddl/CREATE_TABLE_STATEMENT.md#columnindex).
+```
+```sql
+>CREATE TABLE t2 (id INT, col1 string,std_ts TIMESTAMP,INDEX(KEY=col1));
+SUCCEED
+>INSERT INTO t2 values(1,'a',20200520101112);
+SUCCEED
+>INSERT INTO t2 values(2,'a',20200520101113);
+SUCCEED
+>INSERT INTO t2 values(3,'b',20200520101113);
+SUCCEED
+>INSERT INTO t2 values(4,'c',20200520101114);
+SUCCEED
+>INSERT INTO t2 values(5,'b',20200520101112);
+SUCCEED
+>INSERT INTO t2 values(6,'c',20200520101113);
+SUCCEED
+>SELECT * from t2;
+ ---- ------ ----------------
+ id col1 std_ts
+ ---- ------ ----------------
+ 2 a 20200520101113
+ 1 a 20200520101112
+ 5 b 20200520101112
+ 3 b 20200520101113
+ 6 c 20200520101113
+ 4 c 20200520101114
+ ---- ------ ----------------
+
+6 rows in set
+```
+The result of `SELECT` with `LAST JOIN` is shown below.
+```sql
+> SELECT * from t1 LAST JOIN t2 ON t1.col1 = t2.col1;
+ ---- ------ ---------------- ---- ------ ----------------
+ id col1 std_ts id col1 std_ts
+ ---- ------ ---------------- ---- ------ ----------------
+ 1 a 20200520101112 2 a 20200520101113
+ 2 b 20200520101114 5 b 20200520101112
+ 3 c 20200520101116 6 c 20200520101113
+ ---- ------ ---------------- ---- ------ ----------------
+
+3 rows in set
+```
+If you create t1 without an index, the JOIN result is the same but the order of the `SELECT` output is different.
+```sql
+> SELECT * from t1 LAST JOIN t2 ON t1.col1 = t2.col1;
+ ---- ------ ---------------- ---- ------ ----------------
+ id col1 std_ts id col1 std_ts
+ ---- ------ ---------------- ---- ------ ----------------
+ 3 c 20200520101116 6 c 20200520101113
+ 1 a 20200520101112 2 a 20200520101113
+ 2 b 20200520101114 5 b 20200520101112
+ ---- ------ ---------------- ---- ------ ----------------
+
+3 rows in set
+```
-```SQL
--- desc: Simple spelling query with ORDER BY
-SELECT t1.col1 as t1_col1, t2.col1 as t2_col2 from t1 LAST JOIN t2 ORDER BY t2.std_ts ON t1.col1 = t2.col1
+```{note}
+The execution of `LAST JOIN` can be optimized by an index. If there is an index matching the `ORDER BY` and the conditions of the `LAST JOIN` clause, its `ts` will be used as the implicit order for an unsorted `LAST JOIN`. If there is no such index, the implicit order is the storage order, but the storage order of a table without an index is unpredictable.
+If `ts` was not specified when creating the index, OpenMLDB uses the insertion time of the data as `ts`.
```
-When `LAST JOIN` is configured with `Order By`, the right table is sorted by Order, and the last hit data row is spliced.
+
+
+### LAST JOIN with ORDER BY
+
+#### Example of the Computation Logic
+
+When `LAST JOIN` is configured with `ORDER BY`, the right table is sorted by the specified order, and the last matched data row will be joined.
![Figure 9: last join with order](../dql/images/last_join_with_order1.png)
-Taking the second row of the left table as an example, there are 2 items in the right table that meet the conditions. After sorting by `std_ts`, select the last item `3, b, 2020-05-20 10:11:13`
+Taking the second row of the left table as an example, there are 2 rows in the right table that meet the conditions. After sorting by `std_ts`, the last row `3, b, 2020-05-20 10:11:13` will be joined.
![Figure 10: last join with order result](../dql/images/last_join_with_order2.png)
The final result is shown in the figure above.
+
+#### SQL Example
+
+
+The following SQL commands create the left table t1 shown above and insert the corresponding data.
+```SQL
+>CREATE TABLE t1 (id INT, col1 STRING,std_ts TIMESTAMP);
+SUCCEED
+>INSERT INTO t1 values(1,'a',20200520101112);
+SUCCEED
+>INSERT INTO t1 values(2,'b',20200520101114);
+SUCCEED
+>INSERT INTO t1 values(3,'c',20200520101116);
+SUCCEED
+>SELECT * from t1;
+ ---- ------ ----------------
+ id col1 std_ts
+ ---- ------ ----------------
+ 1 a 20200520101112
+ 2 b 20200520101114
+ 3 c 20200520101116
+ ---- ------ ----------------
+
+3 rows in set
+```
+The following SQL commands create the right table t2 shown above and insert the corresponding data.
+
+```sql
+>CREATE TABLE t2 (id INT, col1 string,std_ts TIMESTAMP);
+SUCCEED
+>INSERT INTO t2 values(1,'a',20200520101112);
+SUCCEED
+>INSERT INTO t2 values(2,'a',20200520101113);
+SUCCEED
+>INSERT INTO t2 values(3,'b',20200520101113);
+SUCCEED
+>INSERT INTO t2 values(4,'c',20200520101114);
+SUCCEED
+>INSERT INTO t2 values(5,'b',20200520101112);
+SUCCEED
+>INSERT INTO t2 values(6,'c',20200520101113);
+SUCCEED
+>SELECT * from t2;
+ ---- ------ ----------------
+ id col1 std_ts
+ ---- ------ ----------------
+ 2 a 20200520101113
+ 1 a 20200520101112
+ 5 b 20200520101112
+ 3 b 20200520101113
+ 6 c 20200520101113
+ 4 c 20200520101114
+ ---- ------ ----------------
+
+6 rows in set
+```
+The result of `LAST JOIN` with `ORDER BY` is shown below.
+```sql
+>SELECT * from t1 LAST JOIN t2 ORDER BY t2.std_ts ON t1.col1 = t2.col1;
+ ---- ------ ---------------- ---- ------ ----------------
+ id col1 std_ts id col1 std_ts
+ ---- ------ ---------------- ---- ------ ----------------
+ 1 a 20200520101112 2 a 20200520101113
+ 2 b 20200520101114 3 b 20200520101113
+ 3 c 20200520101116 4 c 20200520101114
+ ---- ------ ---------------- ---- ------ ----------------
+```
+
+### LAST JOIN with No Matched Rows
+The following example shows the result of LAST JOIN with no matched rows.
+
+Please insert a new row into t1 (created in [LAST JOIN with ORDER BY](#last-join-with-order-by)) as follows, then run the `LAST JOIN` command again.
+
+```sql
+>INSERT INTO t1 values(4,'d',20220707111111);
+SUCCEED
+>SELECT * from t1 LAST JOIN t2 ORDER BY t2.std_ts ON t1.col1 = t2.col1;
+ ---- ------ ---------------- ------ ------ ----------------
+ id col1 std_ts id col1 std_ts
+ ---- ------ ---------------- ------ ------ ----------------
+ 4 d 20220707111111 NULL NULL NULL
+ 3 c 20200520101116 4 c 20200520101114
+ 1 a 20200520101112 2 a 20200520101113
+ 2 b 20200520101114 3 b 20200520101113
+ ---- ------ ---------------- ------ ------ ----------------
+```
\ No newline at end of file
diff --git a/docs/en/reference/sql/dql/LIMIT_CLAUSE.md b/docs/en/reference/sql/dql/LIMIT_CLAUSE.md
index 492ee238197..19d5315e829 100644
--- a/docs/en/reference/sql/dql/LIMIT_CLAUSE.md
+++ b/docs/en/reference/sql/dql/LIMIT_CLAUSE.md
@@ -1,6 +1,6 @@
# Limit Clause
-The Limit clause is used to limit the number of results. OpenMLDB currently only supports Limit accepting one parameter, indicating the maximum number of rows of returned data;
+The Limit clause is used to limit the number of results. OpenMLDB currently only supports one parameter to limit the maximum number of rows of returned data.
## Syntax
@@ -15,18 +15,19 @@ LimitClause
SELECT ... LIMIT ...
```
-## Boundary Description
+## Description
+For the standalone version, `LIMIT` is supported in all conditions. For the cluster version, the execution modes which support this clause are shown below.
+
+| `SELECT` Statement Elements | Offline Mode | Online Preview Mode | Online Request Mode | Note |
+|:-----------------------------------------------------------|--------------|---------------------|---------------------|:--------------------------------------------------------------------------------------------------------------------------------------------------------------|
+| LIMIT Clause | **``✓``** | **``✓``** | **``✓``** | The Limit clause is used to limit the number of results. OpenMLDB currently only supports one parameter to limit the maximum number of rows of returned data. |
-| SELECT statement elements | state | direction |
-| :------------- | -------------------- | :----------------------------------------------------------- |
-| LIMIT Clause | Online Serving is not supported | The Limit clause is used to limit the number of results. OpenMLDB currently only supports Limit accepting one parameter, indicating the maximum number of rows of returned data; |
## Example
### SELECT with LIMIT
```SQL
--- desc: SELECT Limit
SELECT t1.COL1 c1 FROM t1 limit 10;
```
diff --git a/docs/en/reference/sql/dql/NO_TABLE_SELECT_CLAUSE.md b/docs/en/reference/sql/dql/NO_TABLE_SELECT_CLAUSE.md
index 1c61d649bc2..edcd5e9d654 100644
--- a/docs/en/reference/sql/dql/NO_TABLE_SELECT_CLAUSE.md
+++ b/docs/en/reference/sql/dql/NO_TABLE_SELECT_CLAUSE.md
@@ -1,6 +1,6 @@
-# No Table SELECT
+# No-table SELECT
-The no table Select statement calculates the constant expression operation list, and the expression calculation does not need to depend on the table and column.
+The no-table SELECT statement computes constant expressions, and the computation does not depend on any table or column.
## Syntax
@@ -14,25 +14,30 @@ SelectExpr ::= ( Identifier '.' ( Identifier '.' )? )? '*'
```
-## SQL Statement Template
+## SQL Template
```sql
SELECT const_expr [, const_expr ...];
```
-## 2. SELECT Statement Elements
+## Description
-| SELECT statement elements | state | direction |
-| :------------- | ------------------- | :----------------------------------------------------------- |
-| Unlabeled SELECT statement | OnlineServing not supported | The no table Select statement calculates the constant expression operation list, and the expression calculation does not need to depend on the table and column |
+
+| `SELECT` Statement Elements | Offline Mode | Online Preview Mode | Online Request Mode | Note |
+|:----------------------------|--------------|---------------------|---------------------|:----------------------------------------------------------------------------------------------------------------------------------------|
+| No-table SELECT statement |**``✓``** |**``✓``** | | The no-table SELECT statement computes the constant expression operation list, and the computation does not depend on tables or columns |
#### Examples
+SELECT constant literal
```sql
--- desc: SELECT constant literal
SELECT 1, 1L, 1.0f, 2.0, 'Hello';
--- desc: SELECT constant expression
-SELECT 1+1, 1L + 1L, 1.0f - 1.0f, 2.0*2.0, 'Hello' LIKE 'He%';
--- desc: SELECT function expression
+```
+SELECT constant expression
+```sql
+SELECT 1+1, 1L + 1L, 1.0f - 1.0f, 2.0*2.0, 'Hello' LIKE 'He%';
+```
+SELECT function expression
+```sql
SELECT substr("hello world", 3, 6);
```
\ No newline at end of file
diff --git a/docs/en/reference/sql/dql/SELECT_INTO_STATEMENT.md b/docs/en/reference/sql/dql/SELECT_INTO_STATEMENT.md
index 791c8ecc6c5..298cc755f53 100644
--- a/docs/en/reference/sql/dql/SELECT_INTO_STATEMENT.md
+++ b/docs/en/reference/sql/dql/SELECT_INTO_STATEMENT.md
@@ -1,59 +1,64 @@
# SELECT INTO
+The `SELECT INTO OUTFILE` statement is used to export the query results into a file.
+```{note}
+ The [`LOAD DATA INFILE`](../dml/LOAD_DATA_STATEMENT.md) statement is complementary to `SELECT INTO OUTFILE`, which allows users to create a table from a specified file and load data into the table.
+```
## Syntax
```sql
SelectIntoStmt
::= SelectStmt 'INTO' 'OUTFILE' filePath SelectIntoOptionList
-filePath ::= string_literal
+filePath
+ ::= string_literal
SelectIntoOptionList
- ::= 'OPTIONS' '(' SelectInfoOptionItem (',' SelectInfoOptionItem)* ')'
+ ::= 'OPTIONS' '(' SelectInfoOptionItem (',' SelectInfoOptionItem)* ')'
SelectInfoOptionItem
- ::= 'DELIMITER' '=' string_literal
- |'HEADER' '=' bool_literal
- |'NULL_VALUE' '=' string_literal
- |'FORMAT' '=' string_literal
- |'MODE' '=' string_literal
+ ::= 'DELIMITER' '=' string_literal
+ |'HEADER' '=' bool_literal
+ |'NULL_VALUE' '=' string_literal
+ |'FORMAT' '=' string_literal
+ |'MODE' '=' string_literal
```
-The `SELECT INTO OUTFILE` statement allows the user to export the query results of the table to a file. The [`LOAD DATA INFILE`](../dml/LOAD_DATA_STATEMENT.md) statement is complementary to `SELECT INTO OUTFILE`, which is used to create a table from the specified file and load data into the table. `SELECT INTO OUTFILE` is divided into three parts.
-
-- The first part is an ordinary SELECT statement, through which the required data is queried;
-- The second part is `filePath`, which defines which file to export the queried records to;
-- The third part `SelectIntoOptionList` is an optional option, and its possible values are:
+There are three parts in `SELECT INTO OUTFILE`.
+- The first part is an ordinary `SELECT` statement, which queries the data that needs to be exported.
+- The second part is `filePath`, which defines the file that the data should be exported into.
+- The third part is `SelectIntoOptionList`, which is an optional part, and its possible values are shown in the following table.
-| configuration item | type | defaults | describe |
-| ---------- | ------- | --------------- | ------------------------------------------------------------ |
-| delimiter | String | , | default column separator is, `,` |
-| header | Boolean | true | default to include headers, `true` |
-| null_value | String | null | NULL default padding value,`"null"` |
-| format | String | csv | default output file format, `csv`. Please add other optional formats. |
-| mode | String | error_if_exists | Output mode:
`error_if_exists`: Indicates that an error will be reported if the file already exists.
`overwrite`: Indicates that if the file already exists, the data will overwrite the contents of the original file.
`append`: Indicates that if the file already exists, the data will be appended to the original file.
When the configuration is not displayed, the default mode is `error_if_exists`. |
-| quote | String | "" | The output data string length it <= 1. The default is "", which means that the string surrounding the output data is empty. When a surrounding string is configured, a field will be surrounded by the surrounding string. For example, we configure the surrounding string as `"#"` and the original data as {1 1.0, This is a string, with comma}. The output text is `#1#, #1.0#, #This is a string, with comma#. `Please note that currently OpenMLDB does not support the escape of quote characters, so users need to choose quote characters carefully to ensure that the original string does not contain quote characters.
- |
+| Configuration Item | Type | Default Value | Note |
+|--------------------|---------|-----------------|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
+| delimiter | String | , | It defines the column separator of the exported file. |
+| header             | Boolean | true            | It defines whether the exported file will contain a header line. The header is included by default. |
+| null_value         | String  | null            | It defines the padding value for NULL, which is the string `null` by default. |
+| format             | String  | csv             | It defines the format of the output file.
`csv` is the default format.
The `parquet` format is only supported in the cluster version. |
+| mode | String | error_if_exists | It defines the output mode.
`error_if_exists` is the default mode which indicates that an error will be reported if the file already exists.
`overwrite` indicates that if the file already exists, the data will overwrite the contents of the original file.
`append` indicates that if the file already exists, the data will be appended to the original file. |
+| quote              | String  | ""              | It defines the string surrounding the output data. The string length should be <= 1. The default is "", which means that the string surrounding the output data is empty. When the surrounding string is configured, every exported field will be surrounded by this string. For example, if we configure the surrounding string as `"#"` and the original data as {1, 1.0, This is a string, with comma}, the output text will be `#1#, #1.0#, #This is a string, with comma#`. |
- The [`LOAD DATA INFILE`](../dml/LOAD_DATA_STATEMENT.md) statement is complementary to `SELECT INTO OUTFILE`, which allows the user to create a table from a specified file and load data into the table.
+````{important}
+Currently, only the cluster version supports escaping the quote character. In the standalone version, please make sure that the original string does not contain any quote characters.
+````
## SQL Statement Template
```sql
-SELECT ... INTO OUTFILE 'file_path' OPTIONS (key = value, ...)
+SELECT ... INTO OUTFILE 'file_path' OPTIONS (key = value, ...);
```
## Examples
-- Query output from table `t1` into `data.csv` file, using `,` as column delimiter
+- The following SQL command exports the query result from table `t1` into the file `data.csv`, using `,` as the column delimiter.
```SQL
-SELECT col1, col2, col3 FROM t1 INTO OUTFILE 'data.csv' OPTIONS ( delimit = ',' );
+SELECT col1, col2, col3 FROM t1 INTO OUTFILE 'data.csv' OPTIONS ( delimiter = ',' );
```
-- Query output from table `t1` to `data.csv` file, use `|` as column delimiter, NULL values are filled with `NA` string:
+- The following SQL command exports the query result from table `t1` into the file `data2.csv`, using `|` as the column delimiter and filling NULL values with the string `NA`.
```SQL
-SELECT col1, col2, col3 FROM t1 INTO OUTFILE 'data2.csv' OPTIONS ( delimit = '|', null_value='NA');
+SELECT col1, col2, col3 FROM t1 INTO OUTFILE 'data2.csv' OPTIONS ( delimiter = '|', null_value='NA');
```
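+
+- The following SQL command is a sketch based on the `quote` and `mode` options described above (the file name `data3.csv` is illustrative): it exports the query result into `data3.csv`, surrounds every field with `#`, and overwrites the file if it already exists.
+
+```SQL
+SELECT col1, col2, col3 FROM t1 INTO OUTFILE 'data3.csv' OPTIONS ( quote = '#', mode = 'overwrite' );
+```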
diff --git a/docs/en/reference/sql/dql/SELECT_STATEMENT.md b/docs/en/reference/sql/dql/SELECT_STATEMENT.md
index ed7367bdf03..84fff33fff4 100644
--- a/docs/en/reference/sql/dql/SELECT_STATEMENT.md
+++ b/docs/en/reference/sql/dql/SELECT_STATEMENT.md
@@ -109,29 +109,24 @@ TableAsName
::= 'AS'? Identifier
```
-## SELECT Statement Elements
-
-| SELECT statement elements | state | illustrate |
-| :--------------------------------------------- | ---------------------- | :----------------------------------------------------------- |
-| `SELECT` [`SelectExprList`](#selectexprlist) | supported |
-A list of projection operations, generally including column names, expressions, or '*' for all columns |
-| `FROM` [`TableRefs`](#tablerefs) | supported |
-Indicates the data source, the data source can be one table (`select * from t;`) or multiple tables JOIN (`select * from t1 join t2;`) or 0 tables ( `select 1+1;`) |
-| [`JOIN` Clause](../dql/JOIN_CLAUSE.md) | Only LAST JOIN is supported | Indicates that the data source multiple tables JOIN. OpenMLDB currently only supports LAST JOIN. During Online Serving, you need to follow [OP's usage specification under Online Serving](../deployment_manage/ONLINE_SERVING_REQUIREMENTS.md) |
-| [`WHERE` Clause](../dql/WHERE_CLAUSE.md) | Online Serving not supported |
-The Where clause is used to set filter conditions, and only the data that meets the conditions will be included in the query result. |
-| [`GROUP BY` Clause](../dql/GROUP_BY_CLAUSE.md) | Online not supported |
-The Group By clause is used to group the query result set. Grouping expression lists only support simple columns. |
-| [`HAVING` Clause](../dql/HAVING_CLAUSE.md) | Online not supported |
-The Having clause is similar to the Where clause. The Having clause allows you to filter various data after GroupBy, and the Where clause is used to filter records before aggregation. |
-| [`WINDOW` Clause](../dql/WINDOW_CLAUSE.md) | Online Training not supported |
-The window clause is used to define one or several windows. Windows can be named or anonymous. Users can call aggregate functions on the window to perform some analytical calculations (```sql agg_func() over window_name```). During Online Serving, you need to follow [OP's usage specification under Online Serving](../deployment_manage/ONLINE_SERVING_REQUIREMENTS.md) |
-| [`LIMIT` Clause](../dql/LIMIT_CLAUSE.md) | Online Serving does not support | The Limit clause is used to limit the number of results. OpenMLDB currently only supports Limit accepting one parameter, indicating the maximum number of rows of returned data; |
-| `ORDER BY` Clause | not supported | Standard SQL also supports the OrderBy clause. OpenMLDB does not currently support the Order clause. For example, the query `SELECT * from t1 ORDER BY col1;` is not supported in OpenMLDB. |
+## SELECT Statement
+
+
+| `SELECT` Statement and Related Clauses | Offline Mode | Online Preview Mode | Online Request Mode | Note |
+|:-----------------------------------------------|--------------|---------------------|---------------------|:------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
+| [`SELECT Clause`](#selectexprlist) | **``✓``** | **``✓``** | **``✓``** | A list of projection operations, generally including column names, expressions, or ‘*’ for all columns. |
+| [`FROM Clause`](#tablerefs) | **``✓``** | **``✓``** | **``✓``** | The FROM clause indicates the data source.
The data source can be one table (`select * from t;`) or multiple tables that LAST JOIN together (see [JOIN CLAUSE](../dql/JOIN_CLAUSE.md)) or no table ( `select 1+1;`), see [NO_TABLE SELECT](../dql/NO_TABLE_SELECT_CLAUSE.md) |
+| [`JOIN` Clause](../dql/JOIN_CLAUSE.md) | **``✓``** | **``✓``** | **``✓``** | The JOIN clause indicates that the data source comes from multiple joined tables. OpenMLDB currently only supports LAST JOIN. For Online Request Mode, please follow [the specification of LAST JOIN under Online Request Mode](../deployment_manage/ONLINE_REQUEST_REQUIREMENTS.md#the-usage-specification-of-last-join-under-online-serving) |
+| [`WHERE` Clause](../dql/WHERE_CLAUSE.md) | **``✓``** | **``✓``** | | The WHERE clause is used to set filter conditions, and only the data that meets the conditions will be included in the query result. |
+| [`GROUP BY` Clause](../dql/GROUP_BY_CLAUSE.md) | **``✓``** | | | The GROUP BY clause is used to group the query results. The grouping conditions only support simple columns. |
+| [`HAVING` Clause](../dql/HAVING_CLAUSE.md) | **``✓``** | | | The HAVING clause is similar to the WHERE clause. The HAVING clause filters data after GROUP BY, and the WHERE clause is used to filter records before aggregation. |
+| [`WINDOW` Clause](../dql/WINDOW_CLAUSE.md) | **``✓``** | | **``✓``** | The WINDOW clause is used to define one or several windows. Windows can be named or anonymous. Users can call aggregate functions on the window to perform analysis (```sql agg_func() over window_name```). For Online Request Mode, please follow the [specification of WINDOW Clause under Online Request Mode](../deployment_manage/ONLINE_REQUEST_REQUIREMENTS.md#window-usage-specification-under-online-serving) |
+| [`LIMIT` Clause](../dql/LIMIT_CLAUSE.md) | **``✓``** | **``✓``** | **``✓``** | The LIMIT clause is used to limit the number of results. OpenMLDB currently only supports one parameter to limit the maximum number of rows of returned data. |
+| `ORDER BY` Clause | | | | Standard SQL also supports the ORDER BY keyword, however OpenMLDB does not support this keyword currently. For example, the query `SELECT * from t1 ORDER BY col1;` is not supported in OpenMLDB. |
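+
+The following query is a minimal sketch that combines several of the clauses listed above; according to the table it can run in offline mode and online preview mode (the table `t1` and its columns are illustrative):
+
+```sql
+SELECT col1, col2 FROM t1 WHERE col2 > 10 LIMIT 100;
+```
+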
```{warning}
-The online mode or the stand-alone version of the selection may not obtain complete data.
-Because a query may perform a large number of scans on multiple tablet servers, for the stability of tablet servers, a single tablet server limits the maximum amount of scanned data, namely `scan_max_bytes_size`.
+A `SELECT` statement running in online mode or on the standalone version may not obtain complete data.
+Because a query may perform a large number of scans across multiple tablets, each tablet limits the maximum number of bytes it scans for the sake of stability, which is configured by `scan_max_bytes_size`.
-If the select result is truncated, the tablet server will display a log of `reach the max byte ...`, but the query will not report an error.
-```
\ No newline at end of file
+If the results are truncated, a message of `reach the max byte ...` will be recorded in the tablet's log, but the query will not report an error.
+```
diff --git a/docs/en/reference/sql/dql/WHERE_CLAUSE.md b/docs/en/reference/sql/dql/WHERE_CLAUSE.md
index 5ca834396f2..5eb5892c160 100644
--- a/docs/en/reference/sql/dql/WHERE_CLAUSE.md
+++ b/docs/en/reference/sql/dql/WHERE_CLAUSE.md
@@ -16,25 +16,24 @@ WhereClause
SELECT select_expr [,select_expr...] FROM ... WHERE where_condition
```
-## Boundary Description
+## Description
+For the standalone version, `WHERE` is supported under all conditions. For the cluster version, the execution modes that support this clause are listed below.
-| SELECT statement elements | state | illustrate |
-| :------------- | -------------------- | :----------------------------------------------------------- |
-| WHERE Clause | Online Serving not supportED | The Where clause is used to set filter conditions, and only the data that meets the conditions will be included in the query result. |
+| `SELECT` Statement Elements | Offline Mode | Online Preview Mode | Online Request Mode | Note |
+|:----------------------------|--------------|---------------------|---------------------|:-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
+| WHERE Clause | **``✓``** | **``✓``** | | The Where clause is used to set filter conditions, and only the data that meets the conditions will be included in the query result. |
## Example
-### Simple Conditional Filtering
+### Simple Condition Filtering
```SQL
--- desc: SELECT simple filter
- sql: SELECT COL1 FROM t1 where COL1 > 10;
+SELECT COL1 FROM t1 WHERE COL1 > 10;
```
-### Complex Conditions Simple Condition Filtering
+### Complex Condition Filtering
```sql
--- desc: The SELECT filter condition is a complex logical relational expression
- sql: SELECT COL1 FROM t1 where COL1 > 10 and COL2 = 20 or COL1 =0;
+SELECT COL1 FROM t1 WHERE COL1 > 10 AND COL2 = 20 OR COL1 = 0;
```
diff --git a/docs/en/reference/sql/dql/WINDOW_CLAUSE.md b/docs/en/reference/sql/dql/WINDOW_CLAUSE.md
index 665420f0010..823c9872cfc 100644
--- a/docs/en/reference/sql/dql/WINDOW_CLAUSE.md
+++ b/docs/en/reference/sql/dql/WINDOW_CLAUSE.md
@@ -4,54 +4,66 @@
```sql
WindowClauseOptional
- ::= ( 'WINDOW' WindowDefinition ( ',' WindowDefinition )* )?
+ ::= ( 'WINDOW' WindowDefinition ( ',' WindowDefinition )* )?
+
WindowDefinition
- ::= WindowName 'AS' WindowSpec
+ ::= WindowName 'AS' WindowSpec
WindowSpec
- ::= '(' WindowSpecDetails ')'
-
-WindowSpecDetails
- ::= [ExistingWindowName] [WindowUnionClause] WindowPartitionClause WindowOrderByClause WindowFrameClause [WindowExcludeCurrentTime] [WindowInstanceNotInWindow]
+ ::= '(' WindowSpecDetails ')'
+WindowSpecDetails
+ ::= [ExistingWindowName] [WindowUnionClause] WindowPartitionClause WindowOrderByClause WindowFrameClause (WindowAttribute)*
WindowUnionClause
- :: = ( 'UNION' TableRefs)
+ :: = ( 'UNION' TableRefs)
WindowPartitionClause
- ::= ( 'PARTITION' 'BY' ByList )
+ ::= ( 'PARTITION' 'BY' ByList )
WindowOrderByClause
- ::= ( 'ORDER' 'BY' ByList )
-
+ ::= ( 'ORDER' 'BY' ByList )
WindowFrameClause
- ::= ( WindowFrameUnits WindowFrameExtent [WindowFrameMaxSize])
+ ::= ( WindowFrameUnits WindowFrameExtent [WindowFrameMaxSize])
WindowFrameUnits
- ::= 'ROWS'
- | 'ROWS_RANGE'
+ ::= 'ROWS'
+ | 'ROWS_RANGE'
WindowFrameExtent
- ::= WindowFrameStart
- | WindowFrameBetween
+ ::= WindowFrameStart
+ | WindowFrameBetween
+
WindowFrameStart
- ::= ( 'UNBOUNDED' | NumLiteral | IntervalLiteral ) ['OPEN'] 'PRECEDING'
- | 'CURRENT' 'ROW'
+ ::= ( 'UNBOUNDED' | NumLiteral | IntervalLiteral ) ['OPEN'] 'PRECEDING'
+ | 'CURRENT' 'ROW'
+
WindowFrameBetween
- ::= 'BETWEEN' WindowFrameBound 'AND' WindowFrameBound
+ ::= 'BETWEEN' WindowFrameBound 'AND' WindowFrameBound
+
WindowFrameBound
- ::= WindowFrameStart
- | ( 'UNBOUNDED' | NumLiteral | IntervalLiteral ) ['OPEN'] 'FOLLOWING'
-
-WindowExcludeCurrentTime
- ::= 'EXCLUDE' 'CURRENT_TIME'
+ ::= WindowFrameStart
+ | ( 'UNBOUNDED' | NumLiteral | IntervalLiteral ) ['OPEN'] 'FOLLOWING'
+
+WindowAttribute
+ ::= WindowExcludeCurrentTime
+ | WindowExcludeCurrentRow
+ | WindowInstanceNotInWindow
+
+WindowExcludeCurrentTime
+ ::= 'EXCLUDE' 'CURRENT_TIME'
+
+WindowExcludeCurrentRow
+ ::= 'EXCLUDE' 'CURRENT_ROW'
WindowInstanceNotInWindow
- :: = 'INSTANCE_NOT_IN_WINDOW'
+ :: = 'INSTANCE_NOT_IN_WINDOW'
```
-*Window call function* implements functionality similar to aggregate functions. The difference is that the window call function does not need to pack the query results into a single line of output—in the query output, each line is separated. However, the window caller can scan all rows that may be part of the current row's group, depending on the grouping specification of the window caller (the `PARTITION BY` column). The syntax for calling a function from a window is one of the following:
+*Window functions* are similar to aggregate functions. The difference is that a window function does not need to pack the query results into a single output row; instead, each row remains a separate row in the output when the WINDOW clause is used.
+However, a window function can scan all rows that may belong to the current row's group, depending on the grouping specification of the window (the `PARTITION BY` columns).
+The syntax for calling a function over a window is shown below:
```
function_name ([expression [, expression ... ]]) OVER ( window_definition )
@@ -75,11 +87,11 @@ SELECT select_expr [, select_expr ...], window_function_name(expr) OVER window_n
SELECT select_expr [,select_expr...], window_function_name(expr) OVER window_name, ... FROM ... WINDOW AS window_name (PARTITION BY ... ORDER BY ... ROWS_RANEG BETWEEN ... AND ...)
```
-## Boundary Description
+## Description
-| SELECT statement elements | state | illustrate |
-| :------------- | ---------------------- | :----------------------------------------------------------- |
-| WINDOW Clause | Online Training not supported | The window clause is used to define one or several windows. Windows can be named or anonymous. Users can call aggregate functions on the window to perform some analytical calculations (```sql agg_func() over window_name```).
OpenMLDB currently only supports historical windows, not future windows (ie, does not support window boundaries of type `FOLLOWING`).
OpenMLDB windows only support `PARTITION BY` columns, not `PARTITION BY` operations or function expressions.
OpenMLDB windows only support `ORDER BY` columns, not `ORDER BY` operations or function expressions.
In Online Serving, you need to follow [3.2 Window usage specification under Online Serving](../deployment_manage/ONLINE_SERVING_REQUIREMENTS.md#online-serving window usage specification) |
+| `SELECT` Statement Elements | Offline Mode | Online Preview Mode | Online Request Mode | Note |
+|:-------------------------------------------------------|--------------|---------------------|---------------------|:------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
+| WINDOW Clause | **``✓``** | | **``✓``** | The window clause is used to define one or several windows. Windows can be named or anonymous. Users can call aggregate functions on the window to perform analysis (```sql agg_func() over window_name```). For Online Request Mode, please follow the [specification of WINDOW Clause under Online Request](../deployment_manage/ONLINE_REQUEST_REQUIREMENTS.md#window-usage-specification-under-online-serving) |
## Basic WINDOW SPEC Syntax Elements
@@ -87,10 +99,10 @@ SELECT select_expr [,select_expr...], window_function_name(expr) OVER window_nam
```sql
WindowPartitionClause
- ::= ( 'PARTITION' 'BY' ByList )
+ ::= ( 'PARTITION' 'BY' ByList )
WindowOrderByClause
- ::= ( 'ORDER' 'BY' ByList )
+ ::= ( 'ORDER' 'BY' ByList )
```
The `PARTITION BY` option groups the rows of the query into *partitions*, which are processed separately in the window function. `PARTITION BY` and the query level `GROUP BY` clause do similar work, except that its expressions can only be used as expressions and not as output column names or numbers. OpenMLDB requires that `PARTITION BY` must be configured. And currently **only supports grouping by column**, cannot support grouping by operation and function expression.
@@ -101,8 +113,8 @@ The `ORDER BY` option determines the order in which the rows in the partition ar
```sql
WindowFrameUnits
- ::= 'ROWS'
- | 'ROWS_RANGE'
+ ::= 'ROWS'
+ | 'ROWS_RANGE'
```
WindowFrameUnits defines the frame type of the window. OpenMLDB supports two types of window frames: ROWS and ROWS_RANGE
@@ -119,33 +131,39 @@ The SQL standard RANGE class window OpenMLDB system does not currently support i
```sql
WindowFrameExtent
- ::= WindowFrameStart
- | WindowFrameBetween
+ ::= WindowFrameStart
+ | WindowFrameBetween
WindowFrameBetween
- ::= 'BETWEEN' WindowFrameBound 'AND' WindowFrameBound
+ ::= 'BETWEEN' WindowFrameBound 'AND' WindowFrameBound
WindowFrameBound
- ::= ( 'UNBOUNDED' | NumLiteral | IntervalLiteral ) ['OPEN'] 'PRECEDING'
- | 'CURRENT' 'ROW'
+ ::= ( 'UNBOUNDED' | NumLiteral | IntervalLiteral ) ['OPEN'] 'PRECEDING'
+ | 'CURRENT' 'ROW'
```
- **WindowFrameExtent**定义了窗口的上界和下界。框架类型可以用 `ROWS`或`ROWS_RANGE`声明;
+ **WindowFrameExtent** defines the upper and lower bounds of a window. The window type can be defined by `ROWS` or `ROWS_RANGE`.
-- CURRENT ROW: 表示当前行
-- UNBOUNDED PRECEDING: 表示无限制上界
-- `expr` PRECEDING
- - 窗口类型为ROWS时,`expr`必须为一个正整数。它表示边界为当前行往前`expr`行。
- - 窗口类型为ROWS_RANGE时,`expr`一般为时间区间(例如`10s`, `10m`,`10h`, `10d`),它表示边界为当前行往前移expr时间段(例如,10秒,10分钟,10小时,10天)
-- OpenMLDB支持默认边界是闭合的。但支持OPEN关键字来修饰边界开区间
-- 请注意:标准SQL中,还支持FOLLOWING的边界,当OpenMLDB并不支持。
-#### **Example: 有名窗口(Named Window)**
+- `CURRENT ROW` is the row currently being computed.
+- `UNBOUNDED PRECEDING` indicates the upper bound of this window is unlimited.
+- `expr PRECEDING`
+ - When the window type is `ROWS`, `expr` must be a positive integer, which indicates that the upper bound is the `expr`-th row before the current row.
+ - When the window type is `ROWS_RANGE`, `expr` should be a time interval, such as `10s`, `10m`, `10h`, `10d`. The upper bound is the current row's time minus `expr` (for example, 10 seconds, 10 minutes, 10 hours, or 10 days earlier).
+- By default, OpenMLDB uses closed intervals. To declare an open boundary, use the keyword `OPEN`.
+
+
+```{note}
+Standard SQL also supports the `FOLLOWING` keyword, but OpenMLDB does not support it currently.
+```
+
+#### Example
+- **Named Window**
```SQL
SELECT sum(col2) OVER w1 as w1_col2_sum FROM t1
WINDOW w1 AS (PARTITION BY col1 ORDER BY col5 ROWS BETWEEN 3 PRECEDING AND CURRENT ROW)
```
-#### **Example: 匿名窗口**
+- **Anonymous Window**
```SQL
SELECT id, pk1, col1, std_ts,
@@ -153,33 +171,38 @@ sum(col1) OVER (PARTITION BY pk1 ORDER BY std_ts ROWS BETWEEN 1 PRECEDING AND CU
from t1;
```
-#### **Example: ROWS窗口**
+- **ROWS Window**
+The following `WINDOW` clause defines a `ROWS` window containing the preceding 1000 rows and the current row, so the window contains at most 1001 rows.
```SQL
--- ROWS example
--- desc: window ROWS, 前1000条到当前条
SELECT sum(col2) OVER w1 as w1_col2_sum FROM t1
WINDOW w1 AS (PARTITION BY col1 ORDER BY col5 ROWS BETWEEN 1000 PRECEDING AND CURRENT ROW);
```
-#### **Example: ROWS RANGE窗口**
+
+- **ROWS_RANGE Window**
+
+The following `WINDOW` clause defines a `ROWS_RANGE` window covering the rows within the preceding 10 seconds as well as the current row.
```SQL
--- ROWS example
--- desc: window ROWS_RANGE, 前10s到当前条
SELECT sum(col2) OVER w1 as w1_col2_sum FROM t1
WINDOW w1 AS (PARTITION BY col1 ORDER BY col5 ROWS_RANGE BETWEEN 10s PRECEDING AND CURRENT ROW);
```
-## OpenMLDB特有的WINDOW SPEC元素
+## WindowSpec Elements Specifically Designed by OpenMLDB
+
+
+### 1. WINDOW ... UNION
-### Window With Union
```sql
WindowUnionClause
- :: = ( 'UNION' TableRefs)
+ :: = ( 'UNION' TableRefs)
```
-#### **Example: Window with union 一张副表**
+
+#### Example
+- **Window with `UNION` on Two Tables**
+
```SQL
SELECT col1, col5, sum(col2) OVER w1 as w1_col2_sum FROM t1
@@ -188,7 +211,8 @@ WINDOW w1 AS (UNION t2 PARTITION BY col1 ORDER BY col5 ROWS_RANGE BETWEEN 10s PR
![Figure 2: window union one table](../dql/images/window_union_1_table.png)
-#### **Example: Window with union 多张副表**
+
+- **Window with `UNION` on Multiple Tables**
```SQL
SELECT col1, col5, sum(col2) OVER w1 as w1_col2_sum FROM t1
@@ -197,7 +221,9 @@ WINDOW w1 AS (UNION t2, t3 PARTITION BY col1 ORDER BY col5 ROWS_RANGE BETWEEN 10
![Figure 3: window union two tables](../dql/images/window_union_2_table.png)
-#### **Example: Window with union 样本表不进入窗口**
+
+- **Window with `UNION` and `INSTANCE_NOT_IN_WINDOW`**
+
```SQL
SELECT col1, col5, sum(col2) OVER w1 as w1_col2_sum FROM t1
@@ -206,7 +232,10 @@ WINDOW w1 AS (UNION t2 PARTITION BY col1 ORDER BY col5 ROWS_RANGE BETWEEN 10s PR
![Figure 4: window union one table with instance_not_in_window](../dql/images/window_union_1_table_instance_not_in_window.png)
-#### **Example: Window with union 列筛选子查询**
+
+
+- **Window with `UNION` Containing Subquery**
+
```SQL
SELECT col1, col5, sum(col2) OVER w1 as w1_col2_sum FROM t1
@@ -216,53 +245,76 @@ WINDOW w1 AS
PARTITION BY col1 ORDER BY col5 ROWS_RANGE BETWEEN 10s PRECEDING AND CURRENT ROW);
```
-### Window Exclude Current Time
+
+### 2. Window with EXCLUDE CURRENT_TIME
+Apart from the current row, rows whose `timestamp` value is the same as that of the `CURRENT ROW` are excluded from the window.
```
WindowExcludeCurrentTime
- ::= 'EXCLUDE' 'CURRENT_TIME'
+ ::= 'EXCLUDE' 'CURRENT_TIME'
```
-#### **Example: ROWS窗口EXCLUDE CURRENT TIME**
+#### Example
+- **ROWS Window with EXCLUDE CURRENT_TIME**
+
+The following `WINDOW` clause defines a `ROWS` window containing the preceding 1000 rows and the current row. Other rows that have the same time as the `CURRENT ROW` are excluded from the window.
```SQL
--- ROWS example
--- desc: window ROWS, 前1000条到当前条, 除了current row以外窗口内不包含当前时刻的其他数据
SELECT sum(col2) OVER w1 as w1_col2_sum FROM t1
WINDOW w1 AS (PARTITION BY col1 ORDER BY col5 ROWS BETWEEN 1000 PRECEDING AND CURRENT ROW EXCLUDE CURRENT_TIME);
```
-#### **Example: ROW RANGE窗口EXCLUDE CURRENT TIME**
+- **ROWS_RANGE Window with EXCLUDE CURRENT_TIME**
+
+
+The following `WINDOW` clause defines a `ROWS_RANGE` window covering the rows within the preceding 10 seconds as well as the current row. Other rows that have the same time as the `CURRENT ROW` are excluded from the window.
```SQL
--- ROWS example
--- desc: window ROWS, 前10s到当前条,除了current row以外窗口内不包含当前时刻的其他数据
SELECT sum(col2) OVER w1 as w1_col2_sum FROM t1
WINDOW w1 AS (PARTITION BY col1 ORDER BY col5 ROWS_RANGE BETWEEN 10s PRECEDING AND CURRENT ROW EXCLUDE CURRENT_TIME);
```
![Figure 5: window exclude current time](../dql/images/window_exclude_current_time.png)
-### Window Frame Max Size
+### 3. Window with EXCLUDE CURRENT_ROW
+
+The current row does not go into the window.
+
+```
+WindowExcludeCurrentRow
+ ::= 'EXCLUDE' 'CURRENT_ROW'
+```
+
+#### Example
+- **ROWS_RANGE Window with EXCLUDE CURRENT_ROW**
+
+```sql
+SELECT sum(col2) OVER w1 as w1_col2_sum FROM t1
+WINDOW w1 AS (PARTITION BY col1 ORDER BY col5 ROWS_RANGE BETWEEN 10s PRECEDING AND CURRENT ROW EXCLUDE CURRENT_ROW);
+```
+![Figure 6: window exclude current row](../dql/images/window_exclude_current_row.png)
+
+### 4. Window with MAXSIZE
+
-OpenMLDB在定义了元素,来限定窗口内条数。具体来说,可以在窗口定义里使用**MAXSIZE**关键字,来限制window内允许的有效窗口内最大数据条数。
+The keyword `MAXSIZE` is used to limit the number of rows in the window.
```sql
WindowFrameMaxSize
- :: = MAXSIZE NumLiteral
+ :: = MAXSIZE NumLiteral
```
-![Figure 6: window config max size](../dql/images/window_max_size.png)
+![Figure 7: window config max size](../dql/images/window_max_size.png)
-#### **Example: ROW RANGE 窗口MAXSIZE**
+#### Example
+- **ROWS_RANGE Window with MAXSIZE**
+The following `WINDOW` clause defines a `ROWS_RANGE` window covering the rows within the preceding 10 seconds as well as the current row. In addition, the window holds at most 3 rows.
```sql
--- ROWS example
--- desc: window ROWS_RANGE, 前10s到当前条,同时限制窗口条数不超过3条
SELECT sum(col2) OVER w1 as w1_col2_sum FROM t1
WINDOW w1 AS (PARTITION BY col1 ORDER BY col5 ROWS_RANGE BETWEEN 10s PRECEDING AND CURRENT ROW MAXSIZE 3);
```
```{seealso}
-Aggregate functions that can be used in window calculation, refer to [Built-in Functions](../functions_and_operators/Files/udfs_8h.md)
+Please refer to [Built-in Functions](../functions_and_operators/Files/udfs_8h.md) for aggregate functions that can be used in window computation.
````
diff --git a/docs/en/reference/sql/dql/images/window_exclude_current_row.png b/docs/en/reference/sql/dql/images/window_exclude_current_row.png
new file mode 100644
index 00000000000..0d6b5c8cab4
Binary files /dev/null and b/docs/en/reference/sql/dql/images/window_exclude_current_row.png differ
diff --git a/docs/en/reference/sql/dql/images/window_exclude_current_time.png b/docs/en/reference/sql/dql/images/window_exclude_current_time.png
index a58a0a54fd6..df6f10809e9 100644
Binary files a/docs/en/reference/sql/dql/images/window_exclude_current_time.png and b/docs/en/reference/sql/dql/images/window_exclude_current_time.png differ
diff --git a/docs/en/reference/sql/dql/images/window_max_size.png b/docs/en/reference/sql/dql/images/window_max_size.png
index e15562ddf23..51af41f010b 100644
Binary files a/docs/en/reference/sql/dql/images/window_max_size.png and b/docs/en/reference/sql/dql/images/window_max_size.png differ
diff --git a/docs/en/reference/sql/dql/images/window_union_1_table.png b/docs/en/reference/sql/dql/images/window_union_1_table.png
index ff223682eaf..7fcb9de0522 100644
Binary files a/docs/en/reference/sql/dql/images/window_union_1_table.png and b/docs/en/reference/sql/dql/images/window_union_1_table.png differ
diff --git a/docs/en/reference/sql/dql/images/window_union_1_table_instance_not_in_window.png b/docs/en/reference/sql/dql/images/window_union_1_table_instance_not_in_window.png
index 9e7d0d7aaf4..546d02bee9a 100644
Binary files a/docs/en/reference/sql/dql/images/window_union_1_table_instance_not_in_window.png and b/docs/en/reference/sql/dql/images/window_union_1_table_instance_not_in_window.png differ
diff --git a/docs/en/reference/sql/dql/images/window_union_2_table.png b/docs/en/reference/sql/dql/images/window_union_2_table.png
index fd273b563fa..bfd46944e06 100644
Binary files a/docs/en/reference/sql/dql/images/window_union_2_table.png and b/docs/en/reference/sql/dql/images/window_union_2_table.png differ
diff --git a/docs/en/reference/sql/functions_and_operators/Files/udfs_8h.md b/docs/en/reference/sql/functions_and_operators/Files/udfs_8h.md
index c066b616076..b25bef8d20b 100644
--- a/docs/en/reference/sql/functions_and_operators/Files/udfs_8h.md
+++ b/docs/en/reference/sql/functions_and_operators/Files/udfs_8h.md
@@ -21,16 +21,13 @@ Return the absolute value of expr.
Example:
+```sql
-
-```cpp
SELECT ABS(-32);
-- output 32
```
-
-
**Supported Types**:
* [`bool`]
@@ -57,16 +54,13 @@ Return the arc cosine of expr.
Example:
+```sql
-
-```cpp
SELECT ACOS(1);
-- output 0
```
-
-
**Supported Types**:
* [`number`]
@@ -87,16 +81,13 @@ Compute sum of two arguments.
Example:
+```sql
-
-```cpp
select add(1, 2);
-- output 3
```
-
-
**Supported Types**:
* [`bool`, `bool`]
@@ -134,16 +125,13 @@ Return the arc sine of expr.
Example:
+```sql
-
-```cpp
SELECT ASIN(0.0);
-- output 0.000000
```
-
-
**Supported Types**:
* [`number`]
@@ -156,35 +144,52 @@ at()
**Description**:
-Returns the value of expression from the offset-th row of the ordered partition.
+Returns the value evaluated at the row that is `offset` rows before the current row within the partition. The offset is evaluated with respect to the current row.
**Parameters**:
- * **offset** The number of rows forward from the current row from which to obtain the value.
+ * **offset** The number of rows before the current row from which to obtain the value; it must not be negative.
+Note: This function equals the `[at()](/reference/sql/functions_and_operators/Files/udfs_8h.md#function-at)` function.
-Example:
-
-
-| value |
-| -------- |
-| 0 |
-| 1 |
-| 2 |
-| 3 |
-| 4 |
+To get the value at a fixed offset within the window, use `nth_value()` rather than `[lag()](/reference/sql/functions_and_operators/Files/udfs_8h.md#function-lag)`/`at()`. The old `[at()](/reference/sql/functions_and_operators/Files/udfs_8h.md#function-at)` (version < 0.5.0) started from the last row of the window (which may not be the current row), so it behaved more like `nth_value()`.
+Example:
-```cpp
-SELECT at(value, 3) OVER w;
--- output 3
+| c1 | c2 |
+| -------- | -------- |
+| 0 | 1 |
+| 1 | 1 |
+| 2 | 2 |
+| 3 | 2 |
+| 4 | 2 |
+
+
+```sql
+
+SELECT lag(c1, 1) over w as co from t1 window w as(partition by c2 order by c1 rows between unbounded preceding and current row);
+-- output
+-- | co |
+-- |----|
+-- |NULL|
+-- |0 |
+-- |NULL|
+-- |2 |
+-- |3 |
+SELECT at(c1, 1) over w as co from t1 window w as(partition by c2 order by c1 rows between unbounded preceding and current row);
+-- output
+-- | co |
+-- |----|
+-- |NULL|
+-- |0 |
+-- |NULL|
+-- |2 |
+-- |3 |
```
-
-
**Supported Types**:
* [`list`, `int64`]
@@ -215,10 +220,9 @@ Return the arc tangent of expr If called with one parameter, this function retur
Example:
+```sql
-
-```cpp
-SELECT ATAN(-0.0);
+SELECT ATAN(-0.0);
-- output -0.000000
SELECT ATAN(0, -0);
@@ -226,8 +230,6 @@ SELECT ATAN(0, -0);
```
-
-
**Supported Types**:
* [`bool`, `bool`]
@@ -258,16 +260,13 @@ Return the arc tangent of Y / X..
Example:
+```sql
-
-```cpp
SELECT ATAN2(0, -0);
-- output 3.141593
```
-
-
**Supported Types**:
* [`bool`, `bool`]
@@ -298,25 +297,22 @@ Compute average of values.
Example:
-| value |
+| value |
| -------- |
-| 0 |
-| 1 |
-| 2 |
-| 3 |
+| 0 |
+| 1 |
+| 2 |
+| 3 |
| 4 |
+```sql
-
-```cpp
SELECT avg(value) OVER w;
-- output 2
```
-
-
**Supported Types**:
* [`list`]
@@ -341,24 +337,21 @@ Compute average of values grouped by category key and output string. Each group
Example:
-| value | catagory |
+| value | catagory |
| -------- | -------- |
-| 0 | x |
-| 1 | y |
-| 2 | x |
-| 3 | y |
-| 4 | x |
-
+| 0 | x |
+| 1 | y |
+| 2 | x |
+| 3 | y |
+| 4 | x |
+```sql
-```cpp
SELECT avg_cate(value, catagory) OVER w;
-- output "x:2,y:2"
```
-
-
**Supported Types**:
* [`list`, `list`]
@@ -380,33 +373,30 @@ Compute average of values matching specified condition grouped by category key a
**Parameters**:
+ * **catagory** Specify catagory column to group by.
* **value** Specify value column to aggregate on.
* **condition** Specify condition column.
- * **catagory** Specify catagory column to group by.
Example:
-| value | condition | catagory |
+| value | condition | catagory |
| -------- | -------- | -------- |
-| 0 | true | x |
-| 1 | false | y |
-| 2 | false | x |
-| 3 | true | y |
-| 4 | true | x |
-
+| 0 | true | x |
+| 1 | false | y |
+| 2 | false | x |
+| 3 | true | y |
+| 4 | true | x |
+```sql
-```cpp
-SELECT avg_cate_where(value, condition, catagory) OVER w;
+SELECT avg_cate_where(catagory, value, condition) OVER w;
-- output "x:2,y:3"
```
-
-
**Supported Types**:
* [`list`, `list`, `list`]
@@ -440,25 +430,22 @@ Compute average of values match specified condition.
Example:
-| value |
+| value |
| -------- |
-| 0 |
-| 1 |
-| 2 |
-| 3 |
+| 0 |
+| 1 |
+| 2 |
+| 3 |
| 4 |
+```sql
-
-```cpp
SELECT avg_where(value, value > 2) OVER w;
-- output 3.5
```
-
-
**Supported Types**:
* [`list`, `list`]
@@ -479,16 +466,13 @@ Cast string expression to bool.
Example:
+```sql
-
-```cpp
select bool("true");
-- output true
```
-
-
**Supported Types**:
* [`string`]
@@ -514,16 +498,13 @@ Return the smallest integer value not less than the expr.
Example:
+```sql
-
-```cpp
SELECT CEIL(1.23);
-- output 2
```
-
-
**Supported Types**:
* [`bool`]
@@ -550,21 +531,99 @@ Return the smallest integer value not less than the expr.
Example:
+```sql
-
-```cpp
SELECT CEIL(1.23);
-- output 2
```
-
-
**Supported Types**:
* [`bool`]
* [`number`]
+### function char
+
+```cpp
+char()
+```
+
+**Description**:
+
+Returns the ASCII character whose code value equals expr. If n >= 256, the result is equivalent to char(n % 256).
+
+**Since**:
+0.6.0
+
+
+Example:
+
+```sql
+
+SELECT char(65);
+--output "A"
+```
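+
+The following hedged example illustrates the `char(n % 256)` rule stated above for arguments >= 256 (321 % 256 = 65, which maps to `A`):
+
+```sql
+
+SELECT char(321);
+--output "A"
+```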
+
+
+**Supported Types**:
+
+* [`int32`]
+
+### function char_length
+
+```cpp
+char_length()
+```
+
+**Description**:
+
+Returns the length of the string. It is measured in characters; multibyte character strings are not supported.
+
+**Since**:
+0.6.0
+
+
+Example:
+
+```sql
+
+SELECT CHAR_LENGTH('Spark SQL ');
+--output 10
+```
+
+
+**Supported Types**:
+
+* [`string`]
+
+### function character_length
+
+```cpp
+character_length()
+```
+
+**Description**:
+
+Returns the length of the string. It is measured in characters; multibyte character strings are not supported.
+
+**Since**:
+0.6.0
+
+
+Example:
+
+```sql
+
+SELECT CHAR_LENGTH('Spark SQL ');
+--output 10
+```
+
+
+**Supported Types**:
+
+* [`string`]
+
### function concat
```cpp
@@ -581,16 +640,13 @@ This function returns a string resulting from the joining of two or more string
Example:
+```sql
-
-```cpp
select concat("1", 2, 3, 4, 5.6, 7.8, Timestamp(1590115420000L));
-- output "12345.67.82020-05-22 10:43:40"
```
-
-
**Supported Types**:
* [...]
@@ -611,16 +667,13 @@ Returns a string resulting from the joining of two or more string value in an en
Example:
+```sql
-
-```cpp
select concat_ws("-", "1", 2, 3, 4, 5.6, 7.8, Timestamp(1590115420000L));
-- output "1-2-3-4-5.6-7.8-2020-05-22 10:43:40"
```
-
-
**Supported Types**:
* [`bool`, ...]
@@ -650,17 +703,14 @@ Return the cosine of expr.
Example:
+```sql
-
-```cpp
SELECT COS(0);
-- output 1.000000
```
-
-
* The value returned by [cos()](/reference/sql/functions_and_operators/Files/udfs_8h.md#function-cos) is always in the range: -1 to 1.
**Supported Types**:
@@ -688,16 +738,13 @@ Return the cotangent of expr.
Example:
+```sql
-
-```cpp
-SELECT COT(1);
+SELECT COT(1);
-- output 0.6420926159343306
```
-
-
**Supported Types**:
* [`number`]
@@ -725,25 +772,22 @@ Compute number of values.
Example:
-| value |
+| value |
| -------- |
-| 0 |
-| 1 |
-| 2 |
-| 3 |
+| 0 |
+| 1 |
+| 2 |
+| 3 |
| 4 |
+```sql
-
-```cpp
SELECT count(value) OVER w;
-- output 5
```
-
-
**Supported Types**:
* [`list`]
@@ -773,24 +817,21 @@ Compute count of values grouped by category key and output string. Each group is
Example:
-| value | catagory |
+| value | catagory |
| -------- | -------- |
-| 0 | x |
-| 1 | y |
-| 2 | x |
-| 3 | y |
-| 4 | x |
+| 0 | x |
+| 1 | y |
+| 2 | x |
+| 3 | y |
+| 4 | x |
+```sql
-
-```cpp
SELECT count_cate(value, catagory) OVER w;
-- output "x:3,y:2"
```
-
-
**Supported Types**:
* [`list`, `list`]
@@ -812,33 +853,30 @@ Compute count of values matching specified condition grouped by category key and
**Parameters**:
+ * **catagory** Specify catagory column to group by.
* **value** Specify value column to aggregate on.
* **condition** Specify condition column.
- * **catagory** Specify catagory column to group by.
Example:
-| value | condition | catagory |
+| value | condition | catagory |
| -------- | -------- | -------- |
-| 0 | true | x |
-| 1 | false | y |
-| 2 | false | x |
-| 3 | true | y |
-| 4 | true | x |
+| 0 | true | x |
+| 1 | false | y |
+| 2 | false | x |
+| 3 | true | y |
+| 4 | true | x |
+```sql
-
-```cpp
-SELECT count_cate_where(value, condition, catagory) OVER w;
+SELECT count_cate_where(catagory, value, condition) OVER w;
-- output "x:2,y:1"
```
-
-
**Supported Types**:
* [`list`, `list`, `list`]
@@ -872,29 +910,27 @@ Compute number of values match specified condition.
Example:
-| value |
+| value |
| -------- |
-| 0 |
-| 1 |
-| 2 |
-| 3 |
+| 0 |
+| 1 |
+| 2 |
+| 3 |
| 4 |
+```sql
-
-```cpp
SELECT count_where(value, value > 2) OVER w;
-- output 2
```
-
-
**Supported Types**:
* [`list`, `list`]
* [`list`, `list`]
+* [`list`, `list`]
* [`list`, `list`]
* [`list`, `list`]
@@ -914,9 +950,8 @@ Cast timestamp or string expression to date.
Example:
+```sql
-
-```cpp
select date(timestamp(1590115420000));
-- output 2020-05-22
select date("2020-05-22");
@@ -924,8 +959,6 @@ select date("2020-05-22");
```
-
-
**Supported Types**:
* [`string`]
@@ -943,15 +976,12 @@ Formats the datetime value according to the format string.
Example:
+```sql
-
-```cpp
select date_format(timestamp(1590115420000),"%Y-%m-%d %H:%M:%S");
--output "2020-05-22 10:43:40"
```
-
-
**Supported Types**:
* [`date`, `string`]
@@ -973,9 +1003,10 @@ Return the day of the month for a timestamp or date.
Note: This function equals the `[day()](/reference/sql/functions_and_operators/Files/udfs_8h.md#function-day)` function.
-Example:
+Example:
+
+```sql
-```cpp
select dayofmonth(timestamp(1590115420000));
-- output 22
@@ -984,8 +1015,6 @@ select day(timestamp(1590115420000));
```
-
-
**Supported Types**:
* [`date`]
@@ -1008,9 +1037,10 @@ Return the day of the month for a timestamp or date.
Note: This function equals the `[day()](/reference/sql/functions_and_operators/Files/udfs_8h.md#function-day)` function.
-Example:
+Example:
+
+```sql
-```cpp
select dayofmonth(timestamp(1590115420000));
-- output 22
@@ -1019,8 +1049,6 @@ select day(timestamp(1590115420000));
```
-
-
**Supported Types**:
* [`date`]
@@ -1043,16 +1071,15 @@ Return the day of week for a timestamp or date.
Note: This function equals the `[week()](/reference/sql/functions_and_operators/Files/udfs_8h.md#function-week)` function.
-Example:
+Example:
+
+```sql
-```cpp
select dayofweek(timestamp(1590115420000));
-- output 6
```
-
-
**Supported Types**:
* [`date`]
@@ -1073,9 +1100,10 @@ Return the day of year for a timestamp or date. Returns 0 given an invalid date.
0.1.0
-Example:
+Example:
+
+```sql
-```cpp
select dayofyear(timestamp(1590115420000));
-- output 143
@@ -1090,14 +1118,44 @@ select dayofyear(date("2020-05-32"));
```
-
-
**Supported Types**:
* [`date`]
* [`int64`]
* [`timestamp`]
+### function degrees
+
+```cpp
+degrees()
+```
+
+**Description**:
+
+Convert radians to degrees.
+
+**Parameters**:
+
+ * **expr**
+
+
+**Since**:
+0.5.0
+
+
+Example:
+
+```sql
+
+SELECT degrees(3.141592653589793);
+-- output 180.0
+```
+
+
+**Supported Types**:
+
+* [`double`]
+
### function distinct_count
```cpp
@@ -1121,25 +1179,22 @@ Compute number of distinct values.
Example:
-| value |
+| value |
| -------- |
-| 0 |
-| 0 |
-| 2 |
-| 2 |
+| 0 |
+| 0 |
+| 2 |
+| 2 |
| 4 |
+```sql
-
-```cpp
SELECT distinct_count(value) OVER w;
-- output 3
```
-
-
**Supported Types**:
* [`list`]
@@ -1164,16 +1219,13 @@ Cast string expression to double.
Example:
+```sql
-
-```cpp
select double("1.23");
-- output 1.23
```
-
-
**Supported Types**:
* [`string`]
@@ -1197,16 +1249,13 @@ Return the value of e (the base of natural logarithms) raised to the power of ex
0.1.0
+```sql
-
-```cpp
-SELECT EXP(0);
+SELECT EXP(0);
-- output 1
```
-
-
**Supported Types**:
* [`number`]
@@ -1227,7 +1276,7 @@ Returns the value of expr from the first row of the window frame.
@since 0.1.0
```
-**Supported Types**:
+ **Supported Types**:
### function float
@@ -1245,16 +1294,13 @@ Cast string expression to float.
Example:
+```sql
-
-```cpp
select float("1.23");
-- output 1.23
```
-
-
**Supported Types**:
* [`string`]
@@ -1280,16 +1326,13 @@ Return the largest integer value not less than the expr.
Example:
+```sql
-
-```cpp
SELECT FLOOR(1.23);
-- output 1
```
-
-
**Supported Types**:
* [`bool`]
@@ -1311,16 +1354,13 @@ Used by feature zero, for each string value from specified column of window, joi
Example:
+```sql
-
-```cpp
select fz_join(fz_split("k1:v1,k2:v2", ","), " ");
-- "k1:v1 k2:v2"
```
-
-
**Supported Types**:
* [`list`, `string`]
@@ -1398,7 +1438,7 @@ Compute the top1 key's ratio.
@since 0.1.0
```
-**Supported Types**:
+ **Supported Types**:
* [`list`]
* [`list`]
@@ -1421,7 +1461,7 @@ Return the topN keys sorted by their frequency.
@since 0.1.0
```
-**Supported Types**:
+ **Supported Types**:
* [`list`, `list`]
* [`list`, `list`]
@@ -1485,57 +1525,87 @@ Used by feature zero, for each string value from specified column of window, spl
* [`list`, `list`, `list`]
-### function hour
+### function hex
```cpp
-hour()
+hex()
```
**Description**:
-Return the hour for a timestamp.
+Convert a number to hexadecimal. If the value is a double, it is rounded before the conversion.
**Since**:
-0.1.0
+0.6.0
-Example:
-
-```cpp
-select hour(timestamp(1590115420000));
--- output 10
-```
+Example:
+```sql
+select hex(17);
+--output "11"
+select hex(17.4);
+--output "11"
+select hex(17.5);
+--output "12"
+```
**Supported Types**:
-* [`int64`]
-* [`timestamp`]
+* [`number`]
+* [`string`]
-### function identity
+### function hour
```cpp
-identity()
+hour()
```
**Description**:
-Return value.
+Return the hour for a timestamp.
**Since**:
0.1.0
-Example:
+Example:
+
+```sql
+
+select hour(timestamp(1590115420000));
+-- output 10
+```
+
+
+**Supported Types**:
+
+* [`int64`]
+* [`timestamp`]
+
+### function identity
```cpp
-select identity(1);
--- output 1
+identity()
```
+**Description**:
+
+Return value.
+**Since**:
+0.1.0
+
+
+Example:
+
+```sql
+
+select identity(1);
+-- output 1
+```
**Supported Types**:
@@ -1568,16 +1638,13 @@ If input is not null, return input value; else return default value.
Example:
+```sql
-
-```cpp
-SELECT if_null("hello", "default"), if_null(NULL, "default");
+SELECT if_null("hello", "default"), if_null(cast(null as string), "default");
-- output ["hello", "default"]
```
-
-
**Supported Types**:
* [`bool`, `bool`]
@@ -1612,16 +1679,13 @@ If input is not null, return input value; else return default value.
Example:
+```sql
-
-```cpp
-SELECT ifnull("hello", "default"), ifnull(NULL, "default");
+SELECT if_null("hello", "default"), if_null(cast(null as string), "default");
-- output ["hello", "default"]
```
-
-
**Supported Types**:
* [`bool`, `bool`]
@@ -1667,9 +1731,10 @@ Rules:
3. case insensitive
4. backslash: sql string literal use backslash() for escape sequences, write '\' as backslash itself
5. if one or more of target, pattern and escape are null values, then the result is null
-Example:
+Example:
+
+```sql
-```cpp
select ilike_match('Mike', 'mi_e', '\\')
-- output: true
@@ -1687,8 +1752,6 @@ select ilike_match('Mi\\ke', 'mi\\_e', string(null))
```
-
-
**Supported Types**:
* [`string`, `string`]
@@ -1708,16 +1771,15 @@ Return expression + 1.
0.1.0
-Example:
+Example:
+
+```sql
-```cpp
select inc(1);
-- output 2
```
-
-
**Supported Types**:
* [`number`]
@@ -1738,16 +1800,13 @@ Cast string expression to int16.
Example:
+```sql
-
-```cpp
select int16("123");
-- output 123
```
-
-
**Supported Types**:
* [`string`]
@@ -1768,16 +1827,13 @@ Cast string expression to int32.
Example:
+```sql
-
-```cpp
select int32("12345");
-- output 12345
```
-
-
**Supported Types**:
* [`string`]
@@ -1798,16 +1854,13 @@ Cast string expression to int64.
Example:
+```sql
-
-```cpp
select int64("1590115420000");
-- output 1590115420000
```
-
-
**Supported Types**:
* [`string`]
@@ -1876,42 +1929,119 @@ lag()
**Description**:
-Returns the value of expression from the offset-th row of the ordered partition.
+Returns the value evaluated at the row that is `offset` rows before the current row within the partition. The offset is evaluated with respect to the current row.
**Parameters**:
- * **offset** The number of rows forward from the current row from which to obtain the value.
+ * **offset** The number of rows before the current row from which to obtain the value; it must not be negative.
+
+
+Note: This function equals the `[at()](/reference/sql/functions_and_operators/Files/udfs_8h.md#function-at)` function.
+To get the value at a fixed offset within the window, use `nth_value()` rather than `[lag()](/reference/sql/functions_and_operators/Files/udfs_8h.md#function-lag)`/`at()`. The old `[at()](/reference/sql/functions_and_operators/Files/udfs_8h.md#function-at)` (version < 0.5.0) started from the last row of the window (which may not be the current row), so it behaved more like `nth_value()`.
Example:
-| value |
-| -------- |
-| 0 |
-| 1 |
-| 2 |
-| 3 |
-| 4 |
+| c1 | c2 |
+| -------- | -------- |
+| 0 | 1 |
+| 1 | 1 |
+| 2 | 2 |
+| 3 | 2 |
+| 4 | 2 |
+
+
+```sql
+
+SELECT lag(c1, 1) over w as co from t1 window w as(partition by c2 order by c1 rows between unbounded preceding and current row);
+-- output
+-- | co |
+-- |----|
+-- |NULL|
+-- |0 |
+-- |NULL|
+-- |2 |
+-- |3 |
+SELECT at(c1, 1) over w as co from t1 window w as(partition by c2 order by c1 rows between unbounded preceding and current row);
+-- output
+-- | co |
+-- |----|
+-- |NULL|
+-- |0 |
+-- |NULL|
+-- |2 |
+-- |3 |
+```
+**Supported Types**:
+* [`list`, `int64`]
+* [`list`, `int64`]
+* [`list`, `int64`]
+* [`list`, `int64`]
+* [`list`, `int64`]
+### function last_day
```cpp
-SELECT lag(value, 3) OVER w;
--- output 3
+last_day()
```
+**Description**:
+
+Return the last day of the month to which the date belongs.
+
+**Since**:
+0.6.1
+
+
+Example:
+
+```sql
+
+select last_day(timestamp("2020-05-22 10:43:40"));
+-- output 2020-05-31
+select last_day(timestamp("2020-02-12 10:43:40"));
+-- output 2020-02-29
+select last_day(timestamp("2021-02-12"));
+-- output 2021-02-28
+```
**Supported Types**:
-* [`list`, `int64`]
-* [`list`, `int64`]
-* [`list`, `int64`]
-* [`list`, `int64`]
-* [`list`, `int64`]
+* [`date`]
+* [`int64`]
+* [`timestamp`]
+
+### function lcase
+
+```cpp
+lcase()
+```
+
+**Description**:
+
+Convert all the characters to lowercase. Note that characters with values > 127 are simply returned.
+
+**Since**:
+0.5.0
+
+
+Example:
+
+```sql
+
+SELECT LCASE('SQl') as str1;
+--output "sql"
+```
+
+
+**Supported Types**:
+
+* [`string`]
### function like_match
@@ -1946,9 +2076,10 @@ Rules:
3. case sensitive
4. backslash: sql string literal use backslash() for escape sequences, write '\' as backslash itself
5. if one or more of target, pattern and escape are null values, then the result is null
-Example:
+Example:
+
+```sql
-```cpp
select like_match('Mike', 'Mi_e', '\\')
-- output: true
@@ -1966,8 +2097,6 @@ select like_match('Mi\\ke', 'Mi\\_e', string(null))
```
-
-
**Supported Types**:
* [`string`, `string`]
@@ -1994,16 +2123,13 @@ Return the natural logarithm of expr.
Example:
+```sql
-
-```cpp
-SELECT LN(1);
+SELECT LN(1);
-- output 0.000000
```
-
-
**Supported Types**:
* [`bool`]
@@ -2031,10 +2157,9 @@ log(base, expr) If called with one parameter, this function returns the natural
Example:
+```sql
-
-```cpp
-SELECT LOG(1);
+SELECT LOG(1);
-- output 0.000000
SELECT LOG(10,100);
@@ -2042,8 +2167,6 @@ SELECT LOG(10,100);
```
-
-
**Supported Types**:
* [`bool`]
@@ -2080,16 +2203,13 @@ Return the base-10 logarithm of expr.
Example:
+```sql
-
-```cpp
-SELECT LOG10(100);
+SELECT LOG10(100);
-- output 2
```
-
-
**Supported Types**:
* [`bool`]
@@ -2116,20 +2236,44 @@ Return the base-2 logarithm of expr.
Example:
+```sql
+
+SELECT LOG2(65536);
+-- output 16
+```
+
+**Supported Types**:
+
+* [`bool`]
+* [`number`]
+
+### function lower
```cpp
-SELECT LOG2(65536);
--- output 16
+lower()
```
+**Description**:
+
+Convert all the characters to lowercase. Note that characters with values > 127 are simply returned.
+**Since**:
+0.5.0
+
+
+Example:
+
+```sql
+
+SELECT LCASE('SQl') as str1;
+--output "sql"
+```
**Supported Types**:
-* [`bool`]
-* [`number`]
+* [`string`]
### function make_tuple
@@ -2167,25 +2311,22 @@ Compute maximum of values.
Example:
-| value |
+| value |
| -------- |
-| 0 |
-| 1 |
-| 2 |
-| 3 |
+| 0 |
+| 1 |
+| 2 |
+| 3 |
| 4 |
+```sql
-
-```cpp
SELECT max(value) OVER w;
-- output 4
```
-
-
**Supported Types**:
* [`list`]
@@ -2213,24 +2354,21 @@ Compute maximum of values grouped by category key and output string. Each group
Example:
-| value | catagory |
+| value | catagory |
| -------- | -------- |
-| 0 | x |
-| 1 | y |
-| 2 | x |
-| 3 | y |
-| 4 | x |
-
+| 0 | x |
+| 1 | y |
+| 2 | x |
+| 3 | y |
+| 4 | x |
+```sql
-```cpp
SELECT max_cate(value, catagory) OVER w;
-- output "x:4,y:3"
```
-
-
**Supported Types**:
* [`list`, `list`]
@@ -2251,33 +2389,31 @@ max_cate_where()
Compute maximum of values matching specified condition grouped by category key and output string. Each group is represented as 'K:V' and separated by comma in outputs and are sorted by key in ascend order.
**Parameters**:
-
+
+ * **catagory** Specify catagory column to group by.
* **value** Specify value column to aggregate on.
* **condition** Specify condition column.
- * **catagory** Specify catagory column to group by.
+
Example:
-| value | condition | catagory |
+| value | condition | catagory |
| -------- | -------- | -------- |
-| 0 | true | x |
-| 1 | false | y |
-| 2 | false | x |
-| 3 | true | y |
-| 4 | true | x |
+| 0 | true | x |
+| 1 | false | y |
+| 2 | false | x |
+| 3 | true | y |
+| 4 | true | x |
+```sql
-
-```cpp
-SELECT max_cate_where(value, condition, catagory) OVER w;
+SELECT max_cate_where(catagory, value, condition) OVER w;
-- output "x:4,y:3"
```
-
-
**Supported Types**:
* [`list`, `list`, `list`]
@@ -2311,25 +2447,22 @@ Compute maximum of values match specified condition.
Example:
-| value |
+| value |
| -------- |
-| 0 |
-| 1 |
-| 2 |
-| 3 |
+| 0 |
+| 1 |
+| 2 |
+| 3 |
| 4 |
+```sql
-
-```cpp
SELECT max_where(value, value <= 2) OVER w;
-- output 2
```
-
-
**Supported Types**:
* [`list`, `list`]
@@ -2361,6 +2494,48 @@ Compute maximum of two arguments.
* [`string`, `string`]
* [`timestamp`, `timestamp`]
+### function median
+
+```cpp
+median()
+```
+
+**Description**:
+
+Compute the median of values.
+
+**Parameters**:
+
+ * **value** Specify value column to aggregate on.
+
+
+**Since**:
+0.6.0
+
+
+
+Example:
+
+
+| value |
+| -------- |
+| 1 |
+| 2 |
+| 3 |
+| 4 |
+
+
+```sql
+
+SELECT median(value) OVER w;
+-- output 2.5
+```
+
+
+**Supported Types**:
+
+* [`list`]
+
### function min
```cpp
@@ -2384,25 +2559,22 @@ Compute minimum of values.
Example:
-| value |
+| value |
| -------- |
-| 0 |
-| 1 |
-| 2 |
-| 3 |
+| 0 |
+| 1 |
+| 2 |
+| 3 |
| 4 |
+```sql
-
-```cpp
SELECT min(value) OVER w;
-- output 0
```
-
-
**Supported Types**:
* [`list`]
@@ -2430,24 +2602,21 @@ Compute minimum of values grouped by category key and output string. Each group
Example:
-| value | catagory |
+| value | catagory |
| -------- | -------- |
-| 0 | x |
-| 1 | y |
-| 2 | x |
-| 3 | y |
-| 4 | x |
+| 0 | x |
+| 1 | y |
+| 2 | x |
+| 3 | y |
+| 4 | x |
+```sql
-
-```cpp
SELECT min_cate(value, catagory) OVER w;
-- output "x:0,y:1"
```
-
-
**Supported Types**:
* [`list`, `list`]
@@ -2469,33 +2638,31 @@ Compute minimum of values matching specified condition grouped by category key a
**Parameters**:
+ * **catagory** Specify catagory column to group by.
* **value** Specify value column to aggregate on.
* **condition** Specify condition column.
- * **catagory** Specify catagory column to group by.
+
Example:
-| value | condition | catagory |
+| value | condition | catagory |
| -------- | -------- | -------- |
-| 0 | true | x |
-| 1 | false | y |
-| 2 | false | x |
-| 1 | true | y |
-| 4 | true | x |
-| 3 | true | y |
-
+| 0 | true | x |
+| 1 | false | y |
+| 2 | false | x |
+| 1 | true | y |
+| 4 | true | x |
+| 3 | true | y |
+```sql
-```cpp
-SELECT min_cate_where(value, condition, catagory) OVER w;
+SELECT min_cate_where(catagory, value, condition) OVER w;
-- output "x:0,y:1"
```
-
-
**Supported Types**:
* [`list`, `list`, `list`]
@@ -2529,25 +2696,22 @@ Compute minimum of values match specified condition.
Example:
-| value |
+| value |
| -------- |
-| 0 |
-| 1 |
-| 2 |
-| 3 |
+| 0 |
+| 1 |
+| 2 |
+| 3 |
| 4 |
+```sql
-
-```cpp
SELECT min_where(value, value > 2) OVER w;
-- output 3
```
-
-
**Supported Types**:
* [`list`, `list`]
@@ -2593,16 +2757,15 @@ Return the minute for a timestamp.
0.1.0
-Example:
+Example:
+
+```sql
-```cpp
select minute(timestamp(1590115420000));
-- output 43
```
-
-
**Supported Types**:
* [`int64`]
@@ -2622,16 +2785,15 @@ Return the month part of a timestamp or date.
0.1.0
-Example:
+Example:
+
+```sql
-```cpp
select month(timestamp(1590115420000));
-- output 5
```
-
-
**Supported Types**:
* [`date`]
@@ -2660,16 +2822,13 @@ If input is not null, return input value; else return default value.
Example:
+```sql
-
-```cpp
-SELECT if_null("hello", "default"), if_null(NULL, "default");
+SELECT if_null("hello", "default"), if_null(cast(null as string), "default");
-- output ["hello", "default"]
```
-
-
**Supported Types**:
* [`bool`, `bool`]
@@ -2705,16 +2864,13 @@ nvl2(expr1, expr2, expr3) - Returns expr2 if expr1 is not null, or expr3 otherwi
Example:
+```sql
-
-```cpp
SELECT nvl2(NULL, 2, 1);
-- output 1
```
-
-
**Supported Types**:
* [`bool`, `bool`, `bool`]
@@ -2785,16 +2941,13 @@ Return the value of expr1 to the power of expr2.
Example:
+```sql
-
-```cpp
SELECT POW(2, 10);
-- output 1024.000000
```
-
-
**Supported Types**:
* [`bool`, `bool`]
@@ -2824,16 +2977,13 @@ Return the value of expr1 to the power of expr2.
Example:
+```sql
-
-```cpp
SELECT POW(2, 10);
-- output 1024.000000
```
-
-
**Supported Types**:
* [`bool`, `bool`]
@@ -2841,6 +2991,150 @@ SELECT POW(2, 10);
* [`number`, `bool`]
* [`number`, `number`]
+### function radians
+
+```cpp
+radians()
+```
+
+**Description**:
+
+Returns the argument X, converted from degrees to radians. (Note that π radians equals 180 degrees.)
+
+**Since**:
+0.6.0
+
+
+Example:
+
+```sql
+
+SELECT RADIANS(90);
+--output 1.570796326794896619231
+```
+
+
+**Supported Types**:
+
+* [`double`]
+
+### function regexp_like
+
+```cpp
+regexp_like()
+```
+
+**Description**:
+
+Pattern matching, the same as the RLIKE predicate (based on RE2).
+
+**Parameters**:
+
+ * **target** string to match
+ * **pattern** the regular expression match pattern
+ * **flags** specifies the matching behavior of the regular expression function. 'c': case-sensitive matching(default); 'i': case-insensitive matching; 'm': multi-line mode; 'e': Extracts sub-matches(ignored here); 's': Enables the POSIX wildcard character . to match new line.
+
+
+**Since**:
+0.6.1
+
+
+Rules:
+
+1. Accept standard POSIX (egrep) syntax regular expressions
+ * dot (.) : matches any single-width ASCII character in an expression, with the exception of line break characters.
+ * asterisk (*) : matches the preceding token zero or more times.
+ * plus sign (+) : matches the preceding token one or more times.
+ * question mark (?) : identifies the preceding character as being optional.
+ * vertical bar (|) : separates tokens, one of which must be matched, much like a logical OR statement.
+ * parenthesis ('(' and ')') : groups multiple tokens together to disambiguate or simplify references to them.
+ * open square bracket ([) and close square bracket (]) : enclose specific characters or a range of characters to be matched. The characters enclosed inside square brackets are known as a character class.
+ * caret (^) : the caret has two different meanings in a regular expression, depending on where it appears: As the first character in a character class, a caret negates the characters in that character class. As the first character in a regular expression, a caret identifies the beginning of a term. In this context, the caret is often referred to as an anchor character.
+ * dollar sign ($) : as the last character in a regular expression, a dollar sign identifies the end of a term. In this context, the dollar sign is often referred to as an anchor character.
+ * backslash (`\`) : used to invoke the actual character value for a metacharacter in a regular expression.
+2. Default flags parameter: 'c'
+3. backslash: SQL string literals use backslash (`\`) for escape sequences; write `\\` for the backslash character itself
+4. If one or more of target, pattern and flags are null values, then the result is null
+Example:
+
+```sql
+
+select regexp_like('Mike', 'Mi.k')
+-- output: true
+
+select regexp_like('Mi\nke', 'mi.k')
+-- output: false
+
+select regexp_like('Mi\nke', 'mi.k', 'si')
+-- output: true
+
+select regexp_like('append', 'ap*end')
+-- output: true
+```
+
+
+**Supported Types**:
+
+* [`string`, `string`]
+* [`string`, `string`, `string`]
+
+### function replace
+
+```cpp
+replace()
+```
+
+**Description**:
+
+replace(str, search[, replace]) - Replaces all occurrences of `search` with `replace`
+
+**Since**:
+0.5.2
+
+
+If `replace` is not given or is an empty string, the matched `search` substrings are removed from the final string.
+
+Example:
+
+```sql
+
+select replace("ABCabc", "abc", "ABC")
+-- output "ABCABC"
+```
+
+
+**Supported Types**:
+
+* [`string`, `string`]
+* [`string`, `string`, `string`]
+
+### function reverse
+
+```cpp
+reverse()
+```
+
+**Description**:
+
+Returns the reversed given string.
+
+**Since**:
+0.4.0
+
+
+Example:
+
+```sql
+
+SELECT REVERSE('abc') as str1;
+--output "cba"
+```
+
+
+**Supported Types**:
+
+* [`string`]
+
### function round
```cpp
@@ -2862,16 +3156,13 @@ Return the nearest integer value to expr (in floating-point format), rounding ha
Example:
+```sql
-
-```cpp
SELECT ROUND(1.23);
-- output 1
```
-
-
**Supported Types**:
* [`bool`]
@@ -2891,16 +3182,15 @@ Return the second for a timestamp.
0.1.0
-Example:
+Example:
+
+```sql
-```cpp
select second(timestamp(1590115420000));
-- output 40
```
-
-
**Supported Types**:
* [`int64`]
@@ -2927,17 +3217,14 @@ Return the sine of expr.
Example:
+```sql
-
-```cpp
SELECT SIN(0);
-- output 0.000000
```
-
-
* The value returned by [sin()](/reference/sql/functions_and_operators/Files/udfs_8h.md#function-sin) is always in the range: -1 to 1.
**Supported Types**:
@@ -2965,16 +3252,13 @@ Return square root of expr.
Example:
+```sql
-
-```cpp
SELECT SQRT(100);
-- output 10.000000
```
-
-
**Supported Types**:
* [`number`]
@@ -2995,9 +3279,8 @@ Returns 0 if the strings are the same, -1 if the first argument is smaller than
Example:
+```sql
-
-```cpp
select strcmp("text", "text1");
-- output -1
select strcmp("text1", "text");
@@ -3007,8 +3290,6 @@ select strcmp("text", "text");
```
-
-
**Supported Types**:
* [`string`, `string`]
@@ -3029,9 +3310,8 @@ Return string converted from numeric expression.
Example:
+```sql
-
-```cpp
select string(123);
-- output "123"
@@ -3040,8 +3320,6 @@ select string(1.23);
```
-
-
**Supported Types**:
* [`bool`]
@@ -3073,9 +3351,8 @@ Note: This function equals the `[substr()](/reference/sql/functions_and_operator
Example:
+```sql
-
-```cpp
select substr("hello world", 2);
-- output "llo world"
@@ -3085,8 +3362,6 @@ select substring("hello world", 2);
-
-
* If `pos` is positive, the beginning of the substring is `pos` characters from the start of the string.
* If `pos` is negative, the beginning of the substring is `pos` characters from the end of the string, rather than the beginning.
@@ -3119,9 +3394,8 @@ Note: This function equals the `[substr()](/reference/sql/functions_and_operator
Example:
+```sql
-
-```cpp
select substr("hello world", 2);
-- output "llo world"
@@ -3131,8 +3405,6 @@ select substring("hello world", 2);
-
-
* If `pos` is positive, the beginning of the substring is `pos` characters from the start of the string.
* If `pos` is negative, the beginning of the substring is `pos` characters from the end of the string, rather than the beginning.
@@ -3160,24 +3432,21 @@ Compute sum of values.
Example:
-| value |
+| value |
| -------- |
-| 0 |
-| 1 |
-| 2 |
-| 3 |
+| 0 |
+| 1 |
+| 2 |
+| 3 |
| 4 |
+```sql
-
-```cpp
SELECT sum(value) OVER w;
-- output 10
```
-
-
**Supported Types**:
* [`list`]
@@ -3203,24 +3472,21 @@ Compute sum of values grouped by category key and output string. Each group is r
Example:
-| value | catagory |
+| value | catagory |
| -------- | -------- |
-| 0 | x |
-| 1 | y |
-| 2 | x |
-| 3 | y |
-| 4 | x |
-
+| 0 | x |
+| 1 | y |
+| 2 | x |
+| 3 | y |
+| 4 | x |
+```sql
-```cpp
SELECT sum_cate(value, catagory) OVER w;
-- output "x:6,y:4"
```
-
-
**Supported Types**:
* [`list`, `list`]
@@ -3242,32 +3508,30 @@ Compute sum of values matching specified condition grouped by category key and o
**Parameters**:
+ * **catagory** Specify catagory column to group by.
* **value** Specify value column to aggregate on.
* **condition** Specify condition column.
- * **catagory** Specify catagory column to group by.
+
Example:
-| value | condition | catagory |
+| value | condition | catagory |
| -------- | -------- | -------- |
-| 0 | true | x |
-| 1 | false | y |
-| 2 | false | x |
-| 3 | true | y |
-| 4 | true | x |
+| 0 | true | x |
+| 1 | false | y |
+| 2 | false | x |
+| 3 | true | y |
+| 4 | true | x |
+```sql
-
-```cpp
-SELECT sum_cate_where(value, condition, catagory) OVER w;
+SELECT sum_cate_where(catagory, value, condition) OVER w;
-- output "x:4,y:3"
```
-
-
**Supported Types**:
* [`list`, `list`, `list`]
@@ -3301,25 +3565,22 @@ Compute sum of values match specified condition.
Example:
-| value |
+| value |
| -------- |
-| 0 |
-| 1 |
-| 2 |
-| 3 |
+| 0 |
+| 1 |
+| 2 |
+| 3 |
| 4 |
+```sql
-
-```cpp
SELECT sum_where(value, value > 2) OVER w;
-- output 7
```
-
-
**Supported Types**:
* [`list`, `list`]
@@ -3345,16 +3606,13 @@ Return the tangent of expr.
Example:
+```sql
-
-```cpp
SELECT TAN(0);
-- output 0.000000
```
-
-
**Supported Types**:
* [`number`]
@@ -3380,24 +3638,19 @@ Supported string style:
* yyyy-mm-dd hh:mm:ss
Example:
+```sql
-```{tip}
-We can use `string()` to make timestamp type values more readable.
-```
-```cpp
-select string(timestamp(1590115420000));
+select timestamp(1590115420000);
-- output 2020-05-22 10:43:40
-select string(timestamp(date("2020-05-22")));
+select date("2020-05-22");
-- output 2020-05-22 00:00:00
-select string(timestamp("2020-05-22 10:43:40"));
+select timestamp("2020-05-22 10:43:40");
-- output 2020-05-22 10:43:40
```
-
-
**Supported Types**:
* [`date`]
@@ -3427,25 +3680,22 @@ Compute top k of values and output string separated by comma. The outputs are so
Example:
-| value |
+| value |
| -------- |
-| 0 |
-| 1 |
-| 2 |
-| 3 |
+| 0 |
+| 1 |
+| 2 |
+| 3 |
| 4 |
+```sql
-
-```cpp
SELECT top(value, 3) OVER w;
-- output "2,3,4"
```
-
-
**Supported Types**:
* [`list`, `list`]
@@ -3468,10 +3718,10 @@ top_n_key_avg_cate_where()
Compute average of values matching specified condition grouped by category key. Output string for top N keys in descend order. Each group is represented as 'K:V' and separated by comma.
**Parameters**:
-
+
+ * **catagory** Specify catagory column to group by.
* **value** Specify value column to aggregate on.
* **condition** Specify condition column.
- * **catagory** Specify catagory column to group by.
* **n** Fetch top n keys.
@@ -3479,27 +3729,24 @@ Compute average of values matching specified condition grouped by category key.
Example:
-| value | condition | catagory |
+| value | condition | catagory |
| -------- | -------- | -------- |
-| 0 | true | x |
-| 1 | false | y |
-| 2 | false | x |
-| 3 | true | y |
-| 4 | true | x |
-| 5 | true | z |
-| 6 | false | z |
-
+| 0 | true | x |
+| 1 | false | y |
+| 2 | false | x |
+| 3 | true | y |
+| 4 | true | x |
+| 5 | true | z |
+| 6 | false | z |
+```sql
-```cpp
SELECT top_n_key_avg_cate_where(value, condition, catagory, 2)
OVER w;
-- output "z:5,y:3"
```
-
-
**Supported Types**:
* [`list`, `list`, `list`, `list`]
@@ -3527,9 +3774,9 @@ Compute count of values matching specified condition grouped by category key. Ou
**Parameters**:
+ * **catagory** Specify catagory column to group by.
* **value** Specify value column to aggregate on.
* **condition** Specify condition column.
- * **catagory** Specify catagory column to group by.
* **n** Fetch top n keys.
@@ -3537,27 +3784,24 @@ Compute count of values matching specified condition grouped by category key. Ou
Example:
-| value | condition | catagory |
+| value | condition | catagory |
| -------- | -------- | -------- |
-| 0 | true | x |
-| 1 | true | y |
-| 2 | false | x |
-| 3 | true | y |
-| 4 | false | x |
-| 5 | true | z |
-| 6 | true | z |
-
+| 0 | true | x |
+| 1 | true | y |
+| 2 | false | x |
+| 3 | true | y |
+| 4 | false | x |
+| 5 | true | z |
+| 6 | true | z |
+```sql
-```cpp
SELECT top_n_key_count_cate_where(value, condition, catagory, 2)
OVER w;
-- output "z:2,y:2"
```
-
-
**Supported Types**:
* [`list`, `list`, `list`, `list`]
@@ -3585,9 +3829,9 @@ Compute maximum of values matching specified condition grouped by category key.
**Parameters**:
+ * **catagory** Specify catagory column to group by.
* **value** Specify value column to aggregate on.
* **condition** Specify condition column.
- * **catagory** Specify catagory column to group by.
* **n** Fetch top n keys.
@@ -3595,27 +3839,24 @@ Compute maximum of values matching specified condition grouped by category key.
Example:
-| value | condition | catagory |
+| value | condition | catagory |
| -------- | -------- | -------- |
-| 0 | true | x |
-| 1 | false | y |
-| 2 | false | x |
-| 3 | true | y |
-| 4 | true | x |
-| 5 | true | z |
-| 6 | false | z |
-
+| 0 | true | x |
+| 1 | false | y |
+| 2 | false | x |
+| 3 | true | y |
+| 4 | true | x |
+| 5 | true | z |
+| 6 | false | z |
+```sql
-```cpp
SELECT top_n_key_max_cate_where(value, condition, catagory, 2)
OVER w;
-- output "z:5,y:3"
```
-
-
**Supported Types**:
* [`list`, `list`, `list`, `list`]
@@ -3643,9 +3884,9 @@ Compute minimum of values matching specified condition grouped by category key.
**Parameters**:
- * **value** Specify value column to aggregate on.
- * **condition** Specify condition column.
* **catagory** Specify catagory column to group by.
+ * **value** Specify value column to aggregate on.
+ * **condition** Specify condition column.
* **n** Fetch top n keys.
@@ -3653,27 +3894,24 @@ Compute minimum of values matching specified condition grouped by category key.
Example:
-| value | condition | catagory |
+| value | condition | catagory |
| -------- | -------- | -------- |
-| 0 | true | x |
-| 1 | true | y |
-| 2 | false | x |
-| 3 | true | y |
-| 4 | false | x |
-| 5 | true | z |
-| 6 | true | z |
-
+| 0 | true | x |
+| 1 | true | y |
+| 2 | false | x |
+| 3 | true | y |
+| 4 | false | x |
+| 5 | true | z |
+| 6 | true | z |
+```sql
-```cpp
SELECT top_n_key_min_cate_where(value, condition, catagory, 2)
OVER w;
-- output "z:5,y:1"
```
-
-
**Supported Types**:
* [`list`, `list`, `list`, `list`]
@@ -3701,9 +3939,9 @@ Compute sum of values matching specified condition grouped by category key. Outp
**Parameters**:
+ * **catagory** Specify catagory column to group by.
* **value** Specify value column to aggregate on.
* **condition** Specify condition column.
- * **catagory** Specify catagory column to group by.
* **n** Fetch top n keys.
@@ -3711,27 +3949,24 @@ Compute sum of values matching specified condition grouped by category key. Outp
Example:
-| value | condition | catagory |
+| value | condition | catagory |
| -------- | -------- | -------- |
-| 0 | true | x |
-| 1 | true | y |
-| 2 | false | x |
-| 3 | true | y |
-| 4 | false | x |
-| 5 | true | z |
-| 6 | true | z |
-
+| 0 | true | x |
+| 1 | true | y |
+| 2 | false | x |
+| 3 | true | y |
+| 4 | false | x |
+| 5 | true | z |
+| 6 | true | z |
+```sql
-```cpp
SELECT top_n_key_sum_cate_where(value, condition, catagory, 2)
OVER w;
-- output "z:11,y:4"
```
-
-
**Supported Types**:
* [`list`, `list`, `list`, `list`]
@@ -3768,16 +4003,13 @@ Return the nearest integer that is not greater in magnitude than the expr.
Example:
+```sql
-
-```cpp
SELECT TRUNCATE(1.23);
-- output 1.0
```
-
-
**Supported Types**:
* [`bool`]
@@ -3799,16 +4031,13 @@ Convert all the characters to uppercase. Note that characters values > 127 are s
Example:
+```sql
-
-```cpp
SELECT UCASE('Sql') as str1;
--output "SQL"
```
-
-
**Supported Types**:
* [`string`]
@@ -3829,16 +4058,13 @@ Convert all the characters to uppercase. Note that characters values > 127 are s
Example:
+```sql
-
-```cpp
SELECT UCASE('Sql') as str1;
--output "SQL"
```
-
-
**Supported Types**:
* [`string`]
@@ -3857,9 +4083,10 @@ Return the week of year for a timestamp or date.
0.1.0
-Example:
+Example:
+
+```sql
-```cpp
select weekofyear(timestamp(1590115420000));
-- output 21
select week(timestamp(1590115420000));
@@ -3867,8 +4094,6 @@ select week(timestamp(1590115420000));
```
-
-
**Supported Types**:
* [`date`]
@@ -3889,9 +4114,10 @@ Return the week of year for a timestamp or date.
0.1.0
-Example:
+Example:
+
+```sql
-```cpp
select weekofyear(timestamp(1590115420000));
-- output 21
select week(timestamp(1590115420000));
@@ -3899,8 +4125,6 @@ select week(timestamp(1590115420000));
```
-
-
**Supported Types**:
* [`date`]
@@ -3921,16 +4145,15 @@ Return the year part of a timestamp or date.
0.1.0
-Example:
+Example:
+
+```sql
-```cpp
select year(timestamp(1590115420000));
-- output 2020
```
-
-
**Supported Types**:
* [`date`]
diff --git a/docs/en/reference/sql/task_manage/SHOW_JOB.md b/docs/en/reference/sql/task_manage/SHOW_JOB.md
index c4b6e8003cc..bea6739122a 100644
--- a/docs/en/reference/sql/task_manage/SHOW_JOB.md
+++ b/docs/en/reference/sql/task_manage/SHOW_JOB.md
@@ -1,34 +1,36 @@
# SHOW JOB
+The `SHOW JOB` statement displays the details of a single submitted job, identified by the given JOB ID.
+
```SQL
-SHOW JOB;
+SHOW JOB job_id;
```
-The `SHOW JOB` statement displays the details of a single job that has been submitted.
## Example
Submit an online data import task:
+```sql
+LOAD DATA INFILE 'file:///tmp/test.csv' INTO TABLE demo_db.t1 options(format='csv', header=false, mode='append');
```
-LOAD DATA INFIEL 'file:///tmp/test.csv' INTO TABLE demo_db.t1 options(format='csv', header=false, mode='append');
-
+The output is shown below. The job ID of the above command is 1.
+```sql
---- ------------------ ----------- ------------ --------------- ---------------------------------------------------------------------------------------------------------------------------- --------- ---------------- -------
id job_type state start_time end_time parameter cluster application_id error
---- ------------------ ----------- ------------ --------------- ---------------------------------------------------------------------------------------------------------------------------- --------- ---------------- -------
- 1 ImportOnlineData Submitted 0 1641981373227 LOAD DATA INFIEL 'file:///tmp/test.csv' INTO TABLE demo_db.t1 options(format='csv', header=false, mode='append'); local
+ 1 ImportOnlineData Submitted 0 1641981373227 LOAD DATA INFILE 'file:///tmp/test.csv' INTO TABLE demo_db.t1 options(format='csv', header=false, mode='append'); local
---- ------------------ ----------- ------------ --------------- ---------------------------------------------------------------------------------------------------------------------------- --------- ---------------- -------
```
-View jobs with Job ID 1:
-
-```
+Check the job whose Job ID is 1:
+```sql
SHOW JOB 1;
---- ------------------ ----------- ------------ --------------- ---------------------------------------------------------------------------------------------------------------------------- --------- ---------------- -------
id job_type state start_time end_time parameter cluster application_id error
---- ------------------ ----------- ------------ --------------- ---------------------------------------------------------------------------------------------------------------------------- --------- ---------------- -------
- 1 ImportOnlineData Submitted 0 1641981373227 LOAD DATA INFIEL 'file:///tmp/test.csv' INTO TABLE demo_db.t1 options(format='csv', header=false, mode='append'); local
+ 1 ImportOnlineData Submitted 0 1641981373227 LOAD DATA INFILE 'file:///tmp/test.csv' INTO TABLE demo_db.t1 options(format='csv', header=false, mode='append'); local
---- ------------------ ----------- ------------ --------------- ---------------------------------------------------------------------------------------------------------------------------- --------- ---------------- -------
```
diff --git a/docs/en/reference/sql/task_manage/SHOW_JOBS.md b/docs/en/reference/sql/task_manage/SHOW_JOBS.md
index 4f0116193d6..aef209ea8e7 100644
--- a/docs/en/reference/sql/task_manage/SHOW_JOBS.md
+++ b/docs/en/reference/sql/task_manage/SHOW_JOBS.md
@@ -1,16 +1,17 @@
# SHOW JOBS
+The `SHOW JOBS` statement displays a list of submitted tasks in the cluster version, including all kinds of jobs in the offline mode and `LOAD DATA` jobs in the online mode.
```SQL
SHOW JOBS;
```
-The `SHOW JOBS` statement displays a list of tasks that have been submitted.
+
## Example
View all current tasks:
-```
+```sql
SHOW JOBS;
---- ---------- ------- ------------ ---------- ----------- --------- ---------------- -------
@@ -20,25 +21,25 @@ SHOW JOBS;
Submit an online data import task:
-```
-LOAD DATA INFIEL 'file:///tmp/test.csv' INTO TABLE demo_db.t1 options(format='csv', header=false, mode='append');
-
+```sql
+LOAD DATA INFILE 'file:///tmp/test.csv' INTO TABLE demo_db.t1 options(format='csv', header=false, mode='append');
+
---- ------------------ ----------- ------------ --------------- ---------------------------------------------------------------------------------------------------------------------------- --------- ---------------- -------
id job_type state start_time end_time parameter cluster application_id error
---- ------------------ ----------- ------------ --------------- ---------------------------------------------------------------------------------------------------------------------------- --------- ---------------- -------
- 1 ImportOnlineData Submitted 0 1641981373227 LOAD DATA INFIEL 'file:///tmp/test.csv' INTO TABLE demo_db.t1 options(format='csv', header=false, mode='append'); local
+ 1 ImportOnlineData Submitted 0 1641981373227 LOAD DATA INFILE 'file:///tmp/test.csv' INTO TABLE demo_db.t1 options(format='csv', header=false, mode='append'); local
---- ------------------ ----------- ------------ --------------- ---------------------------------------------------------------------------------------------------------------------------- --------- ---------------- -------
```
-View all current tasks:
+View all current tasks again:
-```
+```sql
SHOW JOBS;
---- ------------------ ----------- ------------ --------------- ---------------------------------------------------------------------------------------------------------------------------- --------- ---------------- -------
id job_type state start_time end_time parameter cluster application_id error
---- ------------------ ----------- ------------ --------------- ---------------------------------------------------------------------------------------------------------------------------- --------- ---------------- -------
- 1 ImportOnlineData Submitted 0 1641981373227 LOAD DATA INFIEL 'file:///tmp/test.csv' INTO TABLE demo_db.t1 options(format='csv', header=false, mode='append'); local
+ 1 ImportOnlineData Submitted 0 1641981373227 LOAD DATA INFILE 'file:///tmp/test.csv' INTO TABLE demo_db.t1 options(format='csv', header=false, mode='append'); local
---- ------------------ ----------- ------------ --------------- ---------------------------------------------------------------------------------------------------------------------------- --------- ---------------- -------
1 row in set
diff --git a/docs/en/reference/sql/task_manage/STOP_JOB.md b/docs/en/reference/sql/task_manage/STOP_JOB.md
index 5e2f8dcb694..7a0eb2025d3 100644
--- a/docs/en/reference/sql/task_manage/STOP_JOB.md
+++ b/docs/en/reference/sql/task_manage/STOP_JOB.md
@@ -1,32 +1,34 @@
# STOP JOB
+The `STOP JOB` statement stops a submitted job, identified by the given JOB ID.
+
+
```SQL
-STOP JOB;
+STOP JOB job_id;
```
-The `STOP JOB` statement stops a single job that has already been submitted.
## Example
-Submit an online data import task:
+Submit an online data import task. The output shows that the JOB ID of this task is 1.
-```
-LOAD DATA INFIEL 'file:///tmp/test.csv' INTO TABLE demo_db.t1 options(format='csv', header=false, mode='append');
+```sql
+LOAD DATA INFILE 'file:///tmp/test.csv' INTO TABLE demo_db.t1 options(format='csv', header=false, mode='append');
---- ------------------ ----------- ------------ --------------- ---------------------------------------------------------------------------------------------------------------------------- --------- ---------------- -------
- 1 ImportOnlineData Submitted 0 1641981373227 LOAD DATA INFIEL 'file:///tmp/test.csv' INTO TABLE demo_db.t1 options(format='csv', header=false, mode='append'); local
+ 1 ImportOnlineData Submitted 0 1641981373227 LOAD DATA INFILE 'file:///tmp/test.csv' INTO TABLE demo_db.t1 options(format='csv', header=false, mode='append'); local
---- ------------------ ----------- ------------ --------------- ---------------------------------------------------------------------------------------------------------------------------- --------- ---------------- -------
```
-Stop with Job ID 1:
+Stop the job whose Job ID is 1:
-```
+```sql
STOP JOB 1;
---- ------------------ ----------- ------------ --------------- ---------------------------------------------------------------------------------------------------------------------------- --------- ---------------- -------
id job_type state start_time end_time parameter cluster application_id error
---- ------------------ ----------- ------------ --------------- ---------------------------------------------------------------------------------------------------------------------------- --------- ---------------- -------
- 1 ImportOnlineData STOPPED 0 1641981373227 LOAD DATA INFIEL 'file:///tmp/test.csv' INTO TABLE demo_db.t1 options(format='csv', header=false, mode='append'); local
+ 1 ImportOnlineData STOPPED 0 1641981373227 LOAD DATA INFILE 'file:///tmp/test.csv' INTO TABLE demo_db.t1 options(format='csv', header=false, mode='append'); local
---- ------------------ ----------- ------------ --------------- ---------------------------------------------------------------------------------------------------------------------------- --------- ---------------- -------
```
diff --git a/docs/en/tutorial/images/modes-flow-en.png b/docs/en/tutorial/images/modes-flow-en.png
new file mode 100644
index 00000000000..0f72fcd489f
Binary files /dev/null and b/docs/en/tutorial/images/modes-flow-en.png differ
diff --git a/docs/en/tutorial/images/modes-request-en.png b/docs/en/tutorial/images/modes-request-en.png
new file mode 100644
index 00000000000..6ad492178f4
Binary files /dev/null and b/docs/en/tutorial/images/modes-request-en.png differ
diff --git a/docs/en/tutorial/index.rst b/docs/en/tutorial/index.rst
index 38d0af83a93..ec658088b8d 100644
--- a/docs/en/tutorial/index.rst
+++ b/docs/en/tutorial/index.rst
@@ -6,6 +6,8 @@ Tutorials
:maxdepth: 1
standalone_vs_cluster
+ modes
tutorial_sql_1
tutorial_sql_2
- data_import
\ No newline at end of file
+ data_import
+ openmldbspark_distribution
diff --git a/docs/en/tutorial/modes.md b/docs/en/tutorial/modes.md
new file mode 100644
index 00000000000..22c0e985586
--- /dev/null
+++ b/docs/en/tutorial/modes.md
@@ -0,0 +1,136 @@
+# The Workflow of Cluster Version and Execution Mode
+
+OpenMLDB provides different execution modes at different stages of the feature engineering workflow. The execution modes are classified in detail to match each working step, especially for the cluster version, which is often used in production environments. This manual walks through the whole process, from feature extraction to online deployment, and the execution mode that corresponds to each stage.
+
+
+## 1. Overview of OpenMLDB Workflow
+
+### 1.1 The Whole Workflow of Feature Engineering
+
+The typical feature engineering process based on OpenMLDB, from feature extraction to online deployment, is as follows.
+
+1. **Offline Data Import**
+
+ Offline data should be imported in this stage for subsequent offline feature extraction.
+
+2. **Offline Feature Development**
+
+ The feature engineering script is developed and optimized until its quality is satisfactory. Note that machine learning model development and tuning are involved in this step as well. However, this article only focuses on the feature engineering based on OpenMLDB.
+
+3. **Online Deployment for Feature Scripts**
+
+ After a satisfactory feature extraction script is obtained, it is deployed online.
+
+4. **Import Data for Cold-start**
+
+ Data in the windows of the online storage engine must be imported before the service goes online. For example, if the script aggregates data for the last three months, the data of those three months needs to be imported for cold-start.
+
+5. **Real-time Data Import**
+
+ After the system is deployed online, the latest data needs to be imported to maintain the window computing logic as time goes by. Therefore, real-time data import is required.
+
+6. **Online Data Preview (Optional)**
+
+ You can preview online data by running SQL commands in this stage.
+
+7. **Request Service in Real Time**
+
+ After the solution is deployed and the input data stream is correctly connected, a real-time feature computing service is ready to respond to real-time requests.
+
+
+
+### 1.2 Overview of Cluster Execution Modes
+
+Since the data objects in offline and online scenarios are different, their underlying storage and computing nodes are different as well. Therefore, OpenMLDB provides different execution modes to complete the process described in 1.1. The following table summarizes the execution mode used for each step of feature engineering. Important concepts about the execution modes are introduced later.
+
+
+| Stage | Execution Mode | Development Tool | Introduction |
+| ------------------------------------------ | ----------------------- | -------------------------- | ------------------------------------------------------------ |
+| 1. **Offline Data Import** | the offline mode | CLI | - `LOAD DATA` command |
+| 2. **Offline Feature Development** | the offline mode | CLI | - all SQL statements of OpenMLDB are supported <br /> - some SQL queries (e.g., `SELECT`) run in non-blocking asynchronous mode |
+| 3. **Feature Extraction Plan Deployment** | the offline mode | CLI | - `DEPLOY` command |
+| 4. **Import Data for Cold-start** | the online preview mode | CLI, Import Tools | - `LOAD DATA` command for the CLI <br /> - or you can use the independent import tool `openmldb-import` |
+| 5. **Real-time Data Import** | the online preview mode | REST APIs, Java/Python SDK | - data insert APIs of OpenMLDB are called by third-party data sources to import real-time data |
+| 6. **Online Data Preview (Optional)** | the online preview mode | CLI, Java/Python SDK | - currently, only `SELECT` on columns, expressions and single-row functions is supported for data preview <br /> - complex computations like `LAST JOIN`, `GROUP BY`, `HAVING`, `WINDOW` are not supported for the time being |
+| 7. **Real-time Feature Processing** | the online request mode | REST APIs, Java/Python SDK | - all SQL syntax of OpenMLDB is supported <br /> - both the REST APIs and the Java SDK support single-row and batch requests <br /> - the Python SDK only supports single-row requests |
+
+As shown in the table above, the execution modes can be categorized as `the offline mode`, `the online preview mode` and `the online request mode`. The following figure summarizes the entire feature engineering process and the corresponding execution modes. A detailed introduction to each of these modes is given later in this page.
+
+![image-20220310170024349](images/modes-flow-en.png)
+
+### 1.3 Notes for the Standalone Version
+
+Although this doc focuses on the cluster version, it is helpful to briefly describe the execution modes of the standalone version. The execution modes of the standalone version are relatively simple. Because its offline data and online data share the same storage and computing nodes, the standalone version does not distinguish between the offline and online modes; that is, the standalone version has no concept of execution mode when using the CLI, and any SQL syntax supported by OpenMLDB can run directly on the CLI. Therefore, the standalone version is especially suitable for quickly evaluating OpenMLDB and learning OpenMLDB SQL. However, in the stage of **real-time feature processing**, the standalone version still runs in the online request mode, the same as the cluster version.
+
+:::{note}
+If you only want to try OpenMLDB in a non-production environment, or learn and practice SQL, it is highly recommended to use the standalone version because of its faster and easier deployment.
+:::
+
+## 2. The Offline Mode
+
+As mentioned earlier, the offline data import, offline feature development and feature extraction plan deployment stages of the cluster version all run in the offline mode. The management and computing of offline data are completed in this mode. The computing nodes are supported by a Spark distribution that has [been optimized for feature engineering by OpenMLDB](./openmldbspark_distribution.md), and common storage systems such as HDFS can be used as the storage nodes.
+
+The offline mode has the following main features.
+
+- Offline mode supports all SQL syntax provided by OpenMLDB, including extended and optimized LAST JOIN, WINDOW UNION and other complicated SQL queries.
+
+- In the offline mode, some SQL commands run in non-blocking asynchronous mode. These commands include `LOAD DATA`, `SELECT` and `SELECT INTO`.
+
+- The above-mentioned non-blocking SQL commands are managed by the TaskManager. The following commands can be used to check and manage their execution.
+
+ ```bash
+ SHOW JOBS
+ SHOW JOB
+ STOP JOB
+ ```
+
++ :::{Tip}
+ Please note that the `SELECT` command runs asynchronously in the offline mode, which is quite different from most common relational databases. Therefore, it is highly recommended to use `SELECT INTO` instead of `SELECT` for development and debugging. With `SELECT INTO`, the results can be exported to an external file and checked.
+ :::
+
++ The feature extraction plan deployment command `DEPLOY` also executes in the offline mode.
+:::{note}
+Deployment places certain requirements on the SQL script, see [The Specification and Requirements of OpenMLDB SQL Deployment](../../reference/sql/deployment_manage/ONLINE_REQUEST_REQUIREMENTS.md) for more details.
+:::
+The offline mode can be set in the following ways:
+
+- CLI: `SET @@execute_mode='offline'`
+
+ The default CLI mode is also offline.
+
+- REST APIs, Java/Python SDK: offline mode is not supported.
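+
+For example, a minimal offline-mode CLI session might look like the following sketch; the database name, table name and file paths are purely illustrative:
+
+```sql
+SET @@execute_mode='offline';
+USE demo_db;
+-- import offline data; this runs asynchronously
+LOAD DATA INFILE 'file:///tmp/t1.csv' INTO TABLE t1 options(format='csv', header=false, mode='append');
+-- export feature results to a file instead of SELECT-ing them interactively; also asynchronous
+SELECT col1, col2 FROM t1 INTO OUTFILE '/tmp/feature_out';
+-- check and manage the submitted jobs
+SHOW JOBS;
+```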
+
+## 3. The Online Preview Mode
+
+The online data import for cold-start, the real-time data import and the online data preview all run in the online preview mode. This mode is responsible for online data management and preview. Online data storage and computation are supported by the Tablet.
+
+The online preview mode has the following main features.
+
+- In the online preview mode, all commands are executed synchronously except the `LOAD DATA` command, which is used to import online data. `LOAD DATA` is executed asynchronously in non-blocking mode, in the same way as it is executed in the offline mode.
+- Currently, only simple `SELECT` statements on columns are supported in the online preview mode for viewing data; complex SQL queries are not supported. As a result, this execution mode is not suitable for SQL feature development and optimization, which should be completed in the offline mode or with the standalone version.
+
+The online preview mode can be set in the following ways:
+
+- CLI: `SET @@execute_mode='online'`
+- REST APIs, Java/Python SDK: these tools can only be executed in the online mode.
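+
+For example, a minimal online-preview CLI session might look like the following sketch; the database name, table name, columns and file path are purely illustrative:
+
+```sql
+SET @@execute_mode='online';
+USE demo_db;
+-- LOAD DATA is still asynchronous in the online preview mode
+LOAD DATA INFILE 'file:///tmp/t1.csv' INTO TABLE t1 options(format='csv', header=false, mode='append');
+-- only simple SELECT statements on columns are supported for preview
+SELECT col1, col2 FROM t1;
+```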
+
+## 4. The Online Request Mode
+
+After the feature extraction plan is deployed and the online data is imported, the real-time feature extraction service is ready, and real-time features can be extracted through the online request mode, which is supported by the REST APIs and the Java/Python SDKs. The online request mode is OpenMLDB's unique execution mode for real-time online computation, and it is distinct from common SQL queries in other databases.
+
+The online request mode requires the following three inputs.
+
+1. **SQL feature script**, which is the SQL script used during the feature deployment stage; it defines the computing logic for feature extraction.
+2. **Online data**, i.e. the window data imported for cold-start and the real-time data imported after cold-start. Generally, it is the latest data in the time window defined by the feature script. For example, if the aggregation function of an SQL script defines a time window covering the last three months, the online storage needs to keep the corresponding data of the last three months.
+3. **A real-time request row**, containing the real-time behaviors that are currently taking place. The real-time request row is used for real-time feature extraction, such as the credit card information in anti-fraud scenarios or the search keywords in recommendation scenarios.
+
+Based on these three inputs, the online request mode returns a feature extraction result for each real-time request row. Its computing logic is as follows: the request row is virtually inserted into the correct position of the online data table according to the logic of the SQL script (such as `PARTITION BY`, `ORDER BY`, etc.), then the feature script is applied to that row, and finally the resulting features are returned. The following diagram illustrates the procedure of the online request mode.
+
+![modes-request](images/modes-request-en.png)
+
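+To make the computing logic concrete, consider a minimal, hypothetical feature script of the kind that could be deployed for request-mode serving (the table and column names below are illustrative, not part of any OpenMLDB demo). For each incoming request row, the row is virtually inserted into the window defined by `PARTITION BY` and `ORDER BY`, the aggregation is evaluated over that window, and the resulting feature is returned:
+
+```sql
+SELECT user_id,
+       sum(amount) OVER w AS total_amount_last_10_rows
+FROM t_trx
+WINDOW w AS (PARTITION BY user_id ORDER BY trx_time
+             ROWS BETWEEN 10 PRECEDING AND CURRENT ROW);
+```
+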
+The online request mode can be set in the following ways:
+
+- CLI: the online request mode is not currently supported.
+- REST APIs: support single-row and batch **request row** requests, see [REST APIs](https://openmldb.ai/docs/en/main/quickstart/rest_api.html) for details.
+- Java SDK: supports single-row and batch **request row** requests, see [Java SDK Quickstart](https://openmldb.ai/docs/en/main/quickstart/java_sdk.html) for details.
+- Python SDK: only supports single **request row** requests, see [Python SDK Quickstart](https://openmldb.ai/docs/en/main/quickstart/python_sdk.html) for details.
\ No newline at end of file
diff --git a/docs/en/tutorial/openmldbspark_distribution.md b/docs/en/tutorial/openmldbspark_distribution.md
new file mode 100644
index 00000000000..292bc03d4fd
--- /dev/null
+++ b/docs/en/tutorial/openmldbspark_distribution.md
@@ -0,0 +1,91 @@
+# OpenMLDB Spark Distribution
+
+## Overview
+
+The OpenMLDB Spark distribution is a high-performance native Spark version optimized for feature engineering. Like the standard Spark distribution, OpenMLDB Spark provides Scala, Java, Python, and R programming interfaces. Users can use the OpenMLDB Spark in the same way as the standard Spark.
+
+GitHub Repo: https://github.com/4paradigm/Spark/
+
+## Download
+
+You can download the OpenMLDB Spark distribution in the [Release page](https://github.com/4paradigm/Spark/releases) of the repository mentioned above.
+```{note}
+The pre-compiled OpenMLDB Spark distribution is the AllinOne version, which supports Linux and MacOS operating systems. If you have special requirements, you can also download the source code and recompile it.
+```
+
+## Configuration
+OpenMLDB Spark supports the [standard Spark configuration](https://spark.apache.org/docs/latest/configuration.html). Furthermore, it provides new configuration options that take full advantage of the performance optimizations of its native execution engine.
+### New Configuration of the OpenMLDB Spark Distribution
+
+| Configuration | Function | Default Value | Note |
+|----------------------------------------------|---------------------------------------------------------------------------------|---------------------------|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------|
+| spark.openmldb.window.parallelization | It defines whether to enable the window parallelization. | false | Window parallelization can improve the efficiency when there is sufficient computing resource. |
+| spark.openmldb.addIndexColumn.method | It defines the method of adding indexes on columns. | monotonicallyIncreasingId | Options are `zipWithUniqueId`, `zipWithIndex`, `monotonicallyIncreasingId`. |
+| spark.openmldb.concatjoin.jointype | It defines the method of concatenating tables. | inner | Options are `inner`, `left`, `last`. |
+| spark.openmldb.enable.native.last.join | It defines whether to enable the native last join implementation. | true | When the value is `true`, it will have higher performance compared with the implementation based on `LEFT JOIN`. |
+| spark.openmldb.enable.unsaferow.optimization | It defines whether to enable the UnsafeRow memory optimization | false | When the value is `true`, it will use the UnsafeRow format for encoding to improve the performance. However, there are known issues when expressions are complicated. |
+| spark.openmldb.opt.unsaferow.project | It defines whether to enable the UnsafeRow memory optimization on PROJECT nodes. | false | When the value is `true`, it will reduce the overhead of encoding and decoding on PROJECT nodes but there are known issues for complicated expressions. |
+| spark.openmldb.opt.unsaferow.window | It defines whether to enable the UnsafeRow memory optimization on WINDOW nodes. | false | When the value is `true`, it will reduce the overhead of encoding and decoding on WINDOW nodes but there are known issues for complicated expressions. |
+| spark.openmldb.opt.join.spark_expr | It defines whether to use the Spark expression on JOIN clause. | true | When the value is `true`, it will use the Spark expression when processing JOIN clause. There are known issues when expressions are complicated as well. |
+| spark.openmldb.physical.plan.graphviz.path | It is the path that the physical plan image will be exported to. | "" | Image files are not exported by default. |
+
+* If there are multiple window computing tasks and enough resources, it is recommended to set `spark.openmldb.window.parallelization=true` in order to improve resource utilization and reduce runtime.
+* If the JOIN expression is too complicated, the execution may fail by default. It is recommended to set `spark.openmldb.opt.join.spark_expr=false` to ensure the program can run successfully.
+* If there are many columns in the input tables or intermediate tables, it is recommended to enable all three optimization options related to `UnsafeRow`, in order to reduce the cost of encoding/decoding and improve efficiency.
+
+## Usage
+
+### Using Example Jars
+
+The examples in the `Example Jars` can be executed directly after you install the OpenMLDB Spark distribution and set the `SPARK_HOME`.
+
+```bash
+export SPARK_HOME=`pwd`/spark-3.2.1-bin-openmldbspark/
+
+$SPARK_HOME/bin/spark-submit \
+ --master local \
+ --class org.apache.spark.examples.sql.SparkSQLExample \
+ $SPARK_HOME/examples/jars/spark-examples*.jar
+```
+
+```{note}
+- SparkSQLExample is an example provided with the standard Spark source code.
+- Some SQL examples use OpenMLDB Spark optimization for higher performance.
+- Some DataFrame examples do not support OpenMLDB Spark optimization.
+```
+### Using PySpark
+
+After installing the OpenMLDB Spark distribution, you can use the standard PySpark for development.
+
+```python
+from pyspark.sql import SparkSession
+from pyspark.sql import Row
+from pyspark.sql.types import *
+
+spark = SparkSession.builder.appName("demo").getOrCreate()
+print(spark.version)
+
+schema = StructType([
+ StructField("name", StringType(), nullable=True),
+ StructField("age", IntegerType(), nullable=True),
+])
+
+rows = [
+ Row("Andy", 20),
+ Row("Berta", 30),
+ Row("Joe", 40)
+]
+
+spark.createDataFrame(spark.sparkContext.parallelize(rows), schema).createOrReplaceTempView("t1")
+spark.sql("SELECT name, age + 1 FROM t1").show()
+
+```
+
+After saving the source file as `openmldbspark_demo.py`, you can use the following command to run the script locally.
+
+```
+${SPARK_HOME}/bin/spark-submit \
+ --master=local \
+ ./openmldbspark_demo.py
+```
+
diff --git a/docs/en/use_case/JD_recommendation_en.md b/docs/en/use_case/JD_recommendation_en.md
new file mode 100644
index 00000000000..d4ff5ee43b9
--- /dev/null
+++ b/docs/en/use_case/JD_recommendation_en.md
@@ -0,0 +1,610 @@
+
+# OpenMLDB + OneFlow: Prediction of Purchase Intention for High Potential Customers
+
+In this article, we will use the [JD Prediction of purchase intention for high potential customers problem](https://jdata.jd.com/html/detail.html?id=1) as a demonstration to show how we can use [OpenMLDB](https://github.com/4paradigm/OpenMLDB) and [OneFlow](https://github.com/Oneflow-Inc/oneflow) together to build a complete machine learning application. The full dataset can be [downloaded here](https://openmldb.ai/download/jd-recommendation/JD_data.tgz).
+
+
+Extracting patterns from historical data to predict future purchase intentions, so as to bring together the most suitable products and the customers who need them most, is the key issue in the application of big data to precision marketing, and is also the key technology in digitalization for all e-commerce platforms. As the largest self-operated e-commerce company in China, JD.com has accumulated hundreds of millions of loyal customers and massive amounts of real-life data. This demonstration is based on the real-life data, including real customer, product and behavior data (after desensitization) from Jingdong Mall, and utilizes data mining technology and machine learning algorithms to build a prediction model for user purchase intentions, outputting matching results between high-potential customers and target products. This aims to provide high-quality target groups for precision marketing, mine the potential meaning behind the data, and provide e-commerce customers with a simpler, faster and more worry-free shopping experience. In this demonstration, OpenMLDB is used for data mining, and the [DeepFM](https://github.com/Oneflow-Inc/models/tree/main/RecommenderSystems/deepfm) model in OneFlow is used for high-performance training and inference to provide accurate product recommendations.
+
+Note that: (1) this case is based on the OpenMLDB cluster version for tutorial demonstration; (2) this document uses the pre-compiled docker image. If you want to test it in the OpenMLDB environment compiled and built by yourself, you need to configure and use our [Spark Distribution for Feature Engineering Optimization](https://github.com/4paradigm/spark). Please refer to relevant documents of [compilation](https://openmldb.ai/docs/en/main/deploy/compile.html) (Refer to Chapter: "Spark Distribution Optimized for OpenMLDB") and the [installation and deployment documents](https://openmldb.ai/docs/en/main/deploy/install_deploy.html) (Refer to the section: [Deploy TaskManager](https://openmldb.ai/docs/en/main/deploy/install_deploy.html#deploy-taskmanager)).
+
+## 1. Preparation and Preliminary Knowledge
+### 1.1 OneFlow Installation
+The OneFlow framework leverages the great computational power of GPUs. Therefore, please ensure that the machines used for deployment are equipped with NVIDIA GPUs, and that the driver version is >=460.X.X ([driver version support for CUDA 11.0](https://docs.nvidia.com/cuda/cuda-toolkit-release-notes/index.html#cuda-major-component-versions)).
+Install OneFlow with the following commands:
+```bash
+conda activate oneflow
+python3 -m pip install -f https://staging.oneflow.info/branch/master/cu112 --pre oneflow
+```
+In addition, the following Python packages need to be installed:
+```bash
+pip install psutil petastorm pandas sklearn
+```
+Pull the Oneflow-serving docker image:
+```bash
+docker pull oneflowinc/oneflow-serving:nightly
+```
+```{note}
+Note that we are installing Oneflow nightly versions here. The versions tested in this guide are as follows:
+Oneflow:https://github.com/Oneflow-Inc/oneflow/tree/fcf205cf57989a5ecb7a756633a4be08444d8a28
+Oneflow-serving:https://github.com/Oneflow-Inc/serving/tree/ce5d667468b6b3ba66d3be6986f41f965e52cf16
+```
+
+
+### 1.2 Pull and Start the OpenMLDB Docker Image
+- Note: Please make sure that the Docker Engine version number is >= 18.03
+- Pull the OpenMLDB docker image and run the corresponding container
+- Download the demo files and map the demo directory to `/root/project`; here we use `demodir=/home/gtest/demo`. The demo files include the scripts and sample training data required for this case.
+```bash
+export demodir=/home/gtest/demo
+docker run -dit --name=demo --network=host -v $demodir:/root/project 4pdosc/openmldb:0.5.2 bash
+docker exec -it demo bash
+```
+- The image is preinstalled with OpenMLDB and some third-party libraries and tools, but we still need to install the dependencies related to OneFlow.
+
+Since the data pre-processing and the invocation of OneFlow serving are embedded in the OpenMLDB docker, the following dependencies need to be installed.
+```bash
+pip install tritonclient[all] xxhash geventhttpclient
+```
+
+```{note}
+Note that all the commands for the OpenMLDB part below run in the docker container by default, while all the commands for OneFlow run in the environment installed in section 1.1.
+```
+
+
+### 1.3 Initialize Environment
+
+```bash
+./init.sh
+```
+The `init.sh` script provided in the image helps users quickly initialize the environment, including:
+- Configuring ZooKeeper
+- Starting the cluster version of OpenMLDB
+
+### 1.4 Start OpenMLDB CLI Client
+```bash
+/work/openmldb/bin/openmldb --zk_cluster=127.0.0.1:2181 --zk_root_path=/openmldb --role=sql_client
+```
+```{note}
+Note that most of the commands in this tutorial are executed under the OpenMLDB CLI. In order to distinguish from the ordinary shell environment, the commands executed under the OpenMLDB CLI use a special prompt of >.
+```
+
+### 1.5 Preliminary Knowledge: Non-Blocking Task of Cluster Version
+Some commands in the cluster version are non-blocking tasks, including `LOAD DATA` in online mode and `LOAD DATA`, `SELECT`, `SELECT INTO` commands in the offline mode. After submitting a task, you can use relevant commands such as `SHOW JOBS` and `SHOW JOB` to view the task progress. For details, see the offline task management document.
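+
+A minimal sketch of checking and managing such tasks in the OpenMLDB CLI is shown below; the job ID `1` is illustrative:
+
+```sql
+-- list all submitted jobs and their states
+SHOW JOBS;
+-- show the details of the job whose ID is 1
+SHOW JOB 1;
+-- stop the job whose ID is 1 if necessary
+STOP JOB 1;
+```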
+
+## 2. Machine Learning Process Based on OpenMLDB and OneFlow
+
+### 2.1 Overview
+Machine learning with OpenMLDB and OneFlow can be summarized into a few main steps. We will detail each step in the following sections.
+
+### 2.2 Offline feature extraction with OpenMLDB
+#### 2.2.1 Creating Databases and Data Tables
+The following commands are executed in the OpenMLDB CLI environment.
+```sql
+> CREATE DATABASE JD_db;
+> USE JD_db;
+> CREATE TABLE action(reqId string, eventTime timestamp, ingestionTime timestamp, actionValue int);
+> CREATE TABLE flattenRequest(reqId string, eventTime timestamp, main_id string, pair_id string, user_id string, sku_id string, time bigint, split_id int, time1 string);
+> CREATE TABLE bo_user(ingestionTime timestamp, user_id string, age string, sex string, user_lv_cd string, user_reg_tm bigint);
+> CREATE TABLE bo_action(ingestionTime timestamp, pair_id string, time bigint, model_id string, type string, cate string, br string);
+> CREATE TABLE bo_product(ingestionTime timestamp, sku_id string, a1 string, a2 string, a3 string, cate string, br string);
+> CREATE TABLE bo_comment(ingestionTime timestamp, dt bigint, sku_id string, comment_num int, has_bad_comment string, bad_comment_rate float);
+```
+You can also execute the SQL script (`/root/project/create_tables.sql`) as shown below:
+
+```
+/work/openmldb/bin/openmldb --zk_cluster=127.0.0.1:2181 --zk_root_path=/openmldb --role=sql_client < /root/project/create_tables.sql
+```
+
+#### 2.2.2 Offline Data Preparation
+First, you need to switch to the offline execution mode. Then, import the sample data as offline data for offline feature calculation.
+
+The following commands are executed under the OpenMLDB CLI.
+
+```sql
+> USE JD_db;
+> SET @@execute_mode='offline';
+> LOAD DATA INFILE '/root/project/data/JD_data/action/*.parquet' INTO TABLE action options(format='parquet', header=true, mode='overwrite');
+> LOAD DATA INFILE '/root/project/data/JD_data/flattenRequest_clean/*.parquet' INTO TABLE flattenRequest options(format='parquet', header=true, mode='overwrite');
+> LOAD DATA INFILE '/root/project/data/JD_data/bo_user/*.parquet' INTO TABLE bo_user options(format='parquet', header=true, mode='overwrite');
+> LOAD DATA INFILE '/root/project/data/JD_data/bo_action/*.parquet' INTO TABLE bo_action options(format='parquet', header=true, mode='overwrite');
+> LOAD DATA INFILE '/root/project/data/JD_data/bo_product/*.parquet' INTO TABLE bo_product options(format='parquet', header=true, mode='overwrite');
+> LOAD DATA INFILE '/root/project/data/JD_data/bo_comment/*.parquet' INTO TABLE bo_comment options(format='parquet', header=true, mode='overwrite');
+```
+Or use the script to execute the import, and check the job status with the following commands:
+
+```
+/work/openmldb/bin/openmldb --zk_cluster=127.0.0.1:2181 --zk_root_path=/openmldb --role=sql_client < /root/project/load_data.sql
+
+echo "show jobs;" | /work/openmldb/bin/openmldb --zk_cluster=127.0.0.1:2181 --zk_root_path=/openmldb --role=sql_client
+```
+
+
+```{note}
+Note that `LOAD DATA` is a non-blocking task. You can use the command `SHOW JOBS` to view the running status of the task. Please wait for the task to run successfully (`state` to `FINISHED` status) before proceeding to the next step.
+```
+
+#### 2.2.3 The Feature Extraction Script
+Usually, users need to analyse the data according to the goal of machine learning before designing the features, and then design and investigate the features according to the analysis. Data analysis and feature research for machine learning are beyond the scope of this demo, and we will not expand on them. We assume that users already have basic theoretical knowledge of machine learning, the ability to solve machine learning problems, an understanding of SQL syntax, and the ability to use SQL syntax to construct features. For this case, we have designed several features after such analysis and research.
+
+#### 2.2.4 Offline Feature Extraction
+In the offline mode, the user extracts features and outputs the feature results to `/root/project/out/1` (mapped to `$demodir/out/1`), which is saved in the data directory for subsequent model training. The `SELECT` command corresponds to the SQL feature extraction script generated based on the above tables. The following commands are executed under the OpenMLDB CLI.
+```sql
+> USE JD_db;
+> select * from
+(
+select
+ `reqId` as reqId_1,
+ `eventTime` as flattenRequest_eventTime_original_0,
+ `reqId` as flattenRequest_reqId_original_1,
+ `pair_id` as flattenRequest_pair_id_original_24,
+ `sku_id` as flattenRequest_sku_id_original_25,
+ `user_id` as flattenRequest_user_id_original_26,
+ distinct_count(`pair_id`) over flattenRequest_user_id_eventTime_0_10_ as flattenRequest_pair_id_window_unique_count_27,
+ fz_top1_ratio(`pair_id`) over flattenRequest_user_id_eventTime_0_10_ as flattenRequest_pair_id_window_top1_ratio_28,
+ fz_top1_ratio(`pair_id`) over flattenRequest_user_id_eventTime_0s_14d_200 as flattenRequest_pair_id_window_top1_ratio_29,
+ distinct_count(`pair_id`) over flattenRequest_user_id_eventTime_0s_14d_200 as flattenRequest_pair_id_window_unique_count_32,
+ case when !isnull(at(`pair_id`, 0)) over flattenRequest_user_id_eventTime_0_10_ then count_where(`pair_id`, `pair_id` = at(`pair_id`, 0)) over flattenRequest_user_id_eventTime_0_10_ else null end as flattenRequest_pair_id_window_count_35,
+ dayofweek(timestamp(`eventTime`)) as flattenRequest_eventTime_dayofweek_41,
+ case when 1 < dayofweek(timestamp(`eventTime`)) and dayofweek(timestamp(`eventTime`)) < 7 then 1 else 0 end as flattenRequest_eventTime_isweekday_43
+from
+ `flattenRequest`
+ window flattenRequest_user_id_eventTime_0_10_ as (partition by `user_id` order by `eventTime` rows between 10 preceding and 0 preceding),
+ flattenRequest_user_id_eventTime_0s_14d_200 as (partition by `user_id` order by `eventTime` rows_range between 14d preceding and 0s preceding MAXSIZE 200))
+as out0
+last join
+(
+select
+ `flattenRequest`.`reqId` as reqId_3,
+ `action_reqId`.`actionValue` as action_actionValue_multi_direct_2,
+ `bo_product_sku_id`.`a1` as bo_product_a1_multi_direct_3,
+ `bo_product_sku_id`.`a2` as bo_product_a2_multi_direct_4,
+ `bo_product_sku_id`.`a3` as bo_product_a3_multi_direct_5,
+ `bo_product_sku_id`.`br` as bo_product_br_multi_direct_6,
+ `bo_product_sku_id`.`cate` as bo_product_cate_multi_direct_7,
+ `bo_product_sku_id`.`ingestionTime` as bo_product_ingestionTime_multi_direct_8,
+ `bo_user_user_id`.`age` as bo_user_age_multi_direct_9,
+ `bo_user_user_id`.`ingestionTime` as bo_user_ingestionTime_multi_direct_10,
+ `bo_user_user_id`.`sex` as bo_user_sex_multi_direct_11,
+ `bo_user_user_id`.`user_lv_cd` as bo_user_user_lv_cd_multi_direct_12
+from
+ `flattenRequest`
+ last join `action` as `action_reqId` on `flattenRequest`.`reqId` = `action_reqId`.`reqId`
+ last join `bo_product` as `bo_product_sku_id` on `flattenRequest`.`sku_id` = `bo_product_sku_id`.`sku_id`
+ last join `bo_user` as `bo_user_user_id` on `flattenRequest`.`user_id` = `bo_user_user_id`.`user_id`)
+as out1
+on out0.reqId_1 = out1.reqId_3
+last join
+(
+select
+ `reqId` as reqId_14,
+ max(`bad_comment_rate`) over bo_comment_sku_id_ingestionTime_0s_64d_100 as bo_comment_bad_comment_rate_multi_max_13,
+ min(`bad_comment_rate`) over bo_comment_sku_id_ingestionTime_0_10_ as bo_comment_bad_comment_rate_multi_min_14,
+ min(`bad_comment_rate`) over bo_comment_sku_id_ingestionTime_0s_64d_100 as bo_comment_bad_comment_rate_multi_min_15,
+ distinct_count(`comment_num`) over bo_comment_sku_id_ingestionTime_0s_64d_100 as bo_comment_comment_num_multi_unique_count_22,
+ distinct_count(`has_bad_comment`) over bo_comment_sku_id_ingestionTime_0s_64d_100 as bo_comment_has_bad_comment_multi_unique_count_23,
+ fz_topn_frequency(`has_bad_comment`, 3) over bo_comment_sku_id_ingestionTime_0s_64d_100 as bo_comment_has_bad_comment_multi_top3frequency_30,
+ fz_topn_frequency(`comment_num`, 3) over bo_comment_sku_id_ingestionTime_0s_64d_100 as bo_comment_comment_num_multi_top3frequency_33
+from
+ (select `eventTime` as `ingestionTime`, bigint(0) as `dt`, `sku_id` as `sku_id`, int(0) as `comment_num`, '' as `has_bad_comment`, float(0) as `bad_comment_rate`, reqId from `flattenRequest`)
+ window bo_comment_sku_id_ingestionTime_0s_64d_100 as (
+UNION (select `ingestionTime`, `dt`, `sku_id`, `comment_num`, `has_bad_comment`, `bad_comment_rate`, '' as reqId from `bo_comment`) partition by `sku_id` order by `ingestionTime` rows_range between 64d preceding and 0s preceding MAXSIZE 100 INSTANCE_NOT_IN_WINDOW),
+ bo_comment_sku_id_ingestionTime_0_10_ as (
+UNION (select `ingestionTime`, `dt`, `sku_id`, `comment_num`, `has_bad_comment`, `bad_comment_rate`, '' as reqId from `bo_comment`) partition by `sku_id` order by `ingestionTime` rows between 10 preceding and 0 preceding INSTANCE_NOT_IN_WINDOW))
+as out2
+on out0.reqId_1 = out2.reqId_14
+last join
+(
+select
+ `reqId` as reqId_17,
+ fz_topn_frequency(`br`, 3) over bo_action_pair_id_ingestionTime_0s_10h_100 as bo_action_br_multi_top3frequency_16,
+ fz_topn_frequency(`cate`, 3) over bo_action_pair_id_ingestionTime_0s_10h_100 as bo_action_cate_multi_top3frequency_17,
+ fz_topn_frequency(`model_id`, 3) over bo_action_pair_id_ingestionTime_0s_7d_100 as bo_action_model_id_multi_top3frequency_18,
+ distinct_count(`model_id`) over bo_action_pair_id_ingestionTime_0s_14d_100 as bo_action_model_id_multi_unique_count_19,
+ distinct_count(`model_id`) over bo_action_pair_id_ingestionTime_0s_7d_100 as bo_action_model_id_multi_unique_count_20,
+ distinct_count(`type`) over bo_action_pair_id_ingestionTime_0s_14d_100 as bo_action_type_multi_unique_count_21,
+ fz_topn_frequency(`type`, 3) over bo_action_pair_id_ingestionTime_0s_7d_100 as bo_action_type_multi_top3frequency_40,
+ fz_topn_frequency(`type`, 3) over bo_action_pair_id_ingestionTime_0s_14d_100 as bo_action_type_multi_top3frequency_42
+from
+ (select `eventTime` as `ingestionTime`, `pair_id` as `pair_id`, bigint(0) as `time`, '' as `model_id`, '' as `type`, '' as `cate`, '' as `br`, reqId from `flattenRequest`)
+ window bo_action_pair_id_ingestionTime_0s_10h_100 as (
+UNION (select `ingestionTime`, `pair_id`, `time`, `model_id`, `type`, `cate`, `br`, '' as reqId from `bo_action`) partition by `pair_id` order by `ingestionTime` rows_range between 10h preceding and 0s preceding MAXSIZE 100 INSTANCE_NOT_IN_WINDOW),
+ bo_action_pair_id_ingestionTime_0s_7d_100 as (
+UNION (select `ingestionTime`, `pair_id`, `time`, `model_id`, `type`, `cate`, `br`, '' as reqId from `bo_action`) partition by `pair_id` order by `ingestionTime` rows_range between 7d preceding and 0s preceding MAXSIZE 100 INSTANCE_NOT_IN_WINDOW),
+ bo_action_pair_id_ingestionTime_0s_14d_100 as (
+UNION (select `ingestionTime`, `pair_id`, `time`, `model_id`, `type`, `cate`, `br`, '' as reqId from `bo_action`) partition by `pair_id` order by `ingestionTime` rows_range between 14d preceding and 0s preceding MAXSIZE 100 INSTANCE_NOT_IN_WINDOW))
+as out3
+on out0.reqId_1 = out3.reqId_17
+INTO OUTFILE '/root/project/out/1';
+```
+Since there is only one command, we can directly execute the SQL script `sync_select_out.sql`:
+
+```
+/work/openmldb/bin/openmldb --zk_cluster=127.0.0.1:2181 --zk_root_path=/openmldb --role=sql_client < /root/project/sync_select_out.sql
+```
+```{note}
+Note that the cluster version `SELECT INTO` is a non-blocking task. You can use the command `SHOW JOBS` to check the running status of the task. Please wait for the task to run successfully (the `state` becomes `FINISHED`) before proceeding to the next step.
+```
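+
+For example, you can check the job status from the OpenMLDB CLI and repeat the command until the export job reaches the `FINISHED` state (a minimal sketch):
+```sql
+> SHOW JOBS;
+```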
+### 2.3 Pre-process Dataset to Match DeepFM Model Requirements
+```{note}
+Note that the following commands are executed outside the demo docker container, in the environment installed in section 1.1.
+```
+According to [DeepFM paper](https://arxiv.org/abs/1703.04247), we treat both categorical and continuous features as sparse features.
+
+> χ may include categorical fields (e.g., gender, location) and continuous fields (e.g., age). Each categorical field is represented as a vector of one-hot encoding, and each continuous field is represented as the value itself, or a vector of one-hot encoding after discretization.
+
+Change to the demo directory and execute the following commands to process the dataset.
+```bash
+cd $demodir/openmldb_process/
+bash process_JD_out_full.sh $demodir/out/1
+```
+The generated dataset will be placed at `$demodir/openmldb_process/out`. After the parquet dataset is generated, the dataset information will also be printed. It contains the number of samples and the table size array, which are needed for training.
+```
+train samples = 11073
+val samples = 1351
+test samples = 1492
+table size array:
+4,26,16,4,11,809,1,1,5,3,17,16,7,13916,13890,13916,10000,3674,9119,7,2,13916,5,4,4,33,2,2,7,2580,3,5,13916,10,47,13916,365,17,132,32,37
+```
+
+### 2.4 Launch OneFlow for Model Training
+```{note}
+Note that the following commands are executed in the environment installed in section 1.1.
+```
+#### 2.4.1 Update `train_deepfm.sh` Configuration File
+The dataset information generated in the previous section needs to be updated in the configuration file, including `num_train_samples`, `num_val_samples`, `num_test_samples` and `table_size_array`.
+```bash
+cd $demodir/oneflow_process/
+```
+After updating these values, `train_deepfm.sh` should look like the following:
+```bash
+#!/bin/bash
+DEVICE_NUM_PER_NODE=1
+demodir="$1"
+DATA_DIR=$demodir/openmldb_process/out
+PERSISTENT_PATH=/$demodir/oneflow_process/persistent
+MODEL_SAVE_DIR=$demodir/oneflow_process/model_out
+MODEL_SERVING_PATH=$demodir/oneflow_process/model/embedding/1/model
+
+python3 -m oneflow.distributed.launch \
+--nproc_per_node $DEVICE_NUM_PER_NODE \
+--nnodes 1 \
+--node_rank 0 \
+--master_addr 127.0.0.1 \
+deepfm_train_eval_JD.py \
+--disable_fusedmlp \
+--data_dir $DATA_DIR \
+--persistent_path $PERSISTENT_PATH \
+--table_size_array "4,26,16,4,11,809,1,1,5,3,17,16,7,13916,13890,13916,10000,3674,9119,7,2,13916,5,4,4,33,2,2,7,2580,3,5,13916,10,47,13916,365,17,132,32,37" \
+--store_type 'cached_host_mem' \
+--cache_memory_budget_mb 1024 \
+--batch_size 1000 \
+--train_batches 75000 \
+--loss_print_interval 100 \
+--dnn "1000,1000,1000,1000,1000" \
+--net_dropout 0.2 \
+--learning_rate 0.001 \
+--embedding_vec_size 16 \
+--num_train_samples 11073 \
+--num_val_samples 1351 \
+--num_test_samples 1492 \
+--model_save_dir $MODEL_SAVE_DIR \
+--save_best_model \
+--save_graph_for_serving \
+--model_serving_path $MODEL_SERVING_PATH \
+--save_model_after_each_eval
+```
+#### 2.4.2 Start Model Training
+```bash
+bash train_deepfm.sh $demodir
+```
+The trained model will be saved in `$demodir/oneflow_process/model_out`, and the model for serving will be saved in `$demodir/oneflow_process/model/embedding/1/model`.
+
+## 3. Model Serving
+### 3.1 Overview
+Model serving with OpenMLDB+OneFlow can be summarized into a few main steps. We will detail each step in the following sections.
+
+### 3.2 Configure OpenMLDB for Online Feature Extraction
+
+#### 3.2.1 Online SQL Deployment
+Assuming that the model produced with the features designed in Section 2.2.3 meets expectations during the previous model training, the next step is to deploy the feature extraction SQL script online to provide real-time feature extraction.
+
+1. Restart OpenMLDB CLI for SQL online deployment.
+ ```bash
+ docker exec -it demo bash
+ /work/openmldb/bin/openmldb --zk_cluster=127.0.0.1:2181 --zk_root_path=/openmldb --role=sql_client
+ ```
+2. To execute the online deployment, run the following commands in the OpenMLDB CLI.
+```sql
+> USE JD_db;
+> SET @@execute_mode='online';
+> deploy demo select * from
+(
+select
+ `reqId` as reqId_1,
+ `eventTime` as flattenRequest_eventTime_original_0,
+ `reqId` as flattenRequest_reqId_original_1,
+ `pair_id` as flattenRequest_pair_id_original_24,
+ `sku_id` as flattenRequest_sku_id_original_25,
+ `user_id` as flattenRequest_user_id_original_26,
+ distinct_count(`pair_id`) over flattenRequest_user_id_eventTime_0_10_ as flattenRequest_pair_id_window_unique_count_27,
+ fz_top1_ratio(`pair_id`) over flattenRequest_user_id_eventTime_0_10_ as flattenRequest_pair_id_window_top1_ratio_28,
+ fz_top1_ratio(`pair_id`) over flattenRequest_user_id_eventTime_0s_14d_200 as flattenRequest_pair_id_window_top1_ratio_29,
+ distinct_count(`pair_id`) over flattenRequest_user_id_eventTime_0s_14d_200 as flattenRequest_pair_id_window_unique_count_32,
+ case when !isnull(at(`pair_id`, 0)) over flattenRequest_user_id_eventTime_0_10_ then count_where(`pair_id`, `pair_id` = at(`pair_id`, 0)) over flattenRequest_user_id_eventTime_0_10_ else null end as flattenRequest_pair_id_window_count_35,
+ dayofweek(timestamp(`eventTime`)) as flattenRequest_eventTime_dayofweek_41,
+ case when 1 < dayofweek(timestamp(`eventTime`)) and dayofweek(timestamp(`eventTime`)) < 7 then 1 else 0 end as flattenRequest_eventTime_isweekday_43
+from
+ `flattenRequest`
+ window flattenRequest_user_id_eventTime_0_10_ as (partition by `user_id` order by `eventTime` rows between 10 preceding and 0 preceding),
+ flattenRequest_user_id_eventTime_0s_14d_200 as (partition by `user_id` order by `eventTime` rows_range between 14d preceding and 0s preceding MAXSIZE 200))
+as out0
+last join
+(
+select
+ `flattenRequest`.`reqId` as reqId_3,
+ `action_reqId`.`actionValue` as action_actionValue_multi_direct_2,
+ `bo_product_sku_id`.`a1` as bo_product_a1_multi_direct_3,
+ `bo_product_sku_id`.`a2` as bo_product_a2_multi_direct_4,
+ `bo_product_sku_id`.`a3` as bo_product_a3_multi_direct_5,
+ `bo_product_sku_id`.`br` as bo_product_br_multi_direct_6,
+ `bo_product_sku_id`.`cate` as bo_product_cate_multi_direct_7,
+ `bo_product_sku_id`.`ingestionTime` as bo_product_ingestionTime_multi_direct_8,
+ `bo_user_user_id`.`age` as bo_user_age_multi_direct_9,
+ `bo_user_user_id`.`ingestionTime` as bo_user_ingestionTime_multi_direct_10,
+ `bo_user_user_id`.`sex` as bo_user_sex_multi_direct_11,
+ `bo_user_user_id`.`user_lv_cd` as bo_user_user_lv_cd_multi_direct_12
+from
+ `flattenRequest`
+ last join `action` as `action_reqId` on `flattenRequest`.`reqId` = `action_reqId`.`reqId`
+ last join `bo_product` as `bo_product_sku_id` on `flattenRequest`.`sku_id` = `bo_product_sku_id`.`sku_id`
+ last join `bo_user` as `bo_user_user_id` on `flattenRequest`.`user_id` = `bo_user_user_id`.`user_id`)
+as out1
+on out0.reqId_1 = out1.reqId_3
+last join
+(
+select
+ `reqId` as reqId_14,
+ max(`bad_comment_rate`) over bo_comment_sku_id_ingestionTime_0s_64d_100 as bo_comment_bad_comment_rate_multi_max_13,
+ min(`bad_comment_rate`) over bo_comment_sku_id_ingestionTime_0_10_ as bo_comment_bad_comment_rate_multi_min_14,
+ min(`bad_comment_rate`) over bo_comment_sku_id_ingestionTime_0s_64d_100 as bo_comment_bad_comment_rate_multi_min_15,
+ distinct_count(`comment_num`) over bo_comment_sku_id_ingestionTime_0s_64d_100 as bo_comment_comment_num_multi_unique_count_22,
+ distinct_count(`has_bad_comment`) over bo_comment_sku_id_ingestionTime_0s_64d_100 as bo_comment_has_bad_comment_multi_unique_count_23,
+ fz_topn_frequency(`has_bad_comment`, 3) over bo_comment_sku_id_ingestionTime_0s_64d_100 as bo_comment_has_bad_comment_multi_top3frequency_30,
+ fz_topn_frequency(`comment_num`, 3) over bo_comment_sku_id_ingestionTime_0s_64d_100 as bo_comment_comment_num_multi_top3frequency_33
+from
+ (select `eventTime` as `ingestionTime`, bigint(0) as `dt`, `sku_id` as `sku_id`, int(0) as `comment_num`, '' as `has_bad_comment`, float(0) as `bad_comment_rate`, reqId from `flattenRequest`)
+ window bo_comment_sku_id_ingestionTime_0s_64d_100 as (
+UNION (select `ingestionTime`, `dt`, `sku_id`, `comment_num`, `has_bad_comment`, `bad_comment_rate`, '' as reqId from `bo_comment`) partition by `sku_id` order by `ingestionTime` rows_range between 64d preceding and 0s preceding MAXSIZE 100 INSTANCE_NOT_IN_WINDOW),
+ bo_comment_sku_id_ingestionTime_0_10_ as (
+UNION (select `ingestionTime`, `dt`, `sku_id`, `comment_num`, `has_bad_comment`, `bad_comment_rate`, '' as reqId from `bo_comment`) partition by `sku_id` order by `ingestionTime` rows between 10 preceding and 0 preceding INSTANCE_NOT_IN_WINDOW))
+as out2
+on out0.reqId_1 = out2.reqId_14
+last join
+(
+select
+ `reqId` as reqId_17,
+ fz_topn_frequency(`br`, 3) over bo_action_pair_id_ingestionTime_0s_10h_100 as bo_action_br_multi_top3frequency_16,
+ fz_topn_frequency(`cate`, 3) over bo_action_pair_id_ingestionTime_0s_10h_100 as bo_action_cate_multi_top3frequency_17,
+ fz_topn_frequency(`model_id`, 3) over bo_action_pair_id_ingestionTime_0s_7d_100 as bo_action_model_id_multi_top3frequency_18,
+ distinct_count(`model_id`) over bo_action_pair_id_ingestionTime_0s_14d_100 as bo_action_model_id_multi_unique_count_19,
+ distinct_count(`model_id`) over bo_action_pair_id_ingestionTime_0s_7d_100 as bo_action_model_id_multi_unique_count_20,
+ distinct_count(`type`) over bo_action_pair_id_ingestionTime_0s_14d_100 as bo_action_type_multi_unique_count_21,
+ fz_topn_frequency(`type`, 3) over bo_action_pair_id_ingestionTime_0s_7d_100 as bo_action_type_multi_top3frequency_40,
+ fz_topn_frequency(`type`, 3) over bo_action_pair_id_ingestionTime_0s_14d_100 as bo_action_type_multi_top3frequency_42
+from
+ (select `eventTime` as `ingestionTime`, `pair_id` as `pair_id`, bigint(0) as `time`, '' as `model_id`, '' as `type`, '' as `cate`, '' as `br`, reqId from `flattenRequest`)
+ window bo_action_pair_id_ingestionTime_0s_10h_100 as (
+UNION (select `ingestionTime`, `pair_id`, `time`, `model_id`, `type`, `cate`, `br`, '' as reqId from `bo_action`) partition by `pair_id` order by `ingestionTime` rows_range between 10h preceding and 0s preceding MAXSIZE 100 INSTANCE_NOT_IN_WINDOW),
+ bo_action_pair_id_ingestionTime_0s_7d_100 as (
+UNION (select `ingestionTime`, `pair_id`, `time`, `model_id`, `type`, `cate`, `br`, '' as reqId from `bo_action`) partition by `pair_id` order by `ingestionTime` rows_range between 7d preceding and 0s preceding MAXSIZE 100 INSTANCE_NOT_IN_WINDOW),
+ bo_action_pair_id_ingestionTime_0s_14d_100 as (
+UNION (select `ingestionTime`, `pair_id`, `time`, `model_id`, `type`, `cate`, `br`, '' as reqId from `bo_action`) partition by `pair_id` order by `ingestionTime` rows_range between 14d preceding and 0s preceding MAXSIZE 100 INSTANCE_NOT_IN_WINDOW))
+as out3
+on out0.reqId_1 = out3.reqId_17;
+```
+Alternatively, the deployment statements can be executed as a SQL script:
+```
+/work/openmldb/bin/openmldb --zk_cluster=127.0.0.1:2181 --zk_root_path=/openmldb --role=sql_client < /root/project/deploy.sql
+```
+
+Use the following command to check the deployment details:
+```sql
+show deployment demo;
+```
+#### 3.2.2 Online Data Import
+We need to import data for real-time feature extraction. First, switch to the **online** execution mode. Then, in the online mode, import the sample data as the online data source. The following commands are executed in the OpenMLDB CLI.
+```sql
+> USE JD_db;
+> SET @@execute_mode='online';
+> LOAD DATA INFILE '/root/project/data/JD_data/action/*.parquet' INTO TABLE action options(format='parquet', header=true, mode='append');
+> LOAD DATA INFILE '/root/project/data/JD_data/flattenRequest_clean/*.parquet' INTO TABLE flattenRequest options(format='parquet', header=true, mode='append');
+> LOAD DATA INFILE '/root/project/data/JD_data/bo_user/*.parquet' INTO TABLE bo_user options(format='parquet', header=true, mode='append');
+> LOAD DATA INFILE '/root/project/data/JD_data/bo_action/*.parquet' INTO TABLE bo_action options(format='parquet', header=true, mode='append');
+> LOAD DATA INFILE '/root/project/data/JD_data/bo_product/*.parquet' INTO TABLE bo_product options(format='parquet', header=true, mode='append');
+> LOAD DATA INFILE '/root/project/data/JD_data/bo_comment/*.parquet' INTO TABLE bo_comment options(format='parquet', header=true, mode='append');
+```
+
+Alternatively, the import statements can be executed as a SQL script:
+```
+/work/openmldb/bin/openmldb --zk_cluster=127.0.0.1:2181 --zk_root_path=/openmldb --role=sql_client < /root/project/load_online_data.sql
+```
+```{note}
+Note that the cluster version `LOAD DATA` is a non-blocking task. You can use the command `SHOW JOBS` to check the running status of the tasks. Please wait for the tasks to run successfully (the `state` becomes `FINISHED`) before proceeding to the next step.
+```
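+
+Once the import jobs have finished, you can optionally confirm that the online tables are populated before moving on, for example (a sketch; `SHOW TABLE STATUS` is available in recent OpenMLDB versions):
+```sql
+> USE JD_db;
+> SHOW TABLE STATUS;
+```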
+
+### 3.3 Configure OneFlow Model Serving
+
+#### 3.3.1 Check Model Path (`$demodir/oneflow_process/model`)
+Check if model files are correctly organized and saved as shown below:
+```
+$ tree -L 5 model/
+model/
+└── embedding
+ ├── 1
+ │ └── model
+ │ ├── model.mlir
+ │ ├── module.dnn_layer.linear_layers.0.bias
+ │ │ ├── meta
+ │ │ └── out
+ │ ├── module.dnn_layer.linear_layers.0.weight
+ │ │ ├── meta
+ │ │ └── out
+ │ ├── module.dnn_layer.linear_layers.12.bias
+ │ │ ├── meta
+ │ │ └── out
+ │ ├── module.dnn_layer.linear_layers.12.weight
+ │ │ ├── meta
+ │ │ └── out
+ │ ├── module.dnn_layer.linear_layers.15.bias
+ │ │ ├── meta
+ │ │ └── out
+ │ ├── module.dnn_layer.linear_layers.15.weight
+ │ │ ├── meta
+ │ │ └── out
+ │ ├── module.dnn_layer.linear_layers.3.bias
+ │ │ ├── meta
+ │ │ └── out
+ │ ├── module.dnn_layer.linear_layers.3.weight
+ │ │ ├── meta
+ │ │ └── out
+ │ ├── module.dnn_layer.linear_layers.6.bias
+ │ │ ├── meta
+ │ │ └── out
+ │ ├── module.dnn_layer.linear_layers.6.weight
+ │ │ ├── meta
+ │ │ └── out
+ │ ├── module.dnn_layer.linear_layers.9.bias
+ │ │ ├── meta
+ │ │ └── out
+ │ ├── module.dnn_layer.linear_layers.9.weight
+ │ │ ├── meta
+ │ │ └── out
+ │ ├── module.embedding_layer.one_embedding.shadow
+ │ │ ├── meta
+ │ │ └── out
+ │ └── one_embedding_options.json
+ └── config.pbtxt
+```
+
+#### 3.3.2 Check `config.pbtxt` Configurations
+```
+name: "embedding"
+backend: "oneflow"
+max_batch_size: 10000
+input [
+ {
+ name: "INPUT_0"
+ data_type: TYPE_INT64
+ dims: [ 41 ]
+ }
+]
+output [
+ {
+ name: "OUTPUT_0"
+ data_type: TYPE_FP32
+ dims: [ 1 ]
+ }
+]
+instance_group [
+ {
+ count: 1
+ kind: KIND_GPU
+ gpus: [ 0 ]
+ }
+]
+```
+The `name` field in `config.pbtxt` should be consistent with the name of the model folder.
+
+#### 3.3.3 Change Persistent Path
+Change the persistent table path in `one_embedding_options.json`: set `embedding/kv_options/kv_store/persistent_table/path` to the persistent table location inside the docker container, `/root/demo/persistent`.
+```
+{
+ "embedding": [
+ {
+ "snapshot": "2022-09-29-03-27-44-953674",
+ "kv_options": {
+ "name": "sparse_embedding",
+ "key_type_size": 8,
+ "value_type_size": 4,
+ "value_type": "oneflow.float32",
+ "storage_dim": 51,
+ "kv_store": {
+ "caches": [
+ {
+ "policy": "lru",
+ "cache_memory_budget_mb": 1024,
+ "value_memory_kind": "device"
+ },
+ {
+ "policy": "full",
+ "capacity": 110477,
+ "value_memory_kind": "host"
+ }
+ ],
+ "persistent_table": {
+ "path": "/root/demo/persistent",
+ "physical_block_size": 4096,
+ "capacity_hint": 110477
+ }
+ },
+ "parallel_num": 1
+ }
+ }
+ ]
+}
+```
+
+### 3.4 Start Serving
+#### 3.4.1 Start OneFlow Model Serving
+```{note}
+Note that the following commands are executed in the environment installed in section 1.1.
+```
+Start OneFlow model serving with the following command:
+```bash
+docker run --runtime=nvidia --rm --network=host \
+  -v $demodir/oneflow_process/model:/models \
+  -v $demodir/oneflow_process/persistent:/root/demo/persistent \
+  oneflowinc/oneflow-serving:nightly \
+  bash -c '/opt/tritonserver/bin/tritonserver --model-repository=/models --backend-directory=/backends'
+```
+If successful, the output will look like the following:
+```
+...
+I0929 07:28:34.281655 1 grpc_server.cc:4117] Started GRPCInferenceService at 0.0.0.0:8001
+I0929 07:28:34.282343 1 http_server.cc:2815] Started HTTPService at 0.0.0.0:8000
+I0929 07:28:34.324662 1 http_server.cc:167] Started Metrics Service at 0.0.0.0:8002
+```
+
+#### 3.4.2 Start OpenMLDB Serving
+```{note}
+Note that the following commands are executed in the demo docker container.
+```
+OpenMLDB online feature extraction has been deployed, and OneFlow model serving has been started. This demo connects the two services: after a real-time request is received, OpenMLDB is first engaged for feature extraction, and the extracted features are then passed to OneFlow model serving for inference.
+1. If you have not exited the OpenMLDB CLI, use the `quit` command to exit the OpenMLDB CLI.
+2. Start the prediction service from the command line:
+```bash
+cd /root/project/serving/openmldb_serving
+./start_predict_server.sh 0.0.0.0:9080
+```
+
+### 3.5 Send Real-Time Request
+Requests can be sent from outside the OpenMLDB docker container. The details can be found in [IP Configuration](https://openmldb.ai/docs/en/main/reference/ip_tips.html).
+
+Execute `predict.py` in a command window. This script sends one line of request data to the prediction service, then receives and prints the results.
+
+```bash
+python $demodir/serving/predict.py
+```
+Sample output:
+```
+----------------ins---------------
+['200001_80005_2016-03-31 18:11:20' 1459419080000
+ '200001_80005_2016-03-31 18:11:20' '200001_80005' '80005' '200001' 1 1.0
+ 1.0 1 1 5 1 '200001_80005_2016-03-31 18:11:20' None None None None None
+ None None None None None None '200001_80005_2016-03-31 18:11:20'
+ 0.019200000911951065 0.0 0.0 2 2 '1,,NULL' '4,0,NULL'
+ '200001_80005_2016-03-31 18:11:20' ',NULL,NULL' ',NULL,NULL' ',NULL,NULL'
+ 1 1 1 ',NULL,NULL' ',NULL,NULL']
+---------------predict change of purchase -------------
+[[b'0.006222:0']]
+```
diff --git a/docs/en/use_case/OpenMLDB_Byzer_taxi.md b/docs/en/use_case/OpenMLDB_Byzer_taxi.md
new file mode 100644
index 00000000000..9554f77ea87
--- /dev/null
+++ b/docs/en/use_case/OpenMLDB_Byzer_taxi.md
@@ -0,0 +1,276 @@
+# Build End-to-end Machine Learning Applications Based on SQL (OpenMLDB + Byzer)
+
+This tutorial will show you how to complete a machine learning workflow with the help of [OpenMLDB](https://github.com/4paradigm/OpenMLDB) and [Byzer](https://www.byzer.org/home).
+OpenMLDB will compute real-time features based on the data and queries from Byzer, and then return results to Byzer for subsequent model training and inference.
+
+## 1. Preparations
+
+### 1.1 Install OpenMLDB
+
+1. The demo will use the OpenMLDB cluster version running in Docker. See [OpenMLDB Quickstart](../quickstart/openmldb_quickstart.md) for detailed installation procedures.
+2. Please modify the OpenMLDB IP configuration so that the Byzer engine can access the OpenMLDB service from outside the container. See [IP Configuration](../reference/ip_tips.md) for detailed guidance.
+
+### 1.2 Install the Byzer Engine and the Byzer Notebook
+
+1. For detailed installation procedures of the Byzer engine, see [Byzer Language Doc](https://docs.byzer.org/#/byzer-lang/en-us/).
+
+2. We have to use the [OpenMLDB plugin](https://github.com/byzer-org/byzer-extension/tree/master/byzer-openmldb) developed by Byzer to transmit messages between the two platforms. To use a plugin in Byzer, please configure `streaming.datalake.path`; see [the manual of Byzer Configuration](https://docs.byzer.org/#/byzer-lang/zh-cn/installation/configuration/byzer-lang-configuration) for details.
+
+3. Byzer Notebook is used in this demo. Please install it after installing the Byzer engine. You can also use the [VSCode Byzer plugin](https://docs.byzer.org/#/byzer-lang/zh-cn/installation/vscode/byzer-vscode-extension-installation) to connect to your Byzer engine. The interface of Byzer Notebook is shown below; see [Byzer Notebook Doc](https://docs.byzer.org/#/byzer-notebook/zh-cn/) for more information.
+
+![Byzer_Notebook](images/Byzer_Notebook.jpg)
+
+
+### 1.3 Dataset Preparation
+In this case, the dataset comes from the Kaggle taxi trip duration prediction problem. If it is not in your Byzer `Deltalake`, [download](https://www.kaggle.com/c/nyc-taxi-trip-duration/overview) it first. Please remember to import it into Byzer Notebook after downloading.
+
+
+## 2. The Workflow of Machine Learning
+
+### 2.1 Load the Dataset
+
+Please import the original dataset into the `File System` of Byzer Notebook; it will automatically generate the storage path `tmp/upload`.
+Use the `load` Byzer Lang command as shown below to load this dataset.
+```sql
+load csv.`tmp/upload/train.csv` where delimiter=","
+and header = "true"
+as taxi_tour_table_train_simple;
+```
+
+### 2.2 Import the Dataset into OpenMLDB
+
+Install the OpenMLDB plugin in Byzer.
+
+```sql
+!plugin app add - "byzer-openmldb-3.0";
+```
+
+Now you can use this plugin to connect to OpenMLDB. **Please make sure the OpenMLDB engine has started and there is a database named `db1` before you run the following `FeatureStoreExt` code block in Byzer Notebook.**
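+
+If the database does not exist yet, you can create it first in the OpenMLDB CLI (a minimal sketch):
+```sql
+CREATE DATABASE db1;
+```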
+
+```sql
+run command as FeatureStoreExt.`` where
+zkAddress="172.17.0.2:7527"
+and `sql-0`='''
+SET @@execute_mode='offline';
+'''
+and `sql-1`='''
+SET @@job_timeout=20000000;
+'''
+and `sql-2`='''
+CREATE TABLE t1(id string, vendor_id int, pickup_datetime timestamp, dropoff_datetime timestamp, passenger_count int, pickup_longitude double, pickup_latitude double, dropoff_longitude double, dropoff_latitude double, store_and_fwd_flag string, trip_duration int);
+'''
+and `sql-3`='''
+LOAD DATA INFILE 'tmp/upload/train.csv'
+INTO TABLE t1 options(format='csv',header=true,mode='append');
+'''
+and db="db1"
+and action="ddl";
+```
+
+```{note}
+1. The `zkAddress` port number should match the IP configuration in the files under the OpenMLDB `conf/` path.
+2. You can check `streaming.plugin.clzznames` in the `byzer.properties.override` file under the `$BYZER_HOME/conf` path of Byzer to see whether the `byzer-openmldb-3.0` plugin is successfully installed. You will see the main class name `tech.mlsql.plugins.openmldb.ByzerApp` after installation.
+3. If the plugin installation fails, download the `.jar` files and [install them offline](https://docs.byzer.org/#/byzer-lang/zh-cn/extension/installation/offline_install).
+```
+
+### 2.3 Real-time Feature Extractions
+
+The features developed in Section 2.3 of [OpenMLDB + LightGBM: Taxi Trip Duration Prediction](./lightgbm_demo.md) will be used in this demo.
+The processed data will be exported to a local `csv` file.
+
+```sql
+run command as FeatureStoreExt.`` where
+zkAddress="172.17.0.2:7527"
+and `sql-0`='''
+SET @@execute_mode='offline';
+'''
+and `sql-1`='''
+SET @@job_timeout=20000000;
+'''
+and `sql-2`='''
+SELECT trip_duration, passenger_count,
+sum(pickup_latitude) OVER w AS vendor_sum_pl,
+max(pickup_latitude) OVER w AS vendor_max_pl,
+min(pickup_latitude) OVER w AS vendor_min_pl,
+avg(pickup_latitude) OVER w AS vendor_avg_pl,
+sum(pickup_latitude) OVER w2 AS pc_sum_pl,
+max(pickup_latitude) OVER w2 AS pc_max_pl,
+min(pickup_latitude) OVER w2 AS pc_min_pl,
+avg(pickup_latitude) OVER w2 AS pc_avg_pl,
+count(vendor_id) OVER w2 AS pc_cnt,
+count(vendor_id) OVER w AS vendor_cnt
+FROM t1
+WINDOW w AS (PARTITION BY vendor_id ORDER BY pickup_datetime ROWS_RANGE BETWEEN 1d PRECEDING AND CURRENT ROW),
+w2 AS (PARTITION BY passenger_count ORDER BY pickup_datetime ROWS_RANGE BETWEEN 1d PRECEDING AND CURRENT ROW) INTO OUTFILE '/tmp/feature_data';
+'''
+and db="db1"
+and action="ddl";
+```
+
+
+
+### 2.4 Data Vectorization
+Convert all `int` type fields to `double` in Byzer Notebook.
+
+```sql
+select *,
+cast(passenger_count as double) as passenger_count_d,
+cast(pc_cnt as double) as pc_cnt_d,
+cast(vendor_cnt as double) as vendor_cnt_d
+from feature_data
+as new_feature_data;
+```
+
+Then merge all the fields into a vector.
+
+```sql
+select vec_dense(array(
+passenger_count_d,
+vendor_sum_pl,
+vendor_max_pl,
+vendor_min_pl,
+vendor_avg_pl,
+pc_sum_pl,
+pc_max_pl,
+pc_min_pl,
+pc_avg_pl,
+pc_cnt_d,
+vendor_cnt
+)) as features, cast(trip_duration as double) as label
+from new_feature_data
+as trainning_table;
+
+```
+
+
+
+### 2.5 Training
+
+Use the `train` Byzer Lang command and its [built-in Linear Regression Algorithm](https://docs.byzer.org/#/byzer-lang/zh-cn/ml/algs/linear_regression) to train the model, and save it to `/model/tax-trip`.
+
+```sql
+train trainning_table as LinearRegression.`/model/tax-trip` where
+
+keepVersion="true"
+
+and evaluateTable="trainning_table"
+and `fitParam.0.labelCol`="label"
+and `fitParam.0.featuresCol`= "features"
+and `fitParam.0.maxIter`="50";
+
+```
+
+```{note}
+To check the parameters of Byzer's built-in Linear Regression Algorithm, please use the `!show et/params/LinearRegression;` command.
+```
+
+### 2.6 Feature Deployment
+
+Deploy the feature extraction script onto OpenMLDB: copy the best-performing feature extraction code and set `execute_mode` to `online`.
+The following example reuses the same code as in the feature extraction step, which may not be the 'best'.
+```sql
+run command as FeatureStoreExt.`` where
+zkAddress="172.17.0.2:7527"
+and `sql-0`='''
+SET @@execute_mode='online';
+'''
+and `sql-1`='''
+SET @@job_timeout=20000000;
+'''
+and `sql-2`='''
+SELECT trip_duration, passenger_count,
+sum(pickup_latitude) OVER w AS vendor_sum_pl,
+max(pickup_latitude) OVER w AS vendor_max_pl,
+min(pickup_latitude) OVER w AS vendor_min_pl,
+avg(pickup_latitude) OVER w AS vendor_avg_pl,
+sum(pickup_latitude) OVER w2 AS pc_sum_pl,
+max(pickup_latitude) OVER w2 AS pc_max_pl,
+min(pickup_latitude) OVER w2 AS pc_min_pl,
+avg(pickup_latitude) OVER w2 AS pc_avg_pl,
+count(vendor_id) OVER w2 AS pc_cnt,
+count(vendor_id) OVER w AS vendor_cnt
+FROM t1
+WINDOW w AS (PARTITION BY vendor_id ORDER BY pickup_datetime ROWS_RANGE BETWEEN 1d PRECEDING AND CURRENT ROW),
+w2 AS (PARTITION BY passenger_count ORDER BY pickup_datetime ROWS_RANGE BETWEEN 1d PRECEDING AND CURRENT ROW) INTO OUTFILE '/tmp/feature_data_test';
+'''
+and db="db1"
+and action="ddl";
+
+```
+
+Import the online data: the following example uses the test set from Kaggle; in production, a real-time data source can be connected instead.
+
+```sql
+run command as FeatureStoreExt.`` where
+zkAddress="172.17.0.2:7527"
+and `sql-0`='''
+SET @@execute_mode='online';
+'''
+and `sql-1`='''
+SET @@job_timeout=20000000;
+'''
+and `sql-2`='''
+CREATE TABLE t1(id string, vendor_id int, pickup_datetime timestamp, dropoff_datetime timestamp, passenger_count int, pickup_longitude double, pickup_latitude double, dropoff_longitude double, dropoff_latitude double, store_and_fwd_flag string, trip_duration int);
+'''
+and `sql-3`='''
+LOAD DATA INFILE 'tmp/upload/test.csv'
+INTO TABLE t1 options(format='csv',header=true,mode='append');
+'''
+and db="db1"
+and action="ddl";
+```
+
+
+
+### 2.7 Model Deployment
+
+Register the previously trained and saved model as a UDF in Byzer Notebook so that it can be used more conveniently.
+
+```sql
+register LinearRegression.`/model/tax-trip` as tax_trip_model_predict;
+```
+
+### 2.8 Prediction
+
+Convert all `int` type fields of the online dataset, after it has been processed by OpenMLDB, to `double`.
+
+```sql
+select *,
+cast(passenger_count as double) as passenger_count_d,
+cast(pc_cnt as double) as pc_cnt_d,
+cast(vendor_cnt as double) as vendor_cnt_d
+from feature_data_test
+as new_feature_data_test;
+```
+
+Then merge all the fields into a vector.
+
+
+```sql
+select vec_dense(array(
+passenger_count_d,
+vendor_sum_pl,
+vendor_max_pl,
+vendor_min_pl,
+vendor_avg_pl,
+pc_sum_pl,
+pc_max_pl,
+pc_min_pl,
+pc_avg_pl,
+pc_cnt_d,
+vendor_cnt
+)) as features
+from new_feature_data_test
+as testing_table;
+```
+
+Use the processed test set to make predictions.
+
+```sql
+select tax_trip_model_predict(testing_table) as predict_label;
+```
+
+
+
+
+
diff --git a/docs/en/use_case/dolphinscheduler_task_demo.md b/docs/en/use_case/dolphinscheduler_task_demo.md
index ded346db1fd..4039321cbb0 100644
--- a/docs/en/use_case/dolphinscheduler_task_demo.md
+++ b/docs/en/use_case/dolphinscheduler_task_demo.md
@@ -28,89 +28,124 @@ In addition to the feature engineering done by OpenMLDB, the prediction also req
## Demo
### Configuration
-The demo can run on MacOS or Linux, or use the OpenMLDB docker image provided by us:
+
+**Use OpenMLDB docker image**
+
+The demo can run on MacOS or Linux, but the OpenMLDB docker image is recommended. We will start OpenMLDB and DolphinScheduler in the same container and expose the DolphinScheduler web port:
```
-docker run -it 4pdosc/openmldb:0.5.1 bash
+docker run -it -p 12345:12345 4pdosc/openmldb:0.6.3 bash
```
-
```{attention}
The DolphinScheduler requires a user of the operating system with `sudo` permission. Therefore, it is recommended to download and start the DolphinScheduler in the OpenMLDB container. Otherwise, please prepare the operating system user with sudo permission.
```
+The docker image doesn't include sudo, but DolphinScheduler needs it at runtime, so install it:
+```
+apt update && apt install sudo
+```
+
+DolphinScheduler runs tasks with `sh`, but the default `sh` in the docker image is `dash`. Change it to `bash`:
+```
+dpkg-reconfigure dash
+```
+When prompted, enter `no`.
+
+**Start OpenMLDB Cluster and Predict Server**
+
In the container, you can directly run the following command to start the OpenMLDB cluster.
```
./init.sh
```
-We will complete a workflow of importing data, offline training, and deploying the SQL and model online after successful training. For the online part of the model, you can use a simple predict server. See [predict server source](https://raw.githubusercontent.com/4paradigm/OpenMLDB/main/demo/talkingdata-adtracking-fraud-detection/predict_server.py). You can download it locally and run it in the background:
+We will complete a workflow of importing data, offline training, and deploying the SQL and model online after successful training. For the online part of the model, you can use the simple predict server in `/work/talkingdata`. Run it in the background:
```
-python3 predict_server.py --no-init > predict.log 2>&1 &
+python3 /work/talkingdata/predict_server.py --no-init > predict.log 2>&1 &
```
-Note that, DolphinScheduler has not officially released the updated version supporting OpenMLDB Task (only on the `dev` branch), so please download [dolphinscheduler-bin](https://github.com/4paradigm/OpenMLDB/releases/download/v0.5.1/apache-dolphinscheduler-dev-SNAPSHOT-bin.tar.gz) that is prepared by us to have the DolphinScheduler version supporting OpenMLDB Task.
+**Start DolphinScheduler**
+
+Note that DolphinScheduler has not officially released a version supporting the OpenMLDB Task (it is only on the `dev` branch), so please download the [dolphinscheduler-bin](http://openmldb.ai/download/dolphinschduler-task/apache-dolphinscheduler-dev-SNAPSHOT-bin.tar.gz) package prepared by us to get a DolphinScheduler build that supports the OpenMLDB Task.
Start the DolphinScheduler standalone version. The steps are as follows. For more information, please refer to [Official Documentation](https://dolphinscheduler.apache.org/en-us/docs/3.0.0/user_doc/guide/installation/standalone.html)。
```
tar -xvzf apache-dolpSchedulerler-*-bin.tar.gz
cd apache-dolpSchedulerler-*-bin
+sed -i s#/opt/soft/python#/usr/bin/python3#g bin/env/dolphinscheduler_env.sh
sh ./bin/dolpSchedulerler-daemon.sh start standalone-server
```
Now you can login to DolphinScheduler at http://localhost:12345/dolphinscheduler/ui . The default user name and password are: admin/dolphinScheduler123。
-The worker server of DolphinScheduler requires the OpenMLDB Python SDK. The worker of DolphinScheduler standalone is the local machine, so you only need to install the OpenMLDB Python SDK on the local machine. The Python SDK is ready in our OpenMLDB image. If you are not running the docker image, install the SDK by:
+We have set the Python environment by modifying `PYTHON_HOME` in `bin/env/dolphinscheduler_env.sh`, as shown in the previous code (the Python task needs the Python environment set explicitly because we use Python3). If you have already started DolphinScheduler, you can also set the environment on the web page after startup, as shown below. **Note that in this case, it is necessary to confirm that all tasks in the workflow use this environment.**
-```
-pip3 install openmldb
-```
+![ds env setting](images/ds_env_setting.png)
-Workflows can be created manually. In this example, we directly provide JSON workflow files, [Click to Download](https://github.com/4paradigm/OpenMLDB/releases/download/v0.5.1/workflow_openmldb_demo.json), and you can directly import it later into the DolphinScheduler environment and make simple modifications to complete the whole workflow.
+![set python env](images/set_python_env.png)
-Python task needs to explicitly set the python environment. The simplest way is to set the Python environment in bin/env/dolphinscheduler_env.sh to modify `PYTHON_HOME`, and then start the DolphinScheduler. Please fill in the absolute path of Python3 instead of the relative path.
```{caution}
Note that before the DolphinScheduler standalone runs, the configured temporary environment variable `PYTHON_HOME` does not affect the environment in the work server.
```
-If you have started the DolphinScheduler already, you can also set the environment on the web page after startup. The setting method is as follows. **Note that in this case, it is necessary to confirm that all tasks in the workflow use this environment**
-![ds env setting](images/ds_env_setting.png)
-![set python env](images/set_python_env.png)
+```{note}
+The worker server of DolphinScheduler requires the OpenMLDB Python SDK. The worker of DolphinScheduler standalone is the local machine, so you only need to install the OpenMLDB Python SDK on the local machine. The Python SDK is ready in our OpenMLDB image. If you are not running the docker image, install the SDK by `pip3 install openmldb`.
+```
+
+**Download workflow json**
+
+Workflows can be created manually. In this example, we directly provide a JSON workflow file ([Click to Download](https://github.com/4paradigm/OpenMLDB/releases/download/v0.5.1/workflow_openmldb_demo.json)); you can import it into the DolphinScheduler environment later and make simple modifications to complete the whole workflow.
+
+**Source Data**
+
+The workflow will load data from `/tmp/train_sample.csv`, so prepare it first:
+```
+cp /work/talkingdata/train_sample.csv /tmp
+```
### Demo Steps
#### Step 1. Initialize Configuration
-![tenant manage](images/ds_tenant_manage.png)
You need to first create a tenant in the DolphinScheduler Web, and then enter the tenant management interface, fill in the operating system user with sudo permission, and use the default for the queue. The root user can be used directly in the docker container.
+![create tenant](images/ds_create_tenant.png)
+
Then you need to bind the tenant to the user. For simplicity, we directly bind to the admin user. Enter the user management page and click edit admin user.
+
![bind tenant](images/ds_bind_tenant.png)
+
After binding, the user status is similar to the following figure.
+
![bind status](images/ds_bind_status.png)
#### Step 2. Create Workflow
In the DolphinScheduler, you need to create a project first, and then create a workflow in the project. Therefore, first create a test project, as shown in the following figure. Click create a project and enter the project.
+
![create project](images/ds_create_project.png)
+
![project](images/ds_project.png)
After entering the project, you can import the [downloaded workflow file](https://github.com/4paradigm/OpenMLDB/releases/download/v0.5.1/workflow_openmldb_demo.json). As shown in the following figure, please click Import workflow in the workflow definition interface.
+
![import workflow](images/ds_import_workflow.png)
After the import, the workflow will appear in the workflow list, similar to the following figure.
+
![workflow list](images/ds_workflow_list.png)
Then you click the workflow name to view the workflow details, as shown in the following figure.
+
![workflow detail](images/ds_workflow_detail.png)
**Note**: This needs to be modified because the task ID will change after importing the workflow. In particular, the upstream and downstream id in the switch task do not exist and need to be manually changed.
-![image-20220610163343993](images/ds_switch.png)
+![switch](images/ds_switch.png)
As shown in the above figure, there is a non-existent ID in the settings of the switch task. Please change the successful and failed "branch flow" and "pre-check condition" to the task of the current workflow.
The correct result is shown in the following figure:
-![image-20220610163515122](images/ds_switch_right.png)
+![right](images/ds_switch_right.png)
After modification, we save the workflow. Tenant in the imported workflow will be deemed as default in the default mode and also can be run. If you want to specify your tenant, please select a tenant when saving the workflow, as shown in the following figure.
![set tenant](images/ds_set_tenant.png)
@@ -118,9 +153,11 @@ After modification, we save the workflow. Tenant in the imported workflow will b
#### Step 3. Online Operation
After saving the workflow, you need to go online before running. The run button will not light up until it is online. As shown in the following figure.
+
![run](images/ds_run.png)
Please click run and wait for the workflow to complete. You can view the workflow running details in the Workflow Instance interface, as shown in the following figure.
+
![run status](images/ds_run_status.png)
To demonstrate the process of a successful launch, the validation does not perform actual validation, but directly returns the validation success and flows into the deploy branch. After running the deploy branch, the deploy SQL and subsequent tasks are successful, the predict server receives the latest model.
@@ -136,4 +173,17 @@ curl -X POST 127.0.0.1:8881/predict -d '{"ip": 114904,
"is_attributed": 0}'
```
The returned results are as follows:
+
![predict](images/ds_predict.png)
+
+#### Supplement
+
+If you rerun the workflow, the `deploy sql` task may fail because the deployment `demo` already exists. Please delete the deployment in the container before rerunning the workflow:
+```
+/work/openmldb/bin/openmldb --zk_cluster=127.0.0.1:2181 --zk_root_path=/openmldb --role=sql_client --database=demo_db --interactive=false --cmd="drop deployment demo;"
+```
+
+You can check whether the deployment has been deleted:
+```
+/work/openmldb/bin/openmldb --zk_cluster=127.0.0.1:2181 --zk_root_path=/openmldb --role=sql_client --database=demo_db --interactive=false --cmd="show deployment demo;"
+```
diff --git a/docs/en/use_case/images/Byzer_Notebook.jpg b/docs/en/use_case/images/Byzer_Notebook.jpg
new file mode 100644
index 00000000000..18ae0f85739
Binary files /dev/null and b/docs/en/use_case/images/Byzer_Notebook.jpg differ
diff --git a/docs/en/use_case/index.rst b/docs/en/use_case/index.rst
index 8f18bc4d15d..770b85ca958 100644
--- a/docs/en/use_case/index.rst
+++ b/docs/en/use_case/index.rst
@@ -9,3 +9,6 @@ Use Cases
pulsar_connector_demo
kafka_connector_demo
dolphinscheduler_task_demo
+ JD_recommendation_en
+ OpenMLDB_Byzer_taxi
+
diff --git a/docs/en/use_case/kafka_connector_demo.md b/docs/en/use_case/kafka_connector_demo.md
index 1ad21053e29..c98e4eebef2 100644
--- a/docs/en/use_case/kafka_connector_demo.md
+++ b/docs/en/use_case/kafka_connector_demo.md
@@ -22,7 +22,7 @@ For OpenMLDB Kafka Connector implementation, please refer to [extensions/kafka-c
This article will start the OpenMLDB in docker container, so there is no need to download the OpenMLDB separately. Moreover, Kafka and connector can be started in the same container. We recommend that you save the three downloaded packages to the same directory. Let's assume that the packages are in the `/work/kafka` directory.
```
-docker run -it -v `pwd`:/work/kafka --name openmldb 4pdosc/openmldb:0.5.2 bash
+docker run -it -v `pwd`:/work/kafka --name openmldb 4pdosc/openmldb:0.6.3 bash
```
### Steps
diff --git a/docs/en/use_case/lightgbm_demo.md b/docs/en/use_case/lightgbm_demo.md
index 546c4f3788e..e3ed232122d 100644
--- a/docs/en/use_case/lightgbm_demo.md
+++ b/docs/en/use_case/lightgbm_demo.md
@@ -1,4 +1,4 @@
-### OpenMLDB + LightGBM: Taxi Trip Duration Prediction
+# OpenMLDB + LightGBM: Taxi Trip Duration Prediction
In this document, we will take [the taxi travel time prediction problem on Kaggle as an example](https://www.kaggle.com/c/nyc-taxi-trip-duration/overview) to demonstrate how to use the OpenMLDB and LightGBM together to build a complete machine learning application.
@@ -13,7 +13,7 @@ Note that: (1) this case is based on the OpenMLDB cluster version for tutorial d
- Pull the OpenMLDB docker image and run the corresponding container:
```bash
-docker run -it 4pdosc/openmldb:0.5.2 bash
+docker run -it 4pdosc/openmldb:0.6.3 bash
```
The image is preinstalled with OpenMLDB and preset with all scripts, third-party libraries, open-source tools and training data required for this case.
diff --git a/docs/en/use_case/pulsar_connector_demo.md b/docs/en/use_case/pulsar_connector_demo.md
index 7da18552d45..d7bcc2f607c 100644
--- a/docs/en/use_case/pulsar_connector_demo.md
+++ b/docs/en/use_case/pulsar_connector_demo.md
@@ -10,7 +10,7 @@ Note that, for the sake of simplicity, for this document, we use Pulsar Standalo
### Download
-- You can download the entire demo package [here](https://github.com/vagetablechicken/pulsar-openmldb-connector-demo/releases/download/v0.2/files.tar.gz), which are needed by this demo, including the connector nar, schema files, and config files.
+- You can download the entire demo package [here](https://openmldb.ai/download/pulsar-connector/files.tar.gz), which are needed by this demo, including the connector nar, schema files, and config files.
- If you would like to download the connector only, you can [download it here](https://github.com/4paradigm/OpenMLDB/releases/download/v0.4.4/pulsar-io-jdbc-openmldb-2.11.0-SNAPSHOT.nar) from the OpenMLDB release.
@@ -29,7 +29,7 @@ Only OpenMLDB cluster mode can be the sink dist, and only write to online storag
We recommend that you use ‘host network’ to run docker. And bind volume ‘files’ too. The sql scripts are in it.
```
-docker run -dit --network host -v `pwd`/files:/work/taxi-trip/files --name openmldb 4pdosc/openmldb:0.5.2 bash
+docker run -dit --network host -v `pwd`/files:/work/pulsar_files --name openmldb 4pdosc/openmldb:0.6.3 bash
docker exec -it openmldb bash
```
```{note}
@@ -49,7 +49,7 @@ desc connector_test;
```
Run the script:
```
-../openmldb/bin/openmldb --zk_cluster=127.0.0.1:2181 --zk_root_path=/openmldb --role=sql_client < files/create.sql
+/work/openmldb/bin/openmldb --zk_cluster=127.0.0.1:2181 --zk_root_path=/openmldb --role=sql_client < /work/pulsar_files/create.sql
```
![table desc](images/table.png)
@@ -209,6 +209,6 @@ select *, string(timestamp(pickup_datetime)), string(timestamp(dropoff_datetime)
```
In OpenMLDB container, run:
```
-../openmldb/bin/openmldb --zk_cluster=127.0.0.1:2181 --zk_root_path=/openmldb --role=sql_client < files/select.sql
+/work/openmldb/bin/openmldb --zk_cluster=127.0.0.1:2181 --zk_root_path=/openmldb --role=sql_client < /work/pulsar_files/select.sql
```
![openmldb result](images/openmldb_result.png)
diff --git a/docs/environment.yml b/docs/environment.yml
index 60b4fdbf106..d711fe629a5 100644
--- a/docs/environment.yml
+++ b/docs/environment.yml
@@ -1,4 +1,7 @@
name: sphinx
+channels:
+ - conda-forge
+ - defaults
dependencies:
- alabaster=0.7.12
- babel=2.9.1
@@ -28,7 +31,7 @@ dependencies:
- requests=2.27.1
- setuptools=58.0.4
- snowballstemmer=2.2.0
- - sphinx=4.4.0
+ - sphinx=4.5.0
- sphinxcontrib-applehelp=1.0.2
- sphinxcontrib-devhelp=1.0.2
- sphinxcontrib-htmlhelp=2.0.0
@@ -58,4 +61,4 @@ dependencies:
- sphinx-multiversion==0.2.4
- typing-extensions==4.1.1
- uc-micro-py==1.0.1
- - sphinx-copybutton==0.5.0
\ No newline at end of file
+ - sphinx-copybutton==0.5.0
diff --git a/docs/poetry.lock b/docs/poetry.lock
new file mode 100644
index 00000000000..4660756976b
--- /dev/null
+++ b/docs/poetry.lock
@@ -0,0 +1,738 @@
+[[package]]
+name = "alabaster"
+version = "0.7.12"
+description = "A configurable sidebar-enabled Sphinx theme"
+category = "dev"
+optional = false
+python-versions = "*"
+
+[[package]]
+name = "babel"
+version = "2.10.3"
+description = "Internationalization utilities"
+category = "dev"
+optional = false
+python-versions = ">=3.6"
+
+[package.dependencies]
+pytz = ">=2015.7"
+
+[[package]]
+name = "beautifulsoup4"
+version = "4.11.1"
+description = "Screen-scraping library"
+category = "dev"
+optional = false
+python-versions = ">=3.6.0"
+
+[package.dependencies]
+soupsieve = ">1.2"
+
+[package.extras]
+html5lib = ["html5lib"]
+lxml = ["lxml"]
+
+[[package]]
+name = "certifi"
+version = "2022.6.15"
+description = "Python package for providing Mozilla's CA Bundle."
+category = "dev"
+optional = false
+python-versions = ">=3.6"
+
+[[package]]
+name = "charset-normalizer"
+version = "2.1.1"
+description = "The Real First Universal Charset Detector. Open, modern and actively maintained alternative to Chardet."
+category = "dev"
+optional = false
+python-versions = ">=3.6.0"
+
+[package.extras]
+unicode_backport = ["unicodedata2"]
+
+[[package]]
+name = "colorama"
+version = "0.4.5"
+description = "Cross-platform colored terminal text."
+category = "dev"
+optional = false
+python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*"
+
+[[package]]
+name = "docutils"
+version = "0.17.1"
+description = "Docutils -- Python Documentation Utilities"
+category = "dev"
+optional = false
+python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*"
+
+[[package]]
+name = "idna"
+version = "3.3"
+description = "Internationalized Domain Names in Applications (IDNA)"
+category = "dev"
+optional = false
+python-versions = ">=3.5"
+
+[[package]]
+name = "imagesize"
+version = "1.4.1"
+description = "Getting image size from png/jpeg/jpeg2000/gif file"
+category = "dev"
+optional = false
+python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*"
+
+[[package]]
+name = "importlib-metadata"
+version = "4.12.0"
+description = "Read metadata from Python packages"
+category = "dev"
+optional = false
+python-versions = ">=3.7"
+
+[package.dependencies]
+zipp = ">=0.5"
+
+[package.extras]
+docs = ["sphinx", "jaraco.packaging (>=9)", "rst.linker (>=1.9)"]
+perf = ["ipython"]
+testing = ["pytest (>=6)", "pytest-checkdocs (>=2.4)", "pytest-flake8", "pytest-cov", "pytest-enabler (>=1.3)", "packaging", "pyfakefs", "flufl.flake8", "pytest-perf (>=0.9.2)", "pytest-black (>=0.3.7)", "pytest-mypy (>=0.9.1)", "importlib-resources (>=1.3)"]
+
+[[package]]
+name = "jinja2"
+version = "3.1.2"
+description = "A very fast and expressive template engine."
+category = "dev"
+optional = false
+python-versions = ">=3.7"
+
+[package.dependencies]
+MarkupSafe = ">=2.0"
+
+[package.extras]
+i18n = ["Babel (>=2.7)"]
+
+[[package]]
+name = "linkify-it-py"
+version = "1.0.3"
+description = "Links recognition library with FULL unicode support."
+category = "dev"
+optional = false
+python-versions = ">=3.6"
+
+[package.dependencies]
+uc-micro-py = "*"
+
+[package.extras]
+test = ["pytest-cov", "pytest", "coverage"]
+doc = ["myst-parser", "sphinx-book-theme", "sphinx"]
+dev = ["black", "flake8", "isort", "pre-commit"]
+benchmark = ["pytest-benchmark", "pytest"]
+
+[[package]]
+name = "markdown-it-py"
+version = "2.1.0"
+description = "Python port of markdown-it. Markdown parsing, done right!"
+category = "dev"
+optional = false
+python-versions = ">=3.7"
+
+[package.dependencies]
+mdurl = ">=0.1,<1.0"
+
+[package.extras]
+testing = ["pytest-regressions", "pytest-cov", "pytest", "coverage"]
+rtd = ["sphinx-book-theme", "sphinx-design", "sphinx-copybutton", "sphinx", "pyyaml", "myst-parser", "attrs"]
+profiling = ["gprof2dot"]
+plugins = ["mdit-py-plugins"]
+linkify = ["linkify-it-py (>=1.0,<2.0)"]
+compare = ["panflute (>=2.1.3,<2.2.0)", "mistune (>=2.0.2,<2.1.0)", "mistletoe (>=0.8.1,<0.9.0)", "markdown (>=3.3.6,<3.4.0)", "commonmark (>=0.9.1,<0.10.0)"]
+code_style = ["pre-commit (==2.6)"]
+benchmarking = ["pytest-benchmark (>=3.2,<4.0)", "pytest", "psutil"]
+
+[[package]]
+name = "markupsafe"
+version = "2.1.1"
+description = "Safely add untrusted strings to HTML/XML markup."
+category = "dev"
+optional = false
+python-versions = ">=3.7"
+
+[[package]]
+name = "mdit-py-plugins"
+version = "0.3.0"
+description = "Collection of plugins for markdown-it-py"
+category = "dev"
+optional = false
+python-versions = "~=3.6"
+
+[package.dependencies]
+markdown-it-py = ">=1.0.0,<3.0.0"
+
+[package.extras]
+testing = ["pytest-regressions", "pytest-cov", "pytest (>=3.6,<4)", "coverage"]
+rtd = ["sphinx-book-theme (>=0.1.0,<0.2.0)", "myst-parser (>=0.14.0,<0.15.0)"]
+code_style = ["pre-commit (==2.6)"]
+
+[[package]]
+name = "mdurl"
+version = "0.1.2"
+description = "Markdown URL utilities"
+category = "dev"
+optional = false
+python-versions = ">=3.7"
+
+[[package]]
+name = "myst-parser"
+version = "0.18.0"
+description = "An extended commonmark compliant parser, with bridges to docutils & sphinx."
+category = "dev"
+optional = false
+python-versions = ">=3.7"
+
+[package.dependencies]
+docutils = ">=0.15,<0.19"
+jinja2 = "*"
+linkify-it-py = {version = ">=1.0,<2.0", optional = true, markers = "extra == \"linkify\""}
+markdown-it-py = ">=1.0.0,<3.0.0"
+mdit-py-plugins = ">=0.3.0,<0.4.0"
+pyyaml = "*"
+sphinx = ">=4,<6"
+typing-extensions = "*"
+
+[package.extras]
+code_style = ["pre-commit (>=2.12,<3.0)"]
+linkify = ["linkify-it-py (>=1.0,<2.0)"]
+rtd = ["ipython", "sphinx-book-theme", "sphinx-design", "sphinxext-rediraffe (>=0.2.7,<0.3.0)", "sphinxcontrib.mermaid (>=0.7.1,<0.8.0)", "sphinxext-opengraph (>=0.6.3,<0.7.0)"]
+testing = ["beautifulsoup4", "coverage", "pytest (>=6,<7)", "pytest-cov", "pytest-regressions", "pytest-param-files (>=0.3.4,<0.4.0)", "sphinx-pytest"]
+
+[[package]]
+name = "packaging"
+version = "21.3"
+description = "Core utilities for Python packages"
+category = "dev"
+optional = false
+python-versions = ">=3.6"
+
+[package.dependencies]
+pyparsing = ">=2.0.2,<3.0.5 || >3.0.5"
+
+[[package]]
+name = "pydata-sphinx-theme"
+version = "0.8.1"
+description = "Bootstrap-based Sphinx theme from the PyData community"
+category = "dev"
+optional = false
+python-versions = ">=3.7"
+
+[package.dependencies]
+beautifulsoup4 = "*"
+docutils = "!=0.17.0"
+packaging = "*"
+sphinx = ">=3.5.4,<5"
+
+[package.extras]
+dev = ["pydata-sphinx-theme", "nox", "pre-commit", "pyyaml"]
+coverage = ["pydata-sphinx-theme", "codecov", "pytest-cov"]
+test = ["pydata-sphinx-theme", "pytest"]
+doc = ["xarray", "numpy", "plotly", "jupyter-sphinx", "sphinx-sitemap", "sphinxext-rediraffe", "pytest-regressions", "pytest", "pandas", "myst-parser", "numpydoc"]
+
+[[package]]
+name = "pygments"
+version = "2.13.0"
+description = "Pygments is a syntax highlighting package written in Python."
+category = "dev"
+optional = false
+python-versions = ">=3.6"
+
+[package.extras]
+plugins = ["importlib-metadata"]
+
+[[package]]
+name = "pyparsing"
+version = "3.0.9"
+description = "pyparsing module - Classes and methods to define and execute parsing grammars"
+category = "dev"
+optional = false
+python-versions = ">=3.6.8"
+
+[package.extras]
+diagrams = ["jinja2", "railroad-diagrams"]
+
+[[package]]
+name = "pytz"
+version = "2022.2.1"
+description = "World timezone definitions, modern and historical"
+category = "dev"
+optional = false
+python-versions = "*"
+
+[[package]]
+name = "pyyaml"
+version = "6.0"
+description = "YAML parser and emitter for Python"
+category = "dev"
+optional = false
+python-versions = ">=3.6"
+
+[[package]]
+name = "requests"
+version = "2.28.1"
+description = "Python HTTP for Humans."
+category = "dev"
+optional = false
+python-versions = ">=3.7, <4"
+
+[package.dependencies]
+certifi = ">=2017.4.17"
+charset-normalizer = ">=2,<3"
+idna = ">=2.5,<4"
+urllib3 = ">=1.21.1,<1.27"
+
+[package.extras]
+socks = ["PySocks (>=1.5.6,!=1.5.7)"]
+use_chardet_on_py3 = ["chardet (>=3.0.2,<6)"]
+
+[[package]]
+name = "snowballstemmer"
+version = "2.2.0"
+description = "This package provides 29 stemmers for 28 languages generated from Snowball algorithms."
+category = "dev"
+optional = false
+python-versions = "*"
+
+[[package]]
+name = "soupsieve"
+version = "2.3.2.post1"
+description = "A modern CSS selector implementation for Beautiful Soup."
+category = "dev"
+optional = false
+python-versions = ">=3.6"
+
+[[package]]
+name = "sphinx"
+version = "4.5.0"
+description = "Python documentation generator"
+category = "dev"
+optional = false
+python-versions = ">=3.6"
+
+[package.dependencies]
+alabaster = ">=0.7,<0.8"
+babel = ">=1.3"
+colorama = {version = ">=0.3.5", markers = "sys_platform == \"win32\""}
+docutils = ">=0.14,<0.18"
+imagesize = "*"
+importlib-metadata = {version = ">=4.4", markers = "python_version < \"3.10\""}
+Jinja2 = ">=2.3"
+packaging = "*"
+Pygments = ">=2.0"
+requests = ">=2.5.0"
+snowballstemmer = ">=1.1"
+sphinxcontrib-applehelp = "*"
+sphinxcontrib-devhelp = "*"
+sphinxcontrib-htmlhelp = ">=2.0.0"
+sphinxcontrib-jsmath = "*"
+sphinxcontrib-qthelp = "*"
+sphinxcontrib-serializinghtml = ">=1.1.5"
+
+[package.extras]
+docs = ["sphinxcontrib-websupport"]
+lint = ["flake8 (>=3.5.0)", "isort", "mypy (>=0.931)", "docutils-stubs", "types-typed-ast", "types-requests"]
+test = ["pytest", "pytest-cov", "html5lib", "cython", "typed-ast"]
+
+[[package]]
+name = "sphinx-book-theme"
+version = "0.3.3"
+description = "A clean book theme for scientific explanations and documentation with Sphinx"
+category = "dev"
+optional = false
+python-versions = ">=3.7"
+
+[package.dependencies]
+pydata-sphinx-theme = ">=0.8.0,<0.9.0"
+pyyaml = "*"
+sphinx = ">=3,<5"
+
+[package.extras]
+test = ["sphinx-thebe", "pytest-regressions (>=2.0.1,<2.1.0)", "pytest-cov", "pytest (>=6.0.1,<6.1.0)", "myst-nb (>=0.13.2,<0.14.0)", "coverage", "beautifulsoup4 (>=4.6.1,<5)"]
+doc = ["sphinxext-opengraph", "sphinxcontrib-youtube", "sphinxcontrib-bibtex (>=2.2,<3.0)", "sphinx-thebe (>=0.1.1)", "sphinx-togglebutton (>=0.2.1)", "sphinx-tabs", "sphinx-copybutton", "sphinx-examples", "sphinx-design", "sphinx (>=4.0,<5.0)", "plotly", "pandas", "nbclient", "myst-nb (>=0.13.2,<0.14.0)", "numpydoc", "matplotlib", "numpy", "folium", "ipywidgets", "ablog (>=0.10.13,<0.11.0)"]
+code_style = ["pre-commit (>=2.7.0,<2.8.0)"]
+
+[[package]]
+name = "sphinx-copybutton"
+version = "0.5.0"
+description = "Add a copy button to each of your code cells."
+category = "dev"
+optional = false
+python-versions = ">=3.6"
+
+[package.dependencies]
+sphinx = ">=1.8"
+
+[package.extras]
+rtd = ["sphinx-book-theme", "myst-nb", "ipython", "sphinx"]
+code_style = ["pre-commit (==2.12.1)"]
+
+[[package]]
+name = "sphinx-multiversion"
+version = "0.2.4"
+description = "Add support for multiple versions to sphinx"
+category = "dev"
+optional = false
+python-versions = "*"
+
+[package.dependencies]
+sphinx = ">=2.1"
+
+[[package]]
+name = "sphinxcontrib-applehelp"
+version = "1.0.2"
+description = "sphinxcontrib-applehelp is a sphinx extension which outputs Apple help books"
+category = "dev"
+optional = false
+python-versions = ">=3.5"
+
+[package.extras]
+test = ["pytest"]
+lint = ["docutils-stubs", "mypy", "flake8"]
+
+[[package]]
+name = "sphinxcontrib-devhelp"
+version = "1.0.2"
+description = "sphinxcontrib-devhelp is a sphinx extension which outputs Devhelp document."
+category = "dev"
+optional = false
+python-versions = ">=3.5"
+
+[package.extras]
+test = ["pytest"]
+lint = ["docutils-stubs", "mypy", "flake8"]
+
+[[package]]
+name = "sphinxcontrib-htmlhelp"
+version = "2.0.0"
+description = "sphinxcontrib-htmlhelp is a sphinx extension which renders HTML help files"
+category = "dev"
+optional = false
+python-versions = ">=3.6"
+
+[package.extras]
+test = ["html5lib", "pytest"]
+lint = ["docutils-stubs", "mypy", "flake8"]
+
+[[package]]
+name = "sphinxcontrib-jsmath"
+version = "1.0.1"
+description = "A sphinx extension which renders display math in HTML via JavaScript"
+category = "dev"
+optional = false
+python-versions = ">=3.5"
+
+[package.extras]
+test = ["mypy", "flake8", "pytest"]
+
+[[package]]
+name = "sphinxcontrib-qthelp"
+version = "1.0.3"
+description = "sphinxcontrib-qthelp is a sphinx extension which outputs QtHelp document."
+category = "dev"
+optional = false
+python-versions = ">=3.5"
+
+[package.extras]
+test = ["pytest"]
+lint = ["docutils-stubs", "mypy", "flake8"]
+
+[[package]]
+name = "sphinxcontrib-serializinghtml"
+version = "1.1.5"
+description = "sphinxcontrib-serializinghtml is a sphinx extension which outputs \"serialized\" HTML files (json and pickle)."
+category = "dev"
+optional = false
+python-versions = ">=3.5"
+
+[package.extras]
+test = ["pytest"]
+lint = ["docutils-stubs", "mypy", "flake8"]
+
+[[package]]
+name = "typing-extensions"
+version = "4.3.0"
+description = "Backported and Experimental Type Hints for Python 3.7+"
+category = "dev"
+optional = false
+python-versions = ">=3.7"
+
+[[package]]
+name = "uc-micro-py"
+version = "1.0.1"
+description = "Micro subset of unicode data files for linkify-it-py projects."
+category = "dev"
+optional = false
+python-versions = ">=3.6"
+
+[package.extras]
+test = ["pytest-cov", "pytest", "coverage"]
+
+[[package]]
+name = "urllib3"
+version = "1.26.12"
+description = "HTTP library with thread-safe connection pooling, file post, and more."
+category = "dev"
+optional = false
+python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*, !=3.5.*, <4"
+
+[package.extras]
+brotli = ["brotlicffi (>=0.8.0)", "brotli (>=1.0.9)", "brotlipy (>=0.6.0)"]
+secure = ["pyOpenSSL (>=0.14)", "cryptography (>=1.3.4)", "idna (>=2.0.0)", "certifi", "urllib3-secure-extra", "ipaddress"]
+socks = ["PySocks (>=1.5.6,!=1.5.7,<2.0)"]
+
+[[package]]
+name = "zipp"
+version = "3.8.1"
+description = "Backport of pathlib-compatible object wrapper for zip files"
+category = "dev"
+optional = false
+python-versions = ">=3.7"
+
+[package.extras]
+docs = ["sphinx", "jaraco.packaging (>=9)", "rst.linker (>=1.9)", "jaraco.tidelift (>=1.4)"]
+testing = ["pytest (>=6)", "pytest-checkdocs (>=2.4)", "pytest-flake8", "pytest-cov", "pytest-enabler (>=1.3)", "jaraco.itertools", "func-timeout", "pytest-black (>=0.3.7)", "pytest-mypy (>=0.9.1)"]
+
+[metadata]
+lock-version = "1.1"
+python-versions = "^3.8"
+content-hash = "d7090b100e7073238e539b72da34b49d83f77454206ebf164d71f1f96f9e1a0f"
+
+[metadata.files]
+alabaster = [
+ {file = "alabaster-0.7.12-py2.py3-none-any.whl", hash = "sha256:446438bdcca0e05bd45ea2de1668c1d9b032e1a9154c2c259092d77031ddd359"},
+ {file = "alabaster-0.7.12.tar.gz", hash = "sha256:a661d72d58e6ea8a57f7a86e37d86716863ee5e92788398526d58b26a4e4dc02"},
+]
+babel = [
+ {file = "Babel-2.10.3-py3-none-any.whl", hash = "sha256:ff56f4892c1c4bf0d814575ea23471c230d544203c7748e8c68f0089478d48eb"},
+ {file = "Babel-2.10.3.tar.gz", hash = "sha256:7614553711ee97490f732126dc077f8d0ae084ebc6a96e23db1482afabdb2c51"},
+]
+beautifulsoup4 = [
+ {file = "beautifulsoup4-4.11.1-py3-none-any.whl", hash = "sha256:58d5c3d29f5a36ffeb94f02f0d786cd53014cf9b3b3951d42e0080d8a9498d30"},
+ {file = "beautifulsoup4-4.11.1.tar.gz", hash = "sha256:ad9aa55b65ef2808eb405f46cf74df7fcb7044d5cbc26487f96eb2ef2e436693"},
+]
+certifi = [
+ {file = "certifi-2022.6.15-py3-none-any.whl", hash = "sha256:fe86415d55e84719d75f8b69414f6438ac3547d2078ab91b67e779ef69378412"},
+ {file = "certifi-2022.6.15.tar.gz", hash = "sha256:84c85a9078b11105f04f3036a9482ae10e4621616db313fe045dd24743a0820d"},
+]
+charset-normalizer = [
+ {file = "charset-normalizer-2.1.1.tar.gz", hash = "sha256:5a3d016c7c547f69d6f81fb0db9449ce888b418b5b9952cc5e6e66843e9dd845"},
+ {file = "charset_normalizer-2.1.1-py3-none-any.whl", hash = "sha256:83e9a75d1911279afd89352c68b45348559d1fc0506b054b346651b5e7fee29f"},
+]
+colorama = [
+ {file = "colorama-0.4.5-py2.py3-none-any.whl", hash = "sha256:854bf444933e37f5824ae7bfc1e98d5bce2ebe4160d46b5edf346a89358e99da"},
+ {file = "colorama-0.4.5.tar.gz", hash = "sha256:e6c6b4334fc50988a639d9b98aa429a0b57da6e17b9a44f0451f930b6967b7a4"},
+]
+docutils = [
+ {file = "docutils-0.17.1-py2.py3-none-any.whl", hash = "sha256:cf316c8370a737a022b72b56874f6602acf974a37a9fba42ec2876387549fc61"},
+ {file = "docutils-0.17.1.tar.gz", hash = "sha256:686577d2e4c32380bb50cbb22f575ed742d58168cee37e99117a854bcd88f125"},
+]
+idna = [
+ {file = "idna-3.3-py3-none-any.whl", hash = "sha256:84d9dd047ffa80596e0f246e2eab0b391788b0503584e8945f2368256d2735ff"},
+ {file = "idna-3.3.tar.gz", hash = "sha256:9d643ff0a55b762d5cdb124b8eaa99c66322e2157b69160bc32796e824360e6d"},
+]
+imagesize = [
+ {file = "imagesize-1.4.1-py2.py3-none-any.whl", hash = "sha256:0d8d18d08f840c19d0ee7ca1fd82490fdc3729b7ac93f49870406ddde8ef8d8b"},
+ {file = "imagesize-1.4.1.tar.gz", hash = "sha256:69150444affb9cb0d5cc5a92b3676f0b2fb7cd9ae39e947a5e11a36b4497cd4a"},
+]
+importlib-metadata = [
+ {file = "importlib_metadata-4.12.0-py3-none-any.whl", hash = "sha256:7401a975809ea1fdc658c3aa4f78cc2195a0e019c5cbc4c06122884e9ae80c23"},
+ {file = "importlib_metadata-4.12.0.tar.gz", hash = "sha256:637245b8bab2b6502fcbc752cc4b7a6f6243bb02b31c5c26156ad103d3d45670"},
+]
+jinja2 = [
+ {file = "Jinja2-3.1.2-py3-none-any.whl", hash = "sha256:6088930bfe239f0e6710546ab9c19c9ef35e29792895fed6e6e31a023a182a61"},
+ {file = "Jinja2-3.1.2.tar.gz", hash = "sha256:31351a702a408a9e7595a8fc6150fc3f43bb6bf7e319770cbc0db9df9437e852"},
+]
+linkify-it-py = [
+ {file = "linkify-it-py-1.0.3.tar.gz", hash = "sha256:2b3f168d5ce75e3a425e34b341a6b73e116b5d9ed8dbbbf5dc7456843b7ce2ee"},
+ {file = "linkify_it_py-1.0.3-py3-none-any.whl", hash = "sha256:11e29f00150cddaa8f434153f103c14716e7e097a8fd372d9eb1ed06ed91524d"},
+]
+markdown-it-py = [
+ {file = "markdown-it-py-2.1.0.tar.gz", hash = "sha256:cf7e59fed14b5ae17c0006eff14a2d9a00ed5f3a846148153899a0224e2c07da"},
+ {file = "markdown_it_py-2.1.0-py3-none-any.whl", hash = "sha256:93de681e5c021a432c63147656fe21790bc01231e0cd2da73626f1aa3ac0fe27"},
+]
+markupsafe = [
+ {file = "MarkupSafe-2.1.1-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:86b1f75c4e7c2ac2ccdaec2b9022845dbb81880ca318bb7a0a01fbf7813e3812"},
+ {file = "MarkupSafe-2.1.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:f121a1420d4e173a5d96e47e9a0c0dcff965afdf1626d28de1460815f7c4ee7a"},
+ {file = "MarkupSafe-2.1.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a49907dd8420c5685cfa064a1335b6754b74541bbb3706c259c02ed65b644b3e"},
+ {file = "MarkupSafe-2.1.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:10c1bfff05d95783da83491be968e8fe789263689c02724e0c691933c52994f5"},
+ {file = "MarkupSafe-2.1.1-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:b7bd98b796e2b6553da7225aeb61f447f80a1ca64f41d83612e6139ca5213aa4"},
+ {file = "MarkupSafe-2.1.1-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:b09bf97215625a311f669476f44b8b318b075847b49316d3e28c08e41a7a573f"},
+ {file = "MarkupSafe-2.1.1-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:694deca8d702d5db21ec83983ce0bb4b26a578e71fbdbd4fdcd387daa90e4d5e"},
+ {file = "MarkupSafe-2.1.1-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:efc1913fd2ca4f334418481c7e595c00aad186563bbc1ec76067848c7ca0a933"},
+ {file = "MarkupSafe-2.1.1-cp310-cp310-win32.whl", hash = "sha256:4a33dea2b688b3190ee12bd7cfa29d39c9ed176bda40bfa11099a3ce5d3a7ac6"},
+ {file = "MarkupSafe-2.1.1-cp310-cp310-win_amd64.whl", hash = "sha256:dda30ba7e87fbbb7eab1ec9f58678558fd9a6b8b853530e176eabd064da81417"},
+ {file = "MarkupSafe-2.1.1-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:671cd1187ed5e62818414afe79ed29da836dde67166a9fac6d435873c44fdd02"},
+ {file = "MarkupSafe-2.1.1-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3799351e2336dc91ea70b034983ee71cf2f9533cdff7c14c90ea126bfd95d65a"},
+ {file = "MarkupSafe-2.1.1-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e72591e9ecd94d7feb70c1cbd7be7b3ebea3f548870aa91e2732960fa4d57a37"},
+ {file = "MarkupSafe-2.1.1-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:6fbf47b5d3728c6aea2abb0589b5d30459e369baa772e0f37a0320185e87c980"},
+ {file = "MarkupSafe-2.1.1-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:d5ee4f386140395a2c818d149221149c54849dfcfcb9f1debfe07a8b8bd63f9a"},
+ {file = "MarkupSafe-2.1.1-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:bcb3ed405ed3222f9904899563d6fc492ff75cce56cba05e32eff40e6acbeaa3"},
+ {file = "MarkupSafe-2.1.1-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:e1c0b87e09fa55a220f058d1d49d3fb8df88fbfab58558f1198e08c1e1de842a"},
+ {file = "MarkupSafe-2.1.1-cp37-cp37m-win32.whl", hash = "sha256:8dc1c72a69aa7e082593c4a203dcf94ddb74bb5c8a731e4e1eb68d031e8498ff"},
+ {file = "MarkupSafe-2.1.1-cp37-cp37m-win_amd64.whl", hash = "sha256:97a68e6ada378df82bc9f16b800ab77cbf4b2fada0081794318520138c088e4a"},
+ {file = "MarkupSafe-2.1.1-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:e8c843bbcda3a2f1e3c2ab25913c80a3c5376cd00c6e8c4a86a89a28c8dc5452"},
+ {file = "MarkupSafe-2.1.1-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:0212a68688482dc52b2d45013df70d169f542b7394fc744c02a57374a4207003"},
+ {file = "MarkupSafe-2.1.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8e576a51ad59e4bfaac456023a78f6b5e6e7651dcd383bcc3e18d06f9b55d6d1"},
+ {file = "MarkupSafe-2.1.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4b9fe39a2ccc108a4accc2676e77da025ce383c108593d65cc909add5c3bd601"},
+ {file = "MarkupSafe-2.1.1-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:96e37a3dc86e80bf81758c152fe66dbf60ed5eca3d26305edf01892257049925"},
+ {file = "MarkupSafe-2.1.1-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:6d0072fea50feec76a4c418096652f2c3238eaa014b2f94aeb1d56a66b41403f"},
+ {file = "MarkupSafe-2.1.1-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:089cf3dbf0cd6c100f02945abeb18484bd1ee57a079aefd52cffd17fba910b88"},
+ {file = "MarkupSafe-2.1.1-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:6a074d34ee7a5ce3effbc526b7083ec9731bb3cbf921bbe1d3005d4d2bdb3a63"},
+ {file = "MarkupSafe-2.1.1-cp38-cp38-win32.whl", hash = "sha256:421be9fbf0ffe9ffd7a378aafebbf6f4602d564d34be190fc19a193232fd12b1"},
+ {file = "MarkupSafe-2.1.1-cp38-cp38-win_amd64.whl", hash = "sha256:fc7b548b17d238737688817ab67deebb30e8073c95749d55538ed473130ec0c7"},
+ {file = "MarkupSafe-2.1.1-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:e04e26803c9c3851c931eac40c695602c6295b8d432cbe78609649ad9bd2da8a"},
+ {file = "MarkupSafe-2.1.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:b87db4360013327109564f0e591bd2a3b318547bcef31b468a92ee504d07ae4f"},
+ {file = "MarkupSafe-2.1.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:99a2a507ed3ac881b975a2976d59f38c19386d128e7a9a18b7df6fff1fd4c1d6"},
+ {file = "MarkupSafe-2.1.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:56442863ed2b06d19c37f94d999035e15ee982988920e12a5b4ba29b62ad1f77"},
+ {file = "MarkupSafe-2.1.1-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:3ce11ee3f23f79dbd06fb3d63e2f6af7b12db1d46932fe7bd8afa259a5996603"},
+ {file = "MarkupSafe-2.1.1-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:33b74d289bd2f5e527beadcaa3f401e0df0a89927c1559c8566c066fa4248ab7"},
+ {file = "MarkupSafe-2.1.1-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:43093fb83d8343aac0b1baa75516da6092f58f41200907ef92448ecab8825135"},
+ {file = "MarkupSafe-2.1.1-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:8e3dcf21f367459434c18e71b2a9532d96547aef8a871872a5bd69a715c15f96"},
+ {file = "MarkupSafe-2.1.1-cp39-cp39-win32.whl", hash = "sha256:d4306c36ca495956b6d568d276ac11fdd9c30a36f1b6eb928070dc5360b22e1c"},
+ {file = "MarkupSafe-2.1.1-cp39-cp39-win_amd64.whl", hash = "sha256:46d00d6cfecdde84d40e572d63735ef81423ad31184100411e6e3388d405e247"},
+ {file = "MarkupSafe-2.1.1.tar.gz", hash = "sha256:7f91197cc9e48f989d12e4e6fbc46495c446636dfc81b9ccf50bb0ec74b91d4b"},
+]
+mdit-py-plugins = [
+ {file = "mdit-py-plugins-0.3.0.tar.gz", hash = "sha256:ecc24f51eeec6ab7eecc2f9724e8272c2fb191c2e93cf98109120c2cace69750"},
+ {file = "mdit_py_plugins-0.3.0-py3-none-any.whl", hash = "sha256:b1279701cee2dbf50e188d3da5f51fee8d78d038cdf99be57c6b9d1aa93b4073"},
+]
+mdurl = [
+ {file = "mdurl-0.1.2-py3-none-any.whl", hash = "sha256:84008a41e51615a49fc9966191ff91509e3c40b939176e643fd50a5c2196b8f8"},
+ {file = "mdurl-0.1.2.tar.gz", hash = "sha256:bb413d29f5eea38f31dd4754dd7377d4465116fb207585f97bf925588687c1ba"},
+]
+myst-parser = [
+ {file = "myst-parser-0.18.0.tar.gz", hash = "sha256:739a4d96773a8e55a2cacd3941ce46a446ee23dcd6b37e06f73f551ad7821d86"},
+ {file = "myst_parser-0.18.0-py3-none-any.whl", hash = "sha256:4965e51918837c13bf1c6f6fe2c6bddddf193148360fbdaefe743a4981358f6a"},
+]
+packaging = [
+ {file = "packaging-21.3-py3-none-any.whl", hash = "sha256:ef103e05f519cdc783ae24ea4e2e0f508a9c99b2d4969652eed6a2e1ea5bd522"},
+ {file = "packaging-21.3.tar.gz", hash = "sha256:dd47c42927d89ab911e606518907cc2d3a1f38bbd026385970643f9c5b8ecfeb"},
+]
+pydata-sphinx-theme = [
+ {file = "pydata_sphinx_theme-0.8.1-py3-none-any.whl", hash = "sha256:af2c99cb0b43d95247b1563860942ba75d7f1596360594fce510caaf8c4fcc16"},
+ {file = "pydata_sphinx_theme-0.8.1.tar.gz", hash = "sha256:96165702253917ece13dd895e23b96ee6dce422dcc144d560806067852fe1fed"},
+]
+pygments = [
+ {file = "Pygments-2.13.0-py3-none-any.whl", hash = "sha256:f643f331ab57ba3c9d89212ee4a2dabc6e94f117cf4eefde99a0574720d14c42"},
+ {file = "Pygments-2.13.0.tar.gz", hash = "sha256:56a8508ae95f98e2b9bdf93a6be5ae3f7d8af858b43e02c5a2ff083726be40c1"},
+]
+pyparsing = [
+ {file = "pyparsing-3.0.9-py3-none-any.whl", hash = "sha256:5026bae9a10eeaefb61dab2f09052b9f4307d44aee4eda64b309723d8d206bbc"},
+ {file = "pyparsing-3.0.9.tar.gz", hash = "sha256:2b020ecf7d21b687f219b71ecad3631f644a47f01403fa1d1036b0c6416d70fb"},
+]
+pytz = [
+ {file = "pytz-2022.2.1-py2.py3-none-any.whl", hash = "sha256:220f481bdafa09c3955dfbdddb7b57780e9a94f5127e35456a48589b9e0c0197"},
+ {file = "pytz-2022.2.1.tar.gz", hash = "sha256:cea221417204f2d1a2aa03ddae3e867921971d0d76f14d87abb4414415bbdcf5"},
+]
+pyyaml = [
+ {file = "PyYAML-6.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:d4db7c7aef085872ef65a8fd7d6d09a14ae91f691dec3e87ee5ee0539d516f53"},
+ {file = "PyYAML-6.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:9df7ed3b3d2e0ecfe09e14741b857df43adb5a3ddadc919a2d94fbdf78fea53c"},
+ {file = "PyYAML-6.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:77f396e6ef4c73fdc33a9157446466f1cff553d979bd00ecb64385760c6babdc"},
+ {file = "PyYAML-6.0-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:a80a78046a72361de73f8f395f1f1e49f956c6be882eed58505a15f3e430962b"},
+ {file = "PyYAML-6.0-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:f84fbc98b019fef2ee9a1cb3ce93e3187a6df0b2538a651bfb890254ba9f90b5"},
+ {file = "PyYAML-6.0-cp310-cp310-win32.whl", hash = "sha256:2cd5df3de48857ed0544b34e2d40e9fac445930039f3cfe4bcc592a1f836d513"},
+ {file = "PyYAML-6.0-cp310-cp310-win_amd64.whl", hash = "sha256:daf496c58a8c52083df09b80c860005194014c3698698d1a57cbcfa182142a3a"},
+ {file = "PyYAML-6.0-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:897b80890765f037df3403d22bab41627ca8811ae55e9a722fd0392850ec4d86"},
+ {file = "PyYAML-6.0-cp36-cp36m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:50602afada6d6cbfad699b0c7bb50d5ccffa7e46a3d738092afddc1f9758427f"},
+ {file = "PyYAML-6.0-cp36-cp36m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:48c346915c114f5fdb3ead70312bd042a953a8ce5c7106d5bfb1a5254e47da92"},
+ {file = "PyYAML-6.0-cp36-cp36m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:98c4d36e99714e55cfbaaee6dd5badbc9a1ec339ebfc3b1f52e293aee6bb71a4"},
+ {file = "PyYAML-6.0-cp36-cp36m-win32.whl", hash = "sha256:0283c35a6a9fbf047493e3a0ce8d79ef5030852c51e9d911a27badfde0605293"},
+ {file = "PyYAML-6.0-cp36-cp36m-win_amd64.whl", hash = "sha256:07751360502caac1c067a8132d150cf3d61339af5691fe9e87803040dbc5db57"},
+ {file = "PyYAML-6.0-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:819b3830a1543db06c4d4b865e70ded25be52a2e0631ccd2f6a47a2822f2fd7c"},
+ {file = "PyYAML-6.0-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:473f9edb243cb1935ab5a084eb238d842fb8f404ed2193a915d1784b5a6b5fc0"},
+ {file = "PyYAML-6.0-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:0ce82d761c532fe4ec3f87fc45688bdd3a4c1dc5e0b4a19814b9009a29baefd4"},
+ {file = "PyYAML-6.0-cp37-cp37m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:231710d57adfd809ef5d34183b8ed1eeae3f76459c18fb4a0b373ad56bedcdd9"},
+ {file = "PyYAML-6.0-cp37-cp37m-win32.whl", hash = "sha256:c5687b8d43cf58545ade1fe3e055f70eac7a5a1a0bf42824308d868289a95737"},
+ {file = "PyYAML-6.0-cp37-cp37m-win_amd64.whl", hash = "sha256:d15a181d1ecd0d4270dc32edb46f7cb7733c7c508857278d3d378d14d606db2d"},
+ {file = "PyYAML-6.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:0b4624f379dab24d3725ffde76559cff63d9ec94e1736b556dacdfebe5ab6d4b"},
+ {file = "PyYAML-6.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:213c60cd50106436cc818accf5baa1aba61c0189ff610f64f4a3e8c6726218ba"},
+ {file = "PyYAML-6.0-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:9fa600030013c4de8165339db93d182b9431076eb98eb40ee068700c9c813e34"},
+ {file = "PyYAML-6.0-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:277a0ef2981ca40581a47093e9e2d13b3f1fbbeffae064c1d21bfceba2030287"},
+ {file = "PyYAML-6.0-cp38-cp38-win32.whl", hash = "sha256:d4eccecf9adf6fbcc6861a38015c2a64f38b9d94838ac1810a9023a0609e1b78"},
+ {file = "PyYAML-6.0-cp38-cp38-win_amd64.whl", hash = "sha256:1e4747bc279b4f613a09eb64bba2ba602d8a6664c6ce6396a4d0cd413a50ce07"},
+ {file = "PyYAML-6.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:055d937d65826939cb044fc8c9b08889e8c743fdc6a32b33e2390f66013e449b"},
+ {file = "PyYAML-6.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:e61ceaab6f49fb8bdfaa0f92c4b57bcfbea54c09277b1b4f7ac376bfb7a7c174"},
+ {file = "PyYAML-6.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d67d839ede4ed1b28a4e8909735fc992a923cdb84e618544973d7dfc71540803"},
+ {file = "PyYAML-6.0-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:cba8c411ef271aa037d7357a2bc8f9ee8b58b9965831d9e51baf703280dc73d3"},
+ {file = "PyYAML-6.0-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:40527857252b61eacd1d9af500c3337ba8deb8fc298940291486c465c8b46ec0"},
+ {file = "PyYAML-6.0-cp39-cp39-win32.whl", hash = "sha256:b5b9eccad747aabaaffbc6064800670f0c297e52c12754eb1d976c57e4f74dcb"},
+ {file = "PyYAML-6.0-cp39-cp39-win_amd64.whl", hash = "sha256:b3d267842bf12586ba6c734f89d1f5b871df0273157918b0ccefa29deb05c21c"},
+ {file = "PyYAML-6.0.tar.gz", hash = "sha256:68fb519c14306fec9720a2a5b45bc9f0c8d1b9c72adf45c37baedfcd949c35a2"},
+]
+requests = [
+ {file = "requests-2.28.1-py3-none-any.whl", hash = "sha256:8fefa2a1a1365bf5520aac41836fbee479da67864514bdb821f31ce07ce65349"},
+ {file = "requests-2.28.1.tar.gz", hash = "sha256:7c5599b102feddaa661c826c56ab4fee28bfd17f5abca1ebbe3e7f19d7c97983"},
+]
+snowballstemmer = [
+ {file = "snowballstemmer-2.2.0-py2.py3-none-any.whl", hash = "sha256:c8e1716e83cc398ae16824e5572ae04e0d9fc2c6b985fb0f900f5f0c96ecba1a"},
+ {file = "snowballstemmer-2.2.0.tar.gz", hash = "sha256:09b16deb8547d3412ad7b590689584cd0fe25ec8db3be37788be3810cbf19cb1"},
+]
+soupsieve = [
+ {file = "soupsieve-2.3.2.post1-py3-none-any.whl", hash = "sha256:3b2503d3c7084a42b1ebd08116e5f81aadfaea95863628c80a3b774a11b7c759"},
+ {file = "soupsieve-2.3.2.post1.tar.gz", hash = "sha256:fc53893b3da2c33de295667a0e19f078c14bf86544af307354de5fcf12a3f30d"},
+]
+sphinx = [
+ {file = "Sphinx-4.5.0-py3-none-any.whl", hash = "sha256:ebf612653238bcc8f4359627a9b7ce44ede6fdd75d9d30f68255c7383d3a6226"},
+ {file = "Sphinx-4.5.0.tar.gz", hash = "sha256:7bf8ca9637a4ee15af412d1a1d9689fec70523a68ca9bb9127c2f3eeb344e2e6"},
+]
+sphinx-book-theme = [
+ {file = "sphinx_book_theme-0.3.3-py3-none-any.whl", hash = "sha256:9685959dbbb492af005165ef1b9229fdd5d5431580ac181578beae3b4d012d91"},
+ {file = "sphinx_book_theme-0.3.3.tar.gz", hash = "sha256:0ec36208ff14c6d6bf8aee1f1f8268e0c6e2bfa3cef6e41143312b25275a6217"},
+]
+sphinx-copybutton = [
+ {file = "sphinx-copybutton-0.5.0.tar.gz", hash = "sha256:a0c059daadd03c27ba750da534a92a63e7a36a7736dcf684f26ee346199787f6"},
+ {file = "sphinx_copybutton-0.5.0-py3-none-any.whl", hash = "sha256:9684dec7434bd73f0eea58dda93f9bb879d24bff2d8b187b1f2ec08dfe7b5f48"},
+]
+sphinx-multiversion = [
+ {file = "sphinx-multiversion-0.2.4.tar.gz", hash = "sha256:5cd1ca9ecb5eed63cb8d6ce5e9c438ca13af4fa98e7eb6f376be541dd4990bcb"},
+ {file = "sphinx_multiversion-0.2.4-py3-none-any.whl", hash = "sha256:dec29f2a5890ad68157a790112edc0eb63140e70f9df0a363743c6258fbeb478"},
+]
+sphinxcontrib-applehelp = [
+ {file = "sphinxcontrib-applehelp-1.0.2.tar.gz", hash = "sha256:a072735ec80e7675e3f432fcae8610ecf509c5f1869d17e2eecff44389cdbc58"},
+ {file = "sphinxcontrib_applehelp-1.0.2-py2.py3-none-any.whl", hash = "sha256:806111e5e962be97c29ec4c1e7fe277bfd19e9652fb1a4392105b43e01af885a"},
+]
+sphinxcontrib-devhelp = [
+ {file = "sphinxcontrib-devhelp-1.0.2.tar.gz", hash = "sha256:ff7f1afa7b9642e7060379360a67e9c41e8f3121f2ce9164266f61b9f4b338e4"},
+ {file = "sphinxcontrib_devhelp-1.0.2-py2.py3-none-any.whl", hash = "sha256:8165223f9a335cc1af7ffe1ed31d2871f325254c0423bc0c4c7cd1c1e4734a2e"},
+]
+sphinxcontrib-htmlhelp = [
+ {file = "sphinxcontrib-htmlhelp-2.0.0.tar.gz", hash = "sha256:f5f8bb2d0d629f398bf47d0d69c07bc13b65f75a81ad9e2f71a63d4b7a2f6db2"},
+ {file = "sphinxcontrib_htmlhelp-2.0.0-py2.py3-none-any.whl", hash = "sha256:d412243dfb797ae3ec2b59eca0e52dac12e75a241bf0e4eb861e450d06c6ed07"},
+]
+sphinxcontrib-jsmath = [
+ {file = "sphinxcontrib-jsmath-1.0.1.tar.gz", hash = "sha256:a9925e4a4587247ed2191a22df5f6970656cb8ca2bd6284309578f2153e0c4b8"},
+ {file = "sphinxcontrib_jsmath-1.0.1-py2.py3-none-any.whl", hash = "sha256:2ec2eaebfb78f3f2078e73666b1415417a116cc848b72e5172e596c871103178"},
+]
+sphinxcontrib-qthelp = [
+ {file = "sphinxcontrib-qthelp-1.0.3.tar.gz", hash = "sha256:4c33767ee058b70dba89a6fc5c1892c0d57a54be67ddd3e7875a18d14cba5a72"},
+ {file = "sphinxcontrib_qthelp-1.0.3-py2.py3-none-any.whl", hash = "sha256:bd9fc24bcb748a8d51fd4ecaade681350aa63009a347a8c14e637895444dfab6"},
+]
+sphinxcontrib-serializinghtml = [
+ {file = "sphinxcontrib-serializinghtml-1.1.5.tar.gz", hash = "sha256:aa5f6de5dfdf809ef505c4895e51ef5c9eac17d0f287933eb49ec495280b6952"},
+ {file = "sphinxcontrib_serializinghtml-1.1.5-py2.py3-none-any.whl", hash = "sha256:352a9a00ae864471d3a7ead8d7d79f5fc0b57e8b3f95e9867eb9eb28999b92fd"},
+]
+typing-extensions = [
+ {file = "typing_extensions-4.3.0-py3-none-any.whl", hash = "sha256:25642c956049920a5aa49edcdd6ab1e06d7e5d467fc00e0506c44ac86fbfca02"},
+ {file = "typing_extensions-4.3.0.tar.gz", hash = "sha256:e6d2677a32f47fc7eb2795db1dd15c1f34eff616bcaf2cfb5e997f854fa1c4a6"},
+]
+uc-micro-py = [
+ {file = "uc-micro-py-1.0.1.tar.gz", hash = "sha256:b7cdf4ea79433043ddfe2c82210208f26f7962c0cfbe3bacb05ee879a7fdb596"},
+ {file = "uc_micro_py-1.0.1-py3-none-any.whl", hash = "sha256:316cfb8b6862a0f1d03540f0ae6e7b033ff1fa0ddbe60c12cbe0d4cec846a69f"},
+]
+urllib3 = [
+ {file = "urllib3-1.26.12-py2.py3-none-any.whl", hash = "sha256:b930dd878d5a8afb066a637fbb35144fe7901e3b209d1cd4f524bd0e9deee997"},
+ {file = "urllib3-1.26.12.tar.gz", hash = "sha256:3fa96cf423e6987997fc326ae8df396db2a8b7c667747d47ddd8ecba91f4a74e"},
+]
+zipp = [
+ {file = "zipp-3.8.1-py3-none-any.whl", hash = "sha256:47c40d7fe183a6f21403a199b3e4192cca5774656965b0a4988ad2f8feb5f009"},
+ {file = "zipp-3.8.1.tar.gz", hash = "sha256:05b45f1ee8f807d0cc928485ca40a07cb491cf092ff587c0df9cb1fd154848d2"},
+]
diff --git a/docs/pyproject.toml b/docs/pyproject.toml
new file mode 100644
index 00000000000..465608a801b
--- /dev/null
+++ b/docs/pyproject.toml
@@ -0,0 +1,20 @@
+[tool.poetry]
+name = "openmldb-docs"
+version = "0.6.0"
+description = "OpenMLDB Documents"
+authors = ["4Paradigm Authors "]
+license = "Apache-2.0"
+
+[tool.poetry.dependencies]
+python = "^3.8"
+
+[tool.poetry.dev-dependencies]
+Sphinx = "4.5.0"
+sphinx-multiversion = "^0.2.4"
+sphinx-book-theme = "^0.3.3"
+myst-parser = {extras = ["linkify"], version = "^0.18.0"}
+sphinx-copybutton = "^0.5.0"
+
+[build-system]
+requires = ["poetry-core>=1.0.0"]
+build-backend = "poetry.core.masonry.api"
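+
+# 参考用法(仅为示意):在 docs/ 目录下执行 `poetry install` 即可安装上述文档构建依赖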
diff --git a/docs/zh/README.md b/docs/zh/README.md
index be5b1030059..c4f2dbd2988 100644
--- a/docs/zh/README.md
+++ b/docs/zh/README.md
@@ -86,7 +86,7 @@
- [删除DEPLOYMENT(DROP DEPLOYMENT)](reference/sql/deployment_manage/DROP_DEPLOYMENT_STATEMENT.md)
- [查看DEPLOYMENTS列表(SHOW DEPLOYMENTS)](reference/sql/deployment_manage/SHOW_DEPLOYMENTS.md)
- [查看DEPLOYMENT详情(SHOW DEPLOYMENT)](reference/sql/deployment_manage/SHOW_DEPLOYMENT.md)
- - [OpenMLDB SQL上线规范和要求](reference/sql/deployment_manage/ONLINE_SERVING_REQUIREMENTS.md)
+ - [OpenMLDB SQL上线规范和要求](reference/sql/deployment_manage/ONLINE_REQUEST_REQUIREMENTS.md)
- [任务管理](reference/sql/task_manage/reference.md)
- [查看Job列表(SHOW JOBS)](reference/sql/task_manage/SHOW_JOBS.md)
- [查看Job详情(SHOW JOB)](reference/sql/task_manage/SHOW_JOB.md)
diff --git a/docs/zh/SUMMARY.md b/docs/zh/SUMMARY.md
index f0f5261e40e..5c47b946dbb 100644
--- a/docs/zh/SUMMARY.md
+++ b/docs/zh/SUMMARY.md
@@ -76,7 +76,7 @@
- [删除DEPLOYMENT(DROP DEPLOYMENT)](reference/sql/deployment_manage/DROP_DEPLOYMENT_STATEMENT.md)
- [查看DEPLOYMENTS列表(SHOW DEPLOYMENTS)](reference/sql/deployment_manage/SHOW_DEPLOYMENTS.md)
- [查看DEPLOYMENT详情(SHOW DEPLOYMENT)](reference/sql/deployment_manage/SHOW_DEPLOYMENT.md)
- - [OpenMLDB SQL上线规范和要求](reference/sql/deployment_manage/ONLINE_SERVING_REQUIREMENTS.md)
+ - [OpenMLDB SQL上线规范和要求](reference/sql/deployment_manage/ONLINE_REQUEST_REQUIREMENTS.md)
- [任务管理](reference/sql/task_manage/reference.md)
- [查看Job列表(SHOW JOBS)](reference/sql/task_manage/SHOW_JOBS.md)
- [查看Job详情(SHOW JOB)](reference/sql/task_manage/SHOW_JOB.md)
diff --git a/docs/zh/about/release_notes.md b/docs/zh/about/release_notes.md
index be0cef823e1..8e333dacd62 100644
--- a/docs/zh/about/release_notes.md
+++ b/docs/zh/about/release_notes.md
@@ -1,5 +1,114 @@
# Release Notes
+## v0.6.3 Release Notes
+
+### Features
+- Support setting the configuration of `glog` for clients (#2482 @vagetablechicken)
+- Add the checksum of SHA256 for release packages (#2560 @team-317)
+- Support the new built-in function `unhex` (#2431 @aucker)
+- Support the readable date and time format in CLI (#2568 @dl239)
+- Support the `LAST JOIN` with a subquery as a producer of window node in the request mode (#2569 @aceforeverd)
+- Upgrade the Spark version to 3.2.1 (#2566 @tobegit3hub, #2635 @dl239)
+- Support setting the SQL cache size in SDKs (#2605 @vagetablechicken)
+- Add a new interface of `ValidateSQL` to validate the syntax of SQL (#2626 @vagetablechicken)
+- Improve the documents (#2405 #2492 #2562 #2496 #2495 #2436 #2487 #2623 @michelle-qinqin, #2543 @linjing-lab, #2584 @JourneyGo, #2567 #2583 @vagetablechicken, #2643 @dl239)
+- Other minor features (#2504 #2572 #2498 #2598 @aceforeverd, #2555 #2641 @tobegit3hub, #2550 @zhanghaohit, #2595 @Elliezza, #2592 @vagetablechicken)
+
+### Bug Fixes
+- After a nameserver restarting, deployments may not recover. (#2533 @dl239)
+- If the type of first column is `bool`, it fails to resolve the function `count_where`. (#2570 @aceforeverd)
+- Other minor bug fixes (#2540 #2577 #2625 #2655 @dl239, #2585 @snehalsenapati23, #2539 @vagetablechicken)
+
+### Code Refactoring
+#2516 #2520 #2522 #2521 #2542 #2531 #2581 @haseeb-xd, #2525 #2526 #2527 #2528 @kstrifonoff, #2523 @ighmaZ, #2546 #2549 @NevilleMthw, #2559 @marandabui, #2554 @gokullan, #2580 @team-317, #2599 @lbartyczak, #2594 @shivamgupta-sg, #2571 @Jake-00
+
+## v0.6.2 Release Notes
+
+### Features
+- Support independently executing the OpenMLDB offline engine without the OpenMLDB deployment (#2423 @tobegit3hub)
+- Support the log setting of ZooKeeper and disable ZooKeeper logs in the diagnostic tool (#2451 @vagetablechicken)
+- Support query parameters of the SQL query APIs (#2277 @qsliu2017)
+- Improve the documents (#2406 @aceforeverd, #2408 #2414 @vagetablechicken, #2410 #2402 #2356 #2374 #2396 #2376 #2419 @michelle-qinqin, #2424 #2418 @dl239, #2455 @lumianph, #2458 @tobegit3hub)
+- Other minor features (#2420 @aceforeverd, #2411 @wuyou10206, #2446 #2452 @vagetablechicken, #2475 @tobegit3hub)
+
+### Bug Fixes
+- Table creation succeeds even if `partitionnum` is set to 0, which should report an error. (#2220 @dl239)
+- There are thread races in aggregators if there are concurrent `puts`. (#2472 @zhanghaohit)
+- The `limit` clause does not work if it is used with the `where` and `group by` clauses. (#2447 @aceforeverd)
+- The `TaskManager` process will terminate if ZooKeeper disconnects. (#2494 @tobegit3hub)
+- The replica cluster does not create the database if a database is created in the leader cluster. (#2488 @dl239)
+- When there is data in base tables, deployment with long windows can still be executed (which should report an error). (#2501 @zhanghaohit)
+- Other minor bug fixes (#2415 @aceforeverd, #2417 #2434 #2435 #2473 @dl239, #2466 @vagetablechicken)
+
+### Code Refactoring
+#2413 @dl239, #2470 #2467 #2468 @vagetablechicken
+
+## v0.6.1 Release Notes
+
+### Features
+- Support new built-in functions `last_day` and `regexp_like` (#2262 @HeZean, #2187 @jiang1997)
+- Support Jupyter Notebook for the TalkingData use case (#2354 @vagetablechicken)
+- Add a new API to disable Spark logs of the batch engine (#2359 @tobegit3hub)
+- Add the use case of precision marketing based on OneFlow (#2267 @Elliezza @vagetablechicken @siqi)
+- Support the RPC request timeout in CLI and Python SDK (#2371 @vagetablechicken)
+- Improve the documents (#2021 @liuceyim, #2348 #2316 #2324 #2361 #2315 #2323 #2355 #2328 #2360 #2378 #2319 #2350 #2395 #2398 @michelle-qinqin, #2373 @njzyfr, #2370 @tobegit3hub, #2367 #2382 #2375 #2401 @vagetablechicken, #2387 #2394 @dl239, #2379 @aceforeverd, #2403 @lumianph, #2400 gitpod-for-oss @aceforeverd)
+- Other minor features (#2363 @aceforeverd, #2185 @qsliu2017)
+
+### Bug Fixes
+- `APIServer` will core dump if there is no `rs` in `QueryResp`. (#2346 @vagetablechicken)
+- Data is not deleted from `pre-aggr` tables if there are delete operations in a main table. (#2300 @zhanghaohit)
+- Task jobs will core dump when enabling `UnsafeRowOpt` with multiple threads in the Yarn cluster. (#2352 #2364 @tobegit3hub)
+- Other minor bug fixes (#2336 @dl239, #2337 @dl239, #2385 #2372 @aceforeverd, #2383 #2384 @vagetablechicken)
+
+### Code Refactoring
+#2310 @hv789, #2306 #2305 @yeya24, #2311 @Mattt47, #2368 @TBCCC, #2391 @PrajwalBorkar, #2392 @zahyaah, #2405 @wang-jiahua
+
+## v0.6.0 Release Notes
+
+### Highlights
+
+- Add a new toolkit for managing OpenMLDB, currently including a diagnostic tool and a log collector (#2299 #2326 @dl239 @vagetablechicken)
+- Support aggregate functions with suffix `_where` using pre-aggregation (#1821 #1841 #2321 #2255 #2321 @aceforeverd @nautaa @zhanghaohit)
+- Support a new SQL syntax of `EXCLUDE CURRENT_ROW` (#2053 #2165 #2278 @aceforeverd)
+- Add new OpenMLDB ecosystem plugins for DolphinScheduler (#1921 #1955 @vagetablechicken) and Airflow (#2215 @vagetablechicken)
+
+### Other Features
+
+- Support SQL syntax of `DELETE` in SQL and Kafka Connector (#2183 #2257 @dl239)
+- Support customized order in the `insert` statement (#2075 @vagetablechicken)
+- Add a new use case of TalkingData AdTracking Fraud Detection (#2008 @vagetablechicken)
+- Improve the startup script to remove `mon` (#2050 @dl239)
+- Improve the performance of offline batch SQL engine (#1882 #1943 #1973 #2142 #2273 #1773 @tobegit3hub)
+- Support returning version numbers from TaskManager (#2102 @tobegit3hub)
+- Improve the CICD workflow and release procedure (#1873 #2025 #2028 @mangoGoForward)
+- Support GitHub Codespaces (#1922 @nautaa)
+- Support new built-in functions `char(int)`, `char_length`, `character_length`, `radians`, `hex`, `median` (#1896 #1895 #1897 #2159 #2030 @wuxiaobai24 @HGZ-20 @Ivyee17)
+- Support returning result set for a new query API (#2189 @qsliu2017)
+- Improve the documents (#1796 #1817 #1818 #2254 #1948 #2227 #2254 #1824 #1829 #1832 #1840 #1842 #1844 #1845 #1848 #1849 #1851 #1858 #1875 #1923 #1925 #1939 #1942 #1945 #1957 #2031 #2054 #2140 #2195 #2304 #2264 #2260 #2257 #2254 #2247 #2240 #2227 #2115 #2126 #2116 #2154 #2152 #2178 #2147 #2146 #2184 #2138 #2145 #2160 #2197 #2198 #2133 #2224 #2223 #2222 #2209 #2248 #2244 #2242 #2241 #2226 #2225 #2221 #2219 #2201 #2291 #2231 #2196 #2297 #2206 #2238 #2270 #2296 #2317 #2065 #2048 #2088 #2331 #1831 #1945 #2118 @ZtXavier @pearfl @PrajwalBorkar @tobegit3hub @ZtXavier @zhouxh19 @dl239 @vagetablechicken @tobegit3hub @aceforeverd @jmoldyvan @lumianph @bxiiiiii @michelle-qinqin @yclchuxue @redundan3y)
+
+### Bug Fixes
+
+- The SQL engine may produce incorrect results under certain circumstances. (#1950 #1997 #2024 @aceforeverd)
+- The `genDDL` function generates incorrect DDL if the SQL is partitioned by multiple columns. (#1956 @dl239)
+- The snapshot recovery may fail for disk tables. (#2174 @zhanghaohit)
+- `enable_trace` does not work for some SQL queries. (#2292 @aceforeverd)
+- Tablets cannot save `ttl` when updating the `ttl` of index. (#1935 @dl239)
+- MakeResultSet uses a wrong schema in projection. (#2049 @dl239)
+- A table does not exist when deploying SQL by the APIServer. (#2205 @vagetablechicken)
+- The cleanup for ZooKeeper does not work properly. (#2191 @mangoGoForward)
+
+Other minor bug fixes (#2052 #1959 #2253 #2273 #2288 #1964 #2175 #1938 #1963 #1956 #2171 #2036 #2170 #2236 #1867 #1869 #1900 #2162 #2161 #2173 #2190 #2084 #2085 #2034 #1972 #1408 #1863 #1862 #1919 #2093 #2167 #2073 #1803 #1998 #2000 #2012 #2055 #2174 #2036 @Xeonacid @CuriousCorrelation @Shigm1026 @jiang1997 @Harshvardhantomar @nautaa @Ivyee17 @frazie @PrajwalBorkar @dl239 @aceforeverd @tobegit3hub @dl239 @vagetablechicken @zhanghaohit @mangoGoForward @SaumyaBhushan @BrokenArrow1404 @harshlancer)
+
+### Code Refactoring
+
+#1884 #1917 #1953 #1965 #2017 #2033 #2044 @mangoGoForward; #2131 #2130 #2112 #2113 #2104 #2107 #2094 #2068 #2071 #2070 #1982 #1878 @PrajwalBorkar; #2158 #2051 #2037 #2015 #1886 #1857 @frazie; #2100 #2096 @KikiDotPy; #2089 @ayushclashroyale; #1994 @fpetrakov; #2079 @kayverly; #2062 @WUBBBB; #1843 @1korenn; #2092 @HeZean; #1984 @0sirusD3m0n; #1976 @Jaguar16; #2086 @marc-marcos; #1999 @Albert-Debbarma;
+
+## v0.5.3 Release Notes
+
+### Bug Fixes
+- The SQL file cannot be successfully loaded in the Yarn-Client mode. (#2151 @tobegit3hub)
+- The SQL file cannot be successfully loaded in the Yarn-Cluster mode. (#1993 @tobegit3hub)
+
## v0.5.2 Release Notes
### Features
diff --git a/docs/zh/conf.py b/docs/zh/conf.py
index cd7a5dd6032..ea10bd8f84e 100644
--- a/docs/zh/conf.py
+++ b/docs/zh/conf.py
@@ -35,8 +35,13 @@
'myst_parser',
'sphinx_multiversion',
'sphinx_copybutton',
+'sphinx.ext.autosectionlabel',
]
+autosectionlabel_prefix_document = True
+
+myst_heading_anchors = 6
+
myst_enable_extensions = [
"amsmath",
"colon_fence",
@@ -53,7 +58,6 @@
"tasklist",
]
-myst_heading_anchors = 3
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
@@ -120,3 +124,19 @@
html_static_path = []
html_logo = "about/images/openmldb_logo.png"
+
+
+# ================================== #
+# sphinx multiversion configuration #
+# ================================== #
+
+# Whitelist pattern for tags (set to None to ignore all tags)
+# no tags included
+smv_tag_whitelist = None
+
+# Whitelist pattern for branches (set to None to ignore all branches)
+# include branch that is main or v{X}.{Y}
+smv_branch_whitelist = r"^(main|v\d+\.\d+)$"
+
+# allow remote origin or upstream
+smv_remote_whitelist = r"^(origin|upstream)$"
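+
+# 本地构建多版本文档的参考命令(仅为示意,需先安装 docs/pyproject.toml 中的依赖):
+#   sphinx-multiversion . _build/html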
diff --git a/docs/zh/deploy/compile.md b/docs/zh/deploy/compile.md
index de0ba78334d..5102e8b05b8 100644
--- a/docs/zh/deploy/compile.md
+++ b/docs/zh/deploy/compile.md
@@ -4,22 +4,22 @@
此节介绍在官方编译镜像 [hybridsql](https://hub.docker.com/r/4pdosc/hybridsql) 中编译 OpenMLDB。镜像内置了编译所需要的工具和依赖,因此不需要额外的步骤单独配置它们。关于基于非 docker 的编译使用方式,请参照下面的 [编译详细说明](#编译详细说明) 章节。
-关于编译镜像版本,需要注意拉取的镜像版本和 [OpenMLDB 发布版本](https://github.com/4paradigm/OpenMLDB/releases)保持一致。以下例子演示了在 `hybridsql:0.5.0` 镜像版本上编译 [OpenMLDB v0.5.0](https://github.com/4paradigm/OpenMLDB/releases/tag/v0.5.0) 的代码,如果要编译最新 `main` 分支的代码,则需要拉取 `hybridsql:latest` 版本镜像。
+关于编译镜像版本,需要注意拉取的镜像版本和 [OpenMLDB 发布版本](https://github.com/4paradigm/OpenMLDB/releases)保持一致。以下例子演示了在 `hybridsql:0.6.3` 镜像版本上编译 [OpenMLDB v0.6.3](https://github.com/4paradigm/OpenMLDB/releases/tag/v0.6.3) 的代码,如果要编译最新 `main` 分支的代码,则需要拉取 `hybridsql:latest` 版本镜像。
1. 下载 docker 镜像
```bash
- docker pull 4pdosc/hybridsql:0.5
+ docker pull 4pdosc/hybridsql:0.6
```
2. 启动 docker 容器
```bash
- docker run -it 4pdosc/hybridsql:0.5 bash
+ docker run -it 4pdosc/hybridsql:0.6 bash
```
-3. 在 docker 容器内, 克隆 OpenMLDB, 并切换分支到 v0.5.0
+3. 在 docker 容器内, 克隆 OpenMLDB, 并切换分支到 v0.6.3
```bash
cd ~
- git clone -b v0.5.0 https://github.com/4paradigm/OpenMLDB.git
+ git clone -b v0.6.3 https://github.com/4paradigm/OpenMLDB.git
```
4. 在 docker 容器内编译 OpenMLDB
@@ -130,7 +130,7 @@ make CMAKE_BUILD_TYPE=Debug
1. 下载预编译的OpenMLDB Spark发行版。
```bash
-wget https://github.com/4paradigm/spark/releases/download/v3.0.0-openmldb0.5.0/spark-3.0.0-bin-openmldbspark.tgz
+wget https://github.com/4paradigm/spark/releases/download/v3.2.1-openmldb0.6.3/spark-3.2.1-bin-openmldbspark.tgz
```
或者下载源代码并从头开始编译。
@@ -144,8 +144,8 @@ cd ./spark/
2. 设置环境变量 `SPARK_HOME` 来使用 OpenMLDB Spark 的发行版本来运行 OpenMLDB 或者其他应用。
```bash
-tar xzvf ./spark-3.0.0-bin-openmldbspark.tgz
-cd spark-3.0.0-bin-openmldbspark/
+tar xzvf ./spark-3.2.1-bin-openmldbspark.tgz
+cd spark-3.2.1-bin-openmldbspark/
export SPARK_HOME=`pwd`
```
diff --git a/docs/zh/deploy/conf.md b/docs/zh/deploy/conf.md
index 28ebbd3f001..24fc7941e16 100644
--- a/docs/zh/deploy/conf.md
+++ b/docs/zh/deploy/conf.md
@@ -20,7 +20,7 @@
#--thread_pool_size=16
# 配置失败重试次数,默认是3
#--request_max_retry=3
-# 配置请求超时时间,默认是12妙
+# 配置请求超时时间,单位是毫秒,默认是12秒
#--request_timeout_ms=12000
# 配置请求不可达时的重试间隔,一般不需要修改
#--request_sleep_time=1000
@@ -28,7 +28,7 @@
--zk_session_timeout=10000
# 配置zookeeper健康检查间隔,单位是毫秒,一般不需要修改
#--zk_keep_alive_check_interval=15000
-# 配置tablet心跳检测超时时间,默认是1分钟。如果tablet超过这个时间还没连接上,nameserver就认为此tablet不可用,会执行下线该节点的操作
+# 配置tablet心跳检测超时时间,单位是毫秒,默认是1分钟。如果tablet超过这个时间还没连接上,nameserver就认为此tablet不可用,会执行下线该节点的操作
--tablet_heartbeat_timeout=60000
# 配置tablet健康检查间隔,单位是毫秒
#--tablet_offline_check_interval=1000
@@ -39,13 +39,13 @@
#--name_server_task_concurrency=2
# 执行高可用任务的最大并发数
#--name_server_task_max_concurrency=8
-# 执行任务时检查任务的等待时间
+# 执行任务时检查任务的等待时间,单位是毫秒
#--name_server_task_wait_time=1000
-# 执行任务的最大时间,如果超过后就会打日志
+# 执行任务的最大时间,如果超过后就会打日志,单位是毫秒
#--name_server_op_execute_timeout=7200000
-# 获取任务的时间间隔
+# 获取任务的时间间隔,单位是毫秒
#--get_task_status_interval=2000
-# 获取表状态的时间间隔
+# 获取表状态的时间间隔,单位是毫秒
#--get_table_status_interval=2000
# 检查binlog同步进度的最小差值,如果主从offset小于这个值任务已同步成功
#--check_binlog_sync_progress_delta=100000
@@ -88,9 +88,9 @@
--openmldb_log_dir=./logs
# binlog conf
-# binlog没有新数据添加时的等待时间
+# binlog没有新数据添加时的等待时间,单位是毫秒
#--binlog_coffee_time=1000
-# 主从匹配offset的等待时间
+# 主从匹配offset的等待时间,单位是毫秒
#--binlog_match_logoffset_interval=1000
# 有数据写入时是否通知立马同步到follower
--binlog_notify_on_put=true
@@ -98,13 +98,13 @@
--binlog_single_file_max_size=2048
# 主从同步的batch大小
#--binlog_sync_batch_size=32
-# binlog sync到磁盘的时间间隔,单位时毫秒
+# binlog sync到磁盘的时间间隔,单位是毫秒
--binlog_sync_to_disk_interval=5000
# 如果没有新数据同步时的wait时间,单位为毫秒
#--binlog_sync_wait_time=100
# binlog文件名长度
#--binlog_name_length=8
-# 删除binlog文件的时间间隔,单位时毫秒
+# 删除binlog文件的时间间隔,单位是毫秒
#--binlog_delete_interval=60000
# binlog是否开启crc校验
#--binlog_enable_crc=false
@@ -139,7 +139,7 @@
# snapshot conf
# 配置做snapshot的时间,配置为一天中的几点。如23就表示每天23点做snapshot
--make_snapshot_time=23
-# 做snapshot的检查时间间隔
+# 做snapshot的检查时间间隔,单位是毫秒
#--make_snapshot_check_interval=600000
# 做snapshot的offset阈值,如果和上次snapshot的offset差值小于这个值就不会生成新的snapshot
#--make_snapshot_threshold_offset=100000
diff --git a/docs/zh/deploy/install_deploy.md b/docs/zh/deploy/install_deploy.md
index 838b40834df..0d4e9b45c39 100644
--- a/docs/zh/deploy/install_deploy.md
+++ b/docs/zh/deploy/install_deploy.md
@@ -10,7 +10,7 @@
## 部署包准备
-本说明文档中默认使用预编译好的 OpenMLDB 部署包([Linux](https://github.com/4paradigm/OpenMLDB/releases/download/v0.5.2/openmldb-0.5.2-linux.tar.gz), [macOS](https://github.com/4paradigm/OpenMLDB/releases/download/v0.5.2/openmldb-0.5.2-darwin.tar.gz)),所支持的操作系统要求为:CentOS 7, Ubuntu 20.04, macOS >= 10.15。如果用户期望自己编译(如做 OpenMLDB 源代码开发,操作系统或者 CPU 架构不在预编译部署包的支持列表内等原因),用户可以选择在 docker 容器内编译使用或者从源码编译,具体请参照我们的[编译文档](compile.md)。
+本说明文档中默认使用预编译好的 OpenMLDB 部署包([Linux](https://github.com/4paradigm/OpenMLDB/releases/download/v0.6.3/openmldb-0.6.3-linux.tar.gz), [macOS](https://github.com/4paradigm/OpenMLDB/releases/download/v0.6.3/openmldb-0.6.3-darwin.tar.gz)),所支持的操作系统要求为:CentOS 7, Ubuntu 20.04, macOS >= 10.15。如果用户期望自己编译(如做 OpenMLDB 源代码开发,操作系统或者 CPU 架构不在预编译部署包的支持列表内等原因),用户可以选择在 docker 容器内编译使用或者从源码编译,具体请参照我们的[编译文档](compile.md)。
## 配置环境(Linux)
@@ -76,10 +76,10 @@ OpenMLDB单机版需要部署一个nameserver和一个tablet. nameserver用于
### 部署tablet
#### 1 下载OpenMLDB部署包
```
-wget https://github.com/4paradigm/OpenMLDB/releases/download/v0.5.2/openmldb-0.5.2-linux.tar.gz
-tar -zxvf openmldb-0.5.2-linux.tar.gz
-mv openmldb-0.5.2-linux openmldb-tablet-0.5.2
-cd openmldb-tablet-0.5.2
+wget https://github.com/4paradigm/OpenMLDB/releases/download/v0.6.3/openmldb-0.6.3-linux.tar.gz
+tar -zxvf openmldb-0.6.3-linux.tar.gz
+mv openmldb-0.6.3-linux openmldb-tablet-0.6.3
+cd openmldb-tablet-0.6.3
```
#### 2 修改配置文件conf/standalone_tablet.flags
* 修改endpoint。endpoint是用冒号分隔的部署机器ip/域名和端口号
@@ -91,17 +91,17 @@ cd openmldb-tablet-0.5.2
* 如果此处使用的域名, 所有使用openmldb的client所在的机器都得配上对应的host. 不然会访问不到
#### 3 启动服务
```
-sh bin/start.sh start standalone_tablet
+bash bin/start.sh start standalone_tablet
```
**注: 服务启动后会在bin目录下产生standalone_tablet.pid文件, 里边保存启动时的进程号。如果该文件内的pid正在运行则会启动失败**
### 部署nameserver
#### 1 下载OpenMLDB部署包
````
-wget https://github.com/4paradigm/OpenMLDB/releases/download/v0.5.2/openmldb-0.5.2-linux.tar.gz
-tar -zxvf openmldb-0.5.2-linux.tar.gz
-mv openmldb-0.5.2-linux openmldb-ns-0.5.2
-cd openmldb-ns-0.5.2
+wget https://github.com/4paradigm/OpenMLDB/releases/download/v0.6.3/openmldb-0.6.3-linux.tar.gz
+tar -zxvf openmldb-0.6.3-linux.tar.gz
+mv openmldb-0.6.3-linux openmldb-ns-0.6.3
+cd openmldb-ns-0.6.3
````
#### 2 修改配置文件conf/standalone_nameserver.flags
* 修改endpoint。endpoint是用冒号分隔的部署机器ip/域名和端口号
@@ -113,7 +113,7 @@ cd openmldb-ns-0.5.2
**注: endpoint不能用0.0.0.0和127.0.0.1**
#### 3 启动服务
```
-sh bin/start.sh start standalone_nameserver
+bash bin/start.sh start standalone_nameserver
```
#### 4 检查服务是否启动
```bash
@@ -133,10 +133,10 @@ APIServer负责接收http请求,转发给OpenMLDB并返回结果。它是无
#### 1 下载OpenMLDB部署包
```
-wget https://github.com/4paradigm/OpenMLDB/releases/download/v0.5.2/openmldb-0.5.2-linux.tar.gz
-tar -zxvf openmldb-0.5.2-linux.tar.gz
-mv openmldb-0.5.2-linux openmldb-apiserver-0.5.2
-cd openmldb-apiserver-0.5.2
+wget https://github.com/4paradigm/OpenMLDB/releases/download/v0.6.3/openmldb-0.6.3-linux.tar.gz
+tar -zxvf openmldb-0.6.3-linux.tar.gz
+mv openmldb-0.6.3-linux openmldb-apiserver-0.6.3
+cd openmldb-apiserver-0.6.3
```
#### 2 修改配置文件conf/standalone_apiserver.flags
@@ -156,7 +156,7 @@ cd openmldb-apiserver-0.5.2
#### 3 启动服务
```
-sh bin/start.sh start standalone_apiserver
+bash bin/start.sh start standalone_apiserver
```
## 部署集群版
@@ -170,6 +170,7 @@ OpenMLDB集群版需要部署zookeeper、nameserver、tablet等模块。其中zo
#### 1. 下载zookeeper安装包
```
wget https://archive.apache.org/dist/zookeeper/zookeeper-3.4.14/zookeeper-3.4.14.tar.gz
+tar -zxvf zookeeper-3.4.14.tar.gz
cd zookeeper-3.4.14
cp conf/zoo_sample.cfg conf/zoo.cfg
```
@@ -183,7 +184,7 @@ clientPort=7181
#### 3. 启动Zookeeper
```
-sh bin/zkServer.sh start
+bash bin/zkServer.sh start
```
部署zookeeper集群[参考这里](https://zookeeper.apache.org/doc/r3.4.14/zookeeperStarted.html#sc_RunningReplicatedZooKeeper)
@@ -191,10 +192,10 @@ sh bin/zkServer.sh start
### 部署tablet
#### 1 下载OpenMLDB部署包
```
-wget https://github.com/4paradigm/OpenMLDB/releases/download/v0.5.2/openmldb-0.5.2-linux.tar.gz
-tar -zxvf openmldb-0.5.2-linux.tar.gz
-mv openmldb-0.5.2-linux openmldb-tablet-0.5.2
-cd openmldb-tablet-0.5.2
+wget https://github.com/4paradigm/OpenMLDB/releases/download/v0.6.3/openmldb-0.6.3-linux.tar.gz
+tar -zxvf openmldb-0.6.3-linux.tar.gz
+mv openmldb-0.6.3-linux openmldb-tablet-0.6.3
+cd openmldb-tablet-0.6.3
```
#### 2 修改配置文件conf/tablet.flags
* 修改endpoint。endpoint是用冒号分隔的部署机器ip/域名和端口号
@@ -214,7 +215,7 @@ cd openmldb-tablet-0.5.2
* zk_cluster和zk_root_path配置和nameserver的保持一致
#### 3 启动服务
```
-sh bin/start.sh start tablet
+bash bin/start.sh start tablet
```
重复以上步骤部署多个tablet
@@ -226,10 +227,10 @@ sh bin/start.sh start tablet
### 部署nameserver
#### 1 下载OpenMLDB部署包
````
-wget https://github.com/4paradigm/OpenMLDB/releases/download/v0.5.2/openmldb-0.5.2-linux.tar.gz
-tar -zxvf openmldb-0.5.2-linux.tar.gz
-mv openmldb-0.5.2-linux openmldb-ns-0.5.2
-cd openmldb-ns-0.5.2
+wget https://github.com/4paradigm/OpenMLDB/releases/download/v0.6.3/openmldb-0.6.3-linux.tar.gz
+tar -zxvf openmldb-0.6.3-linux.tar.gz
+mv openmldb-0.6.3-linux openmldb-ns-0.6.3
+cd openmldb-ns-0.6.3
````
#### 2 修改配置文件conf/nameserver.flags
* 修改endpoint。endpoint是用冒号分隔的部署机器ip/域名和端口号
@@ -239,12 +240,11 @@ cd openmldb-ns-0.5.2
--endpoint=172.27.128.31:6527
--zk_cluster=172.27.128.33:7181,172.27.128.32:7181,172.27.128.31:7181
--zk_root_path=/openmldb_cluster
---enable_distsql=true
```
**注: endpoint不能用0.0.0.0和127.0.0.1**
#### 3 启动服务
```
-sh bin/start.sh start nameserver
+bash bin/start.sh start nameserver
```
重复上述步骤部署多个nameserver
@@ -258,7 +258,7 @@ $ ./bin/openmldb --zk_cluster=172.27.128.31:7181,172.27.128.32:7181,172.27.128.3
```
-### 部署apiserver
+### 部署 APIServer
APIServer负责接收http请求,转发给OpenMLDB并返回结果。它是无状态的,而且并不是OpenMLDB必须部署的组件。
运行前需确保OpenMLDB cluster已经启动,否则APIServer将初始化失败并退出进程。
@@ -266,10 +266,10 @@ APIServer负责接收http请求,转发给OpenMLDB并返回结果。它是无
#### 1 下载OpenMLDB部署包
```
-wget https://github.com/4paradigm/OpenMLDB/releases/download/v0.5.2/openmldb-0.5.2-linux.tar.gz
-tar -zxvf openmldb-0.5.2-linux.tar.gz
-mv openmldb-0.5.2-linux openmldb-apiserver-0.5.2
-cd openmldb-apiserver-0.5.2
+wget https://github.com/4paradigm/OpenMLDB/releases/download/v0.6.3/openmldb-0.6.3-linux.tar.gz
+tar -zxvf openmldb-0.6.3-linux.tar.gz
+mv openmldb-0.6.3-linux openmldb-apiserver-0.6.3
+cd openmldb-apiserver-0.6.3
```
#### 2 修改配置文件conf/apiserver.flags
@@ -293,7 +293,7 @@ cd openmldb-apiserver-0.5.2
#### 3 启动服务
```
-sh bin/start.sh start apiserver
+bash bin/start.sh start apiserver
```
**注**: 如果在linux平台通过发布包启动nameserver/tablet/apiserver时core掉,很可能是指令集不兼容问题,需要通过源码编译openmldb。源码编译参考[这里](./compile.md), 需要采用方式三完整源代码编译。
@@ -302,12 +302,12 @@ sh bin/start.sh start apiserver
#### 1 下载 OpenMLDB 部署包和面向特征工程优化的 Spark 发行版
````
-wget https://github.com/4paradigm/spark/releases/download/v3.0.0-openmldb0.5.2/spark-3.0.0-bin-openmldbspark.tgz
-tar -zxvf spark-3.0.0-bin-openmldbspark.tgz
-wget https://github.com/4paradigm/OpenMLDB/releases/download/v0.5.2/openmldb-0.5.2-linux.tar.gz
-tar -zxvf openmldb-0.5.2-linux.tar.gz
-mv openmldb-0.5.2-linux openmldb-taskmanager-0.5.2
-cd openmldb-taskmanager-0.5.2
+wget https://github.com/4paradigm/spark/releases/download/v3.2.1-openmldb0.6.3/spark-3.2.1-bin-openmldbspark.tgz
+tar -zxvf spark-3.2.1-bin-openmldbspark.tgz
+wget https://github.com/4paradigm/OpenMLDB/releases/download/v0.6.3/openmldb-0.6.3-linux.tar.gz
+tar -zxvf openmldb-0.6.3-linux.tar.gz
+mv openmldb-0.6.3-linux openmldb-taskmanager-0.6.3
+cd openmldb-taskmanager-0.6.3
````
#### 2 修改配置文件conf/taskmanager.properties
@@ -333,7 +333,7 @@ spark.home=
#### 3 启动服务
```
-bin/start.sh start taskmanager
+bash bin/start.sh start taskmanager
```
#### 4 检查服务是否启动
```bash
diff --git a/docs/zh/developer/contributing.md b/docs/zh/developer/contributing.md
index 0eaf185fd19..3329a635ae2 100644
--- a/docs/zh/developer/contributing.md
+++ b/docs/zh/developer/contributing.md
@@ -1,3 +1,22 @@
-# Contributing
+# 代码贡献
Please refer to [Contribution Guideline](https://github.com/4paradigm/OpenMLDB/blob/main/CONTRIBUTING.md)
+## Pull Request(PR)须知
+
+提交PR时请注意以下几点:
+- PR标题,请遵守[commit格式](https://github.com/4paradigm/rfcs/blob/main/style-guide/commit-convention.md#conventional-commits-reference)。**注意是PR标题,而不是PR中的commits**。
+```{note}
+如果标题不符合标准,`pr-linter / pr-name-lint (pull_request)`将会失败,状态为`x`。
+```
+- PR checks,PR中有很多checks,只有`codecov/patch`和`codecov/project`可以不通过,其他checks都应该通过。如果其他checks不通过,而你无法修复或认为不应修复,可以在PR中留下评论。
+
+- PR说明,请在PR的第一个comment中说明PR的意图。我们提供了PR comment模板,你可以不遵守该模板,但也请保证有足够的解释。
+
+- PR files changed,请注意pr的`files changed`。不要包含PR意图以外的代码改动。基本可以通过`git merge origin/main`再`git push`到PR分支,来消除多余diff。如果你需要帮助,请在PR中评论。
+```{note}
+如果你不是在main分支的基础上修改代码,那么PR希望合入main分支时,`files changed`就会包含多余代码。比如,main分支已经是commit10,你从old main的commit9开始,增加了new_commit1,在new_commit1的基础上,增加new_commit2,实际上你只是想提交new_commit2,但PR中会包含new_commit1和new_commit2。
+这种情况,只需要`git merge origin/main`,再`git push`到PR分支,就可以只有改动部分。
+```
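+
+下面给出一个消除多余 diff 的参考操作(仅为示意,假设 PR 分支名为 my_feature,请替换为实际分支名):
+```bash
+git checkout my_feature      # 切换到 PR 分支
+git fetch origin             # 获取最新的 origin/main
+git merge origin/main        # 将 main 的最新提交合入 PR 分支
+git push origin my_feature   # 推送后,PR 的 files changed 将只包含本次改动
+```
+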
+```{seealso}
+如果你希望分支的代码更加clean,可以不用`git merge`,而是使用`git rebase -i origin/main`,它会将你的更改在main分支的基础上逐一增加。但它会改变commit,你需要`git push -f`来覆盖分支。
+```
diff --git a/docs/zh/developer/index.rst b/docs/zh/developer/index.rst
index 39400de5c63..cfda5b1afc6 100644
--- a/docs/zh/developer/index.rst
+++ b/docs/zh/developer/index.rst
@@ -10,3 +10,4 @@
built_in_function_develop_guide
udf_develop_guide
sdk_develop
+ python_dev
diff --git a/docs/zh/developer/python_dev.md b/docs/zh/developer/python_dev.md
new file mode 100644
index 00000000000..e22b04c871a
--- /dev/null
+++ b/docs/zh/developer/python_dev.md
@@ -0,0 +1,34 @@
+# Python SDK/Tool 开发指南
+
+`python/`中有两个组件,一个Python SDK,一个诊断工具OpenMLDB Tool。
+
+## SDK 测试方法
+
+在根目录执行`make SQL_PYSDK_ENABLE=ON OPENMLDB_BUILD_TARGET=cp_python_sdk_so`,确保`python/openmldb_sdk/openmldb/native/`中使用的是最新的native库。
+
+1. 安装包测试:安装编译好的whl,再`pytest test/`。可直接使用脚本`steps/test_python.sh`。
+1. 动态测试:确认pip中无openmldb,也不要安装编译好的whl,在`python/openmldb_sdk`中执行`pytest test/`即可。这种方式可以方便调试代码。
+
+只运行部分测试,可以使用:
+```
+cd python/openmldb_sdk
+pytest tests/ -k '<keyword expression>'
+pytest tests/xxx.py::<test_class>
+pytest tests/xxx.py::<test_class>::<test_case>
+```
+`-k`使用方式见[keyword expressions](https://docs.pytest.org/en/latest/example/markers.html#using-k-expr-to-select-tests-based-on-their-name)。
+
+## Tool 测试
+
+由于Tool中的诊断工具需要ssh免密,所以,即使在本地测试(本地ssh到本地),也需要将当前用户的ssh pub key写入当前用户的authorized_keys。
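+
+例如,可以这样配置本机到本机的 ssh 免密(仅为参考示意,假设使用 RSA 密钥;已有密钥时跳过生成步骤):
+```
+ssh-keygen -t rsa -N '' -f ~/.ssh/id_rsa         # 已有密钥可跳过
+cat ~/.ssh/id_rsa.pub >> ~/.ssh/authorized_keys  # 将公钥写入本机 authorized_keys
+chmod 600 ~/.ssh/authorized_keys
+ssh localhost 'echo ok'                          # 验证免密登录是否生效
+```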
+
+普通测试:
+```
+cd python/openmldb_tool
+pytest tests/
+```
+
+测试如果需要python log信息:
+```
+pytest -o log_cli=true --log-cli-level=DEBUG tests/
+```
diff --git a/docs/zh/developer/sdk_develop.md b/docs/zh/developer/sdk_develop.md
index 306907cf802..b71efb60f1c 100644
--- a/docs/zh/developer/sdk_develop.md
+++ b/docs/zh/developer/sdk_develop.md
@@ -34,4 +34,44 @@ Python用户层,则是支持Python中比较流行的sqlalchemy,具体实现
我们希望增加更易用的C++ SDK。显然,我们不需要Wrapper层。
所以,理论上讲,开发者只需要用户层的设计与实现,实现中调用SDK核心层。
-但考虑到代码复用,可能会一定程度地改动SDK核心层的代码,或者是调整SDK核心代码结构(比如,暴露SDK核心层的部分头文件等)。
\ No newline at end of file
+但考虑到代码复用,可能会一定程度地改动SDK核心层的代码,或者是调整SDK核心代码结构(比如,暴露SDK核心层的部分头文件等)。
+
+## SDK核心层-细节介绍
+
+由于历史原因,SQLClusterRouter的创建方式有多种。下面一一介绍。
+首先是使用两种Option创建,分别会创建连接Cluster和Standalone两种OpenMLDB服务端。
+```
+ explicit SQLClusterRouter(const SQLRouterOptions& options);
+ explicit SQLClusterRouter(const StandaloneOptions& options);
+```
+这两种常见方式,不会暴露元数据相关的DBSDK,通常给普通用户使用。Java与Python SDK底层也是使用这两种方式。
+
+第三种是基于DBSDK创建:
+```
+ explicit SQLClusterRouter(DBSDK* sdk);
+```
+DBSDK有分为Cluster和Standalone两种,因此也可连接两种OpenMLDB服务端。
+这种方式方便用户额外地读取操作元数据,否则DBSDK在SQLClusterRouter内部不会对外暴露。
+
+例如,由于CLI可以直接通过DBSDK获得nameserver等元数据信息,我们在启动ClusterSQLClient或StandAloneSQLClient时是先创建DBSDK再创建SQLClusterRouter。
+
+## Java Test
+
+如果希望只在submodule中测试,可能会需要其他submodule依赖,比如openmldb-spark-connector依赖openmldb-jdbc。你需要先install编译好的包
+```
+make SQL_JAVASDK_ENABLE=ON
+# 或者
+cd java
+mvn install -DskipTests=true -Dscalatest.skip=true -Dwagon.skip=true -Dmaven.test.skip=true -Dgpg.skip
+```
+然后再
+```
+mvn test -pl openmldb-spark-connector -Dsuites=com._4paradigm.openmldb.spark.TestWrite
+```
+P.S. 如果你实时改动了代码,由于install到本地仓库存在之前的代码编译的jar包,会导致无法测试最新代码。请谨慎使用`-pl`的写法。
+
+如果只想运行java测试:
+```
+mvn test -pl openmldb-jdbc -Dtest="SQLRouterSmokeTest"
+mvn test -pl openmldb-jdbc -Dtest="SQLRouterSmokeTest#AnyMethod"
+```
\ No newline at end of file
diff --git a/docs/zh/maintain/diagnose.md b/docs/zh/maintain/diagnose.md
new file mode 100644
index 00000000000..76ec86ac1b8
--- /dev/null
+++ b/docs/zh/maintain/diagnose.md
@@ -0,0 +1,81 @@
+# 诊断工具
+
+## 概述
+
+为了方便排查用户环境中的常见问题,OpenMLDB提供了诊断工具。主要有以下功能:
+- 版本校验
+- 配置文件检查
+- 日志提取
+- 执行测试SQL
+
+## 使用
+
+1. 下载诊断工具包
+```bash
+ pip install openmldb-tool
+```
+
+2. 准备环境yaml配置文件
+
+单机版yaml
+```yaml
+mode: standalone
+nameserver:
+ -
+ endpoint: 127.0.0.1:6527
+ path: /work/openmldb
+tablet:
+ -
+ endpoint: 127.0.0.1:9527
+ path: /work/openmldb
+```
+
+集群版yaml
+```yaml
+mode: cluster
+zookeeper:
+ zk_cluster: 127.0.0.1:2181
+ zk_root_path: /openmldb
+nameserver:
+ -
+ endpoint: 127.0.0.1:6527
+ path: /work/ns1
+tablet:
+ -
+ endpoint: 127.0.0.1:9527
+ path: /work/tablet1
+ -
+ endpoint: 127.0.0.1:9528
+ path: /work/tablet2
+taskmanager:
+ -
+ endpoint: 127.0.0.1:9902
+ path: /work/taskmanager1
+```
+
+3. 添加机器互信
+
+ 由于诊断工具需要到部署节点上拉取文件,所以需要添加机器互信免密。设置方法参考[这里](https://www.itzgeek.com/how-tos/linux/centos-how-tos/ssh-passwordless-login-centos-7-rhel-7.html)
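+
+ 例如,可以使用 `ssh-copy-id` 将本机公钥分发到各部署节点(仅为参考示意,用户名与节点 IP 需按实际部署替换):
+```bash
+ssh-keygen -t rsa                  # 若本机还没有密钥
+ssh-copy-id work@172.27.128.31     # 对 yaml 中列出的每个部署节点执行一次
+```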
+
+4. 执行诊断工具命令
+```bash
+openmldb_tool --dist_conf=/tmp/standalone_dist.yml
+```
+诊断工具主要参数如下:
+
+- --dist_conf OpenMLDB节点分布的配置文件
+- --data_dir 数据存放路径。会把远端的配置文件和日志等放在这个目录里,默认为/tmp/diagnose_tool_data
+- --check 检查项,默认为ALL即检查所有。还可以单独配置为CONF/LOG/SQL/VERSION,分别检查配置文件、日志、执行SQL、版本
+- --exclude 不检查其中某一项。只有check设置为ALL才会生效。可以配置为CONF/LOG/SQL/VERSION
+- --log_level 设置日志级别,默认为info。可以设置为debug/warn/info
+- --log_dir 设置结果输出路径,默认为标准输出
+- --env 如果用start-all.sh启动的集群,需要指定为onebox, 其他情况不需要指定
+
+例如指定只检查配置文件,并且结果输出到当前目录下
+```
+openmldb_tool --dist_conf=/tmp/cluster_dist.yml --check=conf --log_dir=./
+```
+
+**注**: 如果是单机版,诊断工具必须在单机版部署节点上执行
+
+可使用`openmldb_tool --helpfull`查看所有配置项。例如,`--sdk_log`可以打印sdk的日志(zk,glog),可用于调试。
\ No newline at end of file
diff --git a/docs/zh/maintain/faq.md b/docs/zh/maintain/faq.md
index 14493fb013b..f7126d5bf10 100644
--- a/docs/zh/maintain/faq.md
+++ b/docs/zh/maintain/faq.md
@@ -6,7 +6,7 @@
虽然有一键启动脚本,但由于配置繁多,可能出现“端口已被占用”,“目录无读写权限”等问题。这些问题都是server进程运行之后才能发现,退出后没有及时反馈。(如果配置了监控,可以通过监控直接检查。)
所以,请先确认集群的所有server进程都正常运行。
-可以通过`ps axu | grep openmldb`来查询。(注意,官方运行脚本中使用`mon`作为守护进程,但`mon`进程运行不代表openmldb server进程正在运行。)
+可以通过`ps axu | grep openmldb`或sql命令`show components;`来查询。(注意,如果你使用了守护进程,openmldb server进程可能是在启动停止的循环中,并不代表持续运行,可以通过日志或`show components;`连接时间来确认。)
如果进程都活着,集群还是表现不正常,需要查询一下server日志。可以优先看WARN和ERROR级日志,很大概率上,它们就是根本原因。
@@ -56,9 +56,8 @@ rpc_client.h:xxx] request error. [E1008] Reached timeout=xxxms
来调大rpc的timeout时间,单位为ms。
#### 普通请求
如果是简单的query或insert,都会出现超时,需要更改通用的`request_timeout`配置。
-1. CLI: 目前无法更改
-2. JAVA: SDK 直连,调整`SdkOption.requestTimeout`; JDBC,调整url中的参数`requestTimeout`
-3. Python: 目前无法更改
+1. CLI: 启动时配置`--request_timeout_ms`
+2. JAVA/Python SDK: Option或url中调整`SdkOption.requestTimeout`
### 2. 为什么收到 Got EOF of Socket 的警告日志?
```
@@ -67,3 +66,41 @@ rpc_client.h:xxx] request error. [E1014]Got EOF of Socket{id=x fd=x addr=xxx} (x
这是因为`addr`端主动断开了连接,`addr`的地址大概率是taskmanager。这不代表taskmanager不正常,而是taskmanager端认为这个连接没有活动,超过keepAliveTime了,而主动断开通信channel。
在0.5.0及以后的版本中,可以调大taskmanager的`server.channel_keep_alive_time`来提高对不活跃channel的容忍度。默认值为1800s(0.5h),特别是使用同步的离线命令时,这个值可能需要适当调大。
在0.5.0以前的版本中,无法更改此配置,请升级taskmanager版本。
+
+### 3. 离线查询结果显示中文为什么乱码?
+
+在使用离线查询时,可能出现包含中文的查询结果乱码,主要和系统默认编码格式与Spark任务编码格式参数有关。
+
+如果出现乱码情况,可以通过添加Spark高级参数`spark.driver.extraJavaOptions=-Dfile.encoding=utf-8`和`spark.executor.extraJavaOptions=-Dfile.encoding=utf-8`来解决。
+
+客户端配置方法可参考[客户端Spark配置文件](../reference/client_config/client_spark_config.md),也可以在TaskManager配置文件中添加此项配置。
+
+```
+spark.default.conf=spark.driver.extraJavaOptions=-Dfile.encoding=utf-8;spark.executor.extraJavaOptions=-Dfile.encoding=utf-8
+```
+
+### 4. 如何配置TaskManager来访问开启Kerberos的Yarn集群?
+
+如果Yarn集群开启Kerberos认证,TaskManager可以通过添加以下配置来访问开启Kerberos认证的Yarn集群。注意请根据实际配置修改keytab路径以及principal账号。
+
+```
+spark.default.conf=spark.yarn.keytab=/tmp/test.keytab;spark.yarn.principal=test@EXAMPLE.COM
+```
+
+### 5. 如何配置客户端的core日志?
+
+客户端core日志主要有两种,zk日志和sdk日志(glog日志),两者是独立的。
+
+zk日志:
+1. CLI:启动时配置`--zk_log_level`调整level,`--zk_log_file`配置日志保存文件。
+2. JAVA/Python SDK:Option或url中使用`zkLogLevel`调整level,`zkLogFile`配置日志保存文件。
+
+- `zk_log_level`(int, 默认=3, 即INFO):
+打印这个等级及**以下**等级的日志。0-禁止所有zk log, 1-error, 2-warn, 3-info, 4-debug。
+
+sdk日志(glog日志):
+1. CLI:启动时配置`--glog_level`调整level,`--glog_dir`配置日志保存文件。
+2. JAVA/Python SDK:Option或url中使用`glogLevel`调整level,`glogDir`配置日志保存文件。
+
+- `glog_level`(int, 默认=0, 即INFO):
+打印这个等级及**以上**等级的日志。INFO、WARNING、ERROR、FATAL 日志分别对应 0、1、2、3。
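+
+下面给出一个组合使用上述配置的示例(其中地址与路径为示意值;JDBC url 的参数写法为假设,请以 Java SDK 文档为准):
+```
+# CLI:调整 zk 日志与 glog 日志
+./openmldb --zk_cluster=127.0.0.1:2181 --zk_root_path=/openmldb --role=sql_client \
+    --zk_log_level=2 --zk_log_file=./logs/zk.log --glog_level=1 --glog_dir=./logs
+
+# JAVA/Python SDK:在 Option 或 url 中设置,例如
+# jdbc:openmldb:///?zk=127.0.0.1:2181&zkPath=/openmldb&zkLogLevel=2&glogLevel=1&glogDir=./logs
+```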
diff --git a/docs/zh/maintain/index.rst b/docs/zh/maintain/index.rst
index 6f5b2c7fb97..ae6781bfbbc 100644
--- a/docs/zh/maintain/index.rst
+++ b/docs/zh/maintain/index.rst
@@ -11,3 +11,4 @@
monitoring
cli
faq
+ diagnose
diff --git a/docs/zh/maintain/scale.md b/docs/zh/maintain/scale.md
index f16a8acb892..0376004bdc3 100644
--- a/docs/zh/maintain/scale.md
+++ b/docs/zh/maintain/scale.md
@@ -11,7 +11,7 @@
- 修改conf/tablet.flags配置文件,zk_cluster和zk_root_path和集群中其他节点保持一致。修改endpoint。
- 启动tablet
```bash
- sh bin/start.sh start tablet
+ bash bin/start.sh start tablet
```
启动后查看新增节点是否加入集群。如果执行showtablet命令列出了新节点endpoint说明已经加入到集群中
@@ -72,10 +72,10 @@ $ ./bin/openmldb --zk_cluster=172.27.128.31:8090,172.27.128.32:8090,172.27.128.3
### 3 下线节点
执行停止命令
```bash
-sh bin/start.sh stop tablet
+bash bin/start.sh stop tablet
```
如果该节点部署有nameserver也需要把nameserver停掉
```bash
-sh bin/start.sh stop nameserver
+bash bin/start.sh stop nameserver
```
-**注**:保持高可用至少需要两个nameserver节点
\ No newline at end of file
+**注**:保持高可用至少需要两个nameserver节点
diff --git a/docs/zh/maintain/upgrade.md b/docs/zh/maintain/upgrade.md
index 10c6254c1f7..16ce83cb1f8 100644
--- a/docs/zh/maintain/upgrade.md
+++ b/docs/zh/maintain/upgrade.md
@@ -8,14 +8,14 @@
* 停止nameserver
```bash
- sh bin/start.sh stop nameserver
+ bash bin/start.sh stop nameserver
```
* 备份旧版本bin和conf目录
* 下载新版本bin和conf
* 对比配置文件diff并修改必要的配置,如endpoint、zk\_cluster等
* 启动nameserver
```bash
- sh bin/start.sh start nameserver
+ bash bin/start.sh start nameserver
```
* 对剩余nameserver重复以上步骤
@@ -23,14 +23,14 @@
* 停止tablet
```bash
- sh bin/start.sh stop tablet
+ bash bin/start.sh stop tablet
```
* 备份旧版本bin和conf目录
* 下载新版本bin和conf
* 对比配置文件diff并修改必要的配置,如endpoint、zk\_cluster等
* 启动tablet
```bash
- sh bin/start.sh start tablet
+ bash bin/start.sh start tablet
```
* 如果auto\_failover关闭时得连上ns client执行如下操作恢复数据。其中**命令后面的endpoint为重启节点的endpoint**
* offlineendpoint endpoint
diff --git a/docs/zh/quickstart/cxx_sdk.md b/docs/zh/quickstart/cxx_sdk.md
new file mode 100644
index 00000000000..c2451c82065
--- /dev/null
+++ b/docs/zh/quickstart/cxx_sdk.md
@@ -0,0 +1,118 @@
+# OpenMLDB C++ SDK 快速上手
+
+## 1. 请先编译安装或下载 C++ SDK 包
+
+编译:
+```
+cd OpenMLDB
+make && make install
+```
+
+## 2. 部署 OpenMLDB Server
+
+详细文件配置及步骤请参考:https://openmldb.ai/docs/zh/v0.6/quickstart/openmldb_quickstart.html
+
+## 3. 编写用户代码
+
+openmldb_api.h 和 sdk/result_set.h 是必须 include 的头文件。
+
+```
+#include <ctime>
+#include <iostream>
+#include <string>
+
+#include "openmldb_api.h"
+#include "sdk/result_set.h"
+
+int main()
+{
+ // 创建并初始化 OpenmldbHandler 对象
+ // 单机版:参数(ip, port),如:OpenmldbHandler handler("127.0.0.1", 6527);
+ // 集群版:参数(ip:port, path),如:OpenmldbHandler handler("127.0.0.1:6527", "/openmldb");
+ // 在此以单机版为示例。
+ OpenmldbHandler handler("127.0.0.1", 6527);
+
+ // 定义数据库名
+ std::time_t t = std::time(0);
+ std::string db = "test_db" + std::to_string(t);
+
+ // 创建 SQL 语句,创建数据库
+ std::string sql = "create database " + db + ";";
+ // 执行 SQL 语句,execute() 函数返回 bool 值,值为 true 表示正确执行
+ std::cout << execute(handler, sql);
+
+ // 创建 SQL 语句,使用数据库
+ sql = "use " + db + ";";
+ std::cout << execute(handler, sql);
+
+ // 创建 SQL 语句,创建表
+ sql = "create table test_table ("
+ "col1 string, col2 bigint,"
+ "index(key=col1, ts=col2));";
+ std::cout << execute(handler, sql);
+
+ // 创建 SQL 语句,向表中插入行
+ sql = "insert test_table values(\"hello\", 1)";
+ std::cout << execute(handler, sql);
+ sql = "insert test_table values(\"Hi~\", 2)";
+ std::cout << execute(handler, sql);
+
+ // 普通模式
+ sql = "select * from test_table;";
+ std::cout << execute(handler, sql);
+ // 获得最近一次 SQL 的执行结果
+ auto res = get_resultset();
+ // 输出 SQL 的执行结果
+ print_resultset(res);
+ // 本示例中输出应该为:
+ // +-------+--------+
+ // | col1 | col2 |
+ // +-------+--------+
+ // | hello | 1 |
+ // | Hi~ | 2 |
+ // +-------+---------+
+
+
+
+ // 带参模式
+ // SQL 语句中待填参数的位置用 ? 来表示
+ sql = "select * from test_table where col1 = ? ;";
+ // 创建 ParameterRow 对象,用于填充参数
+ ParameterRow para(&handler);
+ // 填入参数
+ para << "Hi~";
+ // 执行 SQL 语句,execute_parameterized() 函数返回 bool 值,值为 true 表示正确执行
+ execute_parameterized(handler, db, sql, para);
+ res = get_resultset();
+ print_resultset(res);
+ // 本示例中输出应该为:
+ // +------+--------+
+ // | col1 | col2 |
+ // +------+-------+
+ // | Hi~ | 2 |
+ // +------+--------+
+
+
+ // 请求模式
+ sql = "select col1, sum(col2) over w as w_col2_sum from test_table "
+ "window w as (partition by test_table.col1 order by test_table.col2 "
+ "rows between 2 preceding and current row);";
+ RequestRow req(&handler, db, sql);
+ req << "Hi~" << 3l;
+ execute_request(req);
+ res = get_resultset();
+ print_resultset(res);
+ // 本示例中输出应该为:
+ // +------+--------------------+
+ // | col1 | w_col2_sum |
+ // +------+--------------------+
+ // | Hi~ | 5 |
+ // +------+--------------------+
+}
+```
+
+## 4. 编译与运行
+```
+gcc <用户代码>.cxx -o <可执行文件名> -lstdc++ -std=c++17 -I<OpenMLDB安装路径>/include -L<OpenMLDB安装路径>/lib -lopenmldbsdk -lpthread
+./<可执行文件名>
+```
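+
+例如,假设 OpenMLDB 通过 `make install` 安装到了 `/work/openmldb`,用户代码文件为 `demo.cxx`(以下路径与文件名仅为示意):
+```
+gcc demo.cxx -o demo -lstdc++ -std=c++17 -I/work/openmldb/include -L/work/openmldb/lib -lopenmldbsdk -lpthread
+./demo
+```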
diff --git a/docs/zh/quickstart/java_sdk.md b/docs/zh/quickstart/java_sdk.md
index 855a42a6ff2..6b1671782f6 100644
--- a/docs/zh/quickstart/java_sdk.md
+++ b/docs/zh/quickstart/java_sdk.md
@@ -9,12 +9,12 @@
com.4paradigm.openmldb
openmldb-jdbc
- 0.5.2
+ 0.6.3
com.4paradigm.openmldb
openmldb-native
- 0.5.2
+ 0.6.3
```
### Mac下Java SDK包安装
@@ -24,15 +24,15 @@
com.4paradigm.openmldb
openmldb-jdbc
- 0.5.2
+ 0.6.3
com.4paradigm.openmldb
openmldb-native
- 0.5.2-macos
+ 0.6.3-macos
```
-注意: 由于 openmldb-native 中包含了 OpenMLDB 编译的 C++ 静态库, 默认是 linux 静态库, macOS 上需将上述 openmldb-native 的 version 改成 `0.5.2-macos`, openmldb-jdbc 的版本保持不变。
+注意: 由于 openmldb-native 中包含了 OpenMLDB 编译的 C++ 静态库, 默认是 linux 静态库, macOS 上需将上述 openmldb-native 的 version 改成 `0.6.3-macos`, openmldb-jdbc 的版本保持不变。
## 2. Java SDK快速上手
@@ -173,9 +173,9 @@ execute后,缓存的数据将被清除,无法重试execute。
第三步,使用`PreparedStatement::addBatch()`接口完成一行的填充。
-第四步,继续使用`setType`和`addBatch`,填充多行。
+第四步,继续使用`setType(index, value)`和`addBatch()`,填充多行。
-第五步,使用`PreparedStatement::addBatch()`接口完成批量插入。
+第五步,使用`PreparedStatement::executeBatch()`接口完成批量插入。
```java
String insertSqlWithPlaceHolder = "insert into trans values(\"aa\", ?, 33, ?, 2.4, 1590738993000, \"2020-05-04\");";
@@ -184,7 +184,11 @@ try {
pstmt = sqlExecutor.getInsertPreparedStmt(db, insertSqlWithPlaceHolder);
pstmt.setInt(1, 24);
    pstmt.setFloat(2, 1.5f);
- pstmt.execute();
+ pstmt.addBatch();
+ pstmt.setInt(1, 25);
+    pstmt.setFloat(2, 1.7f);
+ pstmt.addBatch();
+ pstmt.executeBatch();
} catch (SQLException e) {
e.printStackTrace();
Assert.fail();
@@ -336,7 +340,35 @@ try {
}
```
+### 2.9 删除指定索引下某个pk的所有数据
+
+通过 Java SDK 删除数据有以下两种方式:
+
+- 直接执行delete SQL
+- 使用 delete preparestatement
+
+```java
+java.sql.Statement state = router.getStatement();
+try {
+ String sql = "DELETE FROM t1 WHERE col2 = 'key1';";
+ state.execute(sql);
+ sql = "DELETE FROM t1 WHERE col2 = ?;";
+ java.sql.PreparedStatement p1 = router.getDeletePreparedStmt("test", sql);
+ p1.setString(1, "key2");
+ p1.executeUpdate();
+ p1.close();
+} catch (Exception e) {
+ e.printStackTrace();
+ Assert.fail();
+} finally {
+ try {
+ state.close();
+ } catch (Exception e) {
+ e.printStackTrace();
+ }
+}
+```
## 3. 完整的Java SDK使用范例
diff --git a/docs/zh/quickstart/openmldb_quickstart.md b/docs/zh/quickstart/openmldb_quickstart.md
index 4b68f05fdab..59dfc61bf51 100644
--- a/docs/zh/quickstart/openmldb_quickstart.md
+++ b/docs/zh/quickstart/openmldb_quickstart.md
@@ -19,7 +19,7 @@ Docker engine版本需求 >= 18.03
拉取镜像(镜像下载大小大约 1GB,解压后约 1.7 GB)和启动 docker 容器
```bash
-docker run -it 4pdosc/openmldb:0.5.2 bash
+docker run -it 4pdosc/openmldb:0.6.3 bash
```
````{important}
@@ -265,7 +265,9 @@ cd taxi-trip
注意,`LOAD DATA` 命令为非阻塞,可以通过 `SHOW JOBS` 等离线任务管理命令来查看任务进度。
-如果希望预览数据,用户亦可以使用 `SELECT` 语句,但是离线模式下该命令亦为非阻塞命令,查询结果需要查看日志,在这里不再展开。
+如果希望预览数据,用户亦可以使用 `SELECT` 语句,但是离线模式下该命令亦为非阻塞命令,查询结果需要查看日志(默认在/work/openmldb/taskmanager/bin/logs/job_x.log,如需更改,修改taskmanager.properties的`job.log.path`)。
+
+如果 job 执行失败,可以查看 /work/openmldb/taskmanager/bin/logs/job_x_error.log,确认问题。
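+
+例如,可以先用 `SHOW JOBS` 找到任务 id,再到 TaskManager 所在节点查看对应日志(以下任务 id 为假设):
+```bash
+# 在 SQL 客户端中执行 SHOW JOBS; 假设失败任务的 id 为 1
+cat /work/openmldb/taskmanager/bin/logs/job_1.log
+cat /work/openmldb/taskmanager/bin/logs/job_1_error.log
+```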
#### 3.3.3 离线特征计算
@@ -384,8 +386,8 @@ SELECT c1, c2, sum(c3) OVER w1 AS w1_c3_sum FROM demo_table1 WINDOW w1 AS (PARTI
c1 c2 c3 c4 c5 c6 c7
----- ---- ---- ---------- ----------- --------------- ------------
aaa 11 22 1.2 1.3 1635247427000 2021-05-20
- aaa 12 22 2.200000 12.300000 1636097890000 1970-01-01
aaa 11 22 1.200000 11.300000 1636097290000 1970-01-01
+ aaa 12 22 2.200000 12.300000 1636097890000 1970-01-01
----- ---- ---- ---------- ----------- --------------- ------------
```
2. 窗口范围是`2 PRECEDING AND CURRENT ROW`,所以我们在上表中截取出真正的窗口,请求行就是最小的一行,往前2行都不存在,但窗口包含当前行,因此,窗口只有请求行这一行。
diff --git a/docs/zh/quickstart/python_sdk.md b/docs/zh/quickstart/python_sdk.md
index 434d249f923..248020ab02e 100644
--- a/docs/zh/quickstart/python_sdk.md
+++ b/docs/zh/quickstart/python_sdk.md
@@ -18,10 +18,10 @@ pip install openmldb
import openmldb.dbapi
# 连接集群版OpenMLDB
-db = openmldb.dbapi.connect("db1", "$zkcluster", "$zkpath")
+db = openmldb.dbapi.connect(database="db1", zk="$zkcluster", zkPath="$zkpath")
# 连接单机版OpenMLDB
-# db = openmldb.dbapi.connect("db1", "$host", $port)
+# db = openmldb.dbapi.connect(database="db1", host="$host", port="$port")
cursor = db.cursor()
```
@@ -197,7 +197,7 @@ OpenMLDB Python SDK支持了Notebook magic function拓展,使用下面语句
```
import openmldb
-db = openmldb.dbapi.connect('demo_db','0.0.0.0:2181','/openmldb')
+db = openmldb.dbapi.connect(database='demo_db',zk='0.0.0.0:2181',zkPath='/openmldb')
openmldb.sql_magic.register(db)
```
diff --git a/docs/zh/quickstart/rest_api.md b/docs/zh/quickstart/rest_api.md
index cec31d3e618..ae5b046da5e 100644
--- a/docs/zh/quickstart/rest_api.md
+++ b/docs/zh/quickstart/rest_api.md
@@ -1,5 +1,10 @@
# REST APIs
+## 重要信息
+
+- REST APIs 通过 APIServer 和 OpenMLDB 的服务进行交互,因此 APIServer 模块必须被正确部署才能有效使用。APIServer 在安装部署时是可选模块,参照 [APIServer 部署文档](../deploy/install_deploy.md#部署-APIServer)。
+- 现阶段,APIServer 主要用来做功能测试使用,并不推荐用来测试性能,也不推荐在生产环境使用。APIServer 的默认部署目前并没有高可用机制,并且引入了额外的网络和编解码开销。
+
## 数据插入
request url: http://ip:port/dbs/{db_name}/tables/{table_name}
@@ -18,7 +23,7 @@ request body:
+ 目前仅支持一条插入,不可以插入多条数据。
+ 数据需严格按照 schema 排列。
-### 举例
+**数据插入举例**
```
curl http://127.0.0.1:8080/dbs/db/tables/trans -X PUT -d '{
@@ -53,11 +58,11 @@ request body:
+ 可以支持多行,其结果与返回的 response 中的 data.data 字段的数组一一对应。
+ need_schema 可以设置为 true, 返回就会有输出结果的 schema。默认为 false。
-### 举例
+**实时特征计算举例**
```
curl http://127.0.0.1:8080/dbs/demo_db/deployments/demo_data_service -X POST -d'{
- "input": [["aaa", 11, 22, 1.2, 1.3, 1635247427000, "2021-05-20"]],
+ "input": [["aaa", 11, 22, 1.2, 1.3, 1635247427000, "2021-05-20"]]
}'
```
@@ -72,3 +77,37 @@ response:
}
}
```
+
+## 查询
+
+The request URL: http://ip:port/dbs/{db_name}
+
+HTTP method: POST
+
+The request body example:
+
+```json
+{
+ "mode": "online",
+ "sql": "SELECT c1, c2, c3 FROM demo WHERE c1 = ? AND c2 = ?",
+ "input": {
+ "schema": ["Int32", "String"],
+ "data": [1, "aaa"]
+ }
+}
+```
+
+mode 的可选值为:"offsync"、"offasync"、"online"。
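+
+例如,可以用 curl 提交上述查询(以下假设 APIServer 地址为 127.0.0.1:8080、数据库为 demo_db):
+
+```
+curl http://127.0.0.1:8080/dbs/demo_db -X POST -d'{
+    "mode": "online",
+    "sql": "SELECT c1, c2, c3 FROM demo WHERE c1 = ? AND c2 = ?",
+    "input": {
+        "schema": ["Int32", "String"],
+        "data": [1, "aaa"]
+    }
+}'
+```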
+
+The response:
+
+```json
+{
+ "code":0,
+ "msg":"ok",
+ "data": {
+ "schema": ["Int32", "String", "Float"],
+ "data": [[1, "aaa", 1.2], [1, "aaa", 3.4]]
+ }
+}
+```
diff --git a/docs/zh/reference/client_config/client_spark_config.md b/docs/zh/reference/client_config/client_spark_config.md
new file mode 100644
index 00000000000..d89e00a4213
--- /dev/null
+++ b/docs/zh/reference/client_config/client_spark_config.md
@@ -0,0 +1,29 @@
+# 客户端Spark配置文件
+
+## 命令行传递Spark高级参数
+
+OpenMLDB离线任务默认使用Spark执行引擎提交,用户可以在TaskManager配置所有任务的Spark高级参数,也可以在客户端配置单次任务的Spark高级参数,更详细的配置可参考[Spark Configuration](https://spark.apache.org/docs/latest/configuration.html)。
+
+如果需要在SQL命令行修改Spark任务高级参数,可以在本地创建ini格式的配置文件,示例如下。
+
+```
+[Spark]
+spark.driver.extraJavaOptions=-Dfile.encoding=utf-8
+spark.executor.extraJavaOptions=-Dfile.encoding=utf-8
+spark.driver.cores=1
+spark.default.parallelism=1
+spark.driver.memory=4g
+spark.driver.memoryOverhead=384
+spark.driver.memoryOverheadFactor=0.10
+spark.shuffle.compress=true
+spark.files.maxPartitionBytes=134217728
+spark.sql.shuffle.partitions=200
+```
+
+以保存文件成`/work/openmldb/bin/spark.conf`为例,在启动SQL命令行时添加`--spark_conf`参数,示例如下。
+
+```
+./openmldb/bin/openmldb --zk_cluster=127.0.0.1:2181 --zk_root_path=/openmldb --role=sql_client --spark_conf=/work/openmldb/bin/spark.conf
+```
+
+如果配置文件不存在或配置有误,提交离线任务时命令行有相应的错误提示。
\ No newline at end of file
diff --git a/docs/zh/reference/client_config/index.rst b/docs/zh/reference/client_config/index.rst
new file mode 100644
index 00000000000..88664312f5f
--- /dev/null
+++ b/docs/zh/reference/client_config/index.rst
@@ -0,0 +1,9 @@
+=============================
+客户端配置
+=============================
+
+
+.. toctree::
+ :maxdepth: 1
+
+ client_spark_config
\ No newline at end of file
diff --git a/docs/zh/reference/index.rst b/docs/zh/reference/index.rst
index 8819b8cef74..1418c1e2a66 100644
--- a/docs/zh/reference/index.rst
+++ b/docs/zh/reference/index.rst
@@ -10,3 +10,4 @@
arch/index
sql/index
ip_tips
+ client_config/index
diff --git a/docs/zh/reference/ip_tips.md b/docs/zh/reference/ip_tips.md
index a182bcd1dcd..58ada72b6ba 100644
--- a/docs/zh/reference/ip_tips.md
+++ b/docs/zh/reference/ip_tips.md
@@ -1,64 +1,140 @@
# IP 配置
-## 物理环境 IP
-跨主机访问OpenMLDB服务,需要将OpenMLDB配置中的endpoint配置`127.0.0.1`改为`0.0.0.0`或公网IP,再启动OpenMLDB服务。请保证端口没有被防火墙阻挡。
+## 概述
+
+OpenMLDB docker镜像或发布包内的ip配置默认都是127.0.0.1,如果是外部访问,需要更改ip配置。如果使用容器,可能还需要更改容器启动方式。
+
+首先,让我们定义下,什么是外部?
+
+- 物理机:一台主机访问另一台主机,就是外部。同一主机上,使用127.0.0.1也可正常通讯;外部则必须使用“被访问主机”的公网IP。
+- 容器:同一主机的容器外,都是外部,包括同一主机的另一个容器、同一主机的物理环境,另外的主机。
+
+其次,让我们明确下,OpenMLDB有哪几种分布形式?
+- onebox,所有OpenMLDB server都在一个环境下,同一物理机或一个容器内。例如,我们的[快速上手](../quickstart/openmldb_quickstart.md),就是将所有进程都放在一个容器内。
+- 分布式,正式生产环境中常用分布式,server在不同物理机上,它们自然是需要绑定公网IP。
+
+由于容器的网络限制,onebox型的OpenMLDB常出现IP配置错误等问题。相反,分布式由于一定要绑定公网IP,反而没有太多疑问。
+
+下面我们将介绍**onebox型OpenMLDB**如何修改配置实现**外部访问**。
```{attention}
-单机版中,不只是需要改endpoint,nameserver的配置中还有tablet ip `--tablet=`,此处也需要修改。
+单机版中,不只是需要改endpoint,nameserver的配置中的tablet IP `--tablet=`也需要修改。
```
-## Docker IP
+## Onebox型OpenMLDB外部访问
+
+OpenMLDB有多种访问方式,包括HTTP,多种SDK,以及命令行CLI。
+
+### Http
+
+如果你只需要用restful http接口,那么,只需要考虑apiserver的ip是否可访问。(onebox型OpenMLDB的apiserver与其他server在同一环境下,它可以自由访问其他server)。
+
+可以通过
+```
+curl http://<apiserver_ip:port>/dbs/foo -X POST -d'{"mode":"online", "sql":"show components"}'
+```
+来确认apiserver是否正常工作。这里的nameserver、tablet server等ip即使是127.0.0.1,也不会有问题,因为apiserver可以通过127.0.0.1访问到这些server。
+
+#### 物理机onebox apiserver
+
+跨主机访问物理机上的onebox,只需要让apiserver的endpoint(绑定ip)改为公网ip。
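+
+以集群版为例,一种可能的修改方式如下(配置文件名与端口请以实际部署为准,`<公网IP>`为占位符):
+```
+# conf/apiserver.flags(示意)
+--endpoint=<公网IP>:9080
+```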
+
-希望从容器的外部(无论是同一主机还是跨主机)访问容器内,请先
-更改endpoint`127.0.0.1`为 `0.0.0.0`(单机版中`tablet`配置项也需要更改),以避免不必要的麻烦。
+#### 容器onebox apiserver
-### 容器外部访问(同一主机)
-在同一主机中,想要从**容器的外部**(物理机或者是其他容器)访问**容器内**启动的OpenMLDB服务端,可以直接使用bridge的方式连接,也可以暴露端口,还可以直接使用host网络模式。
+如果是本机访问容器onebox中的apiserver,可以**任选一种**下面的方式:
+ - 可以通过bridge的方式,只需让apiserver的endpoint改为`0.0.0.0`(也就是绑定容器ip),然后http使用容器ip即可。
+ ```{note}
+ bridge IP通过`docker network inspect bridge`来查看,通过容器ID或Name找到IP。
-```{caution}
-Docker Desktop for Mac无法支持从物理机访问容器(以下任何模式都不能),参考[i-cannot-ping-my-containers](https://docs.docker.com/desktop/mac/networking/#i-cannot-ping-my-containers)。
+ Docker Desktop for Mac无法支持从物理机访问容器(以下任何模式都不能),参考[i-cannot-ping-my-containers](https://docs.docker.com/desktop/mac/networking/#i-cannot-ping-my-containers)。
但macOS中,可以从容器内访问其他容器。
+ ```
+ - 暴露端口,也需要将apiserver的endpoint改为`0.0.0.0`。这样可以使用127.0.0.1或是公网ip访问到apiserver。
+ 单机版:
+ ```
+ docker run -p 8080:8080 -it 4pdosc/openmldb:0.6.3 bash
+ ```
+ 集群版:
+ ```
+ docker run -p 9080:9080 -it 4pdosc/openmldb:0.6.3 bash
+ ```
+ - 使用host网络,可以不用修改endpoint配置。缺点是容易引起端口冲突。
+ ```
+ docker run --network host -it 4pdosc/openmldb:0.6.3 bash
+ ```
+
+如果是跨主机访问容器onebox中的apiserver,可以**任选一种**下面的方式:
+ - 暴露端口,并将apiserver的endpoint改为`0.0.0.0`。docker启动详情见上。
+ - 使用host网络,并将apiserver的endpoint改为`0.0.0.0`或是公网IP。docker启动详情见上。
+
+无论哪种方式,核心都是让apiserver的endpoint(绑定ip)对外可访问;apiserver与集群内其他server的交互发生在同一主机或同一容器内,并不需要更改。
+
+### CLI/SDK
+
+如果你需要在外部使用CLI/SDK,情况比只连接apiserver要复杂,需要保证CLI/SDK能访问到tablet server和taskmanager server。
+```{seealso}
+由于server间内部通信是使用`endpoint`绑定的ip通信,而CLI/SDK也是直接获取同样的ip,直连tablet server或taskmanager,因此,nameserver和tablet/taskmanager通信正常,CLI/SDK却有可能因为跨主机或容器,无法正常连接到tablet/taskmanager。
```
-#### bridge连接
-bridge连接不需要更改docker run命令,只需要查询一下bridge ip。
+你可以通过这样一个简单的SQL脚本来测试确认连接是否正常。
```
-docker network inspect bridge
+show components;
+create database db;
+use db;
+create table t1(c1 int);
+set @@execute_mode='online';
+insert into t1 values(1);
+select * from t1;
```
-查看“Containers”字段,可以看到每个容器绑定的ip,客户端使用该ip就可以进行访问。
+其中`show components`可以看到CLI获得的tablet/taskmanager ip是什么样的。`insert`语句可以测试是否能连接并将数据写入tablet server。
-例如,启动容器并运行OpenMLDB单机版后,inspect结果为`172.17.0.2`,那么CLI连接可以使用:
+下面,我们分情况讨论如何配置。
+
+#### CLI/SDK->物理机onebox
+
+跨主机访问物理机上的onebox,只需将所有endpoint改为公网IP。
+
+可使用以下命令快速修改。
+单机版:
+```
+sed -i s/127.0.0.1/<公网IP>/g openmldb/conf/standalone*
```
-../openmldb/bin/openmldb --host 172.17.0.2 --port 6527
+集群版:
+简单的做法是更改所有conf文件:
```
+sed -i s/127.0.0.1/<公网IP>/g openmldb/conf/*
+```
+或者,精确地只修改集群版的配置文件:
+```
+cd /work/openmldb/conf/ && ls | grep -v _ | xargs sed -i s/127.0.0.1/<公网IP>/g && cd -
+```
+
+#### CLI/SDK->容器onebox
-#### 暴露端口
-在启动容器时通过 `-p` 暴露端口,客户端可以使用本机ip地址或回环地址进行访问。
+如果是本机的容器外CLI访问容器onebox,可以**任选一种**下面的方式:
+
+- bridge连接,bridge IP查看参考[容器onebox-apiserver](#容器onebox-apiserver),将所有endpoint配置改为bridge ip。不可以是`0.0.0.0`,容器外CLI/SDK无法通过`0.0.0.0`找到容器内的server。
+
+- 暴露端口,并将conf中所有endpoint改为bridge IP或`0.0.0.0`。本机也可以顺利通信。
单机版需要暴露三个组件(nameserver,tabletserver,apiserver)的端口:
```
-docker run -p 6527:6527 -p 9921:9921 -p 8080:8080 -it 4pdosc/openmldb:0.5.2 bash
+docker run -p 6527:6527 -p 9921:9921 -p 8080:8080 -it 4pdosc/openmldb:0.6.3 bash
```
集群版需要暴露zk端口与所有组件的端口:
```
-docker run -p 2181:2181 -p 7527:7527 -p 10921:10921 -p 10922:10922 -p 8080:8080 -p 9902:9902 -it 4pdosc/openmldb:0.5.2 bash
+docker run -p 2181:2181 -p 7527:7527 -p 10921:10921 -p 10922:10922 -p 8080:8080 -p 9902:9902 -it 4pdosc/openmldb:0.6.3 bash
```
-```{tip}
-`-p` 将“物理机端口”和“容器内端口”进行绑定,可能出现“容器端口号”在物理机上已被使用的情况。
+- 使用host网络,可以不用修改endpoint配置。见[容器onebox-apiserver](#容器onebox-apiserver)
-如果OpenMLDB服务仅在单个容器内,只需要改变一下暴露的物理机端口号,客户端相应地改变访问端口。各个服务进程的配置项不需要更改。
+如果是跨主机使用CLI/SDK访问容器onebox,只能通过`--network host`,并更改所有endpoint为公网IP,才能顺利访问。
-如果OpenMLDB服务进程是分布式的,在多个容器内,出现“端口号被占用”,我们不推荐“切换暴露端口号”的方式,请改变配置的端口号,暴露时使用同样的端口号。
-```
+```{tip}
+`-p` 将“物理机端口”和“容器内端口”进行绑定,可能出现“容器端口号”在物理机上已被使用的情况。我们不推荐“切换暴露端口号”的方式,请改变conf中endpoint的端口号,暴露时使用同样的端口号。
-#### host network
-或者更方便地,使用 host networking,不进行端口隔离,例如:
-```
-docker run --network host -it 4pdosc/openmldb:0.5.2 bash
+暴露端口的模式下,无法绑定物理机ip(容器中仅有docker bridge ip和127.0.0.1),所以,想要绑定公网IP,必须使用host网络。
```
-但这种情况下,很容易出现端口已被主机中其他进程占用。如果出现占用,请仔细更改端口号。
-
-### 跨主机访问本机容器
-除了bridge模式无法做到跨主机访问,暴露端口和host network的方法均可以实现**跨主机**访问本机容器。
diff --git a/docs/zh/reference/sql/data_types/date_and_time_types.md b/docs/zh/reference/sql/data_types/date_and_time_types.md
index e9213548a9a..9bba51438e7 100644
--- a/docs/zh/reference/sql/data_types/date_and_time_types.md
+++ b/docs/zh/reference/sql/data_types/date_and_time_types.md
@@ -1,13 +1,13 @@
# 日期与时间类型
-OpenMLDB支持日期类型`DATE`和时间戳`TIMESTAMP`
+OpenMLDB支持日期类型`DATE`和时间戳`TIMESTAMP`。
每个时间类型有一个有效值范围和一个NULL值,当指定不合法不能表示的值时使用NULL值。
| 类型 | 大小 (bytes) | 范围 | 格式 | 用途 |
| :-------- | :----------- | :----------------------------------------------------------- | :-------------- | :----------------------- |
| DATE | 4 | 1900-01-01 ~ | YYYY-MM-DD | 日期值 |
-| TIMESTAMP | 8 | 1970-01-01 00:00:00/2038结束时间是第 **2147483647** 秒,北京时间 **2038-1-19 11:14:07**,格林尼治时间 2038年1月19日 凌晨 03:14:07 | YYYYMMDD HHMMSS | 混合日期和时间值,时间戳 |
+| TIMESTAMP | 8 | ~ INT64_MAX | 在线: int64, 离线`LOAD DATA`: int64 或 'yyyy-MM-dd'T'HH:mm:ss[.SSS][XXX]' | 混合日期和时间值,时间戳 |
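+
+例如,在线模式下可以直接用 int64 毫秒值写入 `TIMESTAMP` 列(以下表结构仅为示意):
+```sql
+-- 假设表 t1 的第二列为 TIMESTAMP 类型
+INSERT INTO t1 VALUES (1, 1635247427000);
+```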
## 时区处理
diff --git a/docs/zh/reference/sql/ddl/CREATE_DATABASE_STATEMENT.md b/docs/zh/reference/sql/ddl/CREATE_DATABASE_STATEMENT.md
index 7e78efd0c3d..83a54a8902f 100644
--- a/docs/zh/reference/sql/ddl/CREATE_DATABASE_STATEMENT.md
+++ b/docs/zh/reference/sql/ddl/CREATE_DATABASE_STATEMENT.md
@@ -20,14 +20,14 @@ DBName ::=
```sql
CREATE DATABASE db1;
--- SUCCEED: Create database successfully
+-- SUCCEED
```
-在创建一个名字为`db2`的数据库:
+再创建一个名字为`db2`的数据库:
```sql
CREATE DATABASE db2;
--- SUCCEED: Create database successfully
+-- SUCCEED
```
显示数据库列表:
@@ -61,4 +61,4 @@ CREATE DATABASE db1;
[DROP DATABASE](./DROP_DATABASE_STATEMENT.md)
-[SHOW DATABASES](../ddl/SHOW_STATEMENT.md#show-databases)
\ No newline at end of file
+[SHOW DATABASES](./SHOW_DATABASES_STATEMENT.md)
diff --git a/docs/zh/reference/sql/ddl/CREATE_INDEX_STATEMENT.md b/docs/zh/reference/sql/ddl/CREATE_INDEX_STATEMENT.md
new file mode 100644
index 00000000000..9d4f93bdd56
--- /dev/null
+++ b/docs/zh/reference/sql/ddl/CREATE_INDEX_STATEMENT.md
@@ -0,0 +1,62 @@
+# CREATE INDEX
+
+`CREATE INDEX` 语句用来创建索引。 如果表里有数据,添加索引会发起异步任务来加载数据。
+通过`ns_client`中的`showopstatus`命令可以查看任务状态,详见[运维 CLI](../../../maintain/cli.md#showopstatus)。
+
+## 语法
+
+```sql
+CreateIndexStmt ::=
+    'CREATE' 'INDEX' IndexName 'ON' TableName IndexColumn OptOptionsList
+
+IndexName ::= Identifier
+
+TableName ::=
+ Identifier ('.' Identifier)?
+
+
+IndexColumn ::=
+ IndexColumnPrefix ")"
+
+IndexColumnPrefix ::=
+ "(" ColumnExpression
+ | IndexColumnPrefix "," ColumnExpression
+
+ColumnExpression ::=
+ Identifier
+
+OptOptionsList ::=
+ "OPTIONS" OptionList
+
+OptionList ::=
+ OptionsListPrefix ")"
+
+OptionsListPrefix ::=
+ "(" OptionEntry
+ | OptionsListPrefix "," OptionEntry
+
+OptionEntry ::=
+ Identifier "=" Identifier
+
+```
+
+
+
+## **示例**
+```SQL
+CREATE INDEX index2 ON t5 (col2);
+-- SUCCEED
+```
+```{note}
+如果不指定Options, 创建的索引就没有指定`TS`列,因此不能用在需要上线的SQL中。
+```
+我们可以通过类似如下命令在创建索引时指定`TS`列:
+```SQL
+CREATE INDEX index3 ON t5 (col3) OPTIONS (ts=ts1, ttl_type=absolute, ttl=30d);
+-- SUCCEED
+```
+关于`TTL`和`TTL_TYPE`的更多信息参考[这里](./CREATE_TABLE_STATEMENT.md)
+
+## 相关SQL
+
+[DROP INDEX](./DROP_INDEX_STATEMENT.md)
\ No newline at end of file
diff --git a/docs/zh/reference/sql/ddl/CREATE_TABLE_STATEMENT.md b/docs/zh/reference/sql/ddl/CREATE_TABLE_STATEMENT.md
index 0fdc2a71e3b..81fd01450a2 100644
--- a/docs/zh/reference/sql/ddl/CREATE_TABLE_STATEMENT.md
+++ b/docs/zh/reference/sql/ddl/CREATE_TABLE_STATEMENT.md
@@ -1,40 +1,37 @@
# CREATE TABLE
+`CREATE TABLE` 语句用于创建一张表。同一个数据库下,表名必须是唯一的,重复创建同名表会发生错误。
## Syntax
```sql
CreateTableStmt ::=
- 'CREATE' 'TABLE' IfNotExists TableName (
- TableElementList CreateTableSelectOpt | LikeTableWithOrWithoutParen ) OnCommitOpt
+ 'CREATE' 'TABLE' IfNotExists TableName ( TableElementList CreateTableSelectOpt | LikeTableWithOrWithoutParen ) OnCommitOpt
IfNotExists ::=
('IF' 'NOT' 'EXISTS')?
-
+
TableName ::=
Identifier ('.' Identifier)?
TableElementList ::=
TableElement ( ',' TableElement )*
-
+
TableElement ::=
- ColumnDef
-| ColumnIndex
+ ColumnDef | ColumnIndex
```
- `CREATE TABLE` 语句用于创建一张表。同一个数据库下,表名在必须是唯一的,在同一个数据库下,重复创建同名表,会发生错误。
+建表语句中需要定义`TableElementList`,即`TableElement`列表。`TableElement`分为列描述`ColumnDef`和列索引`ColumnIndex`。OpenMLDB要求`TableElement`列表中至少包含一个`ColumnDef`。
-建表语句中需要定义`table_element`列表。`table_element`分为列描述`ColumnDef`和`Constraint`。OpenMLDB要求`table_element`列表中至少包含一个`ColumnDef`。
-### 相关语法元素
-#### 列描述ColumnDef(必要)
+### 列描述ColumnDef(必要)
```SQL
ColumnDef ::=
ColumnName ( ColumnType ) [ColumnOptionList]
-
-ColumnName
- ::= Identifier ( '.' Identifier ( '.' Identifier )? )?
+
+ColumnName ::=
+ Identifier ( '.' Identifier ( '.' Identifier )? )?
ColumnType ::=
'INT' | 'INT32'
@@ -44,47 +41,51 @@ ColumnType ::=
|'DOUBLE'
|'TIMESTAMP'
|'DATE'
+ |'BOOL'
|'STRING' | 'VARCHAR'
-ColumnOptionList
- ::= ColumnOption*
-ColumnOption
- ::= ['DEFAULT' DefaultValueExpr ] ['NOT' 'NULL']
+ColumnOptionList ::=
+ ColumnOption*
+
+ColumnOption ::=
+ ['DEFAULT' DefaultValueExpr ] ['NOT' 'NULL']
-DefaultValueExpr
- ::= int_literal | float_literal | double_literal | string_literal
+DefaultValueExpr ::=
+ int_literal | float_literal | double_literal | string_literal
```
-一张表中包含一个或多个列。每一列的列描述`ColumnDef`描述了列名、列类型以及类配置。
+一张表中包含一个或多个列。每一列的列描述`ColumnDef`描述了列名、列类型以及列约束配置。
- 列名:列在表中的名字。同一张表内的列名必须是唯一的。
-- 列类型:列的类型。想要了解OpenMLDB支持的数据类型,可以参考[数据类型](../data_types/reference.md)。
+- 列类型:列的类型。关于OpenMLDB支持的数据类型,详见[数据类型](../data_types)。
- 列约束配置:
- - `NOT NULL`: 配置列的不允许为空值。
- - `DEFAULT`: 配置列默认值。`NOT NULL`的属性会同时配置`DEFAULT`默认值,这样的话,查入数据时,若没有定义该列的值,会插入默认值。若配置`NOT NULL`属性且没有配置`DEFAULT`值,插入语句中未定义改列值时,OpenMLDB会抛出错误。
+ - `NOT NULL`: 该列的取值不允许为空。
+ - `DEFAULT`: 设置该列的默认值。`NOT NULL`的属性推荐同时配置`DEFAULT`默认值,在插入数据时,若没有定义该列的值,会插入默认值。若设置了`NOT NULL`属性但没有配置`DEFAULT`值,插入语句中未定义该列值时,OpenMLDB会抛出错误。
-##### Example: 创建一张表
+#### Example
+ **示例1:创建一张表**
将当前数据库设为`db1`,在当前数据库中创建一张表`t1`,包含列`col0`,列类型为STRING
```sql
CREATE DATABASE db1;
--- SUCCEED: Create database successfully
-
+-- SUCCEED
USE db1;
-- SUCCEED: Database changed
-
CREATE TABLE t1(col0 STRING);
--- SUCCEED: Create successfully
-
+-- SUCCEED
```
-
-指定在数据库`db1`中创建一张表`t1`,包含列`col0`,列类型为STRING
+假如当前会话不在数据库`db1`下,但是仍要在`db1`中创建一张表`t2`,包含列`col0`,列类型为STRING;列`col1`,列类型为int。
```sql
-CREATE TABLE db1.t1 (col0 STRING, col1 int);
--- SUCCEED: Create successfully
-desc t1;
+CREATE TABLE db1.t2 (col0 STRING, col1 int);
+-- SUCCEED
+```
+切换到数据库`db1`,查看表`t2`的详细信息。
+```sql
+USE db1;
+-- SUCCEED: Database changed
+desc t2;
--- ------- --------- ------ ---------
# Field Type Null Default
--- ------- --------- ------ ---------
@@ -96,115 +97,106 @@ desc t1;
--- -------------------- ------ ---- ------ ---------------
1 INDEX_0_1639524201 col0 - 0min kAbsoluteTime
--- -------------------- ------ ---- ------ ---------------
+ --------------
+ storage_mode
+ --------------
+ Memory
+ --------------
```
-##### Example: 创建一张表,配置列不允许为空NOT NULL
+
+**示例2:在同一个数据库下重复创建同名表**
```sql
-USE db1;
CREATE TABLE t1 (col0 STRING NOT NULL, col1 int);
--- SUCCEED: Create successfully
+-- SUCCEED
+CREATE TABLE t1 (col0 STRING NOT NULL, col1 int);
+-- Error: table already exists
+CREATE TABLE t1 (col0 STRING NOT NULL, col1 string);
+-- Error: table already exists
```
-```sql
-desc t1;
- --- ------- --------- ------ ---------
- # Field Type Null Default
- --- ------- --------- ------ ---------
- 1 col0 Varchar NO
- 2 col1 Int YES
- --- ------- --------- ------ ---------
- --- -------------------- ------ ---- ------ ---------------
- # name keys ts ttl ttl_type
- --- -------------------- ------ ---- ------ ---------------
- 1 INDEX_0_1639523978 col0 - 0min kAbsoluteTime
- --- -------------------- ------ ---- ------ ---------------
-```
-##### Example: 创建一张表,配置列配置默认值
+**示例3:创建一张表,配置列不允许为空(NOT NULL)**
```sql
USE db1;
-CREATE TABLE t1 (col0 STRING DEFAULT "NA", col1 int);
--- SUCCEED: Create successfully
+-- SUCCEED: Database changed
+CREATE TABLE t3 (col0 STRING NOT NULL, col1 int);
+-- SUCCEED
```
-
+查看该表的详细信息
```sql
-desc t1;
---- ------- --------- ------ ---------
- # Field Type Null Default
---- ------- --------- ------ ---------
- 1 col0 Varchar NO NA
- 2 col1 Int YES
---- ------- --------- ------ ---------
---- -------------------- ------ ---- ------ ---------------
- # name keys ts ttl ttl_type
---- -------------------- ------ ---- ------ ---------------
- 1 INDEX_0_1639524344 col0 - 0min kAbsoluteTime
---- -------------------- ------ ---- ------ ---------------
+desc t3;
+ --- ------- --------- ------ ---------
+ # Field Type Null Default
+ --- ------- --------- ------ ---------
+ 1 col0 Varchar NO
+ 2 col1 Int YES
+ --- ------- --------- ------ ---------
+ --- -------------------- ------ ---- ------ ---------------
+ # name keys ts ttl ttl_type
+ --- -------------------- ------ ---- ------ ---------------
+ 1 INDEX_0_1657327434 col0 - 0min kAbsoluteTime
+ --- -------------------- ------ ---- ------ ---------------
+ --------------
+ storage_mode
+ --------------
+ Memory
+ --------------
```
-##### Example: 在同一个数据库下重复创建同名表
+
+**示例4:创建一张表,设置列默认值**
```sql
USE db1;
-CREATE TABLE t1 (col0 STRING NOT NULL, col1 int);
--- SUCCEED: Create successfully
-CREATE TABLE t1 (col1 STRING NOT NULL, col1 int);
--- SUCCEED: Create successfully
+--SUCCEED: Database changed
+CREATE TABLE t4 (col0 STRING DEFAULT "NA", col1 int);
+-- SUCCEED
+desc t4;
+ --- ------- --------- ------ ---------
+ # Field Type Null Default
+ --- ------- --------- ------ ---------
+ 1 col0 Varchar YES NA
+ 2 col1 Int YES
+ --- ------- --------- ------ ---------
+ --- -------------------- ------ ---- ------ ---------------
+ # name keys ts ttl ttl_type
+ --- -------------------- ------ ---- ------ ---------------
+ 1 INDEX_0_1657327593 col0 - 0min kAbsoluteTime
+ --- -------------------- ------ ---- ------ ---------------
+ --------------
+ storage_mode
+ --------------
+ Memory
+ --------------
```
-#### 列索引ColumnIndex(可选)
+
+### 列索引ColumnIndex(可选)
```sql
-ColumnIndex
- ::= 'INDEX' IndexName '(' IndexOptionList ')'
+ColumnIndex ::=
+ 'INDEX' '(' IndexOptionList ')'
-IndexOptionList
- ::= IndexOption ( ',' IndexOption )*
-IndexOption
- ::= 'KEY' '=' ColumnNameList
- | 'TS' '=' ColumnName
- |
- | 'TTL' = int_literal
- | 'REPLICANUM' = int_literal
-
--- IndexKeyOption
-IndexKeyOption
- ::= 'KEY' '=' ColumnNameList
-ColumnNameList
- :: = '(' ColumnName (',' ColumnName)* ')'
--- IndexTsOption
-IndexTsOption
- ::= 'TS' '=' ColumnName
--- IndexTtlTypeOption
-IndexTtlTypeOption
- ::= 'TTL_TYPE' '=' TTLType
-TTLType ::=
- 'ABSOLUTE'
- | 'LATEST'
- | 'ABSORLAT'
- | 'ABSANDLAT'
+IndexOptionList ::=
+ IndexOption ( ',' IndexOption )*
--- IndexTtlOption
-IndexTtlOption
- ::= 'TTL' '=' int_literal|interval_literal
-
-interval_literal ::= int_literal 'S'|'D'|'M'|'H'
-
-
+IndexOption ::=
+ IndexOptionName '=' expr
```
-索引可以被数据库搜索引擎用来加速数据的检索。 简单说来,索引就是指向表中数据的指针。配置一个列索引一般需要配置索引key,索引时间列, TTL和TTL_TYPE。其中索引key是必须配置的,其他配置项都为可选。下表列出了列索引配置项:
+索引可以被数据库搜索引擎用来加速数据的检索。 简单说来,索引就是指向表中数据的指针。OpenMLDB 支持的索引配置项(`IndexOptionName`)有索引`KEY`,索引时间列`TS`, 最大存活时间/条数`TTL`和淘汰规则`TTL_TYPE`。其中`KEY`是必须配置的,其他配置项都为可选项。下表介绍了各索引配置项的含义、支持的表达式(`expr`)以及用法示例:
-| 配置项 | 描述 | 用法示例 |
-| ---------- | ------------------------------------------------------------ | ------------------------------------------------------------ |
-| `KEY` | 索引列(必选)。OpenMLDB支持单列索引,也支持联合索引。当`KEY`=一列时,配置的是单列索引。当`KEY`=多列时,配置的是这几列的联合索引,具体来说会将几列按顺序拼接成一个字符串作为索引。 | 单列索引:`INDEX(KEY=col1)`<br/>联合索引:`INDEX(KEY=(col1, col2))` |
-| `TS` | 索引时间列(可选)。同一个索引上的数据将按照时间索引列排序。当不显式配置`TS`时,使用数据插入的时间戳作为索引时间。 | `INDEX(KEY=col1, TS=std_time)`。索引列为col1,col1相同的数据行按std_time排序。 |
-| `TTL_TYPE` | 淘汰规则(可选)。包括:`ABSOLUTE`, `LATEST`, `ABSORLAT`, `ABSANDLAT`这四种类型。当不显式配置`TTL_TYPE`时,默认使用`ABSOLUTE`过期配置。 | 具体用法可以参考“TTL和TTL_TYPE的配置细则” |
-| `TTL` | 最大存活时间/条数()可选。不同的TTL_TYPE有不同的配置方式。当不显式配置`TTL`时,`TTL=0`。`TTL`为0表示不设置淘汰规则,OpenMLDB将不会淘汰记录。 | |
+| 配置项 | 描述 | expr | 用法示例 |
+|------------|---------------------------------------------------------------------------------------------------------|----------------------------------------------------------------------------------------------------------------|----------------------------------------------------------------------------------------|
+| `KEY` | 索引列(必选)。OpenMLDB支持单列索引,也支持联合索引。当`KEY`后只有一列时,仅在该列上建立索引。当`KEY`后有多列时,建立这几列的联合索引:将多列按顺序拼接成一个字符串作为索引。 | 支持单列索引:`ColumnName`<br/>或联合索引:<br/>`(ColumnName (, ColumnName)* )` | 单列索引:`INDEX(KEY=col1)`<br/>联合索引:`INDEX(KEY=(col1, col2))` |
+| `TS` | 索引时间列(可选)。同一个索引上的数据将按照时间索引列排序。当不显式配置`TS`时,使用数据插入的时间戳作为索引时间。 | `ColumnName` | `INDEX(KEY=col1, TS=std_time)`。索引列为col1,col1相同的数据行按std_time排序。 |
+| `TTL_TYPE` | 淘汰规则(可选)。包括四种类型,当不显式配置`TTL_TYPE`时,默认使用`ABSOLUTE`过期配置。 | 支持的expr如下:`ABSOLUTE`<br/>`LATEST`<br/>`ABSORLAT`<br/>`ABSANDLAT`。 | 具体用法可以参考下文“TTL和TTL_TYPE的配置细则” |
+| `TTL` | 最大存活时间/条数(可选)。依赖于`TTL_TYPE`,不同的`TTL_TYPE`有不同的`TTL` 配置方式。当不显式配置`TTL`时,`TTL=0`,表示不设置淘汰规则,OpenMLDB将不会淘汰记录。 | 支持数值:`int_literal`<br/>或数值带时间单位(`S,M,H,D`):`interval_literal`<br/>或元组形式:`( interval_literal , int_literal )` | 具体用法可以参考下文“TTL和TTL_TYPE的配置细则” |
-TTL和TTL_TYPE的配置细则:
+**TTL和TTL_TYPE的配置细则:**
| TTL_TYPE | TTL | 描述 | 用法示例 |
| ----------- | ------------------------------------------------------------ | ---------------------------------------------------- | ------------------------------------------------------------ |
@@ -213,13 +205,14 @@ TTL和TTL_TYPE的配置细则:
| `ABSORLAT` | 配置过期时间和最大存活条数。配置值是一个2元组,形如`(100m, 10), (1d, 1)`。最大可以配置`(15768000m, 1000)`。 | 当且仅当记录过期**或**记录超过最大条数时,才会淘汰。 | `INDEX(key=c1, ts=c6, ttl=(120min, 100), ttl_type=absorlat)`。当记录超过100条,**或者**当记录过期时,会被淘汰 |
| `ABSANDLAT` | 配置过期时间和最大存活条数。配置值是一个2元组,形如`(100m, 10), (1d, 1)`。最大可以配置`(15768000m, 1000)`。 | 当记录过期**且**记录超过最大条数时,记录会被淘汰。 | `INDEX(key=c1, ts=c6, ttl=(120min, 100), ttl_type=absandlat)`。当记录超过100条,**而且**记录过期时,会被淘汰 |
-##### Example: 创建一张带单列索引的表
+#### Example
+**示例1:创建一张带单列索引的表**
```sql
USE db1;
+--SUCCEED: Database changed
CREATE TABLE t1 (col0 STRING, col1 int, std_time TIMESTAMP, INDEX(KEY=col1));
--- SUCCEED: Create successfully
-
+-- SUCCEED
desc t1;
--- ---------- ----------- ------ ---------
# Field Type Null Default
@@ -235,14 +228,13 @@ desc t1;
--- -------------------- ------ ---- ------ ---------------
```
-##### Example: 创建一张带联合列索引的表
+**示例2:创建一张带联合列索引的表**
```sql
USE db1;
-
+--SUCCEED: Database changed
CREATE TABLE t1 (col0 STRING, col1 int, std_time TIMESTAMP, INDEX(KEY=(col0, col1)));
--- SUCCEED: Create successfully
-
+-- SUCCEED
desc t1;
--- ---------- ----------- ------ ---------
# Field Type Null Default
@@ -256,17 +248,15 @@ desc t1;
--- -------------------- ----------- ---- ------ ---------------
1 INDEX_0_1639524576 col0|col1 - 0min kAbsoluteTime
--- -------------------- ----------- ---- ------ ---------------
-
```
-##### Example: 创建一张带单列索引+时间列的表
+**示例3:创建一张带单列索引+时间列的表**
```sql
USE db1;
-
+--SUCCEED: Database changed
CREATE TABLE t1 (col0 STRING, col1 int, std_time TIMESTAMP, INDEX(KEY=col1, TS=std_time));
--- SUCCEED: Create successfully
-
+-- SUCCEED
desc t1;
--- ---------- ----------- ------ ---------
# Field Type Null Default
@@ -282,14 +272,14 @@ desc t1;
--- -------------------- ------ ---------- ------ ---------------
```
-##### Example: 创建一张带单列索引+时间列的TTL type为abusolute表,并配置ttl为30天
+
+**示例4:创建一张带单列索引+时间列的TTL type为abusolute表,并配置ttl为30天**
```sql
USE db1;
-
+--SUCCEED: Database changed
CREATE TABLE t1 (col0 STRING, col1 int, std_time TIMESTAMP, INDEX(KEY=col1, TS=std_time, TTL_TYPE=absolute, TTL=30d));
--- SUCCEED: Create successfully
-
+-- SUCCEED
desc t1;
--- ---------- ----------- ------ ---------
# Field Type Null Default
@@ -305,14 +295,12 @@ desc t1;
--- -------------------- ------ ---------- ---------- ---------------
```
-##### Example: 创建一张带单列索引+时间列的TTL type为latest表,并配置ttl为1
-
+**示例5:创建一张带单列索引+时间列的TTL type为latest表,并配置ttl为1**
```sql
USE db1;
-
+--SUCCEED: Database changed
CREATE TABLE t1 (col0 STRING, col1 int, std_time TIMESTAMP, INDEX(KEY=col1, TS=std_time, TTL_TYPE=latest, TTL=1));
--- SUCCEED: Create successfully
-
+-- SUCCEED
desc t1;
--- ---------- ----------- ------ ---------
# Field Type Null Default
@@ -328,14 +316,13 @@ desc t1;
--- -------------------- ------ ---------- ----- -------------
```
-##### Example: 创建一张带单列索引+时间列的TTL type为absANDlat表,并配置过期时间为30天,最大留存条数为10条
+**示例6:创建一张带单列索引+时间列的TTL type为absANDlat表,并配置过期时间为30天,最大留存条数为10条**
```sql
USE db1;
-
+--SUCCEED: Database changed
CREATE TABLE t1 (col0 STRING, col1 int, std_time TIMESTAMP, INDEX(KEY=col1, TS=std_time, TTL_TYPE=absandlat, TTL=(30d,10)));
--- SUCCEED: Create successfully
-
+-- SUCCEED
desc t1;
--- ---------- ----------- ------ ---------
# Field Type Null Default
@@ -349,17 +336,15 @@ desc t1;
--- -------------------- ------ ---------- -------------- ------------
1 INDEX_0_1639525038 col1 std_time 43200min&&10 kAbsAndLat
--- -------------------- ------ ---------- -------------- ------------
-
```
-##### Example: 创建一张带单列索引+时间列的TTL type为absORlat表,并配置过期时间为30天,最大留存条数为10条
+**示例7:创建一张带单列索引+时间列的TTL type为absORlat表,并配置过期时间为30天,最大留存条数为10条**
```sql
USE db1;
-
+--SUCCEED: Database changed
CREATE TABLE t1 (col0 STRING, col1 int, std_time TIMESTAMP, INDEX(KEY=col1, TS=std_time, TTL_TYPE=absorlat, TTL=(30d,10)));
---SUCCEED: Create successfully
-
+--SUCCEED
desc t1;
--- ---------- ----------- ------ ---------
# Field Type Null Default
@@ -375,13 +360,12 @@ desc t1;
--- -------------------- ------ ---------- -------------- -----------
```
-##### Example: 创建一张多索引的表
+**示例8:创建一张多索引的表**
```sql
USE db1;
-
+--SUCCEED: Database changed
CREATE TABLE t1 (col0 STRING, col1 int, std_time TIMESTAMP, INDEX(KEY=col0, TS=std_time), INDEX(KEY=col1, TS=std_time));
---SUCCEED: Create successfully
-
+--SUCCEED
desc t1;
--- ---------- ----------- ------ ---------
# Field Type Null Default
@@ -398,26 +382,22 @@ desc t1;
--- -------------------- ------ ---------- ------ ---------------
```
-#### 表属性TableOptions(可选)
+### 表属性TableOptions(可选)
```sql
TableOptions
::= 'OPTIONS' '(' TableOptionItem (',' TableOptionItem)* ')'
-
TableOptionItem
::= PartitionNumOption
| ReplicaNumOption
| DistributeOption
| StorageModeOption
--- PartitionNum
PartitionNumOption
::= 'PARTITIONNUM' '=' int_literal
--- ReplicaNumOption
ReplicaNumOption
::= 'REPLICANUM' '=' int_literal
--- DistributeOption
DistributeOption
::= 'DISTRIBUTION' '=' DistributionList
DistributionList
@@ -430,11 +410,8 @@ FollowerEndpointList
::= '[' Endpoint (',' Endpoint)* ']'
Endpoint
::= string_literals
-
--- StorageModeOption
StorageModeOption
::= 'STORAGE_MODE' '=' StorageMode
-
StorageMode
::= 'Memory'
| 'HDD'
@@ -446,24 +423,24 @@ StorageMode
| 配置项 | 描述 | 用法示例 |
|----------------|------------------------------------------------------------------------------------------------------------------------------------------------------------------|-------------------------------------------------------------------------------|
| `PARTITIONNUM` | 配置表的分区数。OpenMLDB将表分为不同的分区块来存储。分区是OpenMLDB的存储、副本、以及故障恢复相关操作的基本单元。不显式配置时,`PARTITIONNUM`默认值为8。 | `OPTIONS (PARTITIONNUM=8)` |
-| `REPLICANUM` | 配置表的副本数。请注意,副本数只有在Cluster OpenMLDB中才可以配置。 | `OPTIONS (REPLICANUM=3)` |
-| `DISTRIBUTION` | 配置分布式的节点endpoint配置。一般包含一个Leader节点和若干follower节点。`(leader, [follower1, follower2, ..])`。不显式配置是,OpenMLDB会自动的根据环境和节点来配置`DISTRIBUTION`。 | `DISTRIBUTION = [ ('127.0.0.1:6527', [ '127.0.0.1:6528','127.0.0.1:6529' ])]` |
-| `STORAGE_MODE` | 表的存储模式,支持的模式为`Memory`、`HDD`或`SSD`。不显式配置时,默认为`Memory`。<br/>如果需要支持非`Memory`模式的存储模式,`tablet`需要额外的配置选项,具体可参考[tablet配置文件 conf/tablet.flags](../../../deploy/conf.md)。 | `OPTIONS (STORAGE_MODE='HDD')` |
+| `REPLICANUM` | 配置表的副本数。请注意,副本数只有在集群版中才可以配置。 | `OPTIONS (REPLICANUM=3)` |
+| `DISTRIBUTION` | 配置分布式的节点endpoint。一般包含一个Leader节点和若干Follower节点。`(leader, [follower1, follower2, ..])`。不显式配置时,OpenMLDB会自动根据环境和节点来配置`DISTRIBUTION`。 | `DISTRIBUTION = [ ('127.0.0.1:6527', [ '127.0.0.1:6528','127.0.0.1:6529' ])]` |
+| `STORAGE_MODE` | 表的存储模式,支持的模式有`Memory`、`HDD`或`SSD`。不显式配置时,默认为`Memory`。<br/>如果需要支持非`Memory`模式的存储模式,`tablet`需要额外的配置选项,具体可参考[tablet配置文件 conf/tablet.flags](../../../deploy/conf.md)。 | `OPTIONS (STORAGE_MODE='HDD')` |
-##### 磁盘表(`STORAGE_MODE` == `HDD`|`SSD`)与内存表(`STORAGE_MODE` == `Memory`)区别
+#### 磁盘表与内存表区别
+- 磁盘表对应`STORAGE_MODE`的取值为`HDD`或`SSD`。内存表对应的`STORAGE_MODE`取值为`Memory`。
- 目前磁盘表不支持GC操作
-- 磁盘表插入数据,同一个索引下如果(`key`, `ts`)相同,会覆盖老的数据;内存表则会插入一条新的数据
+- 磁盘表插入数据,同一个索引下如果(`key`, `ts`)相同,会覆盖旧的数据;内存表则会插入一条新的数据
- 磁盘表不支持`addindex`和`deleteindex`操作,所以创建磁盘表的时候需要定义好所有需要的索引
(`deploy`命令会自动添加需要的索引,所以对于磁盘表,如果创建的时候缺失对应的索引,则`deploy`会失败)
-##### Example: 创建一张带表,配置分片数为8,副本数为3,存储模式为HDD
-
+#### Example
+创建一张表,配置分片数为8,副本数为3,存储模式为HDD
```sql
USE db1;
-
+--SUCCEED: Database changed
CREATE TABLE t1 (col0 STRING, col1 int, std_time TIMESTAMP, INDEX(KEY=col1, TS=std_time)) OPTIONS(partitionnum=8, replicanum=3, storage_mode='HDD');
---SUCCEED: Create successfully
-
+--SUCCEED
DESC t1;
--- ---------- ----------- ------ ----------
# Field Type Null Default
@@ -483,12 +460,14 @@ DESC t1;
HDD
--------------
```
+创建一张表,指定分片的分布状态
+```sql
+create table t1 (col0 string, col1 int) options (DISTRIBUTION=[('127.0.0.1:30921', ['127.0.0.1:30922', '127.0.0.1:30923']), ('127.0.0.1:30922', ['127.0.0.1:30921', '127.0.0.1:30923'])]);
+--SUCCEED
+```
## 相关SQL
[CREATE DATABASE](../ddl/CREATE_DATABASE_STATEMENT.md)
-[USE DATABASE](../ddl/USE_DATABASE_STATEMENT.md)
-
-
-
+[USE DATABASE](../ddl/USE_DATABASE_STATEMENT.md)
\ No newline at end of file
diff --git a/docs/zh/reference/sql/ddl/DESC_STATEMENT.md b/docs/zh/reference/sql/ddl/DESC_STATEMENT.md
index e2f1c1777c2..1088411dc03 100644
--- a/docs/zh/reference/sql/ddl/DESC_STATEMENT.md
+++ b/docs/zh/reference/sql/ddl/DESC_STATEMENT.md
@@ -24,10 +24,10 @@ DESC table_name;
```sql
CREATE DATABASE db1;
--- SUCCEED: Create database successfully
+-- SUCCEED
CREATE DATABASE db2;
--- SUCCEED: Create database successfully
+-- SUCCEED
```
然后选择`db1`作为当前数据库:
@@ -41,21 +41,26 @@ USE db1;
```sql
CREATE TABLE t1 (col0 STRING, col1 int, std_time TIMESTAMP, INDEX(KEY=col1, TS=std_time, TTL_TYPE=absolute, TTL=30d));
---SUCCEED: Create successfully
+--SUCCEED
desc t1;
- --- ---------- ----------- ------ ---------
- # Field Type Null Default
- --- ---------- ----------- ------ ---------
- 1 col0 Varchar YES
- 2 col1 Int YES
- 3 std_time Timestamp YES
- --- ---------- ----------- ------ ---------
- --- -------------------- ------ ---------- ---------- ---------------
- # name keys ts ttl ttl_type
- --- -------------------- ------ ---------- ---------- ---------------
- 1 INDEX_0_1639524729 col1 std_time 43200min kAbsoluteTime
- --- -------------------- ------ ---------- ---------- ---------------
+ --- ---------- ----------- ------ ---------
+ # Field Type Null Default
+ --- ---------- ----------- ------ ---------
+ 1 col0 Varchar YES
+ 2 col1 Int YES
+ 3 std_time Timestamp YES
+ --- ---------- ----------- ------ ---------
+ --- -------------------- ------ ---------- ---------- ---------------
+ # name keys ts ttl ttl_type
+ --- -------------------- ------ ---------- ---------- ---------------
+ 1 INDEX_0_1658136511 col1 std_time 43200min kAbsoluteTime
+ --- -------------------- ------ ---------- ---------- ---------------
+ --------------
+ storage_mode
+ --------------
+ Memory
+ --------------
```
@@ -65,7 +70,7 @@ desc t1;
[DROP DATABASE](./DROP_DATABASE_STATEMENT.md)
-[SHOW DATABASES](./SHOW_STATEMENT.md#show-databases)
+[SHOW DATABASES](./SHOW_DATABASES_STATEMENT.md)
-[SHOW TABLES](../ddl/SHOW_STATEMENT.md)
+[SHOW TABLES](./SHOW_TABLES_STATEMENT.md)
diff --git a/docs/zh/reference/sql/ddl/DROP_DATABASE_STATEMENT.md b/docs/zh/reference/sql/ddl/DROP_DATABASE_STATEMENT.md
index 5fc95107cb6..b4defe31d48 100644
--- a/docs/zh/reference/sql/ddl/DROP_DATABASE_STATEMENT.md
+++ b/docs/zh/reference/sql/ddl/DROP_DATABASE_STATEMENT.md
@@ -10,26 +10,28 @@ DROP DATABASE database_name
## **Example**
-创建一个数据库,并设置为当前数据库:
+创建数据库db1和db2:
```sql
CREATE DATABASE db1;
--- SUCCEED: Create database successfully
+-- SUCCEED
CREATE DATABASE db2;
--- SUCCEED: Create database successfully
+-- SUCCEED
```
查看数据库列表:
```sql
SHOW DATABASES;
- -----------
- Databases
- -----------
- db1
- db2
- -----------
+ -----------
+ Databases
+ -----------
+ db1
+ db2
+ -----------
+
+2 rows in set
```
删除数据库`db1`
@@ -42,11 +44,13 @@ DROP DATABASE db1;
```sql
SHOW DATABASES;
- -----------
- Databases
- -----------
- db2
- -----------
+ -----------
+ Databases
+ -----------
+ db2
+ -----------
+
+1 rows in set
```
## 相关语句
@@ -55,5 +59,5 @@ SHOW DATABASES;
[CREATE DATABASE](./CREATE_DATABASE_STATEMENT.md)
-[SHOW DATABASES](../ddl/SHOW_STATEMENT.md#show-databases)
+[SHOW DATABASES](./SHOW_DATABASES_STATEMENT.md)
diff --git a/docs/zh/reference/sql/ddl/DROP_INDEX_STATEMENT.md b/docs/zh/reference/sql/ddl/DROP_INDEX_STATEMENT.md
new file mode 100644
index 00000000000..33a9e658f67
--- /dev/null
+++ b/docs/zh/reference/sql/ddl/DROP_INDEX_STATEMENT.md
@@ -0,0 +1,21 @@
+# DROP INDEX
+`DROP INDEX`语句用来删除表中已有的索引。
+
+## 语法
+
+```sql
+DROPIndexstmt ::=
+ 'DROP' 'INDEX' TableName.IndexName
+```
+
+
+
+## **示例**
+```SQL
+DROP INDEX t5.index2;
+-- SUCCEED
+```
+
+## 相关SQL
+
+[CREATE INDEX](./CREATE_INDEX_STATEMENT.md)
\ No newline at end of file
diff --git a/docs/zh/reference/sql/ddl/SET_STATEMENT.md b/docs/zh/reference/sql/ddl/SET_STATEMENT.md
index f0cb8706dcf..c6229b08ef4 100644
--- a/docs/zh/reference/sql/ddl/SET_STATEMENT.md
+++ b/docs/zh/reference/sql/ddl/SET_STATEMENT.md
@@ -1,5 +1,8 @@
# SET STATEMENT
+`SET` 语句用于在 OpenMLDB 上设置系统变量。目前OpenMLDB的系统变量包括会话系统变量和全局系统变量。对会话变量的修改,只会影响到当前的会话(也就是当前的数据库连接)。对全局变量的修改会对所有会话生效。
+
+
## Syntax
```sql
@@ -7,32 +10,31 @@ SetStatement ::=
'SET' variableName '=' value
variableName ::=
- | sessionVariableName
+ sessionVariableName
sessionVariableName ::= '@@'Identifier | '@@session.'Identifier | '@@global.'Identifier
```
-或者用下面的方式
+或者用下面的语法格式
```sql
'SET' [ GLOBAL | SESSION ] <variable_name> '=' <value>
```
-**Description**
-`SET` 语句用于在 OpenMLDB 上设置系统变量。目前OpenMLDB的系统变量包括会话系统变量和全局系统变量。对会话变量的修改,只会影响到当前的会话(也就是当前的数据库连接)。对全局变量的修改会对所有会话生效。
-
-- 会话系统变量一般以`@session前缀`,如SET @@session.execute_mode = "offline"。`注意⚠️:会话系统变量也可以选择直接以`@@`为前缀,即`SET @@execute_mode = "offline"`和前面的配置语句是等价的。变量名是大小写不敏感的。
-- 全局系统变量以`@global为前缀`,如SET @@global.enable_trace = true;
+- 会话系统变量一般以`@@session.`为前缀,如`SET @@session.execute_mode = "offline";`。会话系统变量也可以选择直接以`@@`为前缀,即`SET @@execute_mode = "offline"`和前面的配置语句是等价的。
+- 全局系统变量以`@@global.`为前缀,如`SET @@global.enable_trace = true;`
- OpenMLDB的SET语句只能用于设置/修改已存在(内置的)的系统变量。
+
## 目前支持的系统变量
### SESSION 系统变量
-| SESSION系统变量 | 变量描述 | 变量值 | 默认值 |
-| -------------------------------------- | ------------------------------------------------------------ | --------------------- | --------- |
-| @@session.execute_mode|@@execute_mode | OpenMDLB在当前会话下的执行模式。目前支持"offline"和"online"两种模式。<br/>在离线执行模式下,只会导入/插入以及查询离线数据。<br/>在在线执行模式下,只会导入/插入以及查询在线数据。 | "offline" \| "online" | "offline" |
-| @@session.enable_trace|@@enable_trace | 控制台的错误信息trace开关。<br/>当开关打开时(`SET @@enable_trace = "true"`),SQL语句有语法错误或者在计划生成过程发生错误时,会打印错误信息栈。<br/>当开关关闭时(`SET @@enable_trace = "false"`),SQL语句有语法错误或者在计划生成过程发生错误时,仅打印基本错误信息。 | "true" \| "false" | "false" |
-| @@session.sync_job|@@sync_job | ...开关。<br/>当开关打开时(`SET @@sync_job = "true"`),离线的命令将变为同步,等待执行的最终结果。<br/>当开关关闭时(`SET @@sync_job = "false"`),离线的命令即时返回,需要通过`SHOW JOB`查看命令执行情况。 | "true" \| "false" | "false" |
-| @@session.sync_timeout|@@sync_timeout | ...<br/>离线命令同步开启的情况下,可配置同步命令的等待时间。超时将立即返回,超时返回后仍可通过`SHOW JOB`查看命令执行情况。 | Int | "20000" |
+| SESSION系统变量 | 变量描述 | 变量值 | 默认值 |
+| -------------------------------------- |---------------------------------------------------------------------------------------------------------------| --------------------- | --------- |
+| @@session.execute_mode|@@execute_mode | OpenMLDB在当前会话下的执行模式。目前支持`offline`和`online`两种模式。<br/>在离线执行模式下,只会导入/插入以及查询离线数据。<br/>在在线执行模式下,只会导入/插入以及查询在线数据。 | "offline" \| "online" | "offline" |
+| @@session.enable_trace|@@enable_trace | 当该变量值为 `true`,SQL语句有语法错误或者在计划生成过程发生错误时,会打印错误信息栈。<br/>当该变量值为 `false`,SQL语句有语法错误或者在计划生成过程发生错误时,仅打印基本错误信息。 | "true" \| "false" | "false" |
+| @@session.sync_job|@@sync_job | 当该变量值为 `true`,离线的命令将变为同步,等待执行的最终结果。<br/>当该变量值为 `false`,离线的命令即时返回,若要查看命令的执行情况,请使用`SHOW JOB`。 | "true" \| "false" | "false" |
+| @@session.sync_timeout|@@sync_timeout | 当sync_job值为`true`的情况下,可配置同步命令的等待时间(以*毫秒*为单位)。超时将立即返回,超时返回后仍可通过`SHOW JOB`查看命令执行情况。 | Int | "20000" |
## Example
@@ -51,6 +53,7 @@ sessionVariableName ::= '@@'Identifier | '@@session.'Identifier | '@@global.'Ide
4 rows in set
> SET @@session.execute_mode = "online";
+-- SUCCEED
> SHOW VARIABLES;
--------------- ---------
Variable_name Value
@@ -63,6 +66,7 @@ sessionVariableName ::= '@@'Identifier | '@@session.'Identifier | '@@global.'Ide
4 rows in set
> SET @@session.enable_trace = "true";
+ -- SUCCEED
> SHOW VARIABLES;
--------------- ---------
Variable_name Value
@@ -89,6 +93,7 @@ sessionVariableName ::= '@@'Identifier | '@@session.'Identifier | '@@global.'Ide
4 rows in set
> SET @@global.enable_trace = "true";
+-- SUCCEED
> SHOW GLOBAL VARIABLES;
--------------- ----------------
Variable_name Variable_value
@@ -108,18 +113,18 @@ sessionVariableName ::= '@@'Identifier | '@@session.'Identifier | '@@global.'Ide
```sql
CREATE DATABASE db1;
--- SUCCEED: Create database successfully
+-- SUCCEED
USE db1;
-- SUCCEED: Database changed
CREATE TABLE t1 (col0 STRING, col1 int, std_time TIMESTAMP, INDEX(KEY=col1, TS=std_time, TTL_TYPE=absolute, TTL=30d));
---SUCCEED: Create successfully
-
+--SUCCEED
```
- 关闭enable_trace时,执行错误的SQL:
```sql
> set @@enable_trace = "false";
+-- SUCCEED
> select sum(col1) over w1 from t1 window w1 as (partition by col1 order by col0 rows_range between 10d preceding and current row);
-- ERROR: Invalid Order column type : kVarchar
```
@@ -128,6 +133,7 @@ CREATE TABLE t1 (col0 STRING, col1 int, std_time TIMESTAMP, INDEX(KEY=col1, TS=s
```sql
> set @@enable_trace = "true";
+-- SUCCEED
> select sum(col1) over w1 from t1 window w1 as (partition by col1 order by col0 rows_range between 10d preceding and current row);
-- ERROR: Invalid Order column type : kVarchar
(At /Users/chenjing/work/chenjing/OpenMLDB/hybridse/src/vm/sql_compiler.cc:263)
diff --git a/docs/zh/reference/sql/ddl/SHOW_COMPONENTS.md b/docs/zh/reference/sql/ddl/SHOW_COMPONENTS.md
index 1784237c036..6d19c1111f5 100644
--- a/docs/zh/reference/sql/ddl/SHOW_COMPONENTS.md
+++ b/docs/zh/reference/sql/ddl/SHOW_COMPONENTS.md
@@ -1,33 +1,32 @@
# SHOW COMPONENTS
+显示当前 OpenMLDB 系统的各个组件信息。
```sql
-SHOW COMPONENTS
+SHOW COMPONENTS;
```
-显示当前 OpenMLDB 系统的各个组件信息,包括 tablet, nameserver, task manager 和 api server。
+## 输出信息说明
+| Column | Note |
+| ------------ |-------------------------------------------------------------------------|
+| Endpoint | 组件端点,同 `--endpoint` flag |
+| Role | 组件角色,有 `tablet`、`nameserver`、`taskmanager`、`apiserver`, 同 `--role` flag |
+| Connect_time | 组件连接时间,以毫秒时间戳形式展示 |
+| Status | 组件状态, `online`、 `offline`或`NULL` |
+| Ns_role | Nameserver 的角色,`master`或 `standby` |
-Column Informations
-
-| Column | Description |
-| ------------ | ------------------------------------------------------------ |
-| Endpoint | component endpoint, same as `--endpoint` flag in openmldb |
-| Role | 组件角色。有 `tablet`,`nameserver`,`taskmanager`,`apiserver`, 同 `--role`flag in openmldb |
-| Connect_time | 组件连接时间,以毫秒时间戳形式展示 |
-| Status | 组件状态, `online`, `offline`or `NULL` |
-| Ns_role | Namserver 的角色,`master`or `standby` |
-
-注意:`SHOW COMPONETS` 目前仍有部分未完善的功能:
-
+```{note}
+`SHOW COMPONENTS` 目前仍有部分未完善的功能:
- 不能展示 api server 信息
- 只能展示单个 task manager master 的信息,不能展示其他 slave 节点
-- standalone 模式下 name server 的 connect time 不准确
+- 单机版 nameserver 的 connect time 不准确
+```
-# Example
+## Example
```sql
-> SHOW COMPONENTS;
+SHOW COMPONENTS;
---------------- ------------ --------------- -------- ---------
Endpoint Role Connect_time Status Ns_role
---------------- ------------ --------------- -------- ---------
diff --git a/docs/zh/reference/sql/ddl/SHOW_TABLES_STATEMENT.md b/docs/zh/reference/sql/ddl/SHOW_TABLES_STATEMENT.md
index 340b2f9f552..f74db33e727 100644
--- a/docs/zh/reference/sql/ddl/SHOW_TABLES_STATEMENT.md
+++ b/docs/zh/reference/sql/ddl/SHOW_TABLES_STATEMENT.md
@@ -10,16 +10,16 @@ SHOW TABLES;
```sql
CREATE DATABASE db1;
---SUCCEED: Create database successfully
+--SUCCEED
USE db1;
--SUCCEED: Database changed
CREATE TABLE t1(col0 STRING);
--- SUCCEED: Create successfully
+-- SUCCEED
CREATE TABLE t2(col0 STRING);
--- SUCCEED: Create successfully
+-- SUCCEED
SHOW TABLES;
--------
diff --git a/docs/zh/reference/sql/ddl/SHOW_TABLE_STATUS.md b/docs/zh/reference/sql/ddl/SHOW_TABLE_STATUS.md
index 06527d928e6..7ac120995df 100644
--- a/docs/zh/reference/sql/ddl/SHOW_TABLE_STATUS.md
+++ b/docs/zh/reference/sql/ddl/SHOW_TABLE_STATUS.md
@@ -1,37 +1,39 @@
# SHOW TABLE STATUS
+`SHOW TABLE STATUS`命令可以展示当前使用的数据库或者所有数据库下表的详细信息。如果未使用任何数据库(即未通过`USE`命令选择数据库),`SHOW TABLE STATUS`将展示所有数据库里表的信息,不包括隐藏数据库;如果使用了特定数据库,将只展示当前数据库下表的信息。
+
```sql
-SHOW TABLE STATUS
+SHOW TABLE STATUS;
```
-展示当前使用的数据库或者所有数据库下 tables 的详细信息。如果未使用任何 database, `SHOW TABLE STATUS`展示所有数据库里 tables 的信息,不包括隐藏数据库;如果使用了特定 database, 只展示当前数据库下 tables 的信息。
+## 输出信息
+| Column | Description |
+| ----------------- |-----------------------------------------------------------|
+| Table_id | 表唯一 id |
+| Table_name | 表名 |
+| Database_name | 数据库名 |
+| Storage_type | 存储类型, `memory`,`ssd`,`hdd` |
+| Rows | 表的 rows count |
+| Memory_data_size | 表内存占用(单位 bytes) |
+| Disk_data_size | 表磁盘占用 (单位 bytes) |
+| Partition | Partiton 数量 |
+| Partition_unalive | Unalive partition 数量 |
+| Replica | Replica 数量 |
+| Offline_path | 表对应 offline 数据路径,仅对离线表生效。 `NULL` 表示未设置该项。 |
+| Offline_format | 表对应 offline 数据格式,仅对离线表生效。 `NULL` 表示未设置该项。 |
+| Offline_deep_copy | 表对应 offline 数据是否使用 deep copy,仅对离线表生效。 `NULL` 表示未设置该项。|
-Column Information
-| Column | Description |
-| ----------------- | ---------------------------------------------------------- |
-| Table_id | 表唯一 id |
-| Table_name | 表名 |
-| Database_name | 数据库名 |
-| Storage_type | 存储类型, `memory`,`ssd`,`hdd` |
-| Rows | 表的 rows count |
-| Memory_data_size | 表内存占用(单位 bytes) |
-| Disk_data_size | 表磁盘占用 (但我 bytes) |
-| Partition | Partiton 数量 |
-| Partition_unalive | Unalive partition 数量 |
-| Replica | Replica 数量 |
-| Offline_path | 表对应 offline 数据路径, `NULL` if not exists |
-| Offline_format | 表对应 offline 数据格式, `NULL` if not exists |
-| Offline_deep_copy | 表对应 offline 数据是否使用 deep copy, `NULL` if not exits |
+## Example
-# Example
```sql
> USE db;
+--SUCCEED: Database changed
> SHOW TABLE STATUS;
---------- ------------ --------------- -------------- ------ ------------------ ---------------- ----------- ------------------- --------- -------------- ---------------- -------------------
Table_id Table_name Database_name Storage_type Rows Memory_data_size Disk_data_size Partition Partition_unalive Replica Offline_path Offline_format Offline_deep_copy
diff --git a/docs/zh/reference/sql/ddl/SHOW_VARIABLES_STATEMENT.md b/docs/zh/reference/sql/ddl/SHOW_VARIABLES_STATEMENT.md
index ba5db03d304..8e1942066cd 100644
--- a/docs/zh/reference/sql/ddl/SHOW_VARIABLES_STATEMENT.md
+++ b/docs/zh/reference/sql/ddl/SHOW_VARIABLES_STATEMENT.md
@@ -1,51 +1,96 @@
# SHOW VARIABLES
+SHOW VARIABLES 语句用于查看系统变量。其中:
+- `SHOW SESSION VARIABLES`或`SHOW VARIABLES`语句用于显示当前会话的系统变量。
+- `SHOW GLOBAL VARIABLES`可用于查看全局系统变量。
+目前OpenMLDB只支持会话系统变量和全局系统变量,不支持用户变量。对会话变量的修改,只影响当前的会话(也就是当前的数据库连接)。因此,当关闭数据库连接(或者退出控制台)后,再重新连接(或者重新登陆控制台),先前对会话变量的配置和修改都将被重置。
+
+## Syntax
```sql
ShowVariablesStmt ::=
- ShowSessionVariablesStmt
+ ShowSessionVariablesStmt | ShowGlobalVariablesStmt
ShowSessionVariablesStmt ::=
- 'SHOW' 'VARIABLES'
- |'SHOW' 'SESSION' 'VARIABLES'
-
+ 'SHOW' 'VARIABLES'
+ |'SHOW' 'SESSION' 'VARIABLES'
+ShowGlobalVariablesStmt ::=
+ 'SHOW' 'GLOBAL' 'VARIABLES'
```
-`SHOW SESSION VARIABLES`或`SHOW VARIABLES`语句用于显示当前会话的系统变量。
-目前OpenMLDB只支持会话系统变量。对会话变量的修改,只会影响到当前的会话(也就是当前的数据库连接)。因此,当关闭数据库连接(或者退出控制台)后,再重新连接(或者重新登陆控制台),先前对会话变量的配置和修改都将被重置。
+
## Example
```sql
> SHOW SESSION VARIABLES;
- --------------- --------
+ --------------- ---------
Variable_name Value
- --------------- --------
+ --------------- ---------
enable_trace false
- execute_mode online
- --------------- --------
+ execute_mode offline
+ job_timeout 20000
+ sync_job false
+ --------------- ---------
+
+4 rows in set
+
> SET @@enable_trace = "true"
-
+ --SUCCEED
> SHOW VARIABLES;
- --------------- --------
+ --------------- ---------
Variable_name Value
- --------------- --------
+ --------------- ---------
enable_trace true
- execute_mode online
- --------------- --------
+ execute_mode offline
+ job_timeout 20000
+ sync_job false
+ --------------- ---------
+
+4 rows in set
+
+
+> SHOW GLOBAL VARIABLES;
+ --------------- ----------------
+ Variable_name Variable_value
+ --------------- ----------------
+ enable_trace false
+ sync_job false
+ job_timeout 20000
+ execute_mode offline
+ --------------- ----------------
+
+4 rows in set
```
-退出控制台后,重新登录控制台
+退出控制台后,重新登录控制台。
```sql
> SHOW SESSION VARIABLES;
- --------------- --------
+ --------------- ---------
Variable_name Value
- --------------- --------
+ --------------- ---------
+ enable_trace false
+ execute_mode offline
+ job_timeout 20000
+ sync_job false
+ --------------- ---------
+
+4 rows in set
+
+
+> SHOW GLOBAL VARIABLES;
+ --------------- ----------------
+ Variable_name Variable_value
+ --------------- ----------------
enable_trace false
- execute_mode online
- --------------- --------
+ sync_job false
+ job_timeout 20000
+ execute_mode offline
+ --------------- ----------------
+
+4 rows in set
```
diff --git a/docs/zh/reference/sql/ddl/USE_DATABASE_STATEMENT.md b/docs/zh/reference/sql/ddl/USE_DATABASE_STATEMENT.md
index 38ae757cd87..674a02089ba 100644
--- a/docs/zh/reference/sql/ddl/USE_DATABASE_STATEMENT.md
+++ b/docs/zh/reference/sql/ddl/USE_DATABASE_STATEMENT.md
@@ -24,10 +24,10 @@ USE database_name;
```sql
CREATE DATABASE db1;
--- SUCCEED: Create database successfully
+-- SUCCEED
CREATE DATABASE db2;
--- SUCCEED: Create database successfully
+-- SUCCEED
```
然后选择`db1`作为当前数据库:
@@ -41,18 +41,20 @@ USE db1;
```sql
CREATE TABLE t1(col0 string);
--- SUCCEED: Create successfully
+-- SUCCEED
-CREATE TABLE t1(col0 string);
--- SUCCEED: Create successfully
+CREATE TABLE t2(col0 string);
+-- SUCCEED
SHOW TABLES;
- --------
- Tables
- --------
- t1
- t2
- --------
+ --------
+ Tables
+ --------
+ t1
+ t2
+ --------
+
+2 rows in set
```
然后选择`db2`作为当前数据库,并查看当前库下的表:
@@ -72,6 +74,6 @@ SHOW TABLES;
[DROP DATABASE](./DROP_DATABASE_STATEMENT.md)
-[SHOW DATABASES](./SHOW_STATEMENT.md#show-databases)
+[SHOW DATABASES](./SHOW_DATABASES_STATEMENT.md)
-[SHOW TABLES](./SHOW_STATEMENT.md#show-tables)
\ No newline at end of file
+[SHOW TABLES](./SHOW_TABLES_STATEMENT.md)
\ No newline at end of file
diff --git a/docs/zh/reference/sql/ddl/index.rst b/docs/zh/reference/sql/ddl/index.rst
index ec0e1af1804..5d3d80637fd 100644
--- a/docs/zh/reference/sql/ddl/index.rst
+++ b/docs/zh/reference/sql/ddl/index.rst
@@ -18,3 +18,5 @@
SHOW_VARIABLES_STATEMENT
SHOW_TABLE_STATUS
SET_STATEMENT
+ CREATE_INDEX_STATEMENT
+ DROP_INDEX_STATEMENT
diff --git a/docs/zh/reference/sql/deployment_manage/DEPLOY_STATEMENT.md b/docs/zh/reference/sql/deployment_manage/DEPLOY_STATEMENT.md
index 9b84014418e..bac299cd9de 100644
--- a/docs/zh/reference/sql/deployment_manage/DEPLOY_STATEMENT.md
+++ b/docs/zh/reference/sql/deployment_manage/DEPLOY_STATEMENT.md
@@ -4,81 +4,96 @@
```sql
CreateDeploymentStmt
- ::= 'DEPLOY' [DeployOptions] DeploymentName SelectStmt
-
-DeployOptions(可选)
- ::= 'OPTIONS' '(' DeployOptionItem (',' DeployOptionItem)* ')'
-
+ ::= 'DEPLOY' [DeployOptionList] DeploymentName SelectStmt
+
+DeployOptionList
+ ::= DeployOption*
+
+DeployOption
+ ::= 'OPTIONS' '(' DeployOptionItem (',' DeployOptionItem)* ')'
+
DeploymentName
- ::= identifier
+ ::= identifier
```
-`DeployOptions`的定义详见[DEPLOYMENT属性DeployOptions(可选)](#DEPLOYMENT属性DeployOptions(可选)).
-`DEPLOY`语句可以将SQL部署到线上。OpenMLDB仅支持部署[Select查询语句](../dql/SELECT_STATEMENT.md),并且需要满足[OpenMLDB SQL上线规范和要求](../deployment_manage/ONLINE_SERVING_REQUIREMENTS.md)
-```SQL
-DEPLOY deployment_name SELECT clause
-```
+`DeployOption`的定义详见[DEPLOYMENT属性DeployOption(可选)](#DeployOption可选)。
+
+`SelectStmt`的定义详见[Select查询语句](../dql/SELECT_STATEMENT.md)。
+
+`DEPLOY`语句可以将SQL部署到线上。OpenMLDB仅支持部署Select查询语句,并且需要满足[OpenMLDB SQL上线规范和要求](../deployment_manage/ONLINE_SERVING_REQUIREMENTS.md)。
+
-### Example: 部署一个SQL到online serving
-```sqlite
+**Example**
+
+在集群版的在线请求模式下,部署上线一个SQL脚本。
+```sql
CREATE DATABASE db1;
--- SUCCEED: Create database successfully
+-- SUCCEED
USE db1;
-- SUCCEED: Database changed
-CREATE TABLE t1(col0 STRING);
+CREATE TABLE demo_table1(c1 string, c2 int, c3 bigint, c4 float, c5 double, c6 timestamp, c7 date);
-- SUCCEED: Create successfully
-DEPLOY demo_deploy select col0 from t1;
--- SUCCEED: deploy successfully
+DEPLOY demo_deploy SELECT c1, c2, sum(c3) OVER w1 AS w1_c3_sum FROM demo_table1 WINDOW w1 AS (PARTITION BY demo_table1.c1 ORDER BY demo_table1.c6 ROWS BETWEEN 2 PRECEDING AND CURRENT ROW);
+
+-- SUCCEED
```
-查看部署详情:
+我们可以使用 `SHOW DEPLOYMENT demo_deploy;` 命令查看部署的详情,执行结果如下:
```sql
-
-SHOW DEPLOYMENT demo_deploy;
- ----- -------------
- DB Deployment
- ----- -------------
- db1 demo_deploy
- ----- -------------
- 1 row in set
-
- ----------------------------------------------------------------------------------
- SQL
- ----------------------------------------------------------------------------------
- CREATE PROCEDURE deme_deploy (col0 varchar) BEGIN SELECT
- col0
+ --------- -------------------
+ DB Deployment
+ --------- -------------------
+  db1       demo_deploy
+ --------- -------------------
+1 row in set
+ -----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
+ SQL
+ -----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
+ DEPLOY demo_deploy SELECT
+ c1,
+ c2,
+ sum(c3) OVER (w1) AS w1_c3_sum
FROM
- t1
-; END;
- ----------------------------------------------------------------------------------
+ demo_table1
+WINDOW w1 AS (PARTITION BY demo_table1.c1
+ ORDER BY demo_table1.c6 ROWS BETWEEN 2 PRECEDING AND CURRENT ROW)
+;
+ -----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
1 row in set
-
# Input Schema
- --- ------- ---------- ------------
- # Field Type IsConstant
- --- ------- ---------- ------------
- 1 col0 kVarchar NO
- --- ------- ---------- ------------
+ --- ------- ------------ ------------
+ # Field Type IsConstant
+ --- ------- ------------ ------------
+ 1 c1 Varchar NO
+ 2 c2 Int32 NO
+ 3 c3 Int64 NO
+ 4 c4 Float NO
+ 5 c5 Double NO
+ 6 c6 Timestamp NO
+ 7 c7 Date NO
+ --- ------- ------------ ------------
# Output Schema
- --- ------- ---------- ------------
- # Field Type IsConstant
- --- ------- ---------- ------------
- 1 col0 kVarchar NO
- --- ------- ---------- ------------
+ --- ----------- ---------- ------------
+ # Field Type IsConstant
+ --- ----------- ---------- ------------
+ 1 c1 Varchar NO
+ 2 c2 Int32 NO
+ 3 w1_c3_sum Int64 NO
+ --- ----------- ---------- ------------
```
-### DEPLOYMENT属性DeployOptions(可选)
+### DeployOption(可选)
```sql
-DeployOptions
+DeployOption
::= 'OPTIONS' '(' DeployOptionItem (',' DeployOptionItem)* ')'
DeployOptionItem
@@ -90,37 +105,52 @@ LongWindowOption
目前只支持长窗口`LONG_WINDOWS`的优化选项。
#### 长窗口优化
-##### 长窗口优化选项格式
```sql
LongWindowDefinitions
- ::= 'LongWindowDefinition (, LongWindowDefinition)*'
+ ::= 'LongWindowDefinition (, LongWindowDefinition)*'
LongWindowDefinition
- ::= 'WindowName[:BucketSize]'
+ ::= WindowName':'[BucketSize]
WindowName
- ::= string_literal
+ ::= string_literal
-BucketSize(可选,默认为)
- ::= int_literal | interval_literal
+BucketSize
+ ::= int_literal | interval_literal
-interval_literal ::= int_literal 's'|'m'|'h'|'d'(分别代表秒、分、时、天)
+interval_literal ::= int_literal 's'|'m'|'h'|'d'
```
-其中`BucketSize`为性能优化选项,会以`BucketSize`为粒度,对表中数据进行预聚合,默认为`1d`。
+其中`BucketSize`为可选的性能优化项,OpenMLDB会根据`BucketSize`设置的粒度对表中数据进行预聚合,默认为`1d`。
-示例如下:
-```sqlite
-DEPLOY demo_deploy OPTIONS(long_windows="w1:1d") SELECT col0, sum(col1) OVER w1 FROM t1
- WINDOW w1 AS (PARTITION BY col0 ORDER BY col2 ROWS_RANGE BETWEEN 5d PRECEDING AND CURRENT ROW);
--- SUCCEED: deploy successfully
-```
##### 限制条件
目前长窗口优化有以下几点限制:
-- 仅支持`SelectStmt`只涉及到一个物理表的情况,即不支持包含`join`或`union`的`SelectStmt`
-- 支持的聚合运算仅限:`sum`, `avg`, `count`, `min`, `max`
-- 执行`deploy`命令的时候不允许表中有数据
+- `SelectStmt`仅支持涉及单个物理表的情况,即不支持包含`join`或`union`的`SelectStmt`。
+
+- 支持的聚合运算仅限:`sum`, `avg`, `count`, `min`, `max`, `count_where`, `min_where`, `max_where`, `sum_where`, `avg_where`。
+
+- 执行`deploy`命令的时候不允许表中有数据。
+
+- 对于带 where 条件的运算,如 `count_where`, `min_where`, `max_where`, `sum_where`, `avg_where` ,有额外限制:
+
+ 1. 主表必须是内存表 (`storage_mode = 'Memory'`)
+
+  2. `BucketSize` 类型应为范围类型,即取值应为`interval_literal`类型。比如,`long_windows='w1:1d'`是支持的,不支持 `long_windows='w1:100'`。
+
+  3. where 条件必须是 `<column ref> op <const value>` 或者 `<const value> op <column ref>` 的格式。
+
+     - 支持的 where op: `>, <, >=, <=, =, !=`
+
+     - where 关联的列 `<column ref>`,数据类型不能是 date 或者 timestamp
+
+**Example**
+
+```sql
+DEPLOY demo_deploy OPTIONS(long_windows="w1:1d") SELECT c1, sum(c2) OVER w1 FROM demo_table1
+ WINDOW w1 AS (PARTITION BY c1 ORDER BY c2 ROWS_RANGE BETWEEN 5d PRECEDING AND CURRENT ROW);
+-- SUCCEED
+```
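+
+下面再给出一个带 where 条件聚合的长窗口部署示例。该示例仅为示意:假设 `demo_table1` 为内存表、执行 `DEPLOY` 时表中无数据,且满足上文列出的其他限制条件;`demo_deploy_where` 为假设的部署名。
+```sql
+DEPLOY demo_deploy_where OPTIONS(long_windows="w1:1d")
+    SELECT c1, count_where(c2, c2 > 0) OVER w1 AS w1_c2_cnt FROM demo_table1
+    WINDOW w1 AS (PARTITION BY c1 ORDER BY c6 ROWS_RANGE BETWEEN 5d PRECEDING AND CURRENT ROW);
+-- SUCCEED
+```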
## 相关SQL
@@ -129,4 +159,3 @@ DEPLOY demo_deploy OPTIONS(long_windows="w1:1d") SELECT col0, sum(col1) OVER w1
[SHOW DEPLOYMENT](../deployment_manage/SHOW_DEPLOYMENT.md)
[DROP DEPLOYMENT](../deployment_manage/DROP_DEPLOYMENT_STATEMENT.md)
-
diff --git a/docs/zh/reference/sql/deployment_manage/DROP_DEPLOYMENT_STATEMENT.md b/docs/zh/reference/sql/deployment_manage/DROP_DEPLOYMENT_STATEMENT.md
index 8b204e625d9..c6f71d555ad 100644
--- a/docs/zh/reference/sql/deployment_manage/DROP_DEPLOYMENT_STATEMENT.md
+++ b/docs/zh/reference/sql/deployment_manage/DROP_DEPLOYMENT_STATEMENT.md
@@ -1,10 +1,11 @@
# 删除 DEPLOYMENT
+`DROP DEPLOYMENT`语句用于删除一个在线请求模式下的部署。
+
```SQL
DROP DEPLOYMENT deployment_name
```
-`DROP DEPLOYMENT`语句用于删除一个OnlineServing的部署。
## Example:
@@ -12,27 +13,26 @@ DROP DEPLOYMENT deployment_name
```sql
CREATE DATABASE db1;
--- SUCCEED: Create database successfully
+-- SUCCEED
USE db1;
-- SUCCEED: Database changed
```
创建一张表`t1`:
-```
+```sql
CREATE TABLE t1(col0 STRING);
-- SUCCEED: Create successfully
-
```
-部署表t1的查询语句到OnlineServing:
+在线请求模式下,部署表t1的查询语句:
```sql
-> DEPLOY demo_deploy select col0 from t1;
-SUCCEED: deploy successfully
+DEPLOY demo_deploy select col0 from t1;
+-- SUCCEED
```
-查看当前数据库下所有的deployments:
+查看当前数据库下所有的 deployments:
```sql
SHOW DEPLOYMENTS;
@@ -45,21 +45,24 @@ SHOW DEPLOYMENTS;
```
-删除指定的deployment:
+删除指定的 deployment:
```sql
DROP DEPLOYMENT demo_deploy;
-- Drop deployment demo_deploy? yes/no
-- yes
--- SUCCEED: Drop successfully
-
+-- SUCCEED
```
-删除后,再次查看数据库下的deployments,应为是空列表:
+删除后,再次查看数据库下的 deployments,应为空列表:
```sql
SHOW DEPLOYMENTS;
-Empty set
+ ---- ------------
+ DB Deployment
+ ---- ------------
+
+0 rows in set
```
diff --git a/docs/zh/reference/sql/deployment_manage/ONLINE_REQUEST_REQUIREMENTS.md b/docs/zh/reference/sql/deployment_manage/ONLINE_REQUEST_REQUIREMENTS.md
new file mode 100644
index 00000000000..01a1bf2209f
--- /dev/null
+++ b/docs/zh/reference/sql/deployment_manage/ONLINE_REQUEST_REQUIREMENTS.md
@@ -0,0 +1,122 @@
+# SQL 上线规范和要求
+
+OpenMLDB 的**在线请求模式**能提供实时特征抽取服务。使用[DEPLOY](../deployment_manage/DEPLOY_STATEMENT.md)命令可以将一段SQL命令部署上线。部署成功后,用户可通过 Restful APIs 或者 SDK 实时地对请求样本作特征抽取计算。但是,并非所有的 SQL 都可以部署上线,本文定义了可上线 SQL 的规范要求。
+
+## 在线请求模式支持的语句
+
+OpenMLDB仅支持上线[SELECT查询语句](../dql/SELECT_STATEMENT.md)。
+
+## 在线请求模式 `SELECT` 支持的子句
+
+**部分SELECT查询语句不支持在在线请求模式下执行。** 详见[SELECT查询语句各子句上线情况表](../dql/SELECT_STATEMENT.md#select语句元素)。
+
+下表列出了在线请求模式支持的 `SELECT` 子句。
+
+| SELECT 子句 | 说明 |
+|:-------------------------------------------|:-----------------------------------------------------------------------------------------------------------------------------------------|
+| 单张表的简单表达式计算 | 简单的单表查询是对一张表进行列运算、使用运算表达式或单行处理函数(Scalar Function)以及它们的组合表达式作计算。需要遵循[在线请求模式下单表查询的使用规范](#在线请求模式下单表查询的使用规范) |
+| [`JOIN` 子句](../dql/JOIN_CLAUSE.md) | OpenMLDB目前仅支持**LAST JOIN**。需要遵循[在线请求模式下LAST JOIN的使用规范](#在线请求模式下last-join的使用规范) |
+| [`WINDOW` 子句](../dql/WINDOW_CLAUSE.md) | 窗口子句用于定义一个或者若干个窗口。窗口可以是有名或者匿名的。用户可以在窗口上调用聚合函数进行分析计算。需要遵循[在线请求模式下Window的使用规范](#在线请求模式下window的使用规范) |
+| [`LIMIT` Clause](../dql/LIMIT_CLAUSE.md) | LIMIT 子句用于限制返回的结果条数。目前LIMIT仅能接受一个参数,表示返回数据的最大行数。 |
+
+## 在线请求模式下 `SELECT` 子句的使用规范
+
+### 在线请求模式下单表查询的使用规范
+
+- 仅支持列运算、表达式、单行处理函数(Scalar Function)以及它们的组合表达式运算。
+- 单表查询不包含[GROUP BY子句](../dql/GROUP_BY_CLAUSE.md),[WHERE子句](../dql/WHERE_CLAUSE.md),[HAVING子句](../dql/HAVING_CLAUSE.md)以及[WINDOW子句](../dql/WINDOW_CLAUSE.md)。
+- 单表查询只涉及单张表的计算,不涉及[JOIN](../dql/JOIN_CLAUSE.md)多张表的计算。
+
+**Example: 支持上线的简单SELECT查询语句范例**
+
+```sql
+-- desc: SELECT所有列
+SELECT * FROM t1;
+
+-- desc: SELECT 表达式重命名
+SELECT COL1 as c1 FROM t1;
+
+-- desc: SELECT 表达式重命名2
+SELECT COL1 c1 FROM t1;
+
+-- desc: SELECT 列表达式
+SELECT COL1 FROM t1;
+SELECT t1.COL1 FROM t1;
+
+-- desc: SELECT 一元表达式
+SELECT -COL2 as COL2_NEG FROM t1;
+
+-- desc: SELECT 二元表达式
+SELECT COL1 + COL2 as COL12_ADD FROM t1;
+
+-- desc: SELECT 类型强转
+SELECT CAST(COL1 as BIGINT) as COL_BIGINT FROM t1;
+
+-- desc: SELECT 函数表达式
+SELECT substr(COL7, 3, 6) FROM t1;
+```
+
+### 在线请求模式下 `LAST JOIN` 的使用规范
+
+- 仅支持`LAST JOIN`类型。
+- 至少有一个JOIN条件是形如`left_table.column=right_table.column`的EQUAL条件,**并且`right_table.column`列需要命中右表的索引**。
+- 带排序LAST JOIN的情况下,`ORDER BY`只支持列表达式,**并且列需要命中右表索引的时间列**。
+
+**Example: 支持上线的 `LAST JOIN` 语句范例**
+创建两张表以供后续`LAST JOIN`。
+```sql
+CREATE DATABASE db1;
+-- SUCCEED
+
+USE db1;
+-- SUCCEED: Database changed
+
+CREATE TABLE t1 (col0 STRING, col1 int, std_time TIMESTAMP, INDEX(KEY=col1, TS=std_time, TTL_TYPE=absolute, TTL=30d));
+-- SUCCEED
+
+CREATE TABLE t2 (col0 STRING, col1 int, std_time TIMESTAMP, INDEX(KEY=col1, TS=std_time, TTL_TYPE=absolute, TTL=30d));
+-- SUCCEED
+
+desc t1;
+ --- ---------- ----------- ------ ---------
+ # Field Type Null Default
+ --- ---------- ----------- ------ ---------
+ 1 col0 Varchar YES
+ 2 col1 Int YES
+ 3 std_time Timestamp YES
+ --- ---------- ----------- ------ ---------
+ --- -------------------- ------ ---------- ---------- ---------------
+ # name keys ts ttl ttl_type
+ --- -------------------- ------ ---------- ---------- ---------------
+ 1 INDEX_0_1639524729 col1 std_time 43200min kAbsoluteTime
+ --- -------------------- ------ ---------- ---------- ---------------
+```
+在刚刚创建的两张表上进行未排序的`LAST JOIN`,`col1`命中了索引。
+```sql
+ -- last join without order by, 'col1' hit index
+ SELECT
+ t1.col1 as id,
+ t1.col0 as t1_col0,
+ t1.col1 + t2.col1 + 1 as test_col1
+ FROM t1
+ LAST JOIN t2 ON t1.col1=t2.col1;
+ ```
+在刚刚创建的两张表上进行排序的`LAST JOIN`,`col1`命中了索引,`std_time`命中了右表的索引的时间列。
+```sql
+ -- last join with order by, 'col1:std_time' hit index
+ SELECT
+ t1.col1 as id,
+ t1.col0 as t1_col0,
+ t1.col1 + t2.col1 + 1 as test_col1
+ FROM t1
+ LAST JOIN t2 ORDER BY t2.std_time ON t1.col1=t2.col1;
+```
+
+### 在线请求模式下Window的使用规范
+
+- 窗口边界仅支持`PRECEDING`和`CURRENT ROW`
+- 窗口类型仅支持`ROWS`和`ROWS_RANGE`。
+- 窗口`PARTITION BY`只支持列表达式,并且列需要命中索引
+- 窗口`ORDER BY`只支持列表达式,并且列需要命中索引的时间列
+- 可支持使用 `EXCLUDE CURRENT_ROW`,`EXCLUDE CURRENT_TIME`,`MAXSIZE`,`INSTANCE_NOT_IN_WINDOW`对窗口进行其他特殊限制,详见[OpenMLDB特有的 WindowSpec 元素](../dql/WINDOW_CLAUSE.md#openmldb特有的-windowspec-元素)。
+
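+**Example: 支持上线的窗口查询范例**
+
+下面给出一个符合上述规范的窗口查询示例,仅为示意:沿用上文创建的表 `t1`,其中 `col1` 命中索引,`std_time` 为该索引的时间列。
+```sql
+SELECT
+    col0,
+    sum(col1) OVER w1 AS w1_col1_sum
+FROM t1
+WINDOW w1 AS (PARTITION BY t1.col1 ORDER BY t1.std_time
+    ROWS BETWEEN 2 PRECEDING AND CURRENT ROW);
+```
+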
diff --git a/docs/zh/reference/sql/deployment_manage/ONLINE_SERVING_REQUIREMENTS.md b/docs/zh/reference/sql/deployment_manage/ONLINE_SERVING_REQUIREMENTS.md
deleted file mode 100644
index 38434c867df..00000000000
--- a/docs/zh/reference/sql/deployment_manage/ONLINE_SERVING_REQUIREMENTS.md
+++ /dev/null
@@ -1,95 +0,0 @@
-# SQL 上线规范和要求
-
-OpenMLDB Online Serving提供实时的特征抽取服务。OpenMLDB的[DEPLOY](../deployment_manage/DEPLOY_STATEMENT.md)命令将一段SQL文本部署到线上去。部署成功后,用户即可通过Restful API或者JDBC API实时地对请求样本作特征抽取计算。并不是所有的SQL都可以部署到线上提供服务的,OpenMLDB对上线的语句和OP是有一套规范的。
-
-## Online Serving 语句
-
-OpenMLDB仅支持上线[SELECT查询语句](../dql/SELECT_STATEMENT.md)。
-
-## Online Serving Op List
-
-值得注意的是,并非所有的SELECT查询语句都可上线,在OpenMLDB中,只有`SELECT`, `WINDOW`, `LAST JOIN` OP是可以上线的,其他OP(包括`WHERE`, `GROUP`, `HAVING`, `LIMIT`)等都是无法上线了。
-
-本节将列出支持Online Serving的OP,并详细阐述这些OP的上线使用规范。
-
-| SELECT语句 | 说明 |
-| :----------------------------------------- | :----------------------------------------------------------- |
-| 单张表简单表达式计算 | 在Online Serving时,支持**简单的单表查询**。所谓,简单的单表查询是对一张表的进行列、运算表达式和单行处理函数(Scalar Function)以及它们的组合表达式作计算。需要遵循[Online Serving下单表查询的使用规范](#online-serving下单表查询的使用规范) |
-| [`JOIN` Clause](../dql/JOIN_CLAUSE.md) | OpenMLDB目前仅支持**LAST JOIN**。在Online Serving时,需要遵循[Online Serving下LAST JOIN的使用规范](#online-serving下last-join的使用规范) |
-| [`WINDOW` Clause](../dql/WINDOW_CLAUSE.md) | 窗口子句用于定义一个或者若干个窗口。窗口可以是有名或者匿名的。用户可以在窗口上调用聚合函数来进行一些分析型计算的操作(```sql agg_func() over window_name```)。在Online Serving时,需要遵循[Online Serving下Window的使用规范](#online-serving下window的使用规范) |
-
-## Online Serving下OP的使用规范
-
-### Online Serving下单表查询的使用规范
-
-- 仅支持列,表达式,以及单行处理函数(Scalar Function)以及它们的组合表达式运算
-- 单表查询不包含[GROUP BY子句](../dql/JOIN_CLAUSE.md),[WHERE子句](../dql/WHERE_CLAUSE.md),[HAVING子句](../dql/HAVING_CLAUSE.md)以及[WINDOW子句](../dql/WINDOW_CLAUSE.md)。
-- 单表查询只涉及单张表的计算,不设计[JOIN](../dql/JOIN_CLAUSE.md)多张表的计算。
-
-#### Example: 支持上线的简单SELECT查询语句范例
-
-```sql
--- desc: SELECT所有列
-SELECT * FROM t1;
-
--- desc: SELECT 表达式重命名
-SELECT COL1 as c1 FROM t1;
-
--- desc: SELECT 表达式重命名2
-SELECT COL1 c1 FROM t1;
-
--- desc: SELECT 列表达式
-SELECT COL1 FROM t1;
-SELECT t1.COL1 FROM t1;
-
--- desc: SELECT 一元表达式
-SELECT -COL2 as COL2_NEG FROM t1;
-
--- desc: SELECT 二元表达式
-SELECT COL1 + COL2 as COL12_ADD FROM t1;
-
--- desc: SELECT 类型强转
-SELECT CAST(COL1 as BIGINT) as COL_BIGINT FROM t1;
-
--- desc: SELECT 函数表达式
-SELECT substr(COL7, 3, 6) FROM t1;
-```
-
-### Online Serving下LAST JOIN的使用规范
-
-- Join type仅支持`LAST JOIN`类型
-- 至少有一个JOIN条件是形如`left_table.column=right_table.column`的EQUAL条件,并且`rgith_table.column`列需要命中右表的索引
-- 带排序LAST JOIN的情况下,`ORDER BY`只能支持列表达式,并且列需要命中右表索引的时间列
-
-#### Example: 支持上线的简单SELECT查询语句范例
-
-
-
-```sql
-CREATE DATABASE db1;
-
-USE db1;
-CREATE TABLE t1 (col0 STRING, col1 int, std_time TIMESTAMP, INDEX(KEY=col1, TS=std_time, TTL_TYPE=absolute, TTL=30d));
--- SUCCEED: Create successfully
-
-desc t1;
- --- ---------- ----------- ------ ---------
- # Field Type Null Default
- --- ---------- ----------- ------ ---------
- 1 col0 Varchar YES
- 2 col1 Int YES
- 3 std_time Timestamp YES
- --- ---------- ----------- ------ ---------
- --- -------------------- ------ ---------- ---------- ---------------
- # name keys ts ttl ttl_type
- --- -------------------- ------ ---------- ---------- ---------------
- 1 INDEX_0_1639524729 col1 std_time 43200min kAbsoluteTime
- --- -------------------- ------ ---------- ---------- ---------------
-```
-### Online Serving下Window的使用规范
-
-- 窗口边界仅支持`PRECEDING`和`CURRENT ROW`
-- 窗口类型仅支持`ROWS`和`ROWS_RANGE`
-- 窗口`PARTITION BY`只能支持列表达式,并且列需要命中索引
-- 窗口`ORDER BY`只能支持列表达式,并且列需要命中索引的时间列
-
diff --git a/docs/zh/reference/sql/deployment_manage/SHOW_DEPLOYMENT.md b/docs/zh/reference/sql/deployment_manage/SHOW_DEPLOYMENT.md
index acf0e3a8183..be4702da3e2 100644
--- a/docs/zh/reference/sql/deployment_manage/SHOW_DEPLOYMENT.md
+++ b/docs/zh/reference/sql/deployment_manage/SHOW_DEPLOYMENT.md
@@ -1,10 +1,12 @@
# 查看 DEPLOYMENT 详情
+`SHOW DEPLOYMENT`语句用于显示在线请求模式下某个已部署的任务的详情。
+
+
```SQL
SHOW DEPLOYMENT deployment_name;
```
-`SHOW DEPLOYMENT`语句用于显示某一个OnlineServing的详情。
## Example
@@ -12,7 +14,7 @@ SHOW DEPLOYMENT deployment_name;
```sql
CREATE DATABASE db1;
--- SUCCEED: Create database successfully
+-- SUCCEED
USE db1;
-- SUCCEED: Database changed
@@ -24,24 +26,22 @@ USE db1;
```sql
CREATE TABLE t1(col0 STRING);
--- SUCCEED: Create successfully
+-- SUCCEED
```
-部署表t1的查询语句到OnlineServing:
+将一条关于表t1的查询语句部署上线:
```sql
DEPLOY demo_deploy select col0 from t1;
--- SUCCEED: deploy successfully
+-- SUCCEED
```
查看新部署的deployment:
```sql
SHOW DEPLOYMENT demo_deploy;
-```
-```
----- -------------
DB Deployment
----- -------------
@@ -64,16 +64,15 @@ FROM
--- ------- ---------- ------------
# Field Type IsConstant
--- ------- ---------- ------------
- 1 col0 kVarchar NO
+ 1 col0 Varchar NO
--- ------- ---------- ------------
# Output Schema
--- ------- ---------- ------------
# Field Type IsConstant
--- ------- ---------- ------------
- 1 col0 kVarchar NO
- --- ------- ---------- ------------
-
+ 1 col0 Varchar NO
+ --- ------- ---------- ------------
```
## 相关语句
diff --git a/docs/zh/reference/sql/deployment_manage/SHOW_DEPLOYMENTS.md b/docs/zh/reference/sql/deployment_manage/SHOW_DEPLOYMENTS.md
index 33c14c5f683..2e709a1c55a 100644
--- a/docs/zh/reference/sql/deployment_manage/SHOW_DEPLOYMENTS.md
+++ b/docs/zh/reference/sql/deployment_manage/SHOW_DEPLOYMENTS.md
@@ -1,38 +1,40 @@
# 查看 DEPLOYMENTS 列表
+`SHOW DEPLOYMENTS`语句用于显示处于在线请求模式的当前数据库下,已经部署的任务列表。
+
+
```SQL
SHOW DEPLOYMENTS;
```
-`SHOW DEPLOYMENTS`语句用户显示当前数据库下已经部署的Online serving列表。
## Example
-创建一个数据库,并设置为当前数据库:
+创建一个数据库,并设置为当前数据库:
```sql
CREATE DATABASE db1;
--- SUCCEED: Create database successfully
+-- SUCCEED
USE db1;
-- SUCCEED: Database changed
```
-创建一张表`t1`:
+创建一张表`t1`:
```sql
CREATE TABLE t1(col0 STRING);
--- SUCCEED: Create successfully
+-- SUCCEED
```
-部署表t1的查询语句到OnlineServing:
+部署表t1的查询语句:
```sql
DEPLOY demo_deploy select col0 from t1;
--- SUCCEED: deploy successfully
+-- SUCCEED
```
-查看当前数据库下所有的deployments:
+查看当前数据库下已部署的所有任务:
```sql
SHOW DEPLOYMENTS;
diff --git a/docs/zh/reference/sql/deployment_manage/index.rst b/docs/zh/reference/sql/deployment_manage/index.rst
index a4846c48b48..c794c5d0ef0 100644
--- a/docs/zh/reference/sql/deployment_manage/index.rst
+++ b/docs/zh/reference/sql/deployment_manage/index.rst
@@ -10,4 +10,4 @@ DEPLOYMENT 管理
DROP_DEPLOYMENT_STATEMENT
SHOW_DEPLOYMENTS
SHOW_DEPLOYMENT
- ONLINE_SERVING_REQUIREMENTS
+ ONLINE_REQUEST_REQUIREMENTS
diff --git a/docs/zh/reference/sql/dml/DELETE_STATEMENT.md b/docs/zh/reference/sql/dml/DELETE_STATEMENT.md
new file mode 100644
index 00000000000..f97105af759
--- /dev/null
+++ b/docs/zh/reference/sql/dml/DELETE_STATEMENT.md
@@ -0,0 +1,23 @@
+# DELETE
+
+## 语法
+
+```sql
+DeleteStmt ::=
+ DELETE FROM TableName WHERE where_condition
+
+TableName ::=
+ Identifier ('.' Identifier)?
+```
+
+**说明**
+
+`DELETE` 语句用于删除指定列的索引下,与给定值对应的所有数据。
+
+## Examples
+
+```SQL
+DELETE FROM t1 WHERE col1 = 'aaaa';
+
+DELETE FROM t1 WHERE col1 = 'aaaa' and col2 = 'bbbb';
+```
\ No newline at end of file
diff --git a/docs/zh/reference/sql/dml/INSERT_STATEMENT.md b/docs/zh/reference/sql/dml/INSERT_STATEMENT.md
index 3d4d60332e3..b588aeeb944 100644
--- a/docs/zh/reference/sql/dml/INSERT_STATEMENT.md
+++ b/docs/zh/reference/sql/dml/INSERT_STATEMENT.md
@@ -1,6 +1,6 @@
# INSERT
-OpenMLDB 支持单行和多行插入语句
+OpenMLDB 支持一次插入单行或多行数据。
## syntax
@@ -21,12 +21,12 @@ value_list:
INSERT INTO t1 values(1, 2, 3.0, 4.0, "hello");
-- insert a row into table with given columns's values
-INSERT INTO t1(COL1, COL2, COL5) values(1, 2, "hello")
+INSERT INTO t1(COL1, COL2, COL5) values(1, 2, "hello");
-- insert multiple rows into table with all columns
-INSERT INTO t1 values(1, 2, 3.0, 4.0, "hello"), (10, 20, 30.0, 40.0, "world"), ;
+INSERT INTO t1 values(1, 2, 3.0, 4.0, "hello"), (10, 20, 30.0, 40.0, "world");
-- insert multiple rows into table with given columns's values
-INSERT INTO t1(COL1, COL2, COL5) values(1, 2, "hello"), (10, 20, "world")
+INSERT INTO t1(COL1, COL2, COL5) values(1, 2, "hello"), (10, 20, "world");
```
diff --git a/docs/zh/reference/sql/dml/LOAD_DATA_STATEMENT.md b/docs/zh/reference/sql/dml/LOAD_DATA_STATEMENT.md
index 57bf558ef45..ac0ec4bf000 100644
--- a/docs/zh/reference/sql/dml/LOAD_DATA_STATEMENT.md
+++ b/docs/zh/reference/sql/dml/LOAD_DATA_STATEMENT.md
@@ -1,39 +1,45 @@
# LOAD DATA INFILE
-
+`LOAD DATA INFILE`语句能高效地将文件中的数据读取到数据库中的表中。`LOAD DATA INFILE` 与 `SELECT INTO OUTFILE`互补。要将数据从 table 导出到文件,请使用[SELECT INTO OUTFILE](../dql/SELECT_INTO_STATEMENT.md)。要将文件数据导入到 table 中,请使用`LOAD DATA INFILE`。
## Syntax
```sql
LoadDataInfileStmt
- ::= 'LOAD' 'DATA' 'INFILE' filePath LoadDataInfileOptionsList
-filePath ::= string_literal
+ ::= 'LOAD' 'DATA' 'INFILE' filePath 'INTO' 'TABLE' tableName LoadDataInfileOptionsList
+filePath
+ ::= string_literal
+
+tableName
+ ::= string_literal
+
LoadDataInfileOptionsList
- ::= 'OPTIONS' '(' LoadDataInfileOptionItem (',' LoadDataInfileOptionItem)* ')'
+ ::= 'OPTIONS' '(' LoadDataInfileOptionItem (',' LoadDataInfileOptionItem)* ')'
LoadDataInfileOptionItem
- ::= 'DELIMITER' '=' string_literal
- |'HEADER' '=' bool_literal
- |'NULL_VALUE' '=' string_literal
- |'FORMAT' '=' string_literal
+ ::= 'DELIMITER' '=' string_literal
+ |'HEADER' '=' bool_literal
+ |'NULL_VALUE' '=' string_literal
+ |'FORMAT' '=' string_literal
```
+下表展示了`LOAD DATA INFILE`语句的配置项。
+
+| 配置项 | 类型 | 默认值 | 描述 |
+| ---------- | ------- | ------ |----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
+| delimiter | String | , | 列分隔符,默认为`,`。 |
+| header | Boolean | true | 是否包含表头, 默认为`true` 。 |
+| null_value | String | null | NULL值,默认填充`"null"`。加载时,遇到null_value的字符串将被转换为`"null"`,插入表中。 |
+| format | String | csv | 导入文件的格式:<br />`csv`:不显示指明format时,默认为该值<br />`parquet`:集群版还支持导入parquet格式文件,单机版不支持。 |
+| quote | String | "" | 输入数据的包围字符串。字符串长度<=1。默认为"",表示解析数据,不特别处理包围字符串。配置包围字符后,被包围字符包围的内容将作为一个整体解析。例如,当配置包围字符串为"#"时, `1, 1.0, #This is a string field, even there is a comma#`将被解析为三个field:第一个是整数1,第二个是浮点数1.0,第三个是一个字符串。 |
+| mode | String | "error_if_exists" | 导入模式:<br />`error_if_exists`: 仅离线模式可用,若离线表已有数据则报错。<br />`overwrite`: 仅离线模式可用,数据将覆盖离线表数据。<br />`append`:离线在线均可用,若文件已存在,数据将追加到原文件后面。 |
+| deep_copy | Boolean | true | `deep_copy=false`仅支持离线load, 可以指定`INFILE` Path为该表的离线存储地址,从而不需要硬拷贝。 |
-`LOAD DATA INFILE`语句以非常高的速度将文件中的行读取到 table 中。`LOAD DATA INFILE` 与 `SELECT ... INTO OUTFILE`互补。要将数据从 table 写入文件,请使用[SELECT...INTO OUTFILE](../dql/SELECT_INTO_STATEMENT.md))。要将文件读回到 table 中,请使用`LOAD DATA INFILE`。两条语句的大部分配置项相同,具体包括:
-| 配置项 | 类型 | 默认值 | 描述 |
-| ---------- | ------- | ------ | ------------------------------------------------------------ |
-| delimiter | String | , | 列分隔符,默认为`,` |
-| header | Boolean | true | 是否包含表头, 默认为`true` |
-| null_value | String | null | NULL值,默认填充`"null"`。加载时,遇到null_value的字符串将被转换为NULL,插入表中。 |
-| format | String | csv | 加载文件的格式,默认为`csv`。请补充一下其他的可选格式。 |
-| quote | String | "" | 输入数据的包围字符串。字符串长度<=1。默认为"",表示解析数据,不特别处理包围字符串。配置包围字符后,被包围字符包围的内容将作为一个整体解析。例如,当配置包围字符串为"#"时, `1, 1.0, #This is a string field, even there is a comma#`将为解析为三个filed.第一个是整数1,第二个是浮点1.0,第三个是一个字符串。 |
-| mode | String | "error_if_exists" | 导入模式:<br />`error_if_exists`: 仅离线模式可用,若离线表已有数据则报错。<br />`overwrite`: 仅离线模式可用,数据将覆盖离线表数据。<br />`append`:离线在线均可用,若文件已存在,数据将追加到原文件后面。 |
-| deep_copy | Boolean | true | `deep_copy=false`仅支持离线load, 可以指定`INFILE` Path为该表的离线存储地址,从而不需要硬拷贝。|
```{note}
-在集群版中,`LOAD DATA INFILE`语句,根据当前执行模式(execute_mode)决定将数据导入到在线或离线存储。单机版中没有存储区别,同时也不支持`deep_copy`选项。
+在集群版中,`LOAD DATA INFILE`语句会根据当前执行模式(execute_mode)决定将数据导入到在线或离线存储。单机版中没有存储区别,同时也不支持`deep_copy`选项。
在线导入只能使用append模式。
-离线软拷贝导入后,OpenMLDB不应修改**软连接中的数据**,因此,如果当前离线数据是软连接,就不再支持append导入。并且,当前软连接的情况下,使用overwrite模式的硬拷贝,也不会删除软连接的数据。
+离线软拷贝导入后,OpenMLDB不应修改**软连接中的数据**,因此,如果当前离线数据是软连接,就不再支持`append`方式导入。并且,当前软连接的情况下,使用`overwrite`模式的硬拷贝,也不会删除软连接的数据。
```
```{warning} INFILE Path
@@ -41,14 +47,14 @@ LoadDataInfileOptionItem
`INFILE`路径的读取是由batchjob来完成的,如果是相对路径,就需要batchjob可以访问到的相对路径。
-在生产环境中,batchjob的执行通常是yarn集群调度,并不能确定由谁来执行。在测试环境中,如果也是多机部署,也很难确定batchjob在哪里运行。
+在生产环境中,batchjob的执行通常由yarn集群调度,难以确定具体的执行者。在测试环境中,如果也是多机部署,难以确定batchjob的具体执行者。
所以,请尽量使用绝对路径。单机测试中,本地文件用`file://`开头;生产环境中,推荐使用hdfs等文件系统。
```
## SQL语句模版
```sql
-LOAD DATA INFILE 'file_name' OPTIONS (key = value, ...)
+LOAD DATA INFILE 'file_name' INTO TABLE 'table_name' OPTIONS (key = value, ...);
```
## Examples:
@@ -57,18 +63,18 @@ LOAD DATA INFILE 'file_name' OPTIONS (key = value, ...)
```sql
set @@execute_mode='online';
-LOAD DATA INFILE 'data.csv' INTO TABLE t1 ( delimit = ',' );
+LOAD DATA INFILE 'data.csv' INTO TABLE t1 OPTIONS(delimiter = ',' );
```
-从`data.csv`文件读取数据到表`t1`中。并使用`,`作为列分隔符, 字符串"NA"将被替换为NULL。
+从`data.csv`文件读取数据到表`t1`中,使用`,`作为列分隔符,字符串"NA"将被替换为NULL。
```sql
-LOAD DATA INFILE 'data.csv' INTO TABLE t1 ( delimit = ',', nullptr_value='NA');
+LOAD DATA INFILE 'data.csv' INTO TABLE t1 OPTIONS(delimiter = ',', null_value='NA');
```
将`data_path`软拷贝到表`t1`中,作为离线数据。
```sql
set @@execute_mode='offline';
-LOAD DATA INFILE 'data_path' INTO TABLE t1 ( deep_copy=true );
+LOAD DATA INFILE 'data_path' INTO TABLE t1 OPTIONS(deep_copy=false);
```
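+
+下面是一个同时使用`quote`与`mode`选项的离线导入示例。该示例仅为示意,其中的 HDFS 路径为假设的示例路径:
+```sql
+set @@execute_mode='offline';
+-- 被 # 包围的字段将作为一个整体解析,已有的离线数据将被覆盖
+LOAD DATA INFILE 'hdfs://namenode:9000/demo/data.csv' INTO TABLE t1 OPTIONS(delimiter = ',', quote = '#', mode = 'overwrite');
+```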
diff --git a/docs/zh/reference/sql/dml/index.rst b/docs/zh/reference/sql/dml/index.rst
index 318d26c2713..8691351d9be 100644
--- a/docs/zh/reference/sql/dml/index.rst
+++ b/docs/zh/reference/sql/dml/index.rst
@@ -8,3 +8,4 @@
INSERT_STATEMENT
LOAD_DATA_STATEMENT
+ DELETE_STATEMENT
diff --git a/docs/zh/reference/sql/dql/GROUP_BY_CLAUSE.md b/docs/zh/reference/sql/dql/GROUP_BY_CLAUSE.md
index fc5e41332da..ef17465efd1 100644
--- a/docs/zh/reference/sql/dql/GROUP_BY_CLAUSE.md
+++ b/docs/zh/reference/sql/dql/GROUP_BY_CLAUSE.md
@@ -1,7 +1,5 @@
# GROUP BY Clause
-所有的group by目前仅仅批模式支持(也就是控制台的调试SQL支持,离线模式还是开发中)
-
## Syntax
```SQL
@@ -16,26 +14,23 @@ SELECT select_expr [,select_expr...] FROM ... GROUP BY ...
```
## 边界说明
+在单机版中,所有执行模式均支持`GROUP BY`。集群版各执行模式的支持情况如下。
-| SELECT语句元素 | 状态 | 说明 |
-| :-------------- | ------------- | :----------------------------------------------------------- |
-| GROUP BY Clause | Online 不支持 | Group By 子句用于对查询结果集进行分组。分组表达式列表仅支持简单列。 |
-
-
+| SELECT语句元素 | 离线模式 | 在线预览模式 | 在线请求模式 | 说明 |
+|:----------------------------------------| --------- | ------------ |--------|:----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
+| GROUP BY Clause | **``✓``** | | | Group By 子句用于对查询结果集进行分组。分组表达式的列表仅支持直接给出列名,如`group by c1,c2,...` ,不支持较复杂的写法。 |
## Example
-### 1. 按列分组后聚合
+ **1. 按列分组后聚合**
```SQL
--- desc: 简单SELECT分组KEY
- SELECT COL1, SUM(COL2), AVG(COL2) FROM t1 group by COL1;
+SELECT COL1, SUM(COL2), AVG(COL2) FROM t1 group by COL1;
```
-### 2. 按两列分组后聚合
+ **2. 按两列分组后聚合**
```SQL
--- desc: 简单SELECT分组KEY
- SELECT COL1, SUM(COL2), AVG(COL2) FROM t1 group by COL1, COL0;
+SELECT COL1, SUM(COL2), AVG(COL2) FROM t1 group by COL1, COL0;
```
diff --git a/docs/zh/reference/sql/dql/HAVING_CLAUSE.md b/docs/zh/reference/sql/dql/HAVING_CLAUSE.md
index 473c4e397a3..ed0ca1d8621 100644
--- a/docs/zh/reference/sql/dql/HAVING_CLAUSE.md
+++ b/docs/zh/reference/sql/dql/HAVING_CLAUSE.md
@@ -20,21 +20,23 @@ SELECT select_expr [,select_expr...] FROM ... GROUP BY ... HAVING having_conditi
| SELECT语句元素 | 离线模式 | 在线预览模式 | 在线请求模式 | 说明 |
| :--------------------------------------------- | --------- | ------------ | ------------ |:---------------------------------------------------------------------|
-| HAVING Clause | **``✓``** | | | Having 子句与 Where 子句作用类似。Having 子句过滤 GroupBy 后的各种数据,Where 子句在聚合前进行过滤。 |## Example
+| HAVING Clause | **``✓``** | | | Having 子句与 Where 子句作用类似。Having 子句过滤 GroupBy 后的各种数据,Where 子句在聚合前进行过滤。 |
-### 1. 分组后按聚合结果过滤
+
+## Example
+**1. 分组后按聚合结果过滤**
```SQL
SELECT COL1, SUM(COL2), AVG(COL2) FROM t1 group by COL1 HAVING SUM(COL2) > 1000;
```
-### 2. 两列分组后按聚合结果过滤
+**2. 两列分组后按聚合结果过滤**
```sql
SELECT COL1, SUM(COL2), AVG(COL2) FROM t1 group by COL1, COL0 HAVING SUM(COL2) > 1000;
```
-### 3. 分组后按分组列过滤
+**3. 分组后按分组列过滤**
```sql
SELECT COL1, SUM(COL2), AVG(COL2) FROM t1 group by COL1 HAVING COL1 ='a';
diff --git a/docs/zh/reference/sql/dql/JOIN_CLAUSE.md b/docs/zh/reference/sql/dql/JOIN_CLAUSE.md
index ff197145e3d..6cacd310202 100644
--- a/docs/zh/reference/sql/dql/JOIN_CLAUSE.md
+++ b/docs/zh/reference/sql/dql/JOIN_CLAUSE.md
@@ -2,11 +2,12 @@
OpenMLDB目前仅支持`LAST JOIN`一种**JoinType**。
-LAST JOIN可以看作一种特殊的LEFT JOIN。在满足JOIN条件的前提下,左表的每一行拼取一条符合条件的最后一行。LAST JOIN分为无排序拼接,和排序拼接。
+LAST JOIN可以看作一种特殊的LEFT JOIN。在满足JOIN条件的前提下,左表的每一行拼接符合条件的最后一行。LAST JOIN分为无排序拼接,和排序拼接。
- 无排序拼接是指:未对右表作排序,直接拼接。
-- 排序拼接是指:在先对右表排序,然后再拼接。
+- 排序拼接是指:先对右表排序,然后再拼接。
+与LEFT JOIN相同,LAST JOIN也会返回左表中所有行,即使右表中没有匹配的行。
## Syntax
```
@@ -18,47 +19,134 @@ JoinType ::= 'LAST'
## SQL语句模版
```sql
-SELECT ... FROM table_ref LAST JOIN table_ref;
+SELECT ... FROM table_ref LAST JOIN table_ref ON expression;
```
## 边界说明
-| SELECT语句元素 | 状态 | 说明 |
-| :------------- | --------------- | :----------------------------------------------------------- |
-| JOIN Clause | 仅支持LAST JOIN | 表示数据来源多个表JOIN。OpenMLDB目前仅支持LAST JOIN。在Online Serving时,需要遵循[Online Serving下LAST JOIN的使用规范](../deployment_manage/ONLINE_SERVING_REQUIREMENTS.md#online-serving下last-join的使用规范) |
+| SELECT语句元素 | 离线模式 | 在线预览模式 | 在线请求模式 | 说明 |
+| :--------------------------------------------- | --------- | ------------ | ------------ |:---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
+| JOIN Clause| **``✓``** | **``✓``** | **``✓``** | 表示数据来源多个表JOIN。OpenMLDB目前仅支持LAST JOIN。在线请求模式下,需要遵循[在线请求模式下LAST JOIN的使用规范](../deployment_manage/ONLINE_REQUEST_REQUIREMENTS.md#online-serving下last-join的使用规范) |
-### LAST JOIN without ORDER BY
-#### Example: **LAST JOIN无排序拼接**
+### 未排序的LAST JOIN
-```sql
--- desc: 简单拼表查询 without ORDER BY
-
-SELECT t1.col1 as t1_col1, t2.col1 as t2_col2 from t1 LAST JOIN t2 ON t1.col1 = t2.col1
-```
-`LAST JOIN`无排序拼接时,拼接第一条命中的数据行
+`LAST JOIN`无排序拼接时,拼接最后一条命中的数据行。
+#### 计算逻辑示例
![Figure 7: last join without order](../dql/images/last_join_without_order.png)
-以左表第二行为例,符合条件的右表是无序的,命中条件的有2条,选择最后一条`5, b, 2020-05-20 10:11:12`
+以左表第二行为例,符合条件的右表是无序的,命中条件的有2条,选择最后一条`5, b, 2020-05-20 10:11:12`。最后的拼接结果如下。
![Figure 8: last join without order result](../dql/images/last_join_without_order2.png)
-最后的拼表结果如上图所示。
-
-### LAST JOIN with ORDER BY
+```{note}
+为了实现上图展示的拼接效果,即使您使用的是离线模式,也请遵循[在线请求模式下LAST JOIN的使用规范](../deployment_manage/ONLINE_REQUEST_REQUIREMENTS.md#online-serving下last-join的使用规范),如下文的SQL样例所示。
+否则由于底层存储顺序的不确定,尽管执行结果也是正确的,却可能无法复现上述拼接结果。
+```
-#### Example: LAST JOIN排序拼接
+#### SQL示例
+**使用OpenMLDB SQL语句复现上述计算逻辑的过程如下。**
-```SQL
--- desc: 简单拼表查询 with ORDER BY
-SELECT t1.col1 as t1_col1, t2.col1 as t2_col2 from t1 LAST JOIN t2 ORDER BY t2.std_ts ON t1.col1 = t2.col1
+启动单机版OpenMLDB服务端和CLI客户端
+```bash
+./init.sh standalone
+./openmldb/bin/openmldb --host 127.0.0.1 --port 6527
+```
+建立上述左表t1,插入数据。为了便于查看结果,在col1上建立单列索引,以std_ts作为TS。在本例中也可以不在左表上建立索引,不影响拼接结果。
+```sql
+>CREATE TABLE t1 (id INT, col1 STRING,std_ts TIMESTAMP,INDEX(KEY=col1,ts=std_ts));
+SUCCEED
+>INSERT INTO t1 values(1,'a',20200520101112);
+SUCCEED
+>INSERT INTO t1 values(2,'b',20200520101114);
+SUCCEED
+>INSERT INTO t1 values(3,'c',20200520101116);
+SUCCEED
+>SELECT * from t1;
+ ---- ------ ----------------
+ id col1 std_ts
+ ---- ------ ----------------
+ 1 a 20200520101112
+ 2 b 20200520101114
+ 3 c 20200520101116
+ ---- ------ ----------------
+
+3 rows in set
+```
+
+建立上述右表t2,建立索引,插入数据。
+```{note}
+底层存储顺序不一定与插入顺序一致,而底层存储顺序会影响JOIN时的命中顺序。本例为了便于验证拼接结果,需要实现上图右表的存储顺序。t2必须建立下述索引(注意不能添加TS),且逐条按序插入数据,原因见[列索引](https://openmldb.ai/docs/zh/main/reference/sql/ddl/CREATE_TABLE_STATEMENT.html#columnindex)。
+```
+```sql
+>CREATE TABLE t2 (id INT, col1 string,std_ts TIMESTAMP,INDEX(KEY=col1));
+SUCCEED
+>INSERT INTO t2 values(1,'a',20200520101112);
+SUCCEED
+>INSERT INTO t2 values(2,'a',20200520101113);
+SUCCEED
+>INSERT INTO t2 values(3,'b',20200520101113);
+SUCCEED
+>INSERT INTO t2 values(4,'c',20200520101114);
+SUCCEED
+>INSERT INTO t2 values(5,'b',20200520101112);
+SUCCEED
+>INSERT INTO t2 values(6,'c',20200520101113);
+SUCCEED
+>SELECT * from t2;
+ ---- ------ ----------------
+ id col1 std_ts
+ ---- ------ ----------------
+ 2 a 20200520101113
+ 1 a 20200520101112
+ 5 b 20200520101112
+ 3 b 20200520101113
+ 6 c 20200520101113
+ 4 c 20200520101114
+ ---- ------ ----------------
+
+6 rows in set
+```
+执行LAST JOIN
+```sql
+> SELECT * from t1 LAST JOIN t2 ON t1.col1 = t2.col1;
+ ---- ------ ---------------- ---- ------ ----------------
+ id col1 std_ts id col1 std_ts
+ ---- ------ ---------------- ---- ------ ----------------
+ 1 a 20200520101112 2 a 20200520101113
+ 2 b 20200520101114 5 b 20200520101112
+ 3 c 20200520101116 6 c 20200520101113
+ ---- ------ ---------------- ---- ------ ----------------
+
+3 rows in set
+```
+若不在t1上建立索引,拼接结果相同,仅SELECT展示顺序不同。
+```sql
+> SELECT * from t1 LAST JOIN t2 ON t1.col1 = t2.col1;
+ ---- ------ ---------------- ---- ------ ----------------
+ id col1 std_ts id col1 std_ts
+ ---- ------ ---------------- ---- ------ ----------------
+ 3 c 20200520101116 6 c 20200520101113
+ 1 a 20200520101112 2 a 20200520101113
+ 2 b 20200520101114 5 b 20200520101112
+ ---- ------ ---------------- ---- ------ ----------------
+
+3 rows in set
+```
+```{note}
+`LAST JOIN`使用了索引优化:根据`LAST JOIN`的 condition 和 order by 列寻找最匹配的表索引;如果有index,就会使用该index的ts项作为未排序last join隐式使用的order;反之没有index,就使用表的存储顺序。没有索引的表的底层存储顺序是不可预测的。请注意,在建表时若没有显式指出索引的ts项,OpenMLDB会使用该条数据被插入时的时间戳作为ts。
```
+
+
+### 排序的LAST JOIN
`LAST JOIN`时配置 `Order By` ,则右表按Order排序,拼接最后一条命中的数据行。
+#### 计算逻辑示例
+
![Figure 9: last join with order](../dql/images/last_join_with_order1.png)
以左表第二行为例,符合条件的右表有2条,按`std_ts`排序后,选择最后一条`3, b, 2020-05-20 10:11:13`
@@ -66,3 +154,88 @@ SELECT t1.col1 as t1_col1, t2.col1 as t2_col2 from t1 LAST JOIN t2 ORDER BY t2.s
![Figure 10: last join with order result](../dql/images/last_join_with_order2.png)
最后的拼表结果如上图所示。
+
+#### SQL示例
+**使用OpenMLDB SQL语句复现上述计算逻辑的过程如下。**
+
+建立上述左表t1,插入数据。可以不建立索引。
+```SQL
+>CREATE TABLE t1 (id INT, col1 STRING,std_ts TIMESTAMP);
+SUCCEED
+>INSERT INTO t1 values(1,'a',20200520101112);
+SUCCEED
+>INSERT INTO t1 values(2,'b',20200520101114);
+SUCCEED
+>INSERT INTO t1 values(3,'c',20200520101116);
+SUCCEED
+>SELECT * from t1;
+ ---- ------ ----------------
+ id col1 std_ts
+ ---- ------ ----------------
+ 1 a 20200520101112
+ 2 b 20200520101114
+ 3 c 20200520101116
+ ---- ------ ----------------
+
+3 rows in set
+```
+建立上述右表t2,插入数据。可以不建立索引。
+```sql
+>CREATE TABLE t2 (id INT, col1 string,std_ts TIMESTAMP);
+SUCCEED
+>INSERT INTO t2 values(1,'a',20200520101112);
+SUCCEED
+>INSERT INTO t2 values(2,'a',20200520101113);
+SUCCEED
+>INSERT INTO t2 values(3,'b',20200520101113);
+SUCCEED
+>INSERT INTO t2 values(4,'c',20200520101114);
+SUCCEED
+>INSERT INTO t2 values(5,'b',20200520101112);
+SUCCEED
+>INSERT INTO t2 values(6,'c',20200520101113);
+SUCCEED
+>SELECT * from t2;
+ ---- ------ ----------------
+ id col1 std_ts
+ ---- ------ ----------------
+ 2 a 20200520101113
+ 1 a 20200520101112
+ 5 b 20200520101112
+ 3 b 20200520101113
+ 6 c 20200520101113
+ 4 c 20200520101114
+ ---- ------ ----------------
+
+6 rows in set
+```
+执行LAST JOIN
+```sql
+>SELECT * from t1 LAST JOIN t2 ORDER BY t2.std_ts ON t1.col1 = t2.col1;
+ ---- ------ ---------------- ---- ------ ----------------
+ id col1 std_ts id col1 std_ts
+ ---- ------ ---------------- ---- ------ ----------------
+ 1 a 20200520101112 2 a 20200520101113
+ 2 b 20200520101114 3 b 20200520101113
+ 3 c 20200520101116 4 c 20200520101114
+ ---- ------ ---------------- ---- ------ ----------------
+```
+
+
+### LAST JOIN 未命中
+以下示例展示了当右表没有任何一行数据能与左表中某行匹配时的执行结果。
+
+在[排序的LAST JOIN](#排序的last-join)中创建的t1表中插入新行并执行LAST JOIN
+```sql
+>INSERT INTO t1 values(4,'d',20220707111111);
+SUCCEED
+>SELECT * from t1 LAST JOIN t2 ORDER BY t2.std_ts ON t1.col1 = t2.col1;
+ ---- ------ ---------------- ------ ------ ----------------
+ id col1 std_ts id col1 std_ts
+ ---- ------ ---------------- ------ ------ ----------------
+ 4 d 20220707111111 NULL NULL NULL
+ 3 c 20200520101116 4 c 20200520101114
+ 1 a 20200520101112 2 a 20200520101113
+ 2 b 20200520101114 3 b 20200520101113
+ ---- ------ ---------------- ------ ------ ----------------
+```
diff --git a/docs/zh/reference/sql/dql/NO_TABLE_SELECT_CLAUSE.md b/docs/zh/reference/sql/dql/NO_TABLE_SELECT_CLAUSE.md
index 1c06021e668..0cf3846eab4 100644
--- a/docs/zh/reference/sql/dql/NO_TABLE_SELECT_CLAUSE.md
+++ b/docs/zh/reference/sql/dql/NO_TABLE_SELECT_CLAUSE.md
@@ -20,11 +20,11 @@ SelectExpr ::= ( Identifier '.' ( Identifier '.' )? )? '*'
SELECT const_expr [, const_expr ...];
```
-## 2. SELECT语句元素
+## 边界说明
-| SELECT语句元素 | 状态 | 说明 |
-|:-----------| ------------------- | :----------------------------------------------------------- |
-| 无表SELECT语句 | OnlineServing不支持 | 无表Select语句计算常量表达式操作列表,表达式计算不需要依赖表和列 |
+| SELECT语句元素 | 离线模式 | 在线预览模式 | 在线请求模式 | 说明 |
+| :--------------------------------------------- | --------- | ------------ | ------------ |:-------------------------------------|
+| 无表SELECT语句 | **``✓``** | **``✓``** | | 无表Select语句计算给定的常量表达式操作列表,该计算不需要依赖表和列 |
#### Examples
diff --git a/docs/zh/reference/sql/dql/SELECT_INTO_STATEMENT.md b/docs/zh/reference/sql/dql/SELECT_INTO_STATEMENT.md
index 1b87373b863..4b677300d6e 100644
--- a/docs/zh/reference/sql/dql/SELECT_INTO_STATEMENT.md
+++ b/docs/zh/reference/sql/dql/SELECT_INTO_STATEMENT.md
@@ -9,32 +9,33 @@
SelectIntoStmt
::= SelectStmt 'INTO' 'OUTFILE' filePath SelectIntoOptionList
-filePath ::= string_literal
+filePath
+ ::= string_literal
SelectIntoOptionList
- ::= 'OPTIONS' '(' SelectInfoOptionItem (',' SelectInfoOptionItem)* ')'
+ ::= 'OPTIONS' '(' SelectInfoOptionItem (',' SelectInfoOptionItem)* ')'
SelectInfoOptionItem
- ::= 'DELIMITER' '=' string_literal
- |'HEADER' '=' bool_literal
- |'NULL_VALUE' '=' string_literal
- |'FORMAT' '=' string_literal
- |'MODE' '=' string_literal
+ ::= 'DELIMITER' '=' string_literal
+ |'HEADER' '=' bool_literal
+ |'NULL_VALUE' '=' string_literal
+ |'FORMAT' '=' string_literal
+ |'MODE' '=' string_literal
```
`SELECT INTO OUTFILE`分为三个部分。
-- 第一部分是一个普通的SELECT语句,通过这个SELECT语句来查询所需要的数据;
+- 第一部分是一个普通的`SELECT`语句,通过这个`SELECT`语句来查询所需要的数据;
- 第二部分是`filePath`,定义将查询的记录导出到哪个文件中;
- 第三部分是`SelectIntoOptionList`为可选选项,其可能的取值有:
-| 配置项 | 类型 | 默认值 | 描述 |
-| ---------- | ------- | --------------- |---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
-| delimiter | String | , | 列分隔符,默认为‘`,`’ |
-| header | Boolean | true | 是否包含表头, 默认为`true` |
-| null_value | String | null | NULL填充值,默认填充`"null"` |
-| format | String | csv | 输出文件格式:<br />`csv`:不显示指明format时,默认为该值<br />`parquet`:集群版还支持导出parquet格式文件,单机版不支持 |
-| mode | String | error_if_exists | 输出模式:<br />`error_if_exists`: 表示若文件已经在则报错。<br />`overwrite`: 表示若文件已存在,数据将覆盖原文件内容。<br />`append`:表示若文件已存在,数据将追加到原文件后面。<br />不显示配置时,默认为`error_if_exists`。 |
-| quote | String | "" | 输出数据的包围字符串,字符串长度<=1。默认为"",表示输出数据包围字符串为空。当配置包围字符串时,将使用包围字符串包围一个field。例如,我们配置包围字符串为`"#"`,原始数据为{1 1.0, This is a string, with comma}。输出的文本为`#1#, #1.0#, #This is a string, with comma#。` |
+| 配置项 | 类型 | 默认值 | 描述 |
+| ---------- | ------- | --------------- |------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
+| delimiter | String | , | 列分隔符,默认为‘`,`’ |
+| header | Boolean | true | 是否包含表头, 默认为`true` |
+| null_value | String | null | NULL填充值,默认填充`"null"` |
+| format | String | csv | 输出文件格式:<br />`csv`:不显示指明format时,默认为该值<br />`parquet`:集群版还支持导出parquet格式文件,单机版不支持 |
+| mode | String | error_if_exists | 输出模式:<br />`error_if_exists`: 表示若文件已经存在则报错。<br />`overwrite`: 表示若文件已存在,数据将覆盖原文件内容。<br />`append`:表示若文件已存在,数据将追加到原文件后面。<br />不显示配置时,默认为`error_if_exists`。 |
+| quote | String | "" | 输出数据的包围字符串,字符串长度<=1。默认为"",表示输出数据包围字符串为空。当配置包围字符串时,将使用包围字符串包围一个field。例如,我们配置包围字符串为`"#"`,原始数据为{1, 1.0, This is a string, with comma}。输出的文本为`1, 1.0, #This is a string, with comma#。` |
````{important}
请注意,目前仅有集群版支持quote字符的转义。所以,如果您使用的是单机版,请谨慎选择quote字符,保证原始字符串内并不包含quote字符。
@@ -60,5 +61,24 @@ SELECT col1, col2, col3 FROM t1 INTO OUTFILE 'data.csv' OPTIONS ( delimiter = ',
SELECT col1, col2, col3 FROM t1 INTO OUTFILE 'data2.csv' OPTIONS ( delimiter = '|', null_value='NA');
```
+## Q&A
+Q: 执行 select into 时报错 Found duplicate column(s),怎么办?
+```
+Exception in thread "main" org.apache.spark.sql.AnalysisException: Found duplicate column(s) when inserting into file:/tmp/out: `c1`;
+ at org.apache.spark.sql.util.SchemaUtils$.checkColumnNameDuplication(SchemaUtils.scala:90)
+ at org.apache.spark.sql.execution.datasources.InsertIntoHadoopFsRelationCommand.run(InsertIntoHadoopFsRelationCommand.scala:84)
+ at org.apache.spark.sql.execution.command.DataWritingCommandExec.sideEffectResult$lzycompute(commands.scala:108)
+ at org.apache.spark.sql.execution.command.DataWritingCommandExec.sideEffectResult(commands.scala:106)
+ at org.apache.spark.sql.execution.command.DataWritingCommandExec.doExecute(commands.scala:131)
+ at org.apache.spark.sql.execution.SparkPlan.$anonfun$execute$1(SparkPlan.scala:175)
+ at org.apache.spark.sql.execution.SparkPlan.$anonfun$executeQuery$1(SparkPlan.scala:213)
+ at org.apache.spark.rdd.RDDOperationScope$.withScope(RDDOperationScope.scala:151)
+ at org.apache.spark.sql.execution.SparkPlan.executeQuery(SparkPlan.scala:210)
+ at org.apache.spark.sql.execution.SparkPlan.execute(SparkPlan.scala:171)
+ at org.apache.spark.sql.execution.QueryExecution.toRdd$lzycompute(QueryExecution.scala:122)
+ at org.apache.spark.sql.execution.QueryExecution.toRdd(QueryExecution.scala:121)
+ at org.apache.spark.sql.DataFrameWriter.$anonfun$runCommand$1(DataFrameWriter.scala:944)
+```
+A: 查询语句是允许列名重复的。但`SELECT INTO`除了查询还需要写入,写入中会检查重复列名。请避免重复列名,可以用`c1 as c_new`来重命名列。
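+
+例如,下面的改写(仅为示意,其中的输出路径为假设的示例路径)通过重命名避免了重复列名:
+```sql
+-- c1 被选择了两次,将其中一个重命名后再导出
+SELECT c1, c1 AS c1_new FROM t1 INTO OUTFILE '/tmp/out.csv' OPTIONS (mode = 'overwrite');
+```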
diff --git a/docs/zh/reference/sql/dql/SELECT_STATEMENT.md b/docs/zh/reference/sql/dql/SELECT_STATEMENT.md
index a66d454cf3b..3fb8365b941 100644
--- a/docs/zh/reference/sql/dql/SELECT_STATEMENT.md
+++ b/docs/zh/reference/sql/dql/SELECT_STATEMENT.md
@@ -86,18 +86,17 @@ WindowInstanceNotInWindow
### SelectExprList
-```
+```sql
SelectExprList
::= SelectExpr ( ',' SelectExpr )*
SelectExpr ::= ( Identifier '.' ( Identifier '.' )? )? '*'
| ( Expression | '{' Identifier Expression '}' ) ['AS' Identifier]
-
-
+
```
### TableRefs
-```
+```sql
TableRefs
::= EscapedTableRef ( ',' EscapedTableRef )*
TableRef ::= TableFactor
@@ -111,21 +110,23 @@ TableAsName
## SELECT语句元素
-| SELECT语句元素 | 状态 | 说明 |
-| :--------------------------------------------- | ---------------------- | :----------------------------------------------------------- |
-| `SELECT` [`SelectExprList`](#selectexprlist) | 已支持 | 投影操作列表,一般包括列名、表达式,或者是用 '*' 表示全部列 |
-| `FROM` [`TableRefs`](#tablerefs) | 已支持 | 表示数据来源,数据来源可以是一个表(`select * from t;`)或者是多个表JOIN (`select * from t1 join t2;`) 或者是0个表 ( `select 1+1;`) |
-| [`JOIN` Clause](../dql/JOIN_CLAUSE.md) | 仅支持LAST JOIN | 表示数据来源多个表JOIN。OpenMLDB目前仅支持LAST JOIN。在Online Serving时,需要遵循[Online Serving下OP的使用规范](../deployment_manage/ONLINE_SERVING_REQUIREMENTS.md) |
-| [`WHERE` Clause](../dql/WHERE_CLAUSE.md) | Online Serving不支持 | Where 子句用于设置过滤条件,查询结果中只会包含满足条件的数据。 |
-| [`GROUP BY` Clause](../dql/GROUP_BY_CLAUSE.md) | Online 不支持 | Group By 子句用于对查询结果集进行分组。分组表达式列表仅支持简单列。 |
-| [`HAVING` Clause](../dql/HAVING_CLAUSE.md) | Online 不支持 | Having 子句与 Where 子句作用类似,Having 子句可以让过滤 GroupBy 后的各种数据,Where 子句用于在聚合前过滤记录。 |
-| [`WINDOW` Clause](../dql/WINDOW_CLAUSE.md) | Online Training 不支持 | 窗口子句用于定义一个或者若干个窗口。窗口可以是有名或者匿名的。用户可以在窗口上调用聚合函数来进行一些分析型计算的操作(```sql agg_func() over window_name```)。在Online Serving时,需要遵循[Online Serving下OP的使用规范](../deployment_manage/ONLINE_SERVING_REQUIREMENTS.md) |
-| [`LIMIT` Clause](../dql/LIMIT_CLAUSE.md) | Online Serving不支持 | Limit 子句用于限制结果条数。OpenMLDB 目前仅支持Limit 接受一个参数,表示返回数据的最大行数; |
-| `ORDER BY` Clause | 不支持 | 标准SQL还支持OrderBy子句。OpenMLDB目前尚未支持Order子句。例如,查询语句`SELECT * from t1 ORDER BY col1;`在OpenMLDB中不被支持。 |
+| SELECT语句元素 | 离线模式 | 在线预览模式 | 在线请求模式 | 说明 |
+|:-----------------------------------------------| --------- | ------------ | ------------ |:--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
+| [`SELECT` Clause](#selectexprlist) | **``✓``** | **``✓``** | **``✓``** | 投影操作列表,一般包括列名、表达式,或者是用 `*` 表示全部列 |
+| [`FROM` Clause](#tablerefs) | **``✓``** | **``✓``** | **``✓``** | 表示数据来源,数据来源可以是一个表(`select * from t;`)或者是多个表 LAST JOIN (见[JOIN 子句](../dql/JOIN_CLAUSE.md)) 或者是0个表 ( `select 1+1;`),详见[NO_TABLE SELECT](../dql/NO_TABLE_SELECT_CLAUSE.md) |
+| [`JOIN` Clause](../dql/JOIN_CLAUSE.md) | **``✓``** | **``✓``** | **``✓``** | 表示数据来源多个表JOIN。OpenMLDB目前仅支持LAST JOIN。在线请求模式下,需要遵循[Online Request下LAST JOIN的使用规范](../deployment_manage/ONLINE_REQUEST_REQUIREMENTS.md#online-serving下last-join的使用规范) |
+| [`WHERE` Clause](../dql/WHERE_CLAUSE.md) | **``✓``** | **``✓``** | | Where 子句用于设置过滤条件,查询结果中只会包含满足条件的数据。 |
+| [`GROUP BY` Clause](../dql/GROUP_BY_CLAUSE.md) | **``✓``** | | | Group By 子句用于对查询结果集进行分组。分组表达式列表仅支持简单列。 |
+| [`HAVING` Clause](../dql/HAVING_CLAUSE.md) | **``✓``** | | | Having 子句与 Where 子句作用类似。Having 子句过滤 GroupBy 后的各种数据,Where 子句在聚合前进行过滤。 |
+| [`WINDOW` Clause](../dql/WINDOW_CLAUSE.md) | **``✓``** | | **``✓``** | 窗口子句用于定义一个或者若干个窗口。窗口可以是有名或者匿名的。用户可以在窗口上调用聚合函数来进行一些分析型计算的操作(```sql agg_func() over window_name```)。在线请求模式下,需要遵循[Online Request下Window的使用规范](../deployment_manage/ONLINE_REQUEST_REQUIREMENTS.md#online-serving下window的使用规范) |
+| [`LIMIT` Clause](../dql/LIMIT_CLAUSE.md) | **``✓``** | **``✓``** | **``✓``** | Limit子句用于限制返回的结果条数。目前Limit仅支持接受一个参数,表示返回数据的最大行数。 |
+| `ORDER BY` Clause | | | | 标准SQL还支持Order By子句。OpenMLDB目前尚未支持Order子句。例如,查询语句`SELECT * from t1 ORDER BY col1;`在OpenMLDB中不被支持。 |
```{warning}
在线模式或单机版的select,可能无法获取完整数据。
-因为一次查询可能在多台tablet server上进行大量的扫描,为了tablet server的稳定性,单台tablet server限制了最大扫描数据量,即`scan_max_bytes_size`。
+因为一次查询可能在多台tablet 上进行大量的扫描,为了tablet 的稳定性,单个tablet 限制了最大扫描数据量,即`scan_max_bytes_size`。
+
+如果出现select结果截断,tablet 会出现`reach the max byte ...`的日志,但查询不会报错。
-如果出现select结果截断,tablet server会出现`reach the max byte ...`的日志,但查询不会报错。
+在线模式或单机版都不适合做大数据的扫描,推荐使用集群版的离线模式。如果一定要调大扫描量,需要对每台tablet配置`--scan_max_bytes_size=xxx`,并重启tablet生效。
```
\ No newline at end of file
diff --git a/docs/zh/reference/sql/dql/WHERE_CLAUSE.md b/docs/zh/reference/sql/dql/WHERE_CLAUSE.md
index f82d20f08e4..640b9954c56 100644
--- a/docs/zh/reference/sql/dql/WHERE_CLAUSE.md
+++ b/docs/zh/reference/sql/dql/WHERE_CLAUSE.md
@@ -1,6 +1,6 @@
# WHERE Clause
-Where 子句用于设置过滤条件,查询结果中只会包含满足条件的数据
+Where 子句用于设置过滤条件,查询结果中只会包含满足条件的数据。
## Syntax
@@ -17,24 +17,23 @@ SELECT select_expr [,select_expr...] FROM ... WHERE where_condition
```
## 边界说明
+在单机版中,所有执行模式均支持`WHERE`子句。下表说明了集群版各模式的支持情况。
-| SELECT语句元素 | 状态 | 说明 |
-| :------------- | -------------------- | :----------------------------------------------------------- |
-| WHERE Clause | Online Serving不支持 | Where 子句用于设置过滤条件,查询结果中只会包含满足条件的数据。 |
+| SELECT语句元素 | 离线模式 | 在线预览模式 | 在线请求模式 | 说明 |
+| :--------------------------------------------- | --------- | ------------ | ------------ |:----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
+| WHERE Clause | **``✓``** | **``✓``** | | Where 子句用于设置过滤条件,查询结果中只会包含满足条件的数据。 |
## Example
### 简单条件过滤
```SQL
--- desc: SELECT简单过滤
- sql: SELECT COL1 FROM t1 where COL1 > 10;
+SELECT COL1 FROM t1 where COL1 > 10;
```
-### 复杂条件简单条件过滤
+### 复杂条件过滤
```sql
--- desc: SELECT过滤条件是复杂逻辑关系表达式
- sql: SELECT COL1 FROM t1 where COL1 > 10 and COL2 = 20 or COL1 =0;
+SELECT COL1 FROM t1 where COL1 > 10 and COL2 = 20 or COL1 =0;
```
diff --git a/docs/zh/reference/sql/dql/WINDOW_CLAUSE.md b/docs/zh/reference/sql/dql/WINDOW_CLAUSE.md
index 6845fc5a283..219793063d8 100644
--- a/docs/zh/reference/sql/dql/WINDOW_CLAUSE.md
+++ b/docs/zh/reference/sql/dql/WINDOW_CLAUSE.md
@@ -4,51 +4,61 @@
```sql
WindowClauseOptional
- ::= ( 'WINDOW' WindowDefinition ( ',' WindowDefinition )* )?
+ ::= ( 'WINDOW' WindowDefinition ( ',' WindowDefinition )* )?
+
WindowDefinition
- ::= WindowName 'AS' WindowSpec
+ ::= WindowName 'AS' WindowSpec
WindowSpec
- ::= '(' WindowSpecDetails ')'
-
-WindowSpecDetails
- ::= [ExistingWindowName] [WindowUnionClause] WindowPartitionClause WindowOrderByClause WindowFrameClause [WindowExcludeCurrentTime] [WindowInstanceNotInWindow]
+ ::= '(' WindowSpecDetails ')'
+WindowSpecDetails
+ ::= [ExistingWindowName] [WindowUnionClause] WindowPartitionClause WindowOrderByClause WindowFrameClause (WindowAttribute)*
WindowUnionClause
- :: = ( 'UNION' TableRefs)
+ :: = ( 'UNION' TableRefs)
WindowPartitionClause
- ::= ( 'PARTITION' 'BY' ByList )
+ ::= ( 'PARTITION' 'BY' ByList )
WindowOrderByClause
- ::= ( 'ORDER' 'BY' ByList )
-
+ ::= ( 'ORDER' 'BY' ByList )
WindowFrameClause
- ::= ( WindowFrameUnits WindowFrameExtent [WindowFrameMaxSize])
+ ::= ( WindowFrameUnits WindowFrameExtent [WindowFrameMaxSize])
WindowFrameUnits
- ::= 'ROWS'
- | 'ROWS_RANGE'
+ ::= 'ROWS'
+ | 'ROWS_RANGE'
WindowFrameExtent
- ::= WindowFrameStart
- | WindowFrameBetween
+ ::= WindowFrameStart
+ | WindowFrameBetween
+
WindowFrameStart
- ::= ( 'UNBOUNDED' | NumLiteral | IntervalLiteral ) ['OPEN'] 'PRECEDING'
- | 'CURRENT' 'ROW'
+ ::= ( 'UNBOUNDED' | NumLiteral | IntervalLiteral ) ['OPEN'] 'PRECEDING'
+ | 'CURRENT' 'ROW'
+
WindowFrameBetween
- ::= 'BETWEEN' WindowFrameBound 'AND' WindowFrameBound
+ ::= 'BETWEEN' WindowFrameBound 'AND' WindowFrameBound
+
WindowFrameBound
- ::= WindowFrameStart
- | ( 'UNBOUNDED' | NumLiteral | IntervalLiteral ) ['OPEN'] 'FOLLOWING'
-
-WindowExcludeCurrentTime
- ::= 'EXCLUDE' 'CURRENT_TIME'
+ ::= WindowFrameStart
+ | ( 'UNBOUNDED' | NumLiteral | IntervalLiteral ) ['OPEN'] 'FOLLOWING'
+
+WindowAttribute
+ ::= WindowExcludeCurrentTime
+ | WindowExcludeCurrentRow
+ | WindowInstanceNotInWindow
+
+WindowExcludeCurrentTime
+ ::= 'EXCLUDE' 'CURRENT_TIME'
+
+WindowExcludeCurrentRow
+ ::= 'EXCLUDE' 'CURRENT_ROW'
WindowInstanceNotInWindow
- :: = 'INSTANCE_NOT_IN_WINDOW'
+ :: = 'INSTANCE_NOT_IN_WINDOW'
```
*窗口调用函数*实现了类似于聚合函数的功能。 不同的是,窗口调用函数不需要将查询结果打包成一行输出—在查询输出中,每一行都是分开的。 然而,窗口调用函数可以扫描所有的行,根据窗口调用函数的分组规范(`PARTITION BY`列), 这些行可能会是当前行所在组的一部分。一个窗口调用函数的语法是下列之一:
@@ -62,35 +72,66 @@ function_name ( * ) OVER window_name
## SQL语句模版
-- ROWS WINDOW SQL模版
+- ROWS WINDOW(条数窗口) SQL模版
-```sqlite
+```sql
SELECT select_expr [, select_expr ...], window_function_name(expr) OVER window_name, ... FROM ... WINDOW AS window_name (PARTITION BY ... ORDER BY ... ROWS BETWEEN ... AND ...)
-
```
-- ROWS RANGE WINDOW SQL模版
+- ROWS RANGE WINDOW(时间窗口) SQL模版
```sql
SELECT select_expr [,select_expr...], window_function_name(expr) OVER window_name, ... FROM ... WINDOW AS window_name (PARTITION BY ... ORDER BY ... ROWS_RANGE BETWEEN ... AND ...)
```
+## 快速上手
+
+首先选择窗口类型:按时间划分窗口,还是按条数划分窗口。
+
+再确定窗口的大小。窗口大小的写法按窗口类型有所不同:
+1. 时间窗口:时间窗口通常使用s, m, h, d等时间单位,如果没有单位,默认为ms。比如:
+
+ [3小时前,当前行] - 3h preceding and current row
+ [3小时前,30分钟前] - 3h preceding and 30m preceding
+
+1. 条数窗口:条数不需要单位。比如:
+ [10条,当前行] - 10 preceding and current row
+ [10条,3条] - 10 preceding and 3 preceding
+
+### 如何推断窗口是什么样的?
+
+首先,先明确是什么执行模式:
+
+离线模式,即批模式,它是对from表的每一行都做一次窗口划分与计算。因此,每一行对应产生一行SQL结果。
+请求模式,会带一条请求行,它会将请求行当做from表的数据,只对该行做窗口划分和计算,因此,只产生一行SQL结果。
+
+再看,如何划分窗口:
+
+我们可以将批模式看作多次请求模式。所以,对一次请求行来说,窗口只可能包含它自己,以及与它的 partition by 列值相等的行(可能的全集)。
+
+partition key相等的所有行,还不是窗口,经由order by列排序后,还需要排除窗口范围以外的数据。比如,10 preceding and current row的条数窗口,就要抛弃10行以外的数据行(第10行包含在窗口内),又因为包括current row,于是窗口一共有11行数据。
+
+* preceding为闭区间,包含该条,开区间使用open preceding
+
+窗口还可以exclude current time,current row等,详情见下文。
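+
+下面给出分别对应上述两类窗口的简单示例(仅为示意,假设表结构与下文示例中的 t1 相同,col1 为分组列,col5 为排序的时间列):
+```sql
+-- 时间窗口:[3小时前, 当前行]
+SELECT sum(col2) OVER w1 AS w1_col2_sum FROM t1
+WINDOW w1 AS (PARTITION BY col1 ORDER BY col5 ROWS_RANGE BETWEEN 3h PRECEDING AND CURRENT ROW);
+
+-- 条数窗口:[10条, 当前行]
+SELECT sum(col2) OVER w2 AS w2_col2_sum FROM t1
+WINDOW w2 AS (PARTITION BY col1 ORDER BY col5 ROWS BETWEEN 10 PRECEDING AND CURRENT ROW);
+```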
+
+
## 边界说明
-| SELECT语句元素 | 状态 | 说明 |
-| :------------- | ---------------------- | :----------------------------------------------------------- |
-| WINDOW Clause | Online Training 不支持 | 窗口子句用于定义一个或者若干个窗口。窗口可以是有名或者匿名的。用户可以在窗口上调用聚合函数来进行一些分析型计算的操作(```sql agg_func() over window_name```)。<br />OpenMLDB目前仅支持历史窗口,不支持未来窗口(即不支持`FOLLOWING`类型的窗口边界)。<br />OpenMLDB的窗口仅支持`PARTITION BY`列,不支持`PARTITION BY`运算或者函数表达式。<br />OpenMLDB的窗口仅支持`ORDER BY`列,不支持`ORDER BY`运算或者函数表达式。<br />在Online Serving时,需要遵循[3.2 Online Serving下Window的使用规范](../deployment_manage/ONLINE_SERVING_REQUIREMENTS.md#online-serving下window的使用规范) |
+| SELECT语句元素 | 离线模式 | 在线预览模式 | 在线请求模式 | 说明 |
+|:----------------| --------- | ------------ | ------------ |:--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
+| WINDOW Clause | **``✓``** | | **``✓``** | 窗口子句用于定义一个或者若干个窗口。窗口可以是有名或者匿名的。用户可以在窗口上调用聚合函数来进行一些分析型计算的操作(```sql agg_func() over window_name```)。在线请求模式下,需要遵循[Online Request下Window的使用规范](../deployment_manage/ONLINE_REQUEST_REQUIREMENTS.md#online-serving下window的使用规范) |
-## 基本的WINDOW SPEC语法元素
+## 基本的 WindowSpec 语法元素
### Window Partition Clause 和 Window OrderBy Clause
```sql
WindowPartitionClause
- ::= ( 'PARTITION' 'BY' ByList )
+ ::= ( 'PARTITION' 'BY' ByList )
WindowOrderByClause
- ::= ( 'ORDER' 'BY' ByList )
+ ::= ( 'ORDER' 'BY' ByList )
```
`PARTITION BY`选项将查询的行分为一组进入*partitions*, 这些行在窗口函数中单独处理。`PARTITION BY`和查询级别`GROUP BY` 子句做相似的工作,除了它的表达式只能作为表达式不能作为输出列的名字或数。OpenMLDB要求必须配置`PARTITION BY`。并且目前**仅支持按列分组**,无法支持按运算和函数表达式分组。
@@ -101,31 +142,33 @@ WindowOrderByClause
```sql
WindowFrameUnits
- ::= 'ROWS'
- | 'ROWS_RANGE'
+ ::= 'ROWS'
+ | 'ROWS_RANGE'
```
-WindowFrameUnits定义了窗口的框架类型。OpenMLDB支持两类窗口框架:ROWS和ROWS_RANGE
+WindowFrameUnits定义了窗口的框架类型。OpenMLDB支持两类窗口框架:ROWS和ROWS_RANGE。
SQL标准的RANGE类窗口OpenMLDB系统目前暂不支持。他们直接的对比差异如下图所示
![Figure 1: window frame type](../dql/images/window_frame_type.png)
-- ROWS: 窗口按行划入窗口,根据条数滑出窗口
-- ROWS_RANGE:窗口按行划入窗口,根据时间区间滑出窗口
+- ROWS: 窗口按行划入窗口,根据**条数**滑出窗口
+- ROWS_RANGE:窗口按行划入窗口,根据**时间区间**滑出窗口
- RANGE: 窗口按时间粒度划入窗口(一次可能滑入多条同一时刻的数据行),按时间区间滑出窗口
### Window Frame Extent
```sql
WindowFrameExtent
- ::= WindowFrameStart
- | WindowFrameBetween
+ ::= WindowFrameStart
+ | WindowFrameBetween
+
WindowFrameBetween
- ::= 'BETWEEN' WindowFrameBound 'AND' WindowFrameBound
+ ::= 'BETWEEN' WindowFrameBound 'AND' WindowFrameBound
+
WindowFrameBound
- ::= ( 'UNBOUNDED' | NumLiteral | IntervalLiteral ) ['OPEN'] 'PRECEDING'
- | 'CURRENT' 'ROW'
+ ::= ( 'UNBOUNDED' | NumLiteral | IntervalLiteral ) ['OPEN'] 'PRECEDING'
+ | 'CURRENT' 'ROW'
```
**WindowFrameExtent**定义了窗口的上界和下界。框架类型可以用 `ROWS`或`ROWS_RANGE`声明;
@@ -135,17 +178,19 @@ WindowFrameBound
- `expr` PRECEDING
- 窗口类型为ROWS时,`expr`必须为一个正整数。它表示边界为当前行往前`expr`行。
- 窗口类型为ROWS_RANGE时,`expr`一般为时间区间(例如`10s`, `10m`,`10h`, `10d`),它表示边界为当前行往前移expr时间段(例如,10秒,10分钟,10小时,10天)
+ - 也可以写成正整数,单位为 ms, 例如 `1000` 等价于 `1s`
- OpenMLDB支持默认边界是闭合的。但支持OPEN关键字来修饰边界开区间
- 请注意:标准SQL中,还支持FOLLOWING的边界,当OpenMLDB并不支持。
-#### **Example: 有名窗口(Named Window)**
+#### Example
+- **有名窗口(Named Window)**
```SQL
SELECT sum(col2) OVER w1 as w1_col2_sum FROM t1
WINDOW w1 AS (PARTITION BY col1 ORDER BY col5 ROWS BETWEEN 3 PRECEDING AND CURRENT ROW)
```
-#### **Example: 匿名窗口**
+- **匿名窗口**
```SQL
SELECT id, pk1, col1, std_ts,
@@ -153,33 +198,32 @@ sum(col1) OVER (PARTITION BY pk1 ORDER BY std_ts ROWS BETWEEN 1 PRECEDING AND CU
from t1;
```
-#### **Example: ROWS窗口**
+- **ROWS 类型窗口**
+定义一个ROWS 类型窗口, 窗口范围是前1000行到当前行。
```SQL
--- ROWS example
--- desc: window ROWS, 前1000条到当前条
SELECT sum(col2) OVER w1 as w1_col2_sum FROM t1
WINDOW w1 AS (PARTITION BY col1 ORDER BY col5 ROWS BETWEEN 1000 PRECEDING AND CURRENT ROW);
```
-#### **Example: ROWS RANGE窗口**
+- **ROWS_RANGE 类型窗口**
+定义一个ROWS_RANGE类型窗口,窗口范围是当前行前10s的所有行,以及当前行。
```SQL
--- ROWS example
--- desc: window ROWS_RANGE, 前10s到当前条
SELECT sum(col2) OVER w1 as w1_col2_sum FROM t1
WINDOW w1 AS (PARTITION BY col1 ORDER BY col5 ROWS_RANGE BETWEEN 10s PRECEDING AND CURRENT ROW);
```
-## OpenMLDB特有的WINDOW SPEC元素
+## OpenMLDB特有的 WindowSpec 元素
-### Window With Union
+### 1. WINDOW ... UNION
```sql
WindowUnionClause
- :: = ( 'UNION' TableRefs)
+ :: = ( 'UNION' TableRefs)
```
-#### **Example: Window with union 一张副表**
+#### Example
+- **基于一张副表的 WINDOW ... UNION**
```SQL
SELECT col1, col5, sum(col2) OVER w1 as w1_col2_sum FROM t1
@@ -188,7 +232,7 @@ WINDOW w1 AS (UNION t2 PARTITION BY col1 ORDER BY col5 ROWS_RANGE BETWEEN 10s PR
![Figure 2: window union one table](../dql/images/window_union_1_table.png)
-#### **Example: Window with union 多张副表**
+- **基于多张副表的 WINDOW ... UNION**
```SQL
SELECT col1, col5, sum(col2) OVER w1 as w1_col2_sum FROM t1
@@ -197,7 +241,9 @@ WINDOW w1 AS (UNION t2, t3 PARTITION BY col1 ORDER BY col5 ROWS_RANGE BETWEEN 10
![Figure 3: window union two tables](../dql/images/window_union_2_table.png)
-#### **Example: Window with union 样本表不进入窗口**
+- **带有 INSTANCE_NOT_IN_WINDOW 的 WINDOW ... UNION**
+
+使用 `INSTANCE_NOT_IN_WINDOW` 修饰 window, 样本表除当前行外其他行不进入窗口计算。
```SQL
SELECT col1, col5, sum(col2) OVER w1 as w1_col2_sum FROM t1
@@ -206,7 +252,7 @@ WINDOW w1 AS (UNION t2 PARTITION BY col1 ORDER BY col5 ROWS_RANGE BETWEEN 10s PR
![Figure 4: window union one table with instance_not_in_window](../dql/images/window_union_1_table_instance_not_in_window.png)
-#### **Example: Window with union 列筛选子查询**
+- **带有列筛选子查询的 WINDOW ... UNION**
```SQL
SELECT col1, col5, sum(col2) OVER w1 as w1_col2_sum FROM t1
@@ -216,49 +262,71 @@ WINDOW w1 AS
PARTITION BY col1 ORDER BY col5 ROWS_RANGE BETWEEN 10s PRECEDING AND CURRENT ROW);
```
-### Window Exclude Current Time
+### 2. WINDOW with EXCLUDE CURRENT_TIME
+
+窗口计算时,与当前行的 `ts` 列值相同的其他行不进入窗口计算(当前行本身除外)。
```
WindowExcludeCurrentTime
- ::= 'EXCLUDE' 'CURRENT_TIME'
+ ::= 'EXCLUDE' 'CURRENT_TIME'
```
+#### Example
+- **ROWS 类型窗口,带有 EXCLUDE CURRENT_TIME**
-#### **Example: ROWS窗口EXCLUDE CURRENT TIME**
+定义一个ROWS 类型窗口,窗口范围是前1000行到当前行。 除了当前行以外窗口内不包含当前时刻的其他数据。
```SQL
--- ROWS example
--- desc: window ROWS, 前1000条到当前条, 除了current row以外窗口内不包含当前时刻的其他数据
SELECT sum(col2) OVER w1 as w1_col2_sum FROM t1
WINDOW w1 AS (PARTITION BY col1 ORDER BY col5 ROWS BETWEEN 1000 PRECEDING AND CURRENT ROW EXCLUDE CURRENT_TIME);
```
-#### **Example: ROW RANGE窗口EXCLUDE CURRENT TIME**
+- **ROWS_RANGE 类型窗口,带有 EXCLUDE CURRENT_TIME**
+
+定义一个ROWS_RANGE 类型窗口,窗口范围是当前行前10s的所有行,以及当前行。除了当前行以外窗口内不包含当前时刻的其他数据。
```SQL
--- ROWS example
--- desc: window ROWS, 前10s到当前条,除了current row以外窗口内不包含当前时刻的其他数据
SELECT sum(col2) OVER w1 as w1_col2_sum FROM t1
WINDOW w1 AS (PARTITION BY col1 ORDER BY col5 ROWS_RANGE BETWEEN 10s PRECEDING AND CURRENT ROW EXCLUDE CURRENT_TIME);
```
![Figure 5: window exclude current time](../dql/images/window_exclude_current_time.png)
-### Window Frame Max Size
+### 3. WINDOW with EXCLUDE CURRENT_ROW
+
+当前行不进入窗口计算。
+
+```
+WindowExcludeCurrentRow
+ ::= 'EXCLUDE' 'CURRENT_ROW'
+```
+
+#### Example
+- **ROWS_RANGE 类型窗口,带有 EXCLUDE CURRENT_ROW**
+
+```sql
+SELECT sum(col2) OVER w1 as w1_col2_sum FROM t1
+WINDOW w1 AS (PARTITION BY col1 ORDER BY col5 ROWS_RANGE BETWEEN 10s PRECEDING AND CURRENT ROW EXCLUDE CURRENT_ROW);
+```
+![Figure 6: window exclude current row](../dql/images/window_exclude_current_row.png)
+
+### 4. Window with MAXSIZE
+
+OpenMLDB定义了`MAXSIZE`关键字,来限制有效窗口内最大数据条数。
-OpenMLDB在定义了元素,来限定窗口内条数。具体来说,可以在窗口定义里使用**MAXSIZE**关键字,来限制window内允许的有效窗口内最大数据条数。
+`MAXSIZE` 属性仅可用于 `ROWS_RANGE` 类型窗口。
```sql
WindowFrameMaxSize
- :: = MAXSIZE NumLiteral
+ :: = MAXSIZE NumLiteral
```
-![Figure 6: window config max size](../dql/images/window_max_size.png)
+![Figure 7: window config max size](../dql/images/window_max_size.png)
-#### **Example: ROW RANGE 窗口MAXSIZE**
+#### Example
+- **ROWS_RANGE 类型窗口,带有 MAXSIZE 限制**
+定义一个 ROWS_RANGE 类型窗口,窗口范围是当前行前10s的所有行,以及当前行。同时限制窗口内数据条数不超过3条。
```sql
--- ROWS example
--- desc: window ROWS_RANGE, 前10s到当前条,同时限制窗口条数不超过3条
SELECT sum(col2) OVER w1 as w1_col2_sum FROM t1
WINDOW w1 AS (PARTITION BY col1 ORDER BY col5 ROWS_RANGE BETWEEN 10s PRECEDING AND CURRENT ROW MAXSIZE 3);
```
diff --git a/docs/zh/reference/sql/dql/images/dql_images.pptx b/docs/zh/reference/sql/dql/images/dql_images.pptx
new file mode 100644
index 00000000000..17e4a0c8dae
Binary files /dev/null and b/docs/zh/reference/sql/dql/images/dql_images.pptx differ
diff --git a/docs/zh/reference/sql/dql/images/window_exclude_current_row.png b/docs/zh/reference/sql/dql/images/window_exclude_current_row.png
new file mode 100644
index 00000000000..0d6b5c8cab4
Binary files /dev/null and b/docs/zh/reference/sql/dql/images/window_exclude_current_row.png differ
diff --git a/docs/zh/reference/sql/dql/images/window_exclude_current_time.png b/docs/zh/reference/sql/dql/images/window_exclude_current_time.png
index a58a0a54fd6..df6f10809e9 100644
Binary files a/docs/zh/reference/sql/dql/images/window_exclude_current_time.png and b/docs/zh/reference/sql/dql/images/window_exclude_current_time.png differ
diff --git a/docs/zh/reference/sql/dql/images/window_max_size.png b/docs/zh/reference/sql/dql/images/window_max_size.png
index e15562ddf23..51af41f010b 100644
Binary files a/docs/zh/reference/sql/dql/images/window_max_size.png and b/docs/zh/reference/sql/dql/images/window_max_size.png differ
diff --git a/docs/zh/reference/sql/dql/images/window_union_1_table.png b/docs/zh/reference/sql/dql/images/window_union_1_table.png
index ff223682eaf..7fcb9de0522 100644
Binary files a/docs/zh/reference/sql/dql/images/window_union_1_table.png and b/docs/zh/reference/sql/dql/images/window_union_1_table.png differ
diff --git a/docs/zh/reference/sql/dql/images/window_union_1_table_instance_not_in_window.png b/docs/zh/reference/sql/dql/images/window_union_1_table_instance_not_in_window.png
index 9e7d0d7aaf4..546d02bee9a 100644
Binary files a/docs/zh/reference/sql/dql/images/window_union_1_table_instance_not_in_window.png and b/docs/zh/reference/sql/dql/images/window_union_1_table_instance_not_in_window.png differ
diff --git a/docs/zh/reference/sql/dql/images/window_union_2_table.png b/docs/zh/reference/sql/dql/images/window_union_2_table.png
index fd273b563fa..bfd46944e06 100644
Binary files a/docs/zh/reference/sql/dql/images/window_union_2_table.png and b/docs/zh/reference/sql/dql/images/window_union_2_table.png differ
diff --git a/docs/zh/reference/sql/functions_and_operators/Files/udfs_8h.md b/docs/zh/reference/sql/functions_and_operators/Files/udfs_8h.md
index 68656a9ee66..b25bef8d20b 100644
--- a/docs/zh/reference/sql/functions_and_operators/Files/udfs_8h.md
+++ b/docs/zh/reference/sql/functions_and_operators/Files/udfs_8h.md
@@ -151,6 +151,10 @@ Returns value evaluated at the row that is offset rows before the current row wi
* **offset** The number of rows forwarded from the current row, must not be negative
+Note: This function is equivalent to the `[at()](/reference/sql/functions_and_operators/Files/udfs_8h.md#function-at)` function.
+
+The function that returns the value at a fixed offset within the window is `nth_value()`, not `[lag()](/reference/sql/functions_and_operators/Files/udfs_8h.md#function-lag)`/`at()`. The old `[at()](/reference/sql/functions_and_operators/Files/udfs_8h.md#function-at)` (version < 0.5.0) started from the last row of the window (which may not be the current row), so it behaved more like `nth_value()`.
+
Example:
@@ -166,7 +170,16 @@ Example:
```sql
-SELECT at(c1, 1) as co OVER w from t1 window (order by c1 partition by c2);
+SELECT lag(c1, 1) over w as co from t1 window w as(partition by c2 order by c1 rows between unbounded preceding and current row);
+-- output
+-- | co |
+-- |----|
+-- |NULL|
+-- |0 |
+-- |NULL|
+-- |2 |
+-- |3 |
+SELECT at(c1, 1) over w as co from t1 window w as(partition by c2 order by c1 rows between unbounded preceding and current row);
-- output
-- | co |
-- |----|
@@ -209,7 +222,7 @@ Example:
```sql
-SELECT ATAN(-0.0);
+SELECT ATAN(-0.0);
-- output -0.000000
SELECT ATAN(0, -0);
@@ -727,7 +740,7 @@ Example:
```sql
-SELECT COT(1);
+SELECT COT(1);
-- output 0.6420926159343306
```
@@ -990,7 +1003,9 @@ Return the day of the month for a timestamp or date.
Note: This function equals the `[day()](/reference/sql/functions_and_operators/Files/udfs_8h.md#function-day)` function.
-Example: ```sql
+Example:
+
+```sql
select dayofmonth(timestamp(1590115420000));
-- output 22
@@ -1022,7 +1037,9 @@ Return the day of the month for a timestamp or date.
Note: This function equals the `[day()](/reference/sql/functions_and_operators/Files/udfs_8h.md#function-day)` function.
-Example: ```sql
+Example:
+
+```sql
select dayofmonth(timestamp(1590115420000));
-- output 22
@@ -1054,7 +1071,9 @@ Return the day of week for a timestamp or date.
Note: This function equals the `[week()](/reference/sql/functions_and_operators/Files/udfs_8h.md#function-week)` function.
-Example: ```sql
+Example:
+
+```sql
select dayofweek(timestamp(1590115420000));
-- output 6
@@ -1081,7 +1100,9 @@ Return the day of year for a timestamp or date. Returns 0 given an invalid date.
0.1.0
-Example: ```sql
+Example:
+
+```sql
select dayofyear(timestamp(1590115420000));
-- output 143
@@ -1230,7 +1251,7 @@ Return the value of e (the base of natural logarithms) raised to the power of ex
```sql
-SELECT EXP(0);
+SELECT EXP(0);
-- output 1
```
@@ -1504,6 +1525,38 @@ Used by feature zero, for each string value from specified column of window, spl
* [`list`, `list`, `list`]
+### function hex
+
+```cpp
+hex()
+```
+
+**Description**:
+
+Convert a number to its hexadecimal representation. If the input is a double, it is rounded before conversion.
+
+**Since**:
+0.6.0
+
+
+Example:
+
+```sql
+
+select hex(17);
+--output "11"
+select hex(17.4);
+--output "11"
+select hex(17.5);
+--output "12"
+```
+
+
+**Supported Types**:
+
+* [`number`]
+* [`string`]
+
### function hour
```cpp
@@ -1518,7 +1571,9 @@ Return the hour for a timestamp.
0.1.0
-Example: ```sql
+Example:
+
+```sql
select hour(timestamp(1590115420000));
-- output 10
@@ -1544,7 +1599,9 @@ Return value.
0.1.0
-Example: ```sql
+Example:
+
+```sql
select identity(1);
-- output 1
@@ -1583,7 +1640,7 @@ Example:
```sql
-SELECT if_null("hello", "default"), if_null(NULL, "default");
+SELECT if_null("hello", "default"), if_null(cast(null as string), "default");
-- output ["hello", "default"]
```
@@ -1624,7 +1681,7 @@ Example:
```sql
-SELECT if_null("hello", "default"), if_null(NULL, "default");
+SELECT if_null("hello", "default"), if_null(cast(null as string), "default");
-- output ["hello", "default"]
```
@@ -1674,7 +1731,9 @@ Rules:
3. case insensitive
4. backslash: sql string literal use backslash() for escape sequences, write '\' as backslash itself
5. if one or more of target, pattern and escape are null values, then the result is null
-Example: ```sql
+Example:
+
+```sql
select ilike_match('Mike', 'mi_e', '\\')
-- output: true
@@ -1712,7 +1771,9 @@ Return expression + 1.
0.1.0
-Example: ```sql
+Example:
+
+```sql
select inc(1);
-- output 2
@@ -1875,6 +1936,10 @@ Returns value evaluated at the row that is offset rows before the current row wi
* **offset** The number of rows forwarded from the current row, must not be negative
+Note: This function is equivalent to the `[at()](/reference/sql/functions_and_operators/Files/udfs_8h.md#function-at)` function.
+
+The function that returns the value at a fixed offset within the window is `nth_value()`, not `[lag()](/reference/sql/functions_and_operators/Files/udfs_8h.md#function-lag)`/`at()`. The old `[at()](/reference/sql/functions_and_operators/Files/udfs_8h.md#function-at)` (version < 0.5.0) started from the last row of the window (which may not be the current row), so it behaved more like `nth_value()`.
+
Example:
@@ -1890,7 +1955,16 @@ Example:
```sql
-SELECT at(c1, 1) as co OVER w from t1 window (order by c1 partition by c2);
+SELECT lag(c1, 1) over w as co from t1 window w as(partition by c2 order by c1 rows between unbounded preceding and current row);
+-- output
+-- | co |
+-- |----|
+-- |NULL|
+-- |0 |
+-- |NULL|
+-- |2 |
+-- |3 |
+SELECT at(c1, 1) over w as co from t1 window w as(partition by c2 order by c1 rows between unbounded preceding and current row);
-- output
-- | co |
-- |----|
@@ -1909,6 +1983,39 @@ SELECT at(c1, 1) as co OVER w from t1 window (order by c1 partition by c2);
* [`list`, `int64`]
* [`list`, `int64`]
+### function last_day
+
+```cpp
+last_day()
+```
+
+**Description**:
+
+Return the last day of the month to which the date belongs.
+
+**Since**:
+0.6.1
+
+
+Example:
+
+```sql
+
+select last_day(timestamp("2020-05-22 10:43:40"));
+-- output 2020-05-31
+select last_day(timestamp("2020-02-12 10:43:40"));
+-- output 2020-02-29
+select last_day(timestamp("2021-02-12"));
+-- output 2021-02-28
+```
+
+
+**Supported Types**:
+
+* [`date`]
+* [`int64`]
+* [`timestamp`]
+
### function lcase
```cpp
@@ -1969,7 +2076,9 @@ Rules:
3. case sensitive
4. backslash: sql string literal use backslash() for escape sequences, write '\' as backslash itself
5. if one or more of target, pattern and escape are null values, then the result is null
-Example: ```sql
+Example:
+
+```sql
select like_match('Mike', 'Mi_e', '\\')
-- output: true
@@ -2016,7 +2125,7 @@ Example:
```sql
-SELECT LN(1);
+SELECT LN(1);
-- output 0.000000
```
@@ -2050,7 +2159,7 @@ Example:
```sql
-SELECT LOG(1);
+SELECT LOG(1);
-- output 0.000000
SELECT LOG(10,100);
@@ -2096,7 +2205,7 @@ Example:
```sql
-SELECT LOG10(100);
+SELECT LOG10(100);
-- output 2
```
@@ -2129,7 +2238,7 @@ Example:
```sql
-SELECT LOG2(65536);
+SELECT LOG2(65536);
-- output 16
```
@@ -2385,6 +2494,48 @@ Compute maximum of two arguments.
* [`string`, `string`]
* [`timestamp`, `timestamp`]
+### function median
+
+```cpp
+median()
+```
+
+**Description**:
+
+Compute the median of values.
+
+**Parameters**:
+
+ * **value** Specify value column to aggregate on.
+
+
+**Since**:
+0.6.0
+
+
+
+Example:
+
+
+| value |
+| -------- |
+| 1 |
+| 2 |
+| 3 |
+| 4 |
+
+
+```sql
+
+SELECT median(value) OVER w;
+-- output 2.5
+```
+
+
+**Supported Types**:
+
+* [`list`]
+
### function min
```cpp
@@ -2606,7 +2757,9 @@ Return the minute for a timestamp.
0.1.0
-Example: ```sql
+Example:
+
+```sql
select minute(timestamp(1590115420000));
-- output 43
@@ -2632,7 +2785,9 @@ Return the month part of a timestamp or date.
0.1.0
-Example: ```sql
+Example:
+
+```sql
select month(timestamp(1590115420000));
-- output 5
@@ -2669,7 +2824,7 @@ Example:
```sql
-SELECT if_null("hello", "default"), if_null(NULL, "default");
+SELECT if_null("hello", "default"), if_null(cast(null as string), "default");
-- output ["hello", "default"]
```
@@ -2863,6 +3018,66 @@ SELECT RADIANS(90);
* [`double`]
+### function regexp_like
+
+```cpp
+regexp_like()
+```
+
+**Description**:
+
+Pattern matching, same as the RLIKE predicate (based on RE2).
+
+**Parameters**:
+
+ * **target** string to match
+ * **pattern** the regular expression match pattern
+ * **flags** specifies the matching behavior of the regular expression function. 'c': case-sensitive matching(default); 'i': case-insensitive matching; 'm': multi-line mode; 'e': Extracts sub-matches(ignored here); 's': Enables the POSIX wildcard character . to match new line.
+
+
+**Since**:
+0.6.1
+
+
+Rules:
+
+1. Accept standard POSIX (egrep) syntax regular expressions
+ * dot (.) : matches any single-width ASCII character in an expression, with the exception of line break characters.
+ * asterisk (*) : matches the preceding token zero or more times.
+ * plus sign (+) : matches the preceding token one or more times.
+ * question mark (?) : identifies the preceding character as being optional.
+ * vertical bar (|) : separates tokens, one of which must be matched, much like a logical OR statement.
+ * parenthesis ('(' and ')') : groups multiple tokens together to disambiguate or simplify references to them.
+ * open square bracket ([) and close square bracket (]) : enclose specific characters or a range of characters to be matched. The characters enclosed inside square brackets are known as a character class.
+ * caret (^) : the caret has two different meanings in a regular expression, depending on where it appears: As the first character in a character class, a caret negates the characters in that character class. As the first character in a regular expression, a caret identifies the beginning of a term. In this context, the caret is often referred to as an anchor character.
+ * dollar sign ($) : as the last character in a regular expression, a dollar sign identifies the end of a term. In this context, the dollar sign is often referred to as an anchor character.
+ * backslash () : used to invoke the actual character value for a metacharacter in a regular expression.
+2. Default flags parameter: 'c'
+3. backslash: sql string literal use backslash() for escape sequences, write '\' as backslash itself
+4. if one or more of target, pattern and flags are null values, then the result is null
+Example:
+
+```sql
+
+select regexp_like('Mike', 'Mi.k')
+-- output: true
+
+select regexp_like('Mi\nke', 'mi.k')
+-- output: false
+
+select regexp_like('Mi\nke', 'mi.k', 'si')
+-- output: true
+
+select regexp_like('append', 'ap*end')
+-- output: true
+```
+
+
+**Supported Types**:
+
+* [`string`, `string`]
+* [`string`, `string`, `string`]
+
### function replace
```cpp
@@ -2967,7 +3182,9 @@ Return the second for a timestamp.
0.1.0
-Example: ```sql
+Example:
+
+```sql
select second(timestamp(1590115420000));
-- output 40
@@ -3866,7 +4083,9 @@ Return the week of year for a timestamp or date.
0.1.0
-Example: ```sql
+Example:
+
+```sql
select weekofyear(timestamp(1590115420000));
-- output 21
@@ -3895,7 +4114,9 @@ Return the week of year for a timestamp or date.
0.1.0
-Example: ```sql
+Example:
+
+```sql
select weekofyear(timestamp(1590115420000));
-- output 21
@@ -3924,7 +4145,9 @@ Return the year part of a timestamp or date.
0.1.0
-Example: ```sql
+Example:
+
+```sql
select year(timestamp(1590115420000));
-- output 2020
diff --git a/docs/zh/reference/sql/task_manage/SHOW_JOB.md b/docs/zh/reference/sql/task_manage/SHOW_JOB.md
index 57bfeb14647..5c7de4c39f8 100644
--- a/docs/zh/reference/sql/task_manage/SHOW_JOB.md
+++ b/docs/zh/reference/sql/task_manage/SHOW_JOB.md
@@ -1,34 +1,38 @@
# SHOW JOB
+`SHOW JOB`语句根据给定的JOB ID显示已经提交的单个任务详情。
+
```SQL
-SHOW JOB;
+SHOW JOB job_id;
```
-`SHOW JOB`语句显示已经提交的单个任务详情。
+
## Example
提交在线数据导入任务:
+```sql
+LOAD DATA INFILE 'file:///tmp/test.csv' INTO TABLE demo_db.t1 options(format='csv', header=false, mode='append');
```
-LOAD DATA INFIEL 'file:///tmp/test.csv' INTO TABLE demo_db.t1 options(format='csv', header=false, mode='append');
-
+输出如下。可以看到该任务的JOB ID为1。
+```sql
---- ------------------ ----------- ------------ --------------- ---------------------------------------------------------------------------------------------------------------------------- --------- ---------------- -------
id job_type state start_time end_time parameter cluster application_id error
---- ------------------ ----------- ------------ --------------- ---------------------------------------------------------------------------------------------------------------------------- --------- ---------------- -------
- 1 ImportOnlineData Submitted 0 1641981373227 LOAD DATA INFIEL 'file:///tmp/test.csv' INTO TABLE demo_db.t1 options(format='csv', header=false, mode='append'); local
+ 1 ImportOnlineData Submitted 0 1641981373227 LOAD DATA INFILE 'file:///tmp/test.csv' INTO TABLE demo_db.t1 options(format='csv', header=false, mode='append'); local
---- ------------------ ----------- ------------ --------------- ---------------------------------------------------------------------------------------------------------------------------- --------- ---------------- -------
```
查看Job ID为1的任务:
-```
+```sql
SHOW JOB 1;
---- ------------------ ----------- ------------ --------------- ---------------------------------------------------------------------------------------------------------------------------- --------- ---------------- -------
id job_type state start_time end_time parameter cluster application_id error
---- ------------------ ----------- ------------ --------------- ---------------------------------------------------------------------------------------------------------------------------- --------- ---------------- -------
- 1 ImportOnlineData Submitted 0 1641981373227 LOAD DATA INFIEL 'file:///tmp/test.csv' INTO TABLE demo_db.t1 options(format='csv', header=false, mode='append'); local
+ 1 ImportOnlineData Submitted 0 1641981373227 LOAD DATA INFILE 'file:///tmp/test.csv' INTO TABLE demo_db.t1 options(format='csv', header=false, mode='append'); local
---- ------------------ ----------- ------------ --------------- ---------------------------------------------------------------------------------------------------------------------------- --------- ---------------- -------
```
diff --git a/docs/zh/reference/sql/task_manage/SHOW_JOBS.md b/docs/zh/reference/sql/task_manage/SHOW_JOBS.md
index 6619006f6c9..4fbf46c7391 100644
--- a/docs/zh/reference/sql/task_manage/SHOW_JOBS.md
+++ b/docs/zh/reference/sql/task_manage/SHOW_JOBS.md
@@ -1,16 +1,16 @@
# SHOW JOBS
+`SHOW JOBS`语句用于显示在集群版下已经提交的任务列表。
+
```SQL
SHOW JOBS;
```
-`SHOW JOBS`语句显示已经提交的任务列表。
-
## Example
查看当前所有的任务:
-```
+```sql
SHOW JOBS;
---- ---------- ------- ------------ ---------- ----------- --------- ---------------- -------
@@ -20,25 +20,25 @@ SHOW JOBS;
提交在线数据导入任务:
-```
-LOAD DATA INFIEL 'file:///tmp/test.csv' INTO TABLE demo_db.t1 options(format='csv', header=false, mode='append');
+```sql
+LOAD DATA INFILE 'file:///tmp/test.csv' INTO TABLE demo_db.t1 options(format='csv', header=false, mode='append');
---- ------------------ ----------- ------------ --------------- ---------------------------------------------------------------------------------------------------------------------------- --------- ---------------- -------
id job_type state start_time end_time parameter cluster application_id error
---- ------------------ ----------- ------------ --------------- ---------------------------------------------------------------------------------------------------------------------------- --------- ---------------- -------
- 1 ImportOnlineData Submitted 0 1641981373227 LOAD DATA INFIEL 'file:///tmp/test.csv' INTO TABLE demo_db.t1 options(format='csv', header=false, mode='append'); local
+ 1 ImportOnlineData Submitted 0 1641981373227 LOAD DATA INFILE 'file:///tmp/test.csv' INTO TABLE demo_db.t1 options(format='csv', header=false, mode='append'); local
---- ------------------ ----------- ------------ --------------- ---------------------------------------------------------------------------------------------------------------------------- --------- ---------------- -------
```
查看当前所有的任务:
-```
+```sql
SHOW JOBS;
---- ------------------ ----------- ------------ --------------- ---------------------------------------------------------------------------------------------------------------------------- --------- ---------------- -------
id job_type state start_time end_time parameter cluster application_id error
---- ------------------ ----------- ------------ --------------- ---------------------------------------------------------------------------------------------------------------------------- --------- ---------------- -------
- 1 ImportOnlineData Submitted 0 1641981373227 LOAD DATA INFIEL 'file:///tmp/test.csv' INTO TABLE demo_db.t1 options(format='csv', header=false, mode='append'); local
+ 1 ImportOnlineData Submitted 0 1641981373227 LOAD DATA INFILE 'file:///tmp/test.csv' INTO TABLE demo_db.t1 options(format='csv', header=false, mode='append'); local
---- ------------------ ----------- ------------ --------------- ---------------------------------------------------------------------------------------------------------------------------- --------- ---------------- -------
1 row in set
diff --git a/docs/zh/reference/sql/task_manage/STOP_JOB.md b/docs/zh/reference/sql/task_manage/STOP_JOB.md
index 09037ca992e..bcfee44d8eb 100644
--- a/docs/zh/reference/sql/task_manage/STOP_JOB.md
+++ b/docs/zh/reference/sql/task_manage/STOP_JOB.md
@@ -1,32 +1,34 @@
# STOP JOB
+`STOP JOB`语句停止已经提交的单个任务。
+
+
```SQL
-STOP JOB;
+STOP JOB job_id;
```
-`STOP JOB`语句停止已经提交的单个任务。
## Example
提交在线数据导入任务:
-```
-LOAD DATA INFIEL 'file:///tmp/test.csv' INTO TABLE demo_db.t1 options(format='csv', header=false, mode='append');
+```sql
+LOAD DATA INFILE 'file:///tmp/test.csv' INTO TABLE demo_db.t1 options(format='csv', header=false, mode='append');
---- ------------------ ----------- ------------ --------------- ---------------------------------------------------------------------------------------------------------------------------- --------- ---------------- -------
- 1 ImportOnlineData Submitted 0 1641981373227 LOAD DATA INFIEL 'file:///tmp/test.csv' INTO TABLE demo_db.t1 options(format='csv', header=false, mode='append'); local
+ 1 ImportOnlineData Submitted 0 1641981373227 LOAD DATA INFILE 'file:///tmp/test.csv' INTO TABLE demo_db.t1 options(format='csv', header=false, mode='append'); local
---- ------------------ ----------- ------------ --------------- ---------------------------------------------------------------------------------------------------------------------------- --------- ---------------- -------
```
停止Job ID为1的任务:
-```
+```sql
STOP JOB 1;
---- ------------------ ----------- ------------ --------------- ---------------------------------------------------------------------------------------------------------------------------- --------- ---------------- -------
id job_type state start_time end_time parameter cluster application_id error
---- ------------------ ----------- ------------ --------------- ---------------------------------------------------------------------------------------------------------------------------- --------- ---------------- -------
- 1 ImportOnlineData STOPPED 0 1641981373227 LOAD DATA INFIEL 'file:///tmp/test.csv' INTO TABLE demo_db.t1 options(format='csv', header=false, mode='append'); local
+ 1 ImportOnlineData STOPPED 0 1641981373227 LOAD DATA INFILE 'file:///tmp/test.csv' INTO TABLE demo_db.t1 options(format='csv', header=false, mode='append'); local
---- ------------------ ----------- ------------ --------------- ---------------------------------------------------------------------------------------------------------------------------- --------- ---------------- -------
```
diff --git a/docs/zh/tutorial/data_import.md b/docs/zh/tutorial/data_import.md
index 8d574bc2546..89fa57a27ad 100644
--- a/docs/zh/tutorial/data_import.md
+++ b/docs/zh/tutorial/data_import.md
@@ -11,9 +11,11 @@
```bash
> cd java/openmldb-import
-> mvn package
+> mvn package
```
+如需跳过测试,可使用 `mvn package -Dmaven.test.skip=true`。
+
## 2. 导数工具使用
### 2.1 命令参数
@@ -21,7 +23,7 @@
--help可以展示出所有的配置项,星号表示必填项。
```bash
-> java -jar openmldb-import.jar --help
+> java -jar openmldb-import-1.0-SNAPSHOT.jar --help
```
```
@@ -32,8 +34,8 @@ Usage: Data Importer [-fhV] [--create_ddl=] --db=
[,...] [--files=[,...]]...
insert/bulk load data(csv) to openmldb
--create_ddl=
- if force_recreate_table is true, provide the create
- table sql
+ if table is not exists or force_recreate_table is
+ true, provide the create table sql
* --db= openmldb database
-f, --force_recreate_table
if true, we will drop the table first
@@ -60,12 +62,13 @@ insert/bulk load data(csv) to openmldb
重要配置的项目说明:
+- `--importer_mode=`: 导入模式,支持insert和bulkload两种方式。默认配置为bulkload.
+
+- `--zk_cluster=`和`--zk_root_path=`: 集群版OpenMLDB的ZK地址和路径。
- `--db=`: 库名。库名可以是不存在的,importer可以帮助创建。
-- `--table=`: 表名。表名可以是不存在的,importer可以帮助创建。但请注意,如果导入到已存在的表,需要表内数据为空,否则将会极大影响导入效率。
-- `--files=[,...]`: 导入源文件。目前源文件只支持csv格式的本地文件,并且csv文件必须有header,文件的列名和表的列名必须一致,顺序可以不一样。
-- `--zk_root_path=`和`--zk_cluster=`: 集群版OpenMLDB的ZK地址和路径
+- `--table=`: 表名。表名可以是不存在的,importer可以帮助创建,需配置`--create_ddl`。但请注意,如果导入到已存在的表,需要表内数据为空,否则将会极大影响导入效率。
-- `--importer_mode=`: 导入模式,支持insert和bulkload两种方式。默认配置为bulkload.
+- `--files=[,...]`: 导入源文件。目前源文件只支持csv格式的本地文件,并且csv文件必须有header,文件的列名和表的列名必须一致,顺序可以不一样。
## 3. 大规模的数据导入
diff --git a/docs/zh/tutorial/images/openmldb_sql_tutorial_1.drawio b/docs/zh/tutorial/images/openmldb_sql_tutorial_1.drawio
new file mode 100644
index 00000000000..2a2a602ecaa
--- /dev/null
+++ b/docs/zh/tutorial/images/openmldb_sql_tutorial_1.drawio
@@ -0,0 +1 @@
+7Z3tk6I2GMD/Gj7uDpH3j+h6d+3cTjvddnrtl5ucsMoVwcHsud5f34AB0Qdc8CW6STo7PYgxIvk9D+FnAM0YzV8/Zngxe0yDMNYGevCqGQ/aYIBMw6X/5CVrVuLom4JpFgWbolrBU/QzZPXK0pcoCJc7FUmaxiRasEK0KZykSRJOyE4ZzrJ0tVvtOY2DnYIFnoY7recFTxMch6Da31FAZptSd+Bsyz+F0XRWfjKyvc0rc1xWZg0vZzhIV7UiY6wZoyxNyWZp/joK43znlfsl+O375+8vKPi++nf65+vkF//Z0+82jX3o85bqK2RhQs7btLFp+geOX9j+0saO5pma72pjW3NdbehoY1Mb+ppv5iWeq3mGNnY1H9E6hG3akqzL/U330iJfJPhbXjRcEpwRhoWh0wLa0QRHSZjRAlSsxzFeLKOi+kNRYxbFwWe8Tl9I2VC5Nnym735in5a/G8fRNKHLE7pr8iaHMf4WxkM8+W+apS9JMErjNCu2zPhQ/EerdNyd7Lv9CDMSvtZgYrv3Y5jOQ5KtaRX2ahkZLFJsna2vttzZJiub1Zgzbft+YDHkGe7TqvVtl9IF1qs9etiEPXywy/7IER/O0iz6mXdUzPqk3o3F+iqaxzihsYODvaJhWuSKvIikC7YUh8+ELX5LCUnnbCVju0FvJCHI0sWfOJuGZZXnKI7LLk3SJAdskUYJKfaaNaR/dP+O9HtLs+g3G9F1tF2nf3n1jIzSZEkyCmHebIiXZBUuyVnZMTqzw2AxurKiX4gTC3BCq7eRQr8/iXD8B03YOJkWcT4j85jF5GoWkfBpgSd51RU9rGyiPk/veNu3DV3ZD5eU7t3nuMjIsygIwuSsPWh27sFalzk9e4w1tt2PvVvDMU17CSY06uj3XQIMqu08ngwbkPGi0DjqwCA6KQMPoDJXqPRCBZmSsOIAVOgROVl+JdE8VMT0IKY606r1MRqcFZlOzfHIL3CQsoEGz4lipgczRsNw0z0rMl1a40HMoC3NrBcqzfRBxmxIM0Iig+AgZpGlP6JkooDpBYwnCzDwBGkSkbWCpQcsliynSC5gpZUTpeOup+MacOSr4+BBqF21S5BQ3M4dKHoCKYdhNTL+WoaZr+hQQg7mPQjLo8ojysc15RUEUBnog8Gdnv/Rcpp3DF9n/1MEKT/XZFvgmRDS7xUuSs01wQLnnoz8p08KFiXlGmCBAxmFitJxjQMZ+Mvi04xu7gxHChil5BqAgb8RtYKinNz1nBxqONjxlXIIDlkMmXNKFTlKy6GmebZ2nIfMcoETujzNl5mq27xAP6j+miJJKbyGNAm9zGP78UmhIrXDg/OjmpJQ5fWM3OuhutdTielk2mTyffBUayC57+vPizzCDw6Rfv/tScGihF8TLHDYI7fwO4IVaYxf06QqOOzZWkA1yjkZLmnsoKFk4E3KQOvaMtCAMtCUOYcY3XOI8DlDyUBOJMkmA6HgeZT6F4j+qEgjAw04GeugDHSUDDw/bRLJQAPKQGTJLQP78yKNDDTgObvkMrA/LNLIQANeDSW3DDyCFVlkYPmRSgbyg0saGWi+cb9DJQOvIwMbkhtfGWiWMbPNOja3HFLs4gfPtnklkmmGg4j24R5M1Q1SC6D/C8lkxr5Tx44vw0spxDuz4c6st6oQufN3YbYkk4omtNUcLxIWFB5pNKPZc85hoRmN29GMgvInkXg0oejmOgtRUIKkUZEmFNdcL0QWlB9p7KQJVfbwV0XPifRI4yuh2x6G0a9RMlUInYaQNFbSah/rKCt5PSt5A8/0gJPoXZVVmkJHGcc7CypsZRw5sSWZcbSg3OZ4TbOg8EhjHK1us6sr4+hqtzWxUVD+JDKOFnTeljKOJxMkjXG0uk3N3lhIlawugJo0ctKCclvJyZPpkUVOWg1qW8nJcyAkj5z0WlFRcvKKcvLqTzix4UTtdlTkzCpe514VPYvY0GQrOcmJLcnkpA09OMdrrAWFRxo5aXebt70rJwdKTl6YP4nkpA31ONKVnTwZIWnspA3tNs9LswXFRxrjaEO5rYzjyfTIYhxt6Kubxk+VhVSjpQvQJo2ctNunwyk5eT05aVz9SS82nJU94JlVnLFvj/tDwzGr2OqRzOWuKDejg5wc3sARSyi2JJOTTsPTYLjKSRHhkUZOOlBtH5STSGMzJ42bkZMi8ieRnHSgHuf6hGhBCZLGTTrXfYaMoPhI4yYdqLb5XugvJD2yuEkHmm3uT5YWlCFpjKPjtKKijOMVjePVHyfjwKnWlsoqTaGjjOOdA/W0Mo6c2JLMOJZjr6vdHVJEeKQxjm63edu7d4fUb2o6pIj8SWQcXei8ed8dUkSCpDGOLjTWyjiejI80xtGFwloZx5PpkcU4ut1urn0rD7MRFDdp5KTb/swSJSevKCe7Pt6mfOTn2eWkC2dlt3vsC2QVz3nQadzecFYpQ+d0OXmoE/tnkf3WeGQRaLJvWU4KxVY/Ofn+UYMenO+NJEWEp6ucfPf0lIfVXnLytq7VFpG/znLyeAA7NceDQKjHDc5yUkSCusrJ4wHq0hoPfqDc5iwnRcSnq5x8//hAt81XTgpJT0c5+f7p6XYf7huSkyLi1lVOvv/RurpW+xblpFk+6PpqMyc9KAGQGkI3xo6aOnmHdHjWn6tIrg9GEooYySZEIr3hGmyuU7VFpEeaGZFIh+fs7+3xNSICKNGUSKQ3XYWtADoNIGlmRCL96hdhi8iPNFMikX7tq7CFxEeWOZFI7/b0LOUdL8qbNJMivaY5uHvshEngZ1nRU5MYL5fRZJeaoq/C0jeGrxH5Ulv+J1++t9jaw2vtpYdSSNLuy9Zf6iu1d+Wr27cVa9X7gmn4xDY0zcgsnaYJjsfb0p5icJm+ZJPwUD22d0hpP9sqMuLyDTzIW40Iq4GIsiwLY0yiH/W2DkL3e25ftzhX1zkyrYncPdI2X5y9a0sZbMjeawjtNbTZMaAhyg9e16oxO9y+wcbe57Bwad2uvfreTnW6sNmA84ZOQ6Ye25qva0MnX/BGmj/ytLGjeb7mmisUfEV6kK/7vubRKpbmftB8o6hraUM9X3AfNH8MI5Bi9DnPjbuBh+NomuRRSeEOaaIc5gkumuDYZy/MaWItwiALl9HPWgrftfMPPVJpmS/2U+lA3z1Q0DL9YIrV7+mZmrvby+eBvXzSUvmW9Pl5GZI9IM6DQMP9nA8jcFz3qwS8f7vnNxOwiW4rA1dmvpz27hybgq29hgYXSsF7qX4vp75dH3HIwUjvEoF0S6oQ1L8iFYOnxWA5nH57EGTfVAxWkVL9uHtkCFax2xbLFwpB0+oXgiafEGy6IuK06LlYJBwTl2eOnvIKvTejx76Rc4gS8mPPGfagtPQLBYvZHJStwbJf3+ASLA33Njp4vBp83ay943OGbYY4w0lDdeOwdTmwOs/4zEA7zSJzt4VjziHoapampF49w4vZYxrknTL+Hw==
\ No newline at end of file
diff --git a/docs/zh/tutorial/images/openmldb_sql_tutorial_2.drawio b/docs/zh/tutorial/images/openmldb_sql_tutorial_2.drawio
new file mode 100644
index 00000000000..31dab5eb887
--- /dev/null
+++ b/docs/zh/tutorial/images/openmldb_sql_tutorial_2.drawio
@@ -0,0 +1 @@
+7Z1dl+I4koZ/jS+p4++PS6ikps5sVU+fyd3qmb2p4wEn6S0ScsB0Vc6vXxksEggZwrItGyv69EyD00SS9qOw3ldSyHA+vvz6yyZ+ff66nidLwzbnvwznwbBtK3RC9p/8yNvhiBeYhwOLTTo/HDo58Jj+Jyk+yY/u0nmyPTsxW6+XWfpaHLQOB2fr1SqZZWfH4s1m/fP8tKf1cn524DVeJGfR8wOPs3iZgNP+SOfZ8+FoaAfvxz8n6eKZ/2bLjw4/eYn5yUXg7XM8X/88OeRMDefjZr3ODq9efn1MlvnF49fF+m339PXT58dP3xffNj/Gu28LMx4dgn2q8pHjn7BJVlmzoe1D6D/j5a64XsY0MCLXGIfG1DfC0JgExtQ1JmNj7OZHotCIHGMaGmOLnTPKzx5PjdA0pp4RPhiRt3/BTgr2P2JHJsY0yk8Io1FmFX/KNnvj94dd1df8ZRb/Kz802WbxJiswckx2gIGRxekq2bAD1v79chm/btP96Q/7M57T5fxL/LbeZTwQfzd5Yp9+LH5b/ul4mS5W7PWMXco85GQZ/ytZTuLZj8VmvVvNP66X683+mzmf9v/kMdLlkh9frVf5t0TekeLP/TPZZMmvEx6LO/SXZP2SZJs3dkrx05HlHD7CW5tV0PjznV3X9Q7Hnk+4tQPeZor2sjjGfmeCvSiwqICIAxG5eg//nreRyfN6k/4nv3PL4iad3tf9+5/pyzJescYXzy8OTdb7ZJMfytavxatl8pQVL/+1zrL1S/FmU1wDU4jGfLN+/e94s0j4KYJ7+bpOV9n+qnkT9i9rRR/ND57hsb/sI3tvvb9n/+anb7KP69U22zAq87BJvM1+JtsMBxOSHBtNTkGKYyJBaYsTF3DCTi8jhf39WRov/84yfrxa7Bv+c/ayLBrpz+c0Sx5f41l+6k/2XDqkgfz5EL/fW8GtrIbLml3dp+U+pT+n83myavQOOug7eHLL+MMVe8eKYO/XsXK0eMny4CrOWKtjf+8WYHD8nvJk+ICMF0Kj0mMhgI17kKQEgJTX3Wb2HG+T71n6khAzFZixXAiN5dhNUoMKpwCbsByb+CUjaipQYwc2oCYMm4QGE00BM9GVVPP2SqmmCjSOD1NNWLHveQMaRDQF0FgCvUxiqH9iyLMAj2Ix5JQDWUsMWVA1l1sf95xStj+SbPZcfFXk7bTw9/NWF/XK/ZPo8F5EU5FQPMDJVwJFApSKQunuuYGa2jZte2Tm/7LjFrtTY7P4P8JJFie8hpIGChVOBVFQe5sfhslOa7igxZM0LZhoKmCBiptIaUUxyZOCiKaAFN7tJcXUb8UUuIBHsWIyy4GspZg4FyegODrnlGPDqS+Srtwyic7uRTQVOQQOLX7V+nlTgY2KuujuUYF62rD9ZZ5ct6/xir1e5K+PWsnJtZJ1qpUOJ7Nffno+wdaOapLGDRVOBW9Qh2uumqrjglZN0rRgoqmABUpsIqUV1SRPCiKaAlKc8hRCIqk7kRRhRVJUzl+9uZhQTbs6pxDeThoQSVdumUTP9yKaipQBRxy/ai2gK7BRUSTdPSpQT18VSQGJpOZhw4skadxQ4VTwBkW55iKpOi5okSRNCyaaCligoiZSWhFJ8qQgoqkgJSoFg0RSdyLJ4uM2nQ0l8SfrSQrxleWQ/SV+iHxfVSJZbOJ5yu7hBUzH1ap7oCVmy/DmRQNQI9cGQCmcpaecqJZp0WxIyhUtoL2ltpz+qK2B4qfRIJUL9b5K/TVQgLQZtnJFY+rETh12tBnIcsNSVEijdajRXKxGa2uBlAvXX5ajcs8+j0xC4a2GFkiNPDjiOdC5fy2DotkCKQ9K9quqKzT0HeNSQ55Ga6k8KPmHOuClhh1tFlZ5oqF5wkYSG21WWXlBKSUkszqUWb4P+FNblM+DCzXLR01bMG+C6difVodGYVbhTYfq9I14tuxmCuHQaNGsdJ8vWuh7S3DZ/RFcA8VPoyqAPpT8ioe5hgiQNgUB/QqFrokdqgt4xk75/DLSXx3qrzAA/KnVXz5c8kmGzmWrIek18uFw6EBXb7UMimaqK4CS/arqsoximMvph+oaHnkaCa4ASn4a5qrDjjZai/+dNMzVBDbayKzAK6WEZFZ3Mss2Q8Cf2tmEAVw0Wo6KbgmFtxqaTTgK4HAolVuXAUWz2YQBlOe313CZ/RncGh55Gs0mDKHGJ5lVhx1tZhOGFYpvEzY0m5BjU15TjmRWhzKLV//rrLBGCFeClk881S2h8FZD9TJGIRz1pEVbMqBoViojFG0rc0tm9WgO4fDI06hKRgg1PsmsOuxoUyAjEg3CEzaS2GhTGyMqn+BDMqtDmYXePLgtmRXBRaAWPYgumw3prFEEhz2/0rinBCia6axItBENFcfokDyNdFYERT7prDrs6KOzRKPwhI0kNtroLMsU2IpTz4g8I/SNqW8wvCdB/mLCjlj7F6YxcY1paIwtY5w//Ubsf8Y0MMZTIzTzD4cP+efzF6HBHqf5j9iRiTGN8hNyL2n/mewKogVqZ4LOcQ4kZkwSJZsCAajGZs/pcv4lflvvMh6Iv5s8sU8/Fr8t/3S8TBcMvIcZAygP2eS+15Ufcw7fe6YQW64Pc5frCXpJjueVY1hLbVlmhbRCwrw5eEx8PZ+CFr7H1S1Y+KZodVjZ/t+fszD+L+/54+bnj//NRn9E458jaPTt2PlDfAahb2JzuwRcuWkSquoimornjGUDPF4Ij5Y0+P3TAusNvO42s+d4m3zP0peEuKmvoO1GyUGFU4IONIiP6MQvGZFTTT8LhFDUKDiYaEq4gfO33lPO2yulnIoK2hOkHLPZlIMJpwQd0dQcEkk9FEmeoOaJUCQ1IKiFIkmwKlxZYrmL/ULeG1N90XTlJkp0gy+iqUgsNsSF9laT56Wiirp/fKDmPg5b5sfPV+ERVbWowmssaa5Q4ZSABeV5SLum1WcIrbakEcJEU0IQVOkfx4+fiaCaBOFll3wWwoRTwpBopg7Jrh7KrtAGUKodm4IdIXpYlTQmGquyHEGtf5Jd0rzoNnjlQNV+VwVRhgqgTqNgDnziWTYptNoM6TMe5kCN//vfHgmgmgDpNC7mUOX/fuoxyxTsvSYUZH45hLUEmWATUEotwsbTgCC7chMletQX0ZQkEsEOACTIpHmpKMjuHh8X6vnbgqxHS/qGCiBekEkjiAqnhEFoCqjegW2QDKEFmTRCmGhKCIKSngRZfYDwgkw+CWHCKUGI9gjoqSDjft1NQRaUQ1hLkMHxeHo2iRtPA4Lsyk2U6FFfRFOSSDrdK2BwvFQUZPePj8yWATYJsrYBxAsyaQRR4VQw6EFTwCJBVp8htCCTRggTTQlBUNLTHMYGCMIrMvkshAmngCHbo+0EeqrIvAhAqHbOIlyPSg8nceOhOYvsWgjKXZIik+ZFszmLtie7u0BP9soeKoAazVm0PegK0BBZAwxpM2fx+IfeyGIHlUYJqw3YdJrf6NMmBT0VbyG2GGIDm24LxRvsjQ+8irNZ4YbydkObbrNr0aluGxIqmm27bfkVNyrYSza3P4NoA2QPr9bkN97GhF
OCH3QMosHvVdAuPmihJk0PJpoSeKDUVzmXcYDs4HWXfOrBhFNBD9/Tg3RXz3SXjV5X1tagGewTKfUPg+nYn1anRuV4PG88NGjGrgUcY/1a7ukQLzRodn5poHa/qsAso1hX1p9Bs0ECqNGgmRXAJ57qaYyDZEibQTMrgEJe8bqyQQKk00BYWO5YkyDrVJA5EEK1Be8FG5pSahE2Hip4z64FXIZIgkyeF90K3odQz99boY9BAqhTbfwQmgKWR4KsNkP61MYPoaQnQVYfII1K49suVV7sqSDjW3V3JsjgyDs9m8SNhwQZuxaCQh8kyKR50UyQ2YIiC1cFWUiCTAmAGgkyW1CmQfVmZYNkSBtBZns2IIgEWX2AdBJkvII1CbK+CbKw6xEyC/aR6OEkbj2kyFieFewJTYpMmhfdFJkLd964rchsUmRtA6iTInPhKD/rqJEkqw2RPpLMhSP9JMnqA6SRJLOi8nxDkqxLSeaY2DGytnYns2Afe+ALm6sMvfOGQzuTsWsBu9MK1diQUNFtU7IIdoJvCzG3P0JsgOzptB9ZBDvQltJqiwPkR5+9yCI4yXXw9TvaZUenbcgiGgzrqfJyLACh2vodlg0SCz2SQMOh2h0j1i8GpJDykkJFs7IdtikzBOaR8mqRPY0qdthm16NfA+RHm2odttntwNcA2RlmoQ5x71rw5JsGRuQa49CY+kYYGpPAmLrGZGyM3fxIFBqRY0xDY2yxc7IbdT4uRJpzUFRMwqWrZFPcSKiwZs/pcv4lflvvMh6Iv5s8sU8/Fr8t/3S8TBcMn4cZwyAPieFIwC+WpqosjXxeeb5QVJ4NFZXvCnW4/aG9KYYir5kUeIMK/HpzqzD0iaTFbIsU+Hhh56t6urBrHAUPZhD05BFzvTHVF+FX7qKElLqIpuJ5Am3gnUJcVHZGmoejouy+e1ZgVYMXSi116REJ52HiAxcis4f3avs9S18SoqgeRbYlkECO3SRGqHAqOIL1Dw4cxS8ZPbgq6WbXhp3VsElmMNEUIMN/JUw9b6/qUs8gmHEFiSZs9HGFiaaCGTiD9HWz/jNdzYiYasREuhBjA2JmafZGtFShxdNFTdmiMUxy9Prn6AkGF8SOXls7kdqC4UZlSeUu9JKNv6kD2lRSfCmg/fs/22Qz1vkpVAGP4W5BKr4yne5dO7TcUtXRu3t6oCF8nHKVH7fM0ylXBFUdqPAGn/zukJhwKrCCRrHSDZEG8QRDe3vSuGCiqaAF2sEfx4+fiZZWXD1pWjDRFNDCp2ec0EKstOTn3T0r0AF+fGZf9zlOiZhWPL177xDzDhR5ej339AIHdo6UztJzoPurtARg/yXSsS3RJD0HV2ii8PloaVNDWGk2vc8RbKVe/viinESz+84uTcWd1B2jd/tEDJE/jeYFOiI3Wm/bsDov2kwJdLrdM30QsGgzF9CBHrPerqEEK7rMAuQdvhsdoXcnUR+91hpc2kwadMsnZ5Cf2KGfGGH9xLaqbvFu20nWcZXlkLvQTsddV7SqvCW+FLjySeQnNovVcKt0ia8MdK3VbnM0qJxU1U+8e3qgG33VTwzIT1TBH95PlC+1hAmngkDoaFue3n5idV7QfqI0LphoKmiB7rPmfmJ1WNB+ojQsmGgqYIHms95+ogQrWD/x7lmB3jP5iS3DhfYT772X7ZV3Z8hP7NBPtHitrc4mKHpw8rOvLIkoF0+LTTxP2U28oIn9ffN9LFNSYPH2RZMYRx50qMl0bB8rzSYxeoLd/GhBc014tJnE6FXc0W9vOjpkOrbMn0aTGD1oew92EqMaeLSZ0ehBv3qwC6HVoKPN/EYPmteTvxI48uDoMtnRg0b2JEn/mq4WRI80PdrMZvRoF9F+uo9O1+4j3wzsJK+EyjLKfaio5jYSvfc04kOrmpzF9rHSzFn0oX9Ny6PrwqONs+jL7FhL0xnb5k8jZ9GH3rZHzmIdeLRxFn3cXOyD20j9q2Yp08aE9KF/TSZkHXB0MSF9gXtNJmRNerQxIf1yY4lMyC5NSA9rQlrlBNYzIeHM63LDWkvBxdtOAybklZsokUYuoilII/xrkAmpFKuKJuTdUwatblpTXRcetAl59/RAC/u2CWmTCdkyf3gTUhpAVDgVBEIb3DLJhaxDD9qFlIYHE00FO9DAHuoKazXkoJ1FaXIw0VSQA/1rchbrgIN1Fu8eHFxR66PbSLKuWdDQJuTd98yDUkrIhOzShAxC2LlSakIGcIa1rSyj3IXg4m2HTMhRgKsVkpuQE3paNYaVZiZkCK1uMiHrwqONCRniZmsfTUjLKGZCOmRCtsifRiZkCG3wwe4vrQYebTzIEPrX5EHWIUcbDzKE7jWtza8Dji4eZAjN60HvQq0GH22cxbC8ah85i106i1EEu0xCZ/HKpui1nMUQTpv2lGWUu1BRvO004Cze+872IbShyVlsH6uKzuLdUwb9a6reWBcetLN47/RE0Je+Xb3RpOmNLfOHdxalAUSFU0Eg9LapemMteNDOojQ7mGgq0IGmNDmLdchBO4vS5GCiqSAHetLkLNYBB+ss3j04uGrXOu4so4Y0tAl59z3zcmOJTMgOTcjjXMLOCj1GcIZ1+VRYLQUXbztU6HEUQceaTMj2sdKs0GMErW4q9FgXHm0KPUbQwr5tQtIa67b506jQo2VCH9whF7IOPdpUerRM6GCTDVkHHW3KN1omdLDJh6xDji71Gy0TVxybjMi2UNOm2KNluqWYkBPZpRMp6mEpdSItE060ttR1mu9Cdx2bD5mR7FpA6zp3HtXtU9T351AVWjTzGC0TWtlfafZ1XXy0cRktEzcfm/aTUUygVj4jtLoVdpgG+PTTx2bknJDN2BQ7+viMFrSoyWeshY42RqMFLWoyGpWypo/TaIlm116Ak6zm481mf69my3i7TWfnyOzvVsIdxuRXmv0jf/3BK9798+QnD79O33ALMpkvksfiF6432fN6sV7Fy+n70ck226x/JH8UF9AV3n3rt93T10+fHz99X3zb/Bjvvi3MeMQX5q13m1ly7UoUZmDGncyygMX1yb/yVZRObrYnuNn82CZZxln652msqzz9njupJz35wt4pDEovvJg2e/i7iw+98wPiHKU+dzr5dlU80OG6gEAMjPjt5LTC6C39viOH7zFa/CK32Lav7IvZvP8uPp+9OHyFhpvFrb0ET9qH/+/dOv/B03qVjbZ7932c61j79Rf7z55Uc/+zn8Xtz38a7MVt8cljyp4GxnhqsOfQ1DPCByPy9i9CIwr2P2JH2DlRfkK+BhWX9nNQv+SZ9bzRxst0scpbNGs+CUuzkzw7prN4OS5+8MLS8r7pbRL2R53k/nMv/6FCIrbKErFtnj9ijCIXliboEcsujus10oSs8LwNjfjX5CHWT0/bJLugriHOBKWb2S1nN559qamf3/tJkL+YsCPW/oVpTFxjGhpjyxiHGczWZwNFxvmgkHMYwVllcbrKb/qeBDiiM3tOl/Mv8dt6l/FA/N2ecp6qhRwhnuKC3sMhv/OD8+Qp3i2zylm+goXruOdphRclPU3bliBvO777gVega
H50xxLNHb52g2kksJGRQAu/KqYAxoE9RCEvttsaK9DYYjm7FBc9vVGrubLLV26kRF//IpqKvr4N7axZvJmv1sqYUSkVWyCkfPjPcholBhVOCTLQxXplguI53ibfs/QloWxTkyXbFMBkNwsTJpwSmKCvdYQpfsmIpZosOTZkKWoUJUw0JSTBqZjvaentldJSXZRE4zTHFRsNpSVMOCUwISxQ0lx90FyeC6AUay6/HMp6msuGfg2VfitrT/Ul15X7KCG5LqIpyS3Qz2EPL3aBTM/P90wyo3xZpW06rldeAfeeVVjbBFWVZNJEocIpQUqw1RafP5cfP68VSJmpFld4eSYPFiacErCgnxiqXJA7VIbQskwaIUw0FQQ50GD8OFY462moBOHVmHwWwoRTwpBo4y1SYz1UYyGEUqzGWhstdaANRGqsrD3VV2NX7qOEGruIpiS34EpJ2uUKjWZaNgVbVeEmDR8qnBL6bs0po8LvnSCI13jyDGLCKWEQ+peWTRqvNkNojSeNECaaEoKgo6lyTdRQAcJLPPkkhAmnBKHy4gOk6LpUdMfiIt0NsLnQQiJJV9J+aICN8YLbp90hSdc+bPqNxfGV8JUkHdW3aB1BnYbtXGiCKt3Na6gM6TNs50JblCRdfYB0GrVzaeucnko6bkXclHRBOYQ1JZ2giBxJOnH7aUDSXbmPEpLuIpqSXIIrGueSpGsftqqSTho+VDgl9NHOKL1EEC/p5BnEhFPBoAdNUIskXX2G0JJOGiFMNCUEQVuUZmI2QBBe08lnIUw4JQzZpbiQputU03khgFBx7REPWkak6UraD5UeYdcCN/fNI03XPmz6VTHx4Kw3nKZzSNO1iaBOxU886ILSMF0DDOlT9MTD+aIHnUcJqw3YdCqL4kWluJD861T+hTaAULH886G9RPKvpP2Q/GO82IAX0aPLJ/l3ApvZDmz6yT8fzpC7Lf9cGtJrLd9pKP98aJhGJP/qM6SP/POhhUqzNOsDpJOk88trzJGk61LS2XyX5u5mafrQXfrqUHYRth+apcmuBW6eXECSrn3Y9Julyf9UrKSzjGLhHY3otYqgTrM0A2iC0izNBhjSZ5ZmAG1RknT1AdJpkmZQ3kcnSdeppOPFKbsrjxlAx4gkXUn7ofKY7FrgpsmFJOnah02/8piBaOtTqqXSOYI6lccMoAlqeSTpajOkT3nMANqiJOnqA6RTecywPN+QpOtU0qHX3bUm6fjUT5J0t9sPSTp2LXBT3yKSdO3Dpp+kC5H7bXBJF5KkU4KgTpIuhCYo7WrXAEP6SLoQ2qIk6eoDpJWkC0pxIUnXqaQLsaN0re14EELHiCRdSfuhHQ9GFn8s3uhUmyTp2odNvx0PItx+G+eSziZJ1zaCOu14EEETlPX0SNPVhkifLQ8i6IuSpqsPkE5bHkRuKS6k6brUdI4ZAQiFmo5vjdC8pougZUSarqT91Nd01+5jdU13GU1JLsHNfWO5zyRN1zJsFTWdPHyocErowxWWO9d0Lmm6thFEa7oaDGLCKWEQuqAW1cdsACKsppNnCBNNAULH1kKarlGA0JquRhbChFOCUHkRQ9J0nWo6F0KotuYl62iD9EKarqT9UM1Ldi2wk99I07UOm3Y1LxlQSPpONZ1Hmq5tBDWqeXksakbjdA1DpE3RS9uExihpuvoAaVT00rZEM1Yu+ElW8/Fms79bs2W83aazc3L29yvhWi6ZL5LH4sNMMD2vF+tVvJy+H2WCcLP+kfxRXA03/8yvNPtH/nmmtg7v/smjsdcPv07fcIEI77/12+7p66fPj5++L75tfox33xZmPOKadb3bzJIrV+IouzIuGksjmoUjnP+l2F68J7jf/NgmWcZZ+udprKtM/Z7L1ndcLc/+wFri8R++TPuNJ6uLwjuHS1EEeUcKxM0Xd3vnofhsTB7qcK1AKEZL/HZyWqGzS/+CiM9Z4Xq2mFRe9s0sj1eyFX+AvTh8hYbbin2j13jSaPx/79b5D57Wq2y03Zsf47wTab/+Yv/Z42vuf/azACL/abDvWRafPHYqp4ExnhqhaUw9I3wwIm//IjSiYP8jdoSdE+Un5LcH1zHN0f2SJ9zzlhwv08Uqb+asTSUs+07ypJnO4uW4+MELy9b7NrxJ2B918lw4t1IeKuRnqyw/2+b544cdM6/m7RFLHw7ftbRuo+IFigrCRtbFJIr109M2yS6oa4gzwbJEdqcj1xiHxtTP7/2E3XvXmIyNsZsfiRgNjjENjbHFzhlVQmaUWTd8vQsPzzkYbqssTlc5JHtyoAE3e06X8y/x23qX8UD83b5V8GeEkDtEZ0Cgxqs9EvAdgmPtH77HjCvYZMZ1fcEjnXcVm3fcLJGDQvZs2/bse+usMOUGCYvTGivQ72AfGKJXhr6Ljc2vuXbXJLzYi2hKNAB0Il4Ij/ru6UBpgabDKxMUz/E2+Z6lLwlxU61vIRi7O3Y+myEHFU4JOnAq1RGd+CUjciqRY/NCUSe3OgybBAcTTQk3cPrTe8p5e6WUU9Eg5xtUnt5qr1FwENFUgMNHAkgj9V0jeTCXiTVSazNYbOiEq9u0tffTCY4tieau2DyJdLO/75BQqaid7p8c6Owe55jkxy3zdI4JASULFF5U1ZjLhAinhCnoA6ucRdLHLnF1YNBaSpoXTDQluIimvhErbcgneVYQ0ZSwItryguRTD+UT3x7ipnyyy5msKZ+gLaxu/n8vE0tz26FeuWkS/d6LaEoSiWD7XM0fO03sXzpQWHB75x6Fk2P0qy7yMHDDSyhp4FDhVBDnQJtYewlVfXNbrISS5gUTTQkuiDnTxEojEkqeFUQ0JayQYuqnYuIlNW4pptbKYB23QD1JI+VF03TII7ytUM2r0b488aVi0ltPV6CjmmIaACwVlzP3bXPQYeCGVkx1iqIhwikhDjrDuium6sBgFZM8L5hoSnARVXAkVlpQTDVYQURTwkpYigYppi4Vk4Vex3RFtdeUTNAa9pUlkv1Vfoh8X1U2WWziecpu4wVP7O+b72PtmZaZUMObGA1NjWwXer8KJ/MpZ6ptXnQbrHIr7viyl15Of6TXUAHUafjKtQGDiutIDRIhfQa0XNE+2ERPLXr0GeJyaauXngo2zwYIigXblT1k6wk2V7C/tLLU0vuFDW5zRSikdwHGRFOSRQSbAg3TJGwblYoS7P7JwW0PdF7MV9PRL0Xs4dWX/O7lmHBK8INzm4c6FKaIHrTwkoYHE00JO6KpzgSOLDhozSUPDiKaCnC88gRDmqtTzdX9SiwPWtKRsqzCrnIwHfvT6tyoTC28+dAAGLsW0D1Wu3PPsHjRbQDME3nHt9SX3R/1NVQAdRoA87otgTFUhPQZAPM6LIoxVHr0GQDzyuegkRjrVIxhF3m1V3ndg+Y0WTyg5VAVdnYtBDUyhrniq21UdKvI7olc5CsSzDKKATCnHxJsgOzpVNXd16d6hiJ69Kns7oumTxM4suDoU9ndL+eENFeXmuuob7qbdOhDS9qjrHLZcmjSIbsWgiobVMldBhXdJh36MiU3zP4Mew2QPZ0mHfr61N9QRI8+
kw59TYpxKAJHn0mHPlXm6KnmcrGTDlsrZuhDDzqgrHLZchrQXI0Wq7uMpiKLBNAupoVeUqhU1Fz3T45MrY0eTTUcIHt4zdVszcNO8LMBfqS5atGD1lyNFkDshJ0OK2wMEBy05mq0GmIn4FBxjZ5qLk5tdwu9AuhBW/Q8Ak2HFnmxayGorkFjojKo6La+K6DqGn1jT6elXQFV12iYHn1WdQVUXaNJcPRZ0BVCi9qYekbkGaFvTH2DAT4J8hcTdsTavzCNiWtMQ2NsGeP8EThi/zOmgTGeGiwa+3D4kH8+fxEaUbD/ETsyMaZRfkIYFZ/JrjBasHYm7pyDEmPSL10lm4IBqMxmz+ly/iV+W+8yHoi/mzyxTz8Wvy3/dLxMF4y8hxkjKA8pp7Os33ZPXz99fvz0ffFt82O8+7Yw45FVlbmQ74PEV3S5pqAIvSfoK7lea/MLwwqTlkmkNyfSj80SLdJFOxaIYHGupKyarEDTeMc+McTHEPo24rPArb22GlVXl9GUPGmgL/xCeLSjxQdACzT8Xneb2XO8Tb5n6UtC3NTX0Xaj5KDCKUEHGoBHdOKXjMippqF9SE7UKDiYaEq4gfbfe8p5e6WUU1FDe4KUYzabcjDhlKADrbtSWkgmdSmT+KTnDmUStOpoA5uS5kSy6fh0pE3bGuFFNx0VQVPmOH6ZHz9fqEdU1aJKJ5UVQTsnpK3Y6jOkj96K4Ir0j+PHz0RQTYJ0El5RhWqmJLy6FF68tXcnvCLBQnJKNuLmRMLLjqAdSMJLnhfthFfFSpV9q5oyVAA10miOCb0jyyaNVpshbTSaY8L5X7//7ZEAqgmQRhLNMUmR9VORWbYPIBRLMrccwlqSzDGhiUhPJ3HzaUCSXbmNEn3qi2hKUkmnNSwHx0tFSXb/+EAD6LYk69H6vqECiJdk0giiwilhULBJDg2b1WcILcmkEcJEU0IQnGtGkqw+QHhJJp+EMOGUIFS+STJJsk4lmSCLiSVZWyv+HAt6hvR0EjefBiRZo8XhL6OpSCUWNAhJksnzUlGS3T8+FbfU7ludy6ECiJdk8hsMYMIpYRC6ShZJsvoMoSWZNEKYaEoIgrPQaCZjAwThNZl8FsKEU8IQ7azdU03Gh3c7m7noWNA0pMeTuPnQzEV2LQTba5Mmk+ZFs5mLjg0dIJwm68ke20MFUKeZiza0lWiYrAGG9Jm5aNuAIFEWO+g0SlhtwKbTLEe+MTjJt57JN9sWbBinVr7Z0GBU9yTrpDqvWSGv8JZDyo1dC2gkDn2T7pZQ0U60Vdy7YC/a3P4MpA2QPa30GrScosFvXtAuPhpJNbhUVuWMxgGyo5PycsqzDCmvTpWXFwEIFSsvB9qISj3EYDr2p9W5UWnrOPj7OvhONP+dp/Kr3NchXkiDnV8a6PNc1WCWUawv68/A2SAB1EmIOaKNUtUOnA2SIX3UmANXySpeXzZIgLSSZEEpLiTJOpVkEdzXULUkE+xySslF2HxIkrFrAb1BkmTyvOgmyVzR/pX3VfJjkADqJMlc6CpZHkmy2gzpI8lcuEqWJFl9gHSSZK5bigtJsk73beaju91JMhd6hvR0EjcfkmTsWkCDkCSZPC/aSTLRXplXJFlIkkwJgFpJMugqqd68bJAM6SPJeA+NJFmjAOkkybzylR0kyTqVZB624kdrhfE96BnS00ncfKgwPrsW0CAkSSbPi26F8T3RLpq3JJlNkqxtAHUqjO9BV4n11EiT1YZIn8r4HlwDS5qsPkA6Vcb3wlJcSJN1qslC7GKy9jQZNA0HvsC5Ul7B71M/+P60D81BhXJsSKjopsR86PvcVmJuf5TYANnTSYT50EeylNZdHCA/+ugvH65/HXwdj3bZ0Ul6+V4pKSS9upRebvczFH1oDtIzCbQcmpzIrgX0AEl6SaGi27xEH9o7t6WXR9KrRfZ0mpIYQM9I7fjXAPnRZzpiANe5kvSqxY5OMxEDges49Y2xaUyC/EX00Rh/9I1pYERjI3SNqWeEkTH+ZExDY2wZ4zA/KZwYk4/7F1Nj7OUnj8dGFOxP/mSMnZ/fM/u7Zc4BlMlqPt5s9hzMlvF2m87OcdyTkHAxl8wXyWPxYaaYnteL9SpeTt+PThgKm7d/8NPzN//M3zCpVbx9+HX6wweuCZEsHRVUxvVf+ZmF4M+/M4a5kfnBsvlA11F9AQxF2osf2yTLOEv/PP11V9H8PRep77/N5oVt+Ee2691mlhRnvaP3/kH+1flV4bumhReBDlcLBGJ3Pn47Oa0QzeVfsHhel34Pngklzw/CixZ2+IIXH+bfdv30tE0yo5U2KdoZt17LkWwXiBb3K82Ogdnrk7js3XvY/I1sazuAeO2K8QXAN9vlsXrTzXapptE5l20F2+j88zbnme20Od43LW9D59+j6vl24JU3uuYalKA6I3jIRa095BhqX/LOzXkLjZfpYpU3X9YKEtbTmeQdlHQWL8fFD15Yz2jfxjbJNv3PSffr3F18qNAXOmaWy76QbZ738owieZY+r8wPNt+xoriVI6tei+CEuBdZ/DxAm2lXNBW2XtotkqNVITmq7ORYv+2evn76/Pjp++Lb5sd4921hxvw23ky6Ni/n1WBnqOWk+4uHcc47LB43matm4cC5jOReRKqbh/kv8i9+kW9F17/Z5Qfca92b5toQdOu7aEOtdXeabkMeVlD0puPC29AlX6Et2YZ86yJSEKhqQ+edj8ofqNqG2NvNep2dnr6JX5+/rud532D6/w==
\ No newline at end of file
diff --git a/docs/zh/tutorial/images/t2_to_t22.jpg b/docs/zh/tutorial/images/t2_to_t22.jpg
deleted file mode 100644
index 24732c94e4e..00000000000
Binary files a/docs/zh/tutorial/images/t2_to_t22.jpg and /dev/null differ
diff --git a/docs/zh/tutorial/images/t2_to_t22.png b/docs/zh/tutorial/images/t2_to_t22.png
new file mode 100644
index 00000000000..096e4aee1a1
Binary files /dev/null and b/docs/zh/tutorial/images/t2_to_t22.png differ
diff --git a/docs/zh/tutorial/modes.md b/docs/zh/tutorial/modes.md
index 64375ad0551..f3f91522d8e 100644
--- a/docs/zh/tutorial/modes.md
+++ b/docs/zh/tutorial/modes.md
@@ -38,7 +38,7 @@ OpenMLDB 针对线上线下的特征工程全流程,在不同阶段提供了
### 1.3 单机版执行模式说明
-虽然本文集中讲解集群版,但是有必要也简单介绍单机版的执行模式。单机版的执行模式相对简单,其离线数据和在线数据的存储和计算节点统一,因此单机版并不区分离线模式和在线模式。即我们可以直观的理解为,在 CLI 下,单机版并没有执行模式的概念,绝大多数OpenMLDB支持的 SQL 语法均可以在 CLI 下直接运行(对于部分SQL命令的参数,单机版支持的选项与集群版略有不同,详见[OpenMLDB支持的SQL](https://openmldb.ai/docs/zh/main/reference/sql/index.html))。因此,单机版特别适合用于快速试用或进行 SQL 实践。但是,在实时特征计算阶段,单机版和集群版一样,依然运行于在线请求模式下。
+虽然本文集中讲解集群版,但是有必要也简单介绍单机版的执行模式。单机版的执行模式相对简单,其离线数据和在线数据的存储和计算节点统一,因此单机版并不区分离线模式和在线模式。即我们可以直观的理解为,在 CLI 下,单机版并没有执行模式的概念,绝大多数OpenMLDB支持的 SQL 语法均可以在 CLI 下直接运行(对于部分SQL命令的参数,单机版支持的选项与集群版略有不同,详见[OpenMLDB支持的SQL](../reference/sql))。因此,单机版特别适合用于快速试用或进行 SQL 实践。但是,在实时特征计算阶段,单机版和集群版一样,依然运行于在线请求模式下。
:::{note}
如果仅在非生产环境试用 OpenMLDB或进行SQL学习实践,强烈建议使用单机版,可以获得更快捷方便的部署体验
@@ -46,7 +46,7 @@ OpenMLDB 针对线上线下的特征工程全流程,在不同阶段提供了
## 2. 离线模式
-如前所述,集群版的离线数据导入、离线特征开发、特征方案部署上线均在离线模式下执行。离线模式的作用是对离线数据进行管理和计算。涉及的计算节点由[针对特征工程优化的 OpenMLDB Spark 发行版](https://openmldb.ai/docs/zh/main/tutorial/openmldbspark_distribution.html)支持,存储节点支持使用 HDFS 等常见存储系统。
+如前所述,集群版的离线数据导入、离线特征开发、特征方案部署上线均在离线模式下执行。离线模式的作用是对离线数据进行管理和计算。涉及的计算节点由[针对特征工程优化的 OpenMLDB Spark 发行版](./openmldbspark_distribution.md)支持,存储节点支持使用 HDFS 等常见存储系统。
离线模式有以下主要特点:
@@ -55,10 +55,10 @@ OpenMLDB 针对线上线下的特征工程全流程,在不同阶段提供了
- 非阻塞式执行的 SQL 由内部的 TaskManager 进行管理,可以通过 `SHOW JOBS`, `SHOW JOB`, `STOP JOB` 命令进行查看和管理。
:::{tip}
-和很多关系型数据库系统不同,`SELECT`命令在离线模式下默认为异步执行,如需设置为同步执行,见[设置离线模式下命令的同步执行](https://openmldb.ai/docs/zh/main/reference/sql/ddl/SET_STATEMENT.html#id4)因此在离线特征开发阶段,如果使用异步执行,强烈建议使用`SELECT INTO`语句进行开发调试,可以将结果导出到文件,方便查看。
+和很多关系型数据库系统不同,`SELECT`命令在离线模式下默认为异步执行,如需设置为同步执行,见[设置离线模式下命令的同步执行](../reference/sql/ddl/SET_STATEMENT.md#id4)。因此在离线特征开发阶段,如果使用异步执行,强烈建议使用`SELECT INTO`语句进行开发调试,可以将结果导出到文件,方便查看。
:::
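+
+例如,下面是一个示意性的 `SELECT INTO` 用法(表名、列名与导出路径均为假设,具体可用选项以 `SELECT INTO` 语句的文档为准):
+
+```sql
+-- 离线模式下将查询结果导出到文件,便于开发调试时查看
+SET @@execute_mode='offline';
+SELECT c1, c2 FROM t1 INTO OUTFILE '/tmp/feature_debug.csv' OPTIONS (format='csv', mode='overwrite');
+```
+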
-用于特征方案部署的命令`DEPLOY`亦在离线模式下执行。其部署规范对于 SQL 还有一定的限制,详细可以参阅 [OpenMLDB SQL上线规范和要求](https://openmldb.ai/docs/zh/main/reference/sql/deployment_manage/ONLINE_SERVING_REQUIREMENTS.html)。
+用于特征方案部署的命令`DEPLOY`亦在离线模式下执行。其部署规范对于 SQL 还有一定的限制,详细可以参阅 [OpenMLDB SQL上线规范和要求](../reference/sql/deployment_manage/ONLINE_REQUEST_REQUIREMENTS.md)。
离线模式可以通过以下方式设置:
@@ -97,6 +97,6 @@ OpenMLDB 针对线上线下的特征工程全流程,在不同阶段提供了
在线请求模式通过以下形式支持:
- CLI:不支持
-- REST APIs:支持单行或者多行 request rows 的请求,详见:[REST APIs](https://openmldb.ai/docs/zh/main/quickstart/rest_api.html)
-- Java SDK:支持单行或者多行 request rows 的请求,详见:[Java SDK 快速上手](https://openmldb.ai/docs/zh/main/quickstart/java_sdk.html)
-- Python SDK:仅支持单行的 request row 请求,详见:[Python SDK 快速上手](https://openmldb.ai/docs/zh/main/quickstart/python_sdk.html)
+- REST APIs:支持单行或者多行 request rows 的请求,详见:[REST APIs](../quickstart/rest_api.md)
+- Java SDK:支持单行或者多行 request rows 的请求,详见:[Java SDK 快速上手](../quickstart/java_sdk.md)
+- Python SDK:仅支持单行的 request row 请求,详见:[Python SDK 快速上手](../quickstart/python_sdk.md)
diff --git a/docs/zh/tutorial/openmldbspark_distribution.md b/docs/zh/tutorial/openmldbspark_distribution.md
index 3f6e104f08e..6cd9c5d6bb6 100644
--- a/docs/zh/tutorial/openmldbspark_distribution.md
+++ b/docs/zh/tutorial/openmldbspark_distribution.md
@@ -2,15 +2,15 @@
## 简介
-OpenMLDB Spark发行版是面向特征工程进行优化高性能原生Spark版本。OpenMLDB Spark和标准Spark发行版一样提供Scala、Java、Python和R编程接口,用户使用OpenMLDB Spark发行版方法与标准版一致。
+OpenMLDB Spark发行版是面向特征工程优化后的高性能原生Spark版本。OpenMLDB Spark和标准Spark发行版一样提供Scala、Java、Python和R编程接口,用户使用OpenMLDB Spark发行版的方法与标准版一致。
GitHub Repo: https://github.com/4paradigm/Spark/
## 下载OpenMLDB Spark发行版
-在Github的[Releases页面](https://github.com/4paradigm/Spark/releases)提供了OpenMLDB Spark发行版的下载地址,用户可以直接下载到本地使用。
+在上述Github仓库的[Releases页面](https://github.com/4paradigm/Spark/releases)提供了OpenMLDB Spark发行版的下载地址,用户可以直接下载到本地使用。
-注意,预编译的OpenMLDB Spark发行版为allinone版本,可以支持Linux和MacOS操作系统,如有特殊需求也可以下载源码重新编译OpenMLDB Spark发行版。
+注意,预编译的OpenMLDB Spark发行版为allinone版本,支持Linux和MacOS操作系统,如有特殊需求也可以下载源码重新编译。
## OpenMLDB Spark配置
@@ -18,28 +18,30 @@ OpenMLDB Spark兼容标准的[Spark配置](https://spark.apache.org/docs/latest/
### 新增配置
-| 配置项 | 说明 | 默认值 | 备注 |
-| ------------------------------------------- | -------------------------- | ------------------------- | ------------------------------------------------------------ |
-| spark.openmldb.window.parallelization | 是否启动窗口并行计算优化 | false | 窗口并行计算可提高集群利用率但增加计算节点 |
-| spark.openmldb.addIndexColumn.method | 添加索引列方法 | monotonicallyIncreasingId | 可选方法为zipWithUniqueId, zipWithIndex, monotonicallyIncreasingId |
-| spark.openmldb.concatjoin.jointype | 拼接拼表方法 | inner | 可选方法为inner, left, last |
-| spark.openmldb.enable.native.last.join | 是否开启NativeLastJoin优化 | true | 相比基于LeftJoin实现性能更高 |
-| spark.openmldb.enable.unsaferow.optimization | 是否开启UnsafeRow内存优化 | false | 开启后使用UnsafeRow编码格式,目前部分复杂类型不支持 |
-| spark.openmldb.opt.unsaferow.project | Project节点是否开启UnsafeRow内存优化 | false | 开启后降低Project节点编解码开销,目前部分复杂类型不支持 |
-| spark.openmldb.opt.unsaferow.window | Window节点是否开启UnsafeRow内存优化 | false | 开启后降低Window节点编解码开销,目前部分复杂类型不支持 |
-| spark.openmldb.opt.join.spark_expr | Join条件是否开启Spark表达式优化 | true | 开启后Join条件计算使用Spark表达式,减少编解码开销,目前部分复杂表达式不支持 |
-| spark.openmldb.physical.plan.graphviz.path | 导出物理计划图片路径 | "" | 默认不导出图片文件 |
+| 配置项 | 说明 | 默认值 | 备注 |
+| ------------------------------------------- |----------------------------| ------------------------- |-------------------------------------------------------------|
+| spark.openmldb.window.parallelization | 是否启动窗口并行计算优化 | false | 窗口并行计算可提高集群利用率但会增加计算节点 |
+| spark.openmldb.addIndexColumn.method | 添加索引列方法 | monotonicallyIncreasingId | 可选方法有zipWithUniqueId, zipWithIndex, monotonicallyIncreasingId |
+| spark.openmldb.concatjoin.jointype | 拼接拼表方法 | inner | 可选方法有inner, left, last |
+| spark.openmldb.enable.native.last.join | 是否开启NativeLastJoin优化 | true | 相比基于LeftJoin的实现,具有更高性能 |
+| spark.openmldb.enable.unsaferow.optimization | 是否开启UnsafeRow内存优化 | false | 开启后使用UnsafeRow编码格式,目前部分复杂类型不支持 |
+| spark.openmldb.opt.unsaferow.project | Project节点是否开启UnsafeRow内存优化 | false | 开启后降低Project节点编解码开销,目前部分复杂类型不支持 |
+| spark.openmldb.opt.unsaferow.window | Window节点是否开启UnsafeRow内存优化 | false | 开启后降低Window节点编解码开销,目前部分复杂类型不支持 |
+| spark.openmldb.opt.join.spark_expr | Join条件是否开启Spark表达式优化 | true | 开启后Join条件计算使用Spark表达式,减少编解码开销,目前部分复杂表达式不支持 |
+| spark.openmldb.physical.plan.graphviz.path | 导出物理计划图片的路径 | "" | 默认不导出图片文件 |
* 如果SQL任务有多个窗口计算并且计算资源足够,推荐开启窗口并行计算优化,提高资源利用率和降低任务运行时间。
* 如果SQL任务中Join条件表达式比较复杂,默认运行失败,推荐关闭Join条件Spark表达式优化,提高任务运行成功率。
-* 如果SQL任务中输入表或中间表列数较大,推荐同时开启三个UnsafeRow优化开关,减少编解码开销和降低任务运行时间。
+* 如果SQL任务中输入表或中间表列数较大,推荐同时开启上表的三个UnsafeRow优化,减少编解码开销和降低任务运行时间。
+
+## 使用
### 使用Example Jars
下载解压后,设置`SPARK_HOME`环境变量,可以直接执行Example Jars中的例子。
-```
-export SPARK_HOME=`pwd`/spark-3.0.0-bin-openmldbspark/
+```bash
+export SPARK_HOME=`pwd`/spark-3.2.1-bin-openmldbspark/
$SPARK_HOME/bin/spark-submit \
--master local \
@@ -53,7 +55,7 @@ $SPARK_HOME/bin/spark-submit \
下载OpenMLDB Spark发行版后,也可以使用标准的PySpark编写应用,示例代码如下。
-```scala
+```python
from pyspark.sql import SparkSession
from pyspark.sql import Row
from pyspark.sql.types import *
diff --git a/docs/zh/tutorial/tutorial_sql_1.md b/docs/zh/tutorial/tutorial_sql_1.md
index 45ddf3e6bb9..3d2fe09a307 100644
--- a/docs/zh/tutorial/tutorial_sql_1.md
+++ b/docs/zh/tutorial/tutorial_sql_1.md
@@ -1,21 +1,22 @@
+
# 基于 SQL 的特征开发(上)
## 1. 什么是机器学习的特征工程
-一个真实场景的机器学习应用一般会包含两个主体流程,即**特征工程**和**机器学习模型**(以下简称**模型**)。大家对模型一定很了解,平时也是接触的最多的,比如从经典的逻辑回归、决策树模型,到近几年大火的深度学习模型,都是聚焦于如何开发高质量的模型。对于特征工程,可能大家相对关注较少。但是大家一定听说过坊间传闻的一句”名言“:数据和特征决定了机器学习的上限,而模型和算法只是逼近这个上限而已。由此可见,对于特征工程的重要性大家早有共识。
+一个真实场景的机器学习应用一般会包含两个主要任务,即进行**特征工程**和构建**机器学习模型**(以下简称**模型**)。大家对模型一定很了解,平时也是接触的最多的,比如从经典的逻辑回归、决策树模型,到近几年大火的深度学习模型,都聚焦于如何开发高质量的模型。特征工程受到关注相对较少。但是大家一定听说过一句著名的坊间传闻:数据和特征决定了机器学习的上限,而模型和算法只是逼近这个上限而已。由此可见,对于特征工程的重要性大家早有共识。
一句话来定义特征工程:使用特定的领域知识,从原始数据中抽取有用的特征信息。这里强调了特定的领域知识(domain knowledge),也就是说特征抽取并不是一个标准化过程,而是基于不同的场景有不同的经验和方法论。举个简单的例子,对于实时推荐系统来说,原始数据可能只是用户实时打入的搜索关键字,如“洗衣机”,以及相应的存放于数据库中的用户和商品数据表格。那么为了更好的进行实时推荐,可以考虑如下更有意义的特征:
- 该用户过去一年购买最多的家电品牌
-- 该用户过去三年在大家电类的消费上的平均消费水平
-- 过去一小时平台上打折力度 7 折以上,符合该用户性别和年龄组的用户所购买量排名前三的洗衣机型号
+- 该用户过去三年在大家电类的平均消费水平
+- 过去一小时平台上打折力度在 7 折以上,符合该用户性别和年龄组的用户所购买的数量排名前三的洗衣机型号
通过上面的例子可以看到,特征可以做的相当复杂,并且可以具有非常高的时效性。那么如何根据特定场景,抽取好的特征,这就是数据科学家需要的修养,同时需要配备足够强大的工具,才能做好特征工程。本教程抛砖引玉,来让大家认识如何在实践中做特征工程。
## 2. 特征工程开发利器 – OpenMLDB
-工欲善必先利其器,在介绍特征工程算法之前,我们先有必要来认识一下特征工程的开发和部署工具。根据经验,我们粗略的把他们分类,并且总结了各自的优缺点。
+工欲善其事必先利其器,在介绍特征工程算法之前,有必要先来认识一下特征工程的开发和部署工具。根据经验,下表粗略地把常见的工具进行了分类、总结和比较。
| 开发工具 | 入门门槛 | 功能支持 | 工程化落地 |
| ----------------------------------------------------- | ---------------------------------- | ------------------------------------------------------------ | ------------------------------------------------------------ |
@@ -24,50 +25,50 @@
| 混合开发,比如离线使用 Python,线上使用数据库或者 C++ | 非常高,需要两组技能栈团队开发维护 | 通过开发和一定的定制化,可以满足功能需求。 | 可接受,但是成本较高。除了开发运营成本以外,还需要解决线上线下一致性问题,保证离线和在线效果一致。 |
| OpenMLDB | 中,基于 SQL 进行开发 | 针对特征工程优化,基于标准 SQL 进行扩展,高效支持特征工程常用的计算方法。 | 可低成本高效落地。基于 SQL 开发,实现开发即上线,天然解决性能和线上线下一致性问题。 |
-从上面的表格中总结可以看到,OpenMLDB 在功能和工程化落地方面都具有独特的优势,特别对于实时性高的时序特征计算,OpenMLDB 有不少的针对性优化。如果希望进一步了解 OpenMLDB,可以阅读相关 [介绍文档](https://zhuanlan.zhihu.com/p/462559609),以及 [OpenMLDB 的 GitHub repo](https://github.com/4paradigm/OpenMLDB) 。
+从上面的表格中可以看到,OpenMLDB 在功能和工程化落地方面都具有独特的优势,特别对于实时性高的时序特征计算,OpenMLDB 有不少的针对性优化。如果希望进一步了解 OpenMLDB,可以阅读相关 [介绍文档](https://zhuanlan.zhihu.com/p/462559609),以及 [OpenMLDB 的 GitHub repo](https://github.com/4paradigm/OpenMLDB) 。
-在本系列教程中,我们将会基于 OpenMLDB 的 SQL 语法,来实践演示如何基于 SQL 开发特征工程脚本。你可以通过阅读我们的文档 - [OpenMLDB 快速上手](http://docs-cn.openmldb.ai/2620852),来了解如何试用 OpenMLDB(推荐基于 docker 镜像,通过单机版来快速试用);你也可以在这里找到我们 [完整的产品说明文档](http://docs-cn.openmldb.ai/)。
+在本系列教程中,我们将会基于 OpenMLDB 的 SQL 语法,通过实践来演示如何基于 SQL 开发特征工程脚本。你可以通过阅读我们的文档 -- [OpenMLDB 快速上手](../quickstart/openmldb_quickstart.md),来了解如何试用 OpenMLDB(推荐基于 docker 镜像,通过单机版来快速试用);你也可以在这里找到我们 [完整的产品说明文档](https://openmldb.ai/docs/zh)。
## 3. 从 0 到 1,特征工程实践
-我们将会分上下两篇介绍特征工程常用的处理方法,本篇将会侧重单表特征处理,下一篇我们将会聚焦更为复杂的多表特征计算。本文使用在金融领域普遍使用的反欺诈作为实际案例进行描述。
+我们将会分上下两篇介绍特征工程常用的处理方法,本篇将会侧重单表特征处理,下一篇我们将会聚焦更为复杂的多表特征计算。本文使用在金融领域普遍使用的反欺诈作为案例。
-注意,如果你想运行本篇教程所举例的 SQL,请按照以下两个步骤做准备:
+注意,如果你想运行本篇教程的 SQL,请按照以下两个步骤做准备:
-- 推荐使用 docker 镜像在**单机版**下运行本教程,镜像拉取和 CLI 运行方式参考 [OpenMLDB 快速上手](http://docs-cn.openmldb.ai/2620852)。如果使用集群版,请使用离线模式(`SET @@execute_mode='offline'` )。集群版 CLI 下的普通线上模式仅支持简单的数据预览功能,因此无法运行教程中大部分的 SQL。
+- 推荐使用 docker 镜像在**单机版**下运行本教程,镜像拉取和 CLI 运行方式参考 [OpenMLDB 快速上手](../quickstart/openmldb_quickstart.md)。如果使用集群版,请使用离线模式(`SET @@execute_mode='offline'` )。集群版 CLI 仅支持离线模式和在线预览模式。而在线预览模式仅支持简单的数据预览功能,因此无法运行教程中大部分的 SQL。
- 本教程相关的所有数据以及导入操作脚本可以在[这里下载](https://openmldb.ai/download/tutorial_sql/tutoral_sql_data.zip)。
### 3.1. 基本概念
### 3.1.1. 主表和副表
-**主表**是特征抽取的主体数据表。直观上可以理解主表为带有模型训练所需要的标签(label)列的数据表格。在特征工程过程中,会对主表的每一行进行特征计算,最终生成对应的**特征宽表**。例如,下面这张用户交易表(以下代指为数据表 t1),是本文所述案例的主表。
+**主表**是特征抽取的主体数据表。直观上可以理解主表为带有模型训练所需要的标签(label)列的数据表格。在特征工程过程中,会对主表的每一行进行特征计算,最终生成对应的**特征宽表**。例如,下面这张用户交易表(下文以数据表 t1 代指),是本文案例的主表。
-| Field | Type | Description |
-| ---------- | --------- | --------------------------- |
-| id | BIGINT | 样本ID,每一条样本拥有唯一ID |
-| uid | STRING | 用户ID |
-| mid | STRING | 商户ID |
-| cardno | STRING | 卡号 |
+| Field | Type | Description |
+| ---------- | --------- |-------------------------|
+| id | BIGINT | 样本ID,每一条样本拥有唯一ID |
+| uid | STRING | 用户ID |
+| mid | STRING | 商户ID |
+| cardno | STRING | 卡号 |
| trans_time | TIMESTAMP | 交易时间 |
| trans_amt | DOUBLE | 交易金额 |
| trans_type | STRING | 交易类型 |
-| province | STRING | 省份 |
-| city | STRING | 城市 |
-| label | BOOL | 样本label, true\|false |
+| province | STRING | 省份 |
+| city | STRING | 城市 |
+| label | BOOL | 样本label, `true`或`false` |
-除了主表以外,数据库中可能还存在着存储相关辅助信息的数据表格,可以通过 join 操作和主表进行拼接,这些表格称为**副表**(注意副表可能有多张)。比如我们可以有一张副表存储着商户流水历史记录。在做特征工程过程中,把主表和副标的信息拼接起来,可以获得更为有价值的信息。关于多表的特征工程,我们将在本系列的下篇详细介绍。
+除了主表以外,数据库中可能还存在着其他存储相关辅助信息的数据表格,可以通过 join 操作和主表进行拼接,这些表格称为**副表**(注意副表可能有多张)。比如我们可以有一张副表存储着商户流水历史记录。在做特征工程的过程中,把主表和副表的信息拼接起来,可以获得更为有价值的信息。关于多表的特征工程,我们将在[本系列的下篇](tutorial_sql_2.md)详细介绍。
### 3.1.2. 特征分类
-在深入讨论特征构建细节之前,我们需要对目前机器学习下常用的特征进行分类,从构建特征数据集以及聚合方式上看,机器学习常用的特征包含四种:
+在深入讨论特征构建的细节之前,我们需要对目前机器学习中常用的特征处理方式进行分类。从构建特征数据集以及聚合方式上看,机器学习常用的特征处理方式有如下四种:
- 主表单行特征:对主表的一列或者多列进行表达式和函数加工计算。
- 主表窗口时序特征:对主表构建时序窗口,在窗口内进行时序特征加工。
- 副表单行特征:当前主表行从副表中匹配一条记录并拼接,然后对拼接后的数据行进行单行特征加工。
- 副表多行聚合特征:当前主表行从副表中匹配多条记录,对多条记录进行特征加工。
-本文上篇将会着重介绍主表单行特征和主表窗口时序特征,稍后推出的下篇将会具体展开介绍副表单行特征以及副表多行聚合特征。
+本文作为上篇将会着重介绍主表单行特征和主表窗口时序特征。下篇将会具体介绍副表单行特征以及副表多行聚合特征。
### 3.2. 主表单行特征
@@ -75,7 +76,7 @@
**列直取**
-主表的某些列,直接就可以作为特征参与模型训练。
+主表的某些列,直接作为特征参与模型训练。
```sql
SELECT uid, trans_type FROM t1;
@@ -114,10 +115,10 @@ minute(trans_time) as f_trans_minute FROM t1;
我们既可以通过时间区间(如一个月),也可以通过窗口内的行数(如 100 条),去定义一个具体的时序窗口大小。时序窗口的最基本定义方式:
```sql
-window window_name as (PARTITION BY partition_col ORDER BY order_col ROWS_RANGE|ROWS BETWEEN StartFrameBound AND EndFrameBound)
+window window_name as (PARTITION BY partition_col ORDER BY order_col ROWS_RANGE | ROWS BETWEEN StartFrameBound AND EndFrameBound)
```
-其中,最基本的不可或缺的语法元素包括:
+其中,不可或缺的语法元素包括:
- `PARTITION BY partition_col`: 表示窗口按照`partition_col`列分组
@@ -127,69 +128,61 @@ window window_name as (PARTITION BY partition_col ORDER BY order_col ROWS_RANGE
- `StartFrameBound`: 表示该窗口的上界。在OpenMLDB中,一般我们可以定义窗口上界为:
-- - `UNBOUNDED PRECEDING`: 无上界。
+ - `UNBOUNDED PRECEDING`: 无上界。
- `time_expression PRECEDING`: 如果是时间窗口,可以定义时间偏移,如`30d PRECEDING`表示窗口上界为当前行的时间-30天。
- `number PRECEDING`: 如果是条数窗口,可以定义条数偏移。如,`100 PRECEDING`表示窗口上界为的当前行的前100行。
- `EndFrameBound`: 表示该时间窗口的下界。在OpenMLDB中,一般我们可以定义窗口下界为:
-- - `CURRENT ROW`: 当前行
+ - `CURRENT ROW`: 当前行
- `time_expression PRECEDING`: 一定的时间偏移,如`1d PRECEDING`。这表示窗口下界为当前行的时间-1天。
- `number PRECEDING`: 如果是条数窗口,可以定义条数偏移。如,`1 PRECEDING`表示窗口上界为的当前行的前1行。
- 配置窗口上下界时,请注意:
-
-- - OpenMLDB 目前无法支持当前行以后的时间作为上界和下界。如`1d FOLLOWING`。换言之,我们只能处理历史时间窗口。这也基本满足大部分的特征工程的应用场景。
+ - OpenMLDB 目前无法支持当前行以后的时间作为上界和下界。如`1d FOLLOWING`。换言之,我们只能处理历史时间窗口。这也基本满足大部分的特征工程的应用场景。
- OpenMLDB 的下界时间必须>=上界时间
- OpenMLDB 的下界条数必须<=上界条数
-更多语法和特性可以参考 [OpenMLDB窗口参考手册](http://docs-cn.openmldb.ai/2620896)。
-
+更多语法和特性可以参考 [OpenMLDB窗口参考手册](../reference/sql/dql/WINDOW_CLAUSE.md)。
+#### 示例
+对于上面所示的交易表 t1,我们定义两个时间窗口和两个条数窗口。每一个样本行的窗口均按用户ID(`uid`)分组,按交易时间(`trans_time`)排序。下图展示了分组排序后的数据。
![img](images/table_t1.jpg)
-以下举例说明,对于上面所示的交易表 t1,我们定义两个时间窗口和两个条数窗口。每一个样本行的窗口都是按用户ID(`uid`)分组,按交易时间(`trans_time`)排序。注意以下窗口定义并不是完整的 SQL,稍后我们加上聚合函数以后才是完整的可运行 SQL。
-
-- w1d: 用户最近一天的窗口
+注意以下窗口定义并不是完整的 SQL,加上聚合函数以后才是完整的可运行 SQL(见[3.3.2](#332-步骤二多行聚合函数加工))。
+**w1d: 用户最近一天的窗口,包含当前行到最近1天以内的数据行**
```sql
--- 用户最近一天的窗口,包含当前行到最近1天以内的数据行
window w1d as (PARTITION BY uid ORDER BY trans_time ROWS_RANGE BETWEEN 1d PRECEDING AND CURRENT ROW)
```
+如上图,样本9的 w1d 窗口包含了三行数据。分别是样本 6,8,9。这三条数据落在样本9的时间窗口内 [2022-02-07 12:00:00, 2022-02-08 12:00:00]。
-样本9的 w1d 窗口包含了三行数据。分别是样本 6,8,9。这三条数据落在样本9的时间窗口内 [2022-02-07 12:00:00, 2022-02-08 12:00:00]。
-
-- w1d_10d: 用户1天以前和最近10天的窗口
+**w1d_10d: 用户1天以前和最近10天的窗口**
```sql
--- 用户1d~10d的窗口,包含1天以前,10天以内的数据行
window w1d_10d as (PARTITION BY uid ORDER BY trans_time ROWS_RANGE BETWEEN 10d PRECEDING AND 1d PRECEDING)
```
+如上图,样本9的w1d_10d窗口包含了三行数据。分别是样本1,3,4。这三条数据落在样本9的时间窗口内[2022-01-29 12:00:00, 2022-02-07 12:00:00]。
-样本9的w1d_10d窗口包含了三行数据。分别是样本1,3,4。这三条数据落在样本9的时间窗口内[2022-01-29 12:00:00, 2022-02-07 12:00:00]。
-
-- w0_1: 用户最近0~1行窗口
+**w0_1: 用户最近0~1行窗口,包含前一行和当前行**
```sql
--- 用户最近1行窗口,包含前一行和当前行
window w0_1 as (PARTITION BY uid ORDER BY trans_time ROWS BETWEEN 1 PRECEDING AND CURRENT ROW)
```
+如上图,样本10的w0_1窗口包含了2行数据。分别是样本7和样本10。
-样本10的w0_1窗口包含了2行数据。分别是样本7和样本10。
-
-- w2_10: 用户最近2~10行窗口
+**w2_10: 用户最近2~10行窗口**
```sql
--- 用户最近2~10行窗口,包含前2~10行
window w2_10 as (PARTITION BY uid ORDER BY trans_time ROWS BETWEEN 10 PRECEDING AND 2 PRECEDING)
```
+如上图,样本10的w2_10窗口包含了2行数据。分别是样本2和样本5。
-样本10的w2_10窗口包含了2行数据。分别是样本2和样本5。
### 3.3.2. 步骤二:多行聚合函数加工
定义好时间窗口以后,我们可以做时间窗口内的多行聚合函数计算。
-**简单聚合统计**
+**简单聚合计算**
聚合函数目前支持:`count()`, `sum()`, `max()`, `min()`, `avg()`,示例如下。
@@ -207,7 +200,7 @@ FROM t1
window w30d as (PARTITION BY uid ORDER BY trans_time ROWS_RANGE BETWEEN 30d PRECEDING AND CURRENT ROW);
```
-**过滤后聚合统计**
+**过滤后聚合计算**
先对数据集按条件过滤后,然后进行简单统计。函数形如 `xxx_where`:
@@ -236,11 +229,9 @@ FROM t1
window w30d as (PARTITION BY uid ORDER BY trans_time ROWS_RANGE BETWEEN 30d PRECEDING AND CURRENT ROW);
```
-**分组后聚合统计**
+**分组后聚合计算**
-对数据集按某一列进行分组,然后分组统计,统计结果保存为形如`"k1:v1,k2:v2,k3:v3"`的的字符串。
-
-函数形如 `xxx_cate`:
+对数据集按某一列进行分组,然后分组统计,统计结果保存为形如`"k1:v1,k2:v2,k3:v3"`的字符串。函数形如 `xxx_cate`:
```sql
xxx_cate(col, cate) over w
@@ -269,11 +260,9 @@ FROM t1
window w30d as (PARTITION BY uid ORDER BY trans_time ROWS_RANGE BETWEEN 30d PRECEDING AND CURRENT ROW);
```
-**过滤后再分组聚合统计**
-
-先对窗口按条件过滤后, 然后对数据集按某一列进行分组,然后分组统计,统计结果保存为形如`"k1:v1,k2:v2,k3:v3"`的的字符串。
+**过滤后再分组聚合计算**
-函数形如 `xxx_cate_where`:
+先对窗口按条件过滤,然后对数据集按某一列进行分组并做分组统计,统计结果保存为形如`"k1:v1,k2:v2,k3:v3"`的字符串。函数形如 `xxx_cate_where`:
```text
xxx_cate_where(col, filter_condition, cate) over w
@@ -305,23 +294,23 @@ window w30d as (PARTITION BY uid ORDER BY trans_time ROWS_RANGE BETWEEN 30d PREC
**按类型列进行频率统计**
-通常,我们对类型特征会进行频率的统计。例如,我们可能需要统计各个类别中,最高频次的类型,最高频的类型的频度占比等。
+通常,我们会对类型特征进行频率的统计。例如,我们可能需要统计各个类别中,最高频次的类型,最高频的类型的频度占比等。
-Top ratio 特征`fz_top1_ratio`:求窗口内某个分类count最大的count数占窗口总数据的比例。
+**Top ratio 特征`fz_top1_ratio`**:求窗口内某列数量最多的类别占窗口总数据的比例。
+以下SQL使用`fz_top1_ratio`求t1中最近30天交易次数最多的城市的交易次数占比。
```sql
SELECT
--- 最近30天的交易次数最大的城市的交易次数占比
fz_top1_ratio(city) over w30d as top_city_ratio
FROM t1
window w30d as (PARTITION BY uid ORDER BY trans_time ROWS_RANGE BETWEEN 30d PRECEDING AND CURRENT ROW);
```
-Top N 特征`fz_topn_frequency(col, top_n)`: 求取窗口内某个分类频率最高的N个分类
+**Top N 特征`fz_topn_frequency(col, top_n)`**:求取窗口内某列频率最高的N个类别。
+以下SQL使用`fz_topn_frequency`求t1中最近30天交易次数最多的2个城市。
```sql
SELECT
--- 最近30天的交易次数最大的2个城市, "beijing,shanghai"
fz_topn_frequency(city, 2) over w30d as top_city_ratio
FROM t1
window w30d as (PARTITION BY uid ORDER BY trans_time ROWS_RANGE BETWEEN 30d PRECEDING AND CURRENT ROW);
diff --git a/docs/zh/tutorial/tutorial_sql_2.md b/docs/zh/tutorial/tutorial_sql_2.md
index 66162ec3045..7bfa7c93c1a 100644
--- a/docs/zh/tutorial/tutorial_sql_2.md
+++ b/docs/zh/tutorial/tutorial_sql_2.md
@@ -2,31 +2,32 @@
## 1. 准备知识
-在上期系列文章中([深入浅出特征工程 -- 基于 OpenMLDB 的实践指南(上)](https://zhuanlan.zhihu.com/p/467625760)),我们介绍了特征工程的基础概念、实践工具,以及基本的基于单表的特征脚本开发。在本篇文章中,我们将基于主表和副表,去展开详细介绍更加复杂和强大的基于多表的特征脚本开发。同时,我们依然依托 OpenMLDB 所提供的 SQL 语法进行特征工程脚本示例,关于 OpenMLDB 的更多信息可以访问 [OpenMLDB 的 GitHub repo](https://github.com/4paradigm/OpenMLDB),以及 [文档网站](http://docs-cn.openmldb.ai/)。
+在[深入浅出特征工程 -- 基于 OpenMLDB 的实践指南(上)](https://zhuanlan.zhihu.com/p/467625760)中,我们介绍了特征工程的基础概念、实践工具,以及基于单表的特征脚本开发。本文将基于主表和副表,详细介绍更加复杂和强大的基于多表的特征脚本开发。同时,我们依然依托 OpenMLDB 所提供的 SQL 语法进行特征工程脚本示例,关于 OpenMLDB 的更多信息可以访问 [OpenMLDB 的 GitHub repo](https://github.com/4paradigm/OpenMLDB),以及 [文档网站](https://openmldb.ai/docs/zh/main/)。
-如果你想运行本篇教程所举例的 SQL,请按照以下两个步骤做准备:
+如果你想运行本篇教程中的 SQL,请按照以下两个步骤做准备:
-- 推荐使用 OpenMLDB docker 镜像在**单机版**下运行本教程,运行方式参考 [OpenMLDB 快速上手](http://docs-cn.openmldb.ai/2620852)。如果使用集群版,请使用离线模式(`SET @@execute_mode='offline'` )。集群版普通线上模式仅支持简单的数据预览功能,因此无法运行教程中大部分的 SQL。
+- 推荐使用 OpenMLDB docker 镜像在**单机版**下运行本教程,运行方式参考 [OpenMLDB 快速上手](../quickstart/openmldb_quickstart.md)。如果使用集群版,请使用离线模式(`SET @@execute_mode='offline'` )。集群版 CLI 仅支持离线模式和在线预览模式。而在线预览模式仅支持简单的数据预览功能,因此无法运行教程中大部分的 SQL。
- 本教程相关的所有数据以及导入操作脚本可以在 [这里下载](https://openmldb.ai/download/tutorial_sql/tutoral_sql_data.zip)。
-在本篇文章中,我们将会使用到主表和副表,进行举例说明。我们依然使用上篇的反欺诈交易的样例数据,包含一张主表用户交易表(表一 t1)和一张副表商户流水表(表二 t2)。需要多表特征工程的背景,是在关系数据库设计中,为了避免数据冗余和一致性,一般都会按照一定的设计原则(数据库设计范式),把数据存入多个数据表中。在特征工程中,为了获得足够的有效信息,需要在多个表中取出数据,因此需要基于多表进行特征工程。
+本文将用到主表和副表进行举例说明。样例数据是上篇使用的反欺诈交易数据集,包含一张主表:用户交易表(t1)和一张副表:商户流水表(t2)。
+在关系型数据库设计中,为了避免数据冗余以及保证数据一致性,一般都会按照一定的设计原则(数据库设计范式),把数据存入多个数据表中。在特征工程中,为了获得足够的有效信息,需要在多个表中取出数据,因此需要基于多表进行特征工程。
-**表一:主表,用户交易表 t1**
+**主表:用户交易表 t1**
-| Field | Type | Description |
-| ---------- | --------- | --------------------------- |
-| id | BIGINT | 样本ID,每一条样本拥有唯一ID |
-| uid | STRING | 用户ID |
-| mid | STRING | 商户ID |
-| cardno | STRING | 卡号 |
+| Field | Type | Description |
+| ---------- | --------- |-------------------------|
+| id | BIGINT | 样本ID,每一条样本拥有唯一ID |
+| uid | STRING | 用户ID |
+| mid | STRING | 商户ID |
+| cardno | STRING | 卡号 |
| trans_time | TIMESTAMP | 交易时间 |
| trans_amt | DOUBLE | 交易金额 |
| trans_type | STRING | 交易类型 |
-| province | STRING | 省份 |
-| city | STRING | 城市 |
-| label | BOOL | 样本label, true\|false |
+| province | STRING | 省份 |
+| city | STRING | 城市 |
+| label | BOOL | 样本label, `true`或`false` |
-**副表:表二,商户流水表 t2**
+**副表:商户流水表 t2**
| Field | Type | Description |
| ------------- | --------- | ---------------------- |
@@ -36,22 +37,22 @@
| purchase_amt | DOUBLE | 消费金额 |
| purchase_type | STRING | 消费类型:现金、信用卡 |
-在传统关系数据库中,为了取得多表的信息,最常用的方式是使用 join 进行拼接。但是对于特征工程的需求来说,数据库的 join 并不能非常高效的满足需求。最主要的原因是我们的主表样本表有一个用于模型训练的 label 列,其每一个值只能对应一行数据记录。所以实际中我们希望在 join 以后,结果表格的行数需要和主表的行数保持一致。
+在传统关系数据库中,为了取得多表的信息,最常用的方式是使用 join 进行拼接。但是数据库的 join 并不能非常高效地满足特征工程的需求。最主要的原因是我们的主表样本表有一个用于模型训练的 label 列,其每一个值只能对应一行数据记录。我们希望在 join 以后,结果表格的行数和主表的行数保持一致。
## 2. 副表单行特征
## 2.1 LAST JOIN
-OpenMLDB 目前支持`LAST JOIN`来进行类似数据库的 join 操作。LAST JOIN 可以看作一种特殊的 LEFT JOIN。在满足 JOIN 条件的前提下,左表的每一行拼取一条符合条件的最后一行。LAST JOIN分为无序拼接,和有序拼接。我们用更简单的表为例,假设表 s1,s2 的 schema 均为
+OpenMLDB 目前支持`LAST JOIN`来进行类似数据库的 join 操作。LAST JOIN 可以看作一种特殊的 LEFT JOIN。在满足 JOIN 条件的前提下,左表的每一行拼取右表符合条件的最后一行。LAST JOIN分为无序拼接和有序拼接。
+用简单的表为例,假设表 s1,s2 的 schema 均为
```sql
(id int, col1 string, std_ts timestamp)
```
-那么,我们可以做这样的join操作:
+那么,可以进行如下JOIN操作:
```sql
--- des c: 基于 ORDER BY 的有序 LAST JOIN 拼接
SELECT * FROM s1 LAST JOIN s2 ORDER BY s2.std_ts ON s1.col1 = s2.col1;
```
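+
+作为补充,下面给出一个无序拼接的示例(仍假设使用上述 s1、s2 两张表,不指定 ORDER BY):
+
+```sql
+-- 无序 LAST JOIN:为左表每一行拼取右表中符合 JOIN 条件的最后一条记录
+SELECT * FROM s1 LAST JOIN s2 ON s1.col1 = s2.col1;
+```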
@@ -63,14 +64,14 @@ SELECT * FROM s1 LAST JOIN s2 ORDER BY s2.std_ts ON s1.col1 = s2.col1;
## 3. 副表多行聚合特征
-OpenMLDB 针对副表拼接场景,扩展了标准的 WINDOW 语法,新增了 [WINDOW UNION](http://docs-cn.openmldb.ai/2620896) 的特性,支持从副表拼接多条数据形成副表窗口。在副表拼接窗口的基础上,可以方便构建副表多行聚合特征。同样地,构造副表多行聚合特征也需要完成两个步骤:
+OpenMLDB 针对副表拼接场景,扩展了标准的 WINDOW 语法,新增了 [WINDOW UNION](../reference/sql/dql/WINDOW_CLAUSE.md#window-union) 的特性,支持从副表拼接多条数据形成副表窗口。在副表拼接窗口的基础上,可以方便构建副表多行聚合特征。同样地,构造副表多行聚合特征也需要完成两个步骤:
- 步骤一:定义副表拼接窗口。
- 步骤二:在副表拼接窗口上构造副表多行聚合特征。
## 3.1 步骤一: 定义副表拼接窗口
-主表的每一个样本行都可以从副表中按某列拼接多行数据,并允许定义拼接数据的时间区间或者条数区间。我们通过特殊的窗口语法 WINDOW UNION 来定义副表拼接条件和区间范围。为了方便理解,我们将这种的窗口我们称之为副表拼接窗口。
+主表的每一个样本行都可以从副表中按某列拼接多行数据,并允许定义拼接数据的时间区间或者条数区间。我们通过特殊的窗口语法 WINDOW UNION 来定义副表拼接条件和区间范围。为了便于理解,我们将这种窗口称之为**副表拼接窗口**。
副表拼接窗口的语法定义为:
@@ -78,51 +79,58 @@ OpenMLDB 针对副表拼接场景,扩展了标准的 WINDOW 语法,新增了
window window_name as (UNION other_table PARTITION BY key_col ORDER BY order_col ROWS_RANGE|ROWS BETWEEN StartFrameBound AND EndFrameBound)
```
-其中,最基本的不可或缺的语法元素包括:
+其中,不可或缺的语法元素包括:
- `UNION other_table`: `other_table` 是指进行 WINDOW UNION 的副表。 主表和副表需要保持schema一致。大部分情况下,主表和副表的schema都是不同的。因此,我们可以通过对主表和副表进行列筛选和默认列配置来保证参与窗口计算的主表和副表schema一致。列筛选还可以去掉无用列,只在关键列上做 WINDOW UNION 和聚合。
- `PARTITION BY key_col`: 表示按列 `key_col` 从副表拼接匹配数据。
-- `ORDER BY order_col`: 表示副表拼接数据集按照`order_col`列进行排序
+- `ORDER BY order_col`: 表示副表拼接数据集按照`order_col`列进行排序。
- `ROWS_RANGE BETWEEN StartFrameBound AND EndFrameBound`: 表示副表拼接窗口的时间区间
-- - `StartFrameBound`表示该窗口的上界。
+ - `StartFrameBound`表示该窗口的上界。
- - - `UNBOUNDED PRECEDING`: 无上界。
- - `time_expression PRECEDING`: 如果是时间区间,可以定义时间偏移,如`30d preceding`表示窗口上界为当前行的时间-30天。
+ - `UNBOUNDED PRECEDING`: 无上界。
+ - `time_expression PRECEDING`: 如果是时间区间,可以定义时间偏移,如`30d preceding`表示窗口上界为当前行的时间的前30天。
- `EndFrameBound`表示该时间窗口的下界。
- - - `CURRENT ROW`: 当前行
- - `time_expression PRECEDING`: 如果是时间区间,可以定义时间偏移,如`1d PRECEDING`。这表示窗口下界为当前行的时间-1天。
+ - `CURRENT ROW`: 当前行
+ - `time_expression PRECEDING`: 如果是时间区间,可以定义时间偏移,如`1d PRECEDING`。这表示窗口下界为当前行的时间的前1天。
-- `ROWS BETWEEN StartFrameBound AND EndFrameBound`: 表示副表拼接窗口的时间区间
+- `ROWS BETWEEN StartFrameBound AND EndFrameBound`: 表示副表拼接窗口的条数区间
-- - `StartFrameBound`表示该窗口的上界。
+ - `StartFrameBound`表示该窗口的上界。
- - - `UNBOUNDED PRECEDING`: 无上界。
- - `number PRECEDING`: 如果是条数区间,可以定义时间条数。如,`100 PRECEDING`表示窗口上界为的当前行的前100行。
+ - `UNBOUNDED PRECEDING`: 无上界。
+ - `number PRECEDING`: 如果是条数区间,可以定义条数。如,`100 PRECEDING`表示窗口上界为当前行的前100行。
- `EndFrameBound`表示该时间窗口的下界。
- - - `CURRENT ROW`: 当前行
- - `number PRECEDING`: 如果是条数窗口,可以定义时间条数。如,`1 PRECEDING`表示窗口上界为的当前行的前1行。
+ - `CURRENT ROW`: 当前行
+ - `number PRECEDING`: 如果是条数窗口,可以定义条数。如,`1 PRECEDING`表示窗口下界为当前行的前1行。
+
+```{note}
- 配置窗口区间界时,请注意:
-
-- - OpenMLDB 目前无法支持当前行以后的时间作为上界和下界。如`1d FOLLOWING`。换言之,我们只能处理历史时间窗口。这也基本满足大部分的特征工程的应用场景。
+ - OpenMLDB 目前无法支持当前行以后的时间作为上界和下界。如`1d FOLLOWING`。换言之,我们只能处理历史时间窗口。这也基本满足大部分的特征工程的应用场景。
- OpenMLDB 的下界时间必须>=上界时间
- OpenMLDB 的下界的条数必须<=上界条数
-
- `INSTANCE_NOT_IN_WINDOW`: 标记为副表拼接窗口。主表除了当前行以外,其他数据不进入窗口。
+- 更多语法和特性可以参考 [OpenMLDB窗口UNION参考手册](../reference/sql/dql/WINDOW_CLAUSE.md)。
+
+```
+
+### 示例
-更多语法和特性可以参考 [OpenMLDB窗口UNION参考手册](https://link.zhihu.com/?target=http%3A//docs-cn.openmldb.ai/2620896)。
+以下通过具体例子来展示 WINDOW UNION 的定义方式。
-以下通过具体例子来描述 WINDOW UNION 的拼接窗口定义操作。对于前面所述为用户交易表 t1,我们需要定义商户流水表 t2 的副表上拼接窗口,该拼接是基于 `mid` 进行。由于 t1 和 t2 的schema不同,所以我们首先分别从 t1 和 t2 抽取相同的列,对于不存在的列,可以配置缺省值。其中,`mid` 列用于两个表的拼接,所以是必须的;其次,时间戳的列(t1 中的 `trans_time`,t2 中的 `purchase_time`)包含时序信息,在定义时间窗口时候也是必须的;其余列按照聚合函数需要,进行必要的筛选保留。
+对于上文的用户交易表 t1,我们需要定义在商户流水表 t2 的副表上拼接窗口,该拼接基于 `mid` 进行。
+由于 t1 和 t2 的schema不同,所以我们首先分别从 t1 和 t2 抽取相同的列,对于在某个表中不存在的列,可以配置缺省值。
+其中,`mid` 列用于两个表的拼接,所以是必须的;其次,作为时间戳的列(t1 中的 `trans_time`,t2 中的 `purchase_time`)包含时序信息,在定义时间窗口的时候也是必须的;其余列按照聚合函数需要,进行必要的筛选保留。
-以下 SQL 和示意图为从 t1 抽取必要列,生成 t11。
+以下 SQL 和示意图展示了从 t1 抽取必要列,生成 t11。
```sql
(select id, mid, trans_time as purchase_time, 0.0 as purchase_amt, "" as purchase_type from t1) as t11
@@ -130,19 +138,20 @@ window window_name as (UNION other_table PARTITION BY key_col ORDER BY order_col
![img](images/t1_to_t11.jpg)
-以下 SQL 和示意图为从 t2 抽取必要列,生成 t22。
+以下 SQL 和示意图展示了从 t2 抽取必要列,生成 t22。
```sql
-(select 0L as id, mid, purchase_time, purchase_amt, purchase_type from t2) as t22
+(select 0 as id, mid, purchase_time, purchase_amt, purchase_type from t2) as t22
```
-![img](images/t2_to_t22.jpg)
+![img](images/t2_to_t22.png)
-可以看到,分别完成抽取以后生成的表格 t11 和 t22,已经具有了相同的 schema,两者可以进行逻辑上的 UNION 操作。但是在 OpenMLDB 中,WINDOW UNION 并不是真的为了进行传统数据库中的 UNION 操作,而是为了对于 t11 中的每一个样本行,去构建副表 t22 上的时间窗口。我们按照商户ID `mid` ,对 t11 中的每一行数据,从 t22 中获取对应的拼接数据,然后按消费时间(`purchase_time`) 排序,构造副表拼接窗口。比如我们定义一个 `w_t2_10d` 的窗口:不包含主表除了当前行以外的数据行,加上副表通过 `mid` 拼接上的十天以内的数据,示意图如下所示。可以看到,黄色和蓝色阴影部分,分别定义了样本 6 和样本 9 的副表拼接窗口。
+可以看到,抽取以后生成的表格 t11 和 t22,已经具有了相同的 schema,两者可以进行逻辑上的 UNION 操作。但是在 OpenMLDB 中,WINDOW UNION 并不是真的为了进行传统数据库中的 UNION 操作,而是为了对于 t11 中的每一个样本行,去构建副表 t22 上的时间窗口。
+我们按照商户ID `mid` ,对 t11 中的每一行数据,从 t22 中获取对应的拼接数据,然后按消费时间(`purchase_time`) 排序,构造副表拼接窗口。 比如定义一个 `w_t2_10d` 的窗口:不包含主表除了当前行以外的数据行,加上副表通过 `mid` 拼接上的十天以内的数据,示意图如下所示。 可以看到,黄色和蓝色阴影部分,分别定义了样本 6 和样本 9 的副表拼接窗口。
![img](images/t11_t22.jpg)
-该窗口定义过程的 SQL 脚本如下所示(注意,这还不是一个完整的 SQL):
+该窗口定义的 SQL 脚本如下所示(注意,这还不是一个完整的 SQL):
```sql
(SELECT id, mid, trans_time as purchase_time, 0.0 as purchase_amt, "" as purchage_type FROM t1) as t11
@@ -154,7 +163,7 @@ ROWS_RANGE BETWEEN 10d PRECEDING AND 1 PRECEDING INSTANCE_NOT_IN_WINDOW)
## 3.2 步骤二:构建副表多行聚合特征
-对于副表拼接窗口进行多行聚合函数加工,构造多行副表聚合特征,使得最后生成的行数和主表相同。以简单聚合函数为例,我们可以构造样本的副表拼接特征:商户的最近10天的零售总额`w10d_merchant_purchase_amt_sum`,商户的最近10天消费总次数`w10d_merchant_purchase_count`。以下 SQL 基于上面 3.1 中所定义的副表拼接窗口,构建多行聚合特征。
+对于副表拼接窗口进行多行聚合函数加工,构造多行副表聚合特征,使得最后生成的行数和主表相同。以简单聚合函数为例,我们可以构造样本的副表拼接特征:商户的最近10天的零售总额`w10d_merchant_purchase_amt_sum`,商户的最近10天消费总次数`w10d_merchant_purchase_count`。以下 SQL 基于 [3.1](#31-步骤一-定义副表拼接窗口) 中所定义的副表拼接窗口,构建多行聚合特征。
```sql
SELECT
@@ -173,7 +182,7 @@ ROWS_RANGE BETWEEN 10d PRECEDING AND 1 PRECEDING INSTANCE_NOT_IN_WINDOW)
## 4. 特征组构建
-一般而言,一个完整特征抽取脚本将抽取几十、上百,甚至几百个特征。我们可以根据特征类型、特征关联的表和窗口将这些特征分成若干组,然后将每一组特征放置到不同的SQL子查询里;最后将这些子查询按主表ID拼接在一起。本节,我们将承接前面的例子,演示如果将各种特征拼接在一起形成一个特征大宽表。
+一般而言,一个完整特征抽取脚本将抽取几十、上百,甚至几百个特征。我们可以根据特征类型、特征关联的表和窗口将这些特征分成若干组,然后将每一组特征放置到不同的SQL子查询里; 最后将这些子查询按主表ID拼接在一起。本节,我们将接着前面的例子,演示如何将各种特征拼接在一起形成一个特征大宽表。
首先,我们将特征分成3组:
diff --git a/docs/zh/use_case/JD_recommendation.md b/docs/zh/use_case/JD_recommendation.md
new file mode 100644
index 00000000000..199c9a84a28
--- /dev/null
+++ b/docs/zh/use_case/JD_recommendation.md
@@ -0,0 +1,601 @@
+# OpenMLDB + OneFlow: 高潜用户购买意向预测
+
+本文我们将以[京东高潜用户购买意向预测问题](https://jdata.jd.com/html/detail.html?id=1)为例,示范如何使用[OpenMLDB](https://github.com/4paradigm/OpenMLDB)和 [OneFlow](https://github.com/Oneflow-Inc/oneflow) 联合来打造一个完整的机器学习应用。
+
+如何从历史数据中找出规律,去预测用户未来的购买需求,让最合适的商品遇见最需要的人,是大数据应用在精准营销中的关键问题,也是所有电商平台在做智能化升级时所需要的核心技术。京东作为中国最大的自营式电商,沉淀了数亿的忠实用户,积累了海量的真实数据。本案例以京东商城真实的用户、商品和行为数据(脱敏后)为基础,通过数据挖掘的技术和机器学习的算法,构建用户购买商品的预测模型,输出高潜用户和目标商品的匹配结果,为精准营销提供高质量的目标群体,挖掘数据背后潜在的意义,为电商用户提供更简单、快捷、省心的购物体验。本案例使用OpenMLDB进行数据挖掘,使用OneFlow中的[DeepFM](https://github.com/Oneflow-Inc/models/tree/main/RecommenderSystems/deepfm)模型进行高性能训练推理,提供精准的商品推荐。全量数据[下载链接](https://openmldb.ai/download/jd-recommendation/JD_data.tgz)。
+
+本案例基于 OpenMLDB 集群版进行教程演示。注意,本文档使用的是预编译好的 docker 镜像。如果希望在自己编译和搭建的 OpenMLDB 环境下进行测试,需要配置使用我们[面向特征工程优化的 Spark 发行版](https://openmldb.ai/docs/zh/main/tutorial/openmldbspark_distribution.html)。请参考相关[编译](https://openmldb.ai/docs/zh/main/deploy/compile.html)(参考章节:“针对OpenMLDB优化的Spark发行版”)和[安装部署文档](https://openmldb.ai/docs/zh/main/deploy/install_deploy.html)(参考章节:“部署TaskManager” - “2 修改配置文件conf/taskmanager.properties”)。
+
+## 1. 环境准备和预备知识
+
+### 1.1 OneFlow工具包安装
+OneFlow工具依赖GPU的强大算力,所以请确保部署机器具备Nvidia GPU,并且保证驱动版本 >=460.X.X [驱动版本需支持CUDA 11.0](https://docs.nvidia.com/cuda/cuda-toolkit-release-notes/index.html#cuda-major-component-versions)。
+使用以下指令安装OneFlow:
+```bash
+conda activate oneflow
+python3 -m pip install -f https://staging.oneflow.info/branch/master/cu112 --pre oneflow
+```
+还需要安装以下Python工具包:
+```bash
+pip install psutil petastorm pandas sklearn
+```
+ 拉取Oneflow-serving镜像:
+```bash
+docker pull oneflowinc/oneflow-serving:nightly
+```
+```{note}
+注意,此处安装的为Oneflow nightly版本,此教程验证的版本commit如下:
+Oneflow:https://github.com/Oneflow-Inc/oneflow/tree/fcf205cf57989a5ecb7a756633a4be08444d8a28
+Oneflow-serving:https://github.com/Oneflow-Inc/serving/tree/ce5d667468b6b3ba66d3be6986f41f965e52cf16
+```
+
+### 1.2 拉取和启动 OpenMLDB Docker 镜像
+- 注意,请确保 Docker Engine 版本号 >= 18.03
+- 拉取 OpenMLDB docker 镜像,并且运行相应容器
+- 下载demo文件包,并映射demo文件夹至`/root/project`,这里我们使用的路径为`demodir=/home/gtest/demo`
+```bash
+export demodir=/home/gtest/demo
+docker run -dit --name=demo --network=host -v $demodir:/root/project 4pdosc/openmldb:0.5.2 bash
+docker exec -it demo bash
+```
+- 上述镜像预装了OpenMLDB的工具等,我们需要进一步安装OneFlow推理所需依赖。
+
+因为我们将在OpenMLDB的容器中嵌入OneFlow模型推理的预处理及调用,需要安装以下的依赖。
+```bash
+pip install tritonclient[all] xxhash geventhttpclient
+```
+
+```{note}
+注意,本教程以下的OpenMLDB部分的演示命令默认均在该已经启动的 docker 容器内运行。OneFlow命令默认在 1.1 安装的OneFlow环境下运行。
+```
+
+### 1.3 初始化环境
+
+```bash
+./init.sh
+```
+我们在镜像内提供了init.sh脚本帮助用户快速初始化环境,包括:
+- 配置 zookeeper
+- 启动集群版 OpenMLDB
+
+### 1.4 启动 OpenMLDB CLI 客户端
+```bash
+/work/openmldb/bin/openmldb --zk_cluster=127.0.0.1:2181 --zk_root_path=/openmldb --role=sql_client
+```
+```{note}
+注意,本教程大部分命令在 OpenMLDB CLI 下执行,为了跟普通 shell 环境做区分,在 OpenMLDB CLI 下执行的命令均使用特殊的提示符 `>` 。
+```
+
+### 1.5 预备知识:集群版的非阻塞任务
+集群版的部分命令是非阻塞任务,包括在线模式的 `LOAD DATA`,以及离线模式的 `LOAD DATA` ,`SELECT`,`SELECT INTO` 命令。提交任务以后可以使用相关的命令如 `SHOW JOBS`, `SHOW JOB` 来查看任务进度,详情参见离线任务管理文档。
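+
+下面是一个最简单的任务状态查询示例(其中的任务 ID `1` 仅为示意,请以 `SHOW JOBS` 实际返回的 ID 为准):
+```sql
+-- 查看当前所有任务及其状态
+SHOW JOBS;
+-- 查看指定任务的详细信息(此处的 1 为假设的任务 ID)
+SHOW JOB 1;
+```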
+
+
+## 2. 机器学习训练流程
+### 2.1 流程概览
+使用OpenMLDB+OneFlow进行机器学习训练可总结为以下大致步骤:使用 OpenMLDB 进行离线特征抽取、预处理特征数据集以配合模型要求、使用 OneFlow 进行模型训练。
+接下来会介绍每一个步骤的具体操作细节。
+
+### 2.2 使用OpenMLDB进行离线特征抽取
+#### 2.2.1 创建数据库和数据表
+以下命令均在 OpenMLDB CLI 环境下执行。
+```sql
+> CREATE DATABASE JD_db;
+> USE JD_db;
+> CREATE TABLE action(reqId string, eventTime timestamp, ingestionTime timestamp, actionValue int);
+> CREATE TABLE flattenRequest(reqId string, eventTime timestamp, main_id string, pair_id string, user_id string, sku_id string, time bigint, split_id int, time1 string);
+> CREATE TABLE bo_user(ingestionTime timestamp, user_id string, age string, sex string, user_lv_cd string, user_reg_tm bigint);
+> CREATE TABLE bo_action(ingestionTime timestamp, pair_id string, time bigint, model_id string, type string, cate string, br string);
+> CREATE TABLE bo_product(ingestionTime timestamp, sku_id string, a1 string, a2 string, a3 string, cate string, br string);
+> CREATE TABLE bo_comment(ingestionTime timestamp, dt bigint, sku_id string, comment_num int, has_bad_comment string, bad_comment_rate float);
+```
+也可使用sql脚本(`/root/project/create_tables.sql`)运行:
+
+```
+/work/openmldb/bin/openmldb --zk_cluster=127.0.0.1:2181 --zk_root_path=/openmldb --role=sql_client < /root/project/create_tables.sql
+```
+#### 2.2.2 离线数据准备
+首先,切换到离线执行模式。接着,导入数据作为离线数据,用于离线特征计算。
+
+以下命令均在 OpenMLDB CLI 下执行。
+```sql
+> USE JD_db;
+> SET @@execute_mode='offline';
+> LOAD DATA INFILE '/root/project/data/JD_data/action/*.parquet' INTO TABLE action options(format='parquet', header=true, mode='overwrite');
+> LOAD DATA INFILE '/root/project/data/JD_data/flattenRequest_clean/*.parquet' INTO TABLE flattenRequest options(format='parquet', header=true, mode='overwrite');
+> LOAD DATA INFILE '/root/project/data/JD_data/bo_user/*.parquet' INTO TABLE bo_user options(format='parquet', header=true, mode='overwrite');
+> LOAD DATA INFILE '/root/project/data/JD_data/bo_action/*.parquet' INTO TABLE bo_action options(format='parquet', header=true, mode='overwrite');
+> LOAD DATA INFILE '/root/project/data/JD_data/bo_product/*.parquet' INTO TABLE bo_product options(format='parquet', header=true, mode='overwrite');
+> LOAD DATA INFILE '/root/project/data/JD_data/bo_comment/*.parquet' INTO TABLE bo_comment options(format='parquet', header=true, mode='overwrite');
+```
+或使用脚本执行,并通过以下命令快速查询jobs状态:
+
+```
+/work/openmldb/bin/openmldb --zk_cluster=127.0.0.1:2181 --zk_root_path=/openmldb --role=sql_client < /root/project/load_data.sql
+
+echo "show jobs;" | /work/openmldb/bin/openmldb --zk_cluster=127.0.0.1:2181 --zk_root_path=/openmldb --role=sql_client
+```
+```{note}
+注意,集群版 `LOAD DATA` 为非阻塞任务,可以使用命令 `SHOW JOBS` 查看任务运行状态,请等待任务运行成功( `state` 转至 `FINISHED` 状态),再进行下一步操作 。
+```
+
+#### 2.2.3 特征设计
+通常在设计特征前,用户需要根据机器学习的目标对数据进行分析,然后根据分析设计和调研特征。机器学习的数据分析和特征研究不是本文讨论的范畴,我们将不作展开。本文假定用户具备机器学习的基本理论知识,有解决机器学习问题的能力,能够理解SQL语法,并能够使用SQL语法构建特征。针对本案例,用户经过分析和调研设计了若干特征。
+
+请注意,在实际的机器学习特征调研过程中,科学家对特征进行反复试验,寻求模型效果最好的特征集。所以会不断的重复多次特征设计->离线特征抽取->模型训练过程,并不断调整特征以达到预期效果。
+
+#### 2.2.4 离线特征抽取
+用户在离线模式下,进行特征抽取,并将特征结果输出到`'/root/project/out/1'`目录下保存(对应映射为`$demodir/out/1`),以供后续的模型训练。 `SELECT` 命令对应了基于上述特征设计所产生的 SQL 特征计算脚本。以下命令均在 OpenMLDB CLI 下执行。
+```sql
+> USE JD_db;
+> select * from
+(
+select
+ `reqId` as reqId_1,
+ `eventTime` as flattenRequest_eventTime_original_0,
+ `reqId` as flattenRequest_reqId_original_1,
+ `pair_id` as flattenRequest_pair_id_original_24,
+ `sku_id` as flattenRequest_sku_id_original_25,
+ `user_id` as flattenRequest_user_id_original_26,
+ distinct_count(`pair_id`) over flattenRequest_user_id_eventTime_0_10_ as flattenRequest_pair_id_window_unique_count_27,
+ fz_top1_ratio(`pair_id`) over flattenRequest_user_id_eventTime_0_10_ as flattenRequest_pair_id_window_top1_ratio_28,
+ fz_top1_ratio(`pair_id`) over flattenRequest_user_id_eventTime_0s_14d_200 as flattenRequest_pair_id_window_top1_ratio_29,
+ distinct_count(`pair_id`) over flattenRequest_user_id_eventTime_0s_14d_200 as flattenRequest_pair_id_window_unique_count_32,
+ case when !isnull(at(`pair_id`, 0)) over flattenRequest_user_id_eventTime_0_10_ then count_where(`pair_id`, `pair_id` = at(`pair_id`, 0)) over flattenRequest_user_id_eventTime_0_10_ else null end as flattenRequest_pair_id_window_count_35,
+ dayofweek(timestamp(`eventTime`)) as flattenRequest_eventTime_dayofweek_41,
+ case when 1 < dayofweek(timestamp(`eventTime`)) and dayofweek(timestamp(`eventTime`)) < 7 then 1 else 0 end as flattenRequest_eventTime_isweekday_43
+from
+ `flattenRequest`
+ window flattenRequest_user_id_eventTime_0_10_ as (partition by `user_id` order by `eventTime` rows between 10 preceding and 0 preceding),
+ flattenRequest_user_id_eventTime_0s_14d_200 as (partition by `user_id` order by `eventTime` rows_range between 14d preceding and 0s preceding MAXSIZE 200))
+as out0
+last join
+(
+select
+ `flattenRequest`.`reqId` as reqId_3,
+ `action_reqId`.`actionValue` as action_actionValue_multi_direct_2,
+ `bo_product_sku_id`.`a1` as bo_product_a1_multi_direct_3,
+ `bo_product_sku_id`.`a2` as bo_product_a2_multi_direct_4,
+ `bo_product_sku_id`.`a3` as bo_product_a3_multi_direct_5,
+ `bo_product_sku_id`.`br` as bo_product_br_multi_direct_6,
+ `bo_product_sku_id`.`cate` as bo_product_cate_multi_direct_7,
+ `bo_product_sku_id`.`ingestionTime` as bo_product_ingestionTime_multi_direct_8,
+ `bo_user_user_id`.`age` as bo_user_age_multi_direct_9,
+ `bo_user_user_id`.`ingestionTime` as bo_user_ingestionTime_multi_direct_10,
+ `bo_user_user_id`.`sex` as bo_user_sex_multi_direct_11,
+ `bo_user_user_id`.`user_lv_cd` as bo_user_user_lv_cd_multi_direct_12
+from
+ `flattenRequest`
+ last join `action` as `action_reqId` on `flattenRequest`.`reqId` = `action_reqId`.`reqId`
+ last join `bo_product` as `bo_product_sku_id` on `flattenRequest`.`sku_id` = `bo_product_sku_id`.`sku_id`
+ last join `bo_user` as `bo_user_user_id` on `flattenRequest`.`user_id` = `bo_user_user_id`.`user_id`)
+as out1
+on out0.reqId_1 = out1.reqId_3
+last join
+(
+select
+ `reqId` as reqId_14,
+ max(`bad_comment_rate`) over bo_comment_sku_id_ingestionTime_0s_64d_100 as bo_comment_bad_comment_rate_multi_max_13,
+ min(`bad_comment_rate`) over bo_comment_sku_id_ingestionTime_0_10_ as bo_comment_bad_comment_rate_multi_min_14,
+ min(`bad_comment_rate`) over bo_comment_sku_id_ingestionTime_0s_64d_100 as bo_comment_bad_comment_rate_multi_min_15,
+ distinct_count(`comment_num`) over bo_comment_sku_id_ingestionTime_0s_64d_100 as bo_comment_comment_num_multi_unique_count_22,
+ distinct_count(`has_bad_comment`) over bo_comment_sku_id_ingestionTime_0s_64d_100 as bo_comment_has_bad_comment_multi_unique_count_23,
+ fz_topn_frequency(`has_bad_comment`, 3) over bo_comment_sku_id_ingestionTime_0s_64d_100 as bo_comment_has_bad_comment_multi_top3frequency_30,
+ fz_topn_frequency(`comment_num`, 3) over bo_comment_sku_id_ingestionTime_0s_64d_100 as bo_comment_comment_num_multi_top3frequency_33
+from
+ (select `eventTime` as `ingestionTime`, bigint(0) as `dt`, `sku_id` as `sku_id`, int(0) as `comment_num`, '' as `has_bad_comment`, float(0) as `bad_comment_rate`, reqId from `flattenRequest`)
+ window bo_comment_sku_id_ingestionTime_0s_64d_100 as (
+UNION (select `ingestionTime`, `dt`, `sku_id`, `comment_num`, `has_bad_comment`, `bad_comment_rate`, '' as reqId from `bo_comment`) partition by `sku_id` order by `ingestionTime` rows_range between 64d preceding and 0s preceding MAXSIZE 100 INSTANCE_NOT_IN_WINDOW),
+ bo_comment_sku_id_ingestionTime_0_10_ as (
+UNION (select `ingestionTime`, `dt`, `sku_id`, `comment_num`, `has_bad_comment`, `bad_comment_rate`, '' as reqId from `bo_comment`) partition by `sku_id` order by `ingestionTime` rows between 10 preceding and 0 preceding INSTANCE_NOT_IN_WINDOW))
+as out2
+on out0.reqId_1 = out2.reqId_14
+last join
+(
+select
+ `reqId` as reqId_17,
+ fz_topn_frequency(`br`, 3) over bo_action_pair_id_ingestionTime_0s_10h_100 as bo_action_br_multi_top3frequency_16,
+ fz_topn_frequency(`cate`, 3) over bo_action_pair_id_ingestionTime_0s_10h_100 as bo_action_cate_multi_top3frequency_17,
+ fz_topn_frequency(`model_id`, 3) over bo_action_pair_id_ingestionTime_0s_7d_100 as bo_action_model_id_multi_top3frequency_18,
+ distinct_count(`model_id`) over bo_action_pair_id_ingestionTime_0s_14d_100 as bo_action_model_id_multi_unique_count_19,
+ distinct_count(`model_id`) over bo_action_pair_id_ingestionTime_0s_7d_100 as bo_action_model_id_multi_unique_count_20,
+ distinct_count(`type`) over bo_action_pair_id_ingestionTime_0s_14d_100 as bo_action_type_multi_unique_count_21,
+ fz_topn_frequency(`type`, 3) over bo_action_pair_id_ingestionTime_0s_7d_100 as bo_action_type_multi_top3frequency_40,
+ fz_topn_frequency(`type`, 3) over bo_action_pair_id_ingestionTime_0s_14d_100 as bo_action_type_multi_top3frequency_42
+from
+ (select `eventTime` as `ingestionTime`, `pair_id` as `pair_id`, bigint(0) as `time`, '' as `model_id`, '' as `type`, '' as `cate`, '' as `br`, reqId from `flattenRequest`)
+ window bo_action_pair_id_ingestionTime_0s_10h_100 as (
+UNION (select `ingestionTime`, `pair_id`, `time`, `model_id`, `type`, `cate`, `br`, '' as reqId from `bo_action`) partition by `pair_id` order by `ingestionTime` rows_range between 10h preceding and 0s preceding MAXSIZE 100 INSTANCE_NOT_IN_WINDOW),
+ bo_action_pair_id_ingestionTime_0s_7d_100 as (
+UNION (select `ingestionTime`, `pair_id`, `time`, `model_id`, `type`, `cate`, `br`, '' as reqId from `bo_action`) partition by `pair_id` order by `ingestionTime` rows_range between 7d preceding and 0s preceding MAXSIZE 100 INSTANCE_NOT_IN_WINDOW),
+ bo_action_pair_id_ingestionTime_0s_14d_100 as (
+UNION (select `ingestionTime`, `pair_id`, `time`, `model_id`, `type`, `cate`, `br`, '' as reqId from `bo_action`) partition by `pair_id` order by `ingestionTime` rows_range between 14d preceding and 0s preceding MAXSIZE 100 INSTANCE_NOT_IN_WINDOW))
+as out3
+on out0.reqId_1 = out3.reqId_17
+INTO OUTFILE '/root/project/out/1';
+```
+此处仅一条命令,可以以阻塞方式执行,直接运行sql脚本`sync_select_out.sql`:
+
+```
+/work/openmldb/bin/openmldb --zk_cluster=127.0.0.1:2181 --zk_root_path=/openmldb --role=sql_client < /root/project/sync_select_out.sql
+```
+```{note}
+注意,集群版 `SELECT INTO` 为非阻塞任务,可以使用命令 `SHOW JOBS` 查看任务运行状态,请等待任务运行成功( `state` 转至 `FINISHED` 状态),再进行下一步操作 。
+```
+### 2.3 预处理数据集以配合DeepFM模型要求
+```{note}
+注意,以下命令在docker外执行,使用 1.1 节中安装的OneFlow运行环境。
+```
+根据 [DeepFM 论文](https://arxiv.org/abs/1703.04247), 类别特征和连续特征都被当作稀疏特征对待。
+
+> χ may include categorical fields (e.g., gender, location) and continuous fields (e.g., age). Each categorical field is represented as a vector of one-hot encoding, and each continuous field is represented as the value itself, or a vector of one-hot encoding after discretization.
+
+进入demo文件夹,运行以下指令进行数据处理
+```bash
+cd $demodir/openmldb_process/
+bash process_JD_out_full.sh $demodir/out/1
+```
+处理后生成的parquet数据集将存放在 `$demodir/openmldb_process/out`。数据信息将被打印如下,该信息将被填入训练的配置文件。
+```
+train samples = 11073
+val samples = 1351
+test samples = 1492
+table size array:
+4,26,16,4,11,809,1,1,5,3,17,16,7,13916,13890,13916,10000,3674,9119,7,2,13916,5,4,4,33,2,2,7,2580,3,5,13916,10,47,13916,365,17,132,32,37
+```
+
+### 2.4 启动OneFlow进行模型训练
+```{note}
+注意,以下命令在 1.1 节中安装的OneFlow运行环境中运行。
+```
+#### 2.4.1 修改对应`train_deepfm.sh`配置文件
+注意根据上一节所打印出的数据信息更新配置文件。具体包括`num_train_samples`,`num_val_samples`,`num_test_samples`和`table_size_array`等。
+```bash
+cd $demodir/oneflow_process/
+```
+```bash
+#!/bin/bash
+DEVICE_NUM_PER_NODE=1
+demodir="$1"
+DATA_DIR=$demodir/openmldb_process/out
+PERSISTENT_PATH=/$demodir/oneflow_process/persistent
+MODEL_SAVE_DIR=$demodir/oneflow_process/model_out
+MODEL_SERVING_PATH=$demodir/oneflow_process/model/embedding/1/model
+
+python3 -m oneflow.distributed.launch \
+--nproc_per_node $DEVICE_NUM_PER_NODE \
+--nnodes 1 \
+--node_rank 0 \
+--master_addr 127.0.0.1 \
+deepfm_train_eval_JD.py \
+--disable_fusedmlp \
+--data_dir $DATA_DIR \
+--persistent_path $PERSISTENT_PATH \
+--table_size_array "4,26,16,4,11,809,1,1,5,3,17,16,7,13916,13890,13916,10000,3674,9119,7,2,13916,5,4,4,33,2,2,7,2580,3,5,13916,10,47,13916,365,17,132,32,37" \
+--store_type 'cached_host_mem' \
+--cache_memory_budget_mb 1024 \
+--batch_size 1000 \
+--train_batches 75000 \
+--loss_print_interval 100 \
+--dnn "1000,1000,1000,1000,1000" \
+--net_dropout 0.2 \
+--learning_rate 0.001 \
+--embedding_vec_size 16 \
+--num_train_samples 11073 \
+--num_val_samples 1351 \
+--num_test_samples 1492 \
+--model_save_dir $MODEL_SAVE_DIR \
+--save_best_model \
+--save_graph_for_serving \
+--model_serving_path $MODEL_SERVING_PATH \
+--save_model_after_each_eval
+```
+#### 2.4.2 开始模型训练
+```bash
+bash train_deepfm.sh $demodir
+```
+生成模型将存放在`$demodir/oneflow_process/model_out`,用来serving的模型将存放在`$demodir/oneflow_process/model/embedding/1/model`。
+
+## 3. 模型上线流程
+### 3.1 流程概览
+使用OpenMLDB+OneFlow进行模型serving可总结为以下大致步骤:配置 OpenMLDB 进行在线特征抽取、配置并启动 OneFlow 推理服务、启动 OpenMLDB 预估服务并发送预估请求。
+接下来会介绍每一个步骤的具体操作细节。
+
+### 3.2 配置OpenMLDB进行在线特征抽取
+
+#### 3.2.1 特征抽取SQL脚本上线
+假定2.2.3节中所设计的特征在上一步的模型训练中产出的模型符合预期,那么下一步就是将该特征抽取SQL脚本部署到线上去,以提供在线的特征抽取。
+1. 重新启动 OpenMLDB CLI,以进行 SQL 上线部署。
+ ```bash
+ docker exec -it demo bash
+ /work/openmldb/bin/openmldb --zk_cluster=127.0.0.1:2181 --zk_root_path=/openmldb --role=sql_client
+ ```
+2. 执行上线部署,以下命令在 OpenMLDB CLI 内执行。
+```sql
+> USE JD_db;
+> SET @@execute_mode='online';
+> deploy demo select * from
+(
+select
+ `reqId` as reqId_1,
+ `eventTime` as flattenRequest_eventTime_original_0,
+ `reqId` as flattenRequest_reqId_original_1,
+ `pair_id` as flattenRequest_pair_id_original_24,
+ `sku_id` as flattenRequest_sku_id_original_25,
+ `user_id` as flattenRequest_user_id_original_26,
+ distinct_count(`pair_id`) over flattenRequest_user_id_eventTime_0_10_ as flattenRequest_pair_id_window_unique_count_27,
+ fz_top1_ratio(`pair_id`) over flattenRequest_user_id_eventTime_0_10_ as flattenRequest_pair_id_window_top1_ratio_28,
+ fz_top1_ratio(`pair_id`) over flattenRequest_user_id_eventTime_0s_14d_200 as flattenRequest_pair_id_window_top1_ratio_29,
+ distinct_count(`pair_id`) over flattenRequest_user_id_eventTime_0s_14d_200 as flattenRequest_pair_id_window_unique_count_32,
+ case when !isnull(at(`pair_id`, 0)) over flattenRequest_user_id_eventTime_0_10_ then count_where(`pair_id`, `pair_id` = at(`pair_id`, 0)) over flattenRequest_user_id_eventTime_0_10_ else null end as flattenRequest_pair_id_window_count_35,
+ dayofweek(timestamp(`eventTime`)) as flattenRequest_eventTime_dayofweek_41,
+ case when 1 < dayofweek(timestamp(`eventTime`)) and dayofweek(timestamp(`eventTime`)) < 7 then 1 else 0 end as flattenRequest_eventTime_isweekday_43
+from
+ `flattenRequest`
+ window flattenRequest_user_id_eventTime_0_10_ as (partition by `user_id` order by `eventTime` rows between 10 preceding and 0 preceding),
+ flattenRequest_user_id_eventTime_0s_14d_200 as (partition by `user_id` order by `eventTime` rows_range between 14d preceding and 0s preceding MAXSIZE 200))
+as out0
+last join
+(
+select
+ `flattenRequest`.`reqId` as reqId_3,
+ `action_reqId`.`actionValue` as action_actionValue_multi_direct_2,
+ `bo_product_sku_id`.`a1` as bo_product_a1_multi_direct_3,
+ `bo_product_sku_id`.`a2` as bo_product_a2_multi_direct_4,
+ `bo_product_sku_id`.`a3` as bo_product_a3_multi_direct_5,
+ `bo_product_sku_id`.`br` as bo_product_br_multi_direct_6,
+ `bo_product_sku_id`.`cate` as bo_product_cate_multi_direct_7,
+ `bo_product_sku_id`.`ingestionTime` as bo_product_ingestionTime_multi_direct_8,
+ `bo_user_user_id`.`age` as bo_user_age_multi_direct_9,
+ `bo_user_user_id`.`ingestionTime` as bo_user_ingestionTime_multi_direct_10,
+ `bo_user_user_id`.`sex` as bo_user_sex_multi_direct_11,
+ `bo_user_user_id`.`user_lv_cd` as bo_user_user_lv_cd_multi_direct_12
+from
+ `flattenRequest`
+ last join `action` as `action_reqId` on `flattenRequest`.`reqId` = `action_reqId`.`reqId`
+ last join `bo_product` as `bo_product_sku_id` on `flattenRequest`.`sku_id` = `bo_product_sku_id`.`sku_id`
+ last join `bo_user` as `bo_user_user_id` on `flattenRequest`.`user_id` = `bo_user_user_id`.`user_id`)
+as out1
+on out0.reqId_1 = out1.reqId_3
+last join
+(
+select
+ `reqId` as reqId_14,
+ max(`bad_comment_rate`) over bo_comment_sku_id_ingestionTime_0s_64d_100 as bo_comment_bad_comment_rate_multi_max_13,
+ min(`bad_comment_rate`) over bo_comment_sku_id_ingestionTime_0_10_ as bo_comment_bad_comment_rate_multi_min_14,
+ min(`bad_comment_rate`) over bo_comment_sku_id_ingestionTime_0s_64d_100 as bo_comment_bad_comment_rate_multi_min_15,
+ distinct_count(`comment_num`) over bo_comment_sku_id_ingestionTime_0s_64d_100 as bo_comment_comment_num_multi_unique_count_22,
+ distinct_count(`has_bad_comment`) over bo_comment_sku_id_ingestionTime_0s_64d_100 as bo_comment_has_bad_comment_multi_unique_count_23,
+ fz_topn_frequency(`has_bad_comment`, 3) over bo_comment_sku_id_ingestionTime_0s_64d_100 as bo_comment_has_bad_comment_multi_top3frequency_30,
+ fz_topn_frequency(`comment_num`, 3) over bo_comment_sku_id_ingestionTime_0s_64d_100 as bo_comment_comment_num_multi_top3frequency_33
+from
+ (select `eventTime` as `ingestionTime`, bigint(0) as `dt`, `sku_id` as `sku_id`, int(0) as `comment_num`, '' as `has_bad_comment`, float(0) as `bad_comment_rate`, reqId from `flattenRequest`)
+ window bo_comment_sku_id_ingestionTime_0s_64d_100 as (
+UNION (select `ingestionTime`, `dt`, `sku_id`, `comment_num`, `has_bad_comment`, `bad_comment_rate`, '' as reqId from `bo_comment`) partition by `sku_id` order by `ingestionTime` rows_range between 64d preceding and 0s preceding MAXSIZE 100 INSTANCE_NOT_IN_WINDOW),
+ bo_comment_sku_id_ingestionTime_0_10_ as (
+UNION (select `ingestionTime`, `dt`, `sku_id`, `comment_num`, `has_bad_comment`, `bad_comment_rate`, '' as reqId from `bo_comment`) partition by `sku_id` order by `ingestionTime` rows between 10 preceding and 0 preceding INSTANCE_NOT_IN_WINDOW))
+as out2
+on out0.reqId_1 = out2.reqId_14
+last join
+(
+select
+ `reqId` as reqId_17,
+ fz_topn_frequency(`br`, 3) over bo_action_pair_id_ingestionTime_0s_10h_100 as bo_action_br_multi_top3frequency_16,
+ fz_topn_frequency(`cate`, 3) over bo_action_pair_id_ingestionTime_0s_10h_100 as bo_action_cate_multi_top3frequency_17,
+ fz_topn_frequency(`model_id`, 3) over bo_action_pair_id_ingestionTime_0s_7d_100 as bo_action_model_id_multi_top3frequency_18,
+ distinct_count(`model_id`) over bo_action_pair_id_ingestionTime_0s_14d_100 as bo_action_model_id_multi_unique_count_19,
+ distinct_count(`model_id`) over bo_action_pair_id_ingestionTime_0s_7d_100 as bo_action_model_id_multi_unique_count_20,
+ distinct_count(`type`) over bo_action_pair_id_ingestionTime_0s_14d_100 as bo_action_type_multi_unique_count_21,
+ fz_topn_frequency(`type`, 3) over bo_action_pair_id_ingestionTime_0s_7d_100 as bo_action_type_multi_top3frequency_40,
+ fz_topn_frequency(`type`, 3) over bo_action_pair_id_ingestionTime_0s_14d_100 as bo_action_type_multi_top3frequency_42
+from
+ (select `eventTime` as `ingestionTime`, `pair_id` as `pair_id`, bigint(0) as `time`, '' as `model_id`, '' as `type`, '' as `cate`, '' as `br`, reqId from `flattenRequest`)
+ window bo_action_pair_id_ingestionTime_0s_10h_100 as (
+UNION (select `ingestionTime`, `pair_id`, `time`, `model_id`, `type`, `cate`, `br`, '' as reqId from `bo_action`) partition by `pair_id` order by `ingestionTime` rows_range between 10h preceding and 0s preceding MAXSIZE 100 INSTANCE_NOT_IN_WINDOW),
+ bo_action_pair_id_ingestionTime_0s_7d_100 as (
+UNION (select `ingestionTime`, `pair_id`, `time`, `model_id`, `type`, `cate`, `br`, '' as reqId from `bo_action`) partition by `pair_id` order by `ingestionTime` rows_range between 7d preceding and 0s preceding MAXSIZE 100 INSTANCE_NOT_IN_WINDOW),
+ bo_action_pair_id_ingestionTime_0s_14d_100 as (
+UNION (select `ingestionTime`, `pair_id`, `time`, `model_id`, `type`, `cate`, `br`, '' as reqId from `bo_action`) partition by `pair_id` order by `ingestionTime` rows_range between 14d preceding and 0s preceding MAXSIZE 100 INSTANCE_NOT_IN_WINDOW))
+as out3
+on out0.reqId_1 = out3.reqId_17;
+```
+```
+/work/openmldb/bin/openmldb --zk_cluster=127.0.0.1:2181 --zk_root_path=/openmldb --role=sql_client < /root/project/deploy.sql
+```
+
+可使用如下命令确认deploy信息:
+```sql
+show deployment demo;
+```
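+如果想列出当前数据库下所有已上线的 deployment,也可以使用以下补充示例:
+```sql
+-- 列出 JD_db 下的全部 deployment
+SHOW DEPLOYMENTS;
+```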
+#### 3.2.2 在线数据准备
+首先,请切换到**在线**执行模式。接着在在线模式下,导入数据作为在线数据,用于在线特征计算。以下命令均在 OpenMLDB CLI 下执行。
+```sql
+> USE JD_db;
+> SET @@execute_mode='online';
+> LOAD DATA INFILE '/root/project/data/JD_data/action/*.parquet' INTO TABLE action options(format='parquet', header=true, mode='append');
+> LOAD DATA INFILE '/root/project/data/JD_data/flattenRequest_clean/*.parquet' INTO TABLE flattenRequest options(format='parquet', header=true, mode='append');
+> LOAD DATA INFILE '/root/project/data/JD_data/bo_user/*.parquet' INTO TABLE bo_user options(format='parquet', header=true, mode='append');
+> LOAD DATA INFILE '/root/project/data/JD_data/bo_action/*.parquet' INTO TABLE bo_action options(format='parquet', header=true, mode='append');
+> LOAD DATA INFILE '/root/project/data/JD_data/bo_product/*.parquet' INTO TABLE bo_product options(format='parquet', header=true, mode='append');
+> LOAD DATA INFILE '/root/project/data/JD_data/bo_comment/*.parquet' INTO TABLE bo_comment options(format='parquet', header=true, mode='append');
+```
+
+```
+/work/openmldb/bin/openmldb --zk_cluster=127.0.0.1:2181 --zk_root_path=/openmldb --role=sql_client < /root/project/load_online_data.sql
+```
+```{note}
+注意,集群版 `LOAD DATA` 为非阻塞任务,可以使用命令 `SHOW JOBS` 查看任务运行状态,请等待任务运行成功( `state` 转至 `FINISHED` 状态),再进行下一步操作 。
+```
+### 3.3 配置OneFlow推理服务
+#### 3.3.1 检查模型路径(`$demodir/oneflow_process/model`)中模型文件及组织方式是否正确
+```
+$ tree -L 5 model/
+model/
+└── embedding
+ ├── 1
+ │ └── model
+ │ ├── model.mlir
+ │ ├── module.dnn_layer.linear_layers.0.bias
+ │ │ ├── meta
+ │ │ └── out
+ │ ├── module.dnn_layer.linear_layers.0.weight
+ │ │ ├── meta
+ │ │ └── out
+ │ ├── module.dnn_layer.linear_layers.12.bias
+ │ │ ├── meta
+ │ │ └── out
+ │ ├── module.dnn_layer.linear_layers.12.weight
+ │ │ ├── meta
+ │ │ └── out
+ │ ├── module.dnn_layer.linear_layers.15.bias
+ │ │ ├── meta
+ │ │ └── out
+ │ ├── module.dnn_layer.linear_layers.15.weight
+ │ │ ├── meta
+ │ │ └── out
+ │ ├── module.dnn_layer.linear_layers.3.bias
+ │ │ ├── meta
+ │ │ └── out
+ │ ├── module.dnn_layer.linear_layers.3.weight
+ │ │ ├── meta
+ │ │ └── out
+ │ ├── module.dnn_layer.linear_layers.6.bias
+ │ │ ├── meta
+ │ │ └── out
+ │ ├── module.dnn_layer.linear_layers.6.weight
+ │ │ ├── meta
+ │ │ └── out
+ │ ├── module.dnn_layer.linear_layers.9.bias
+ │ │ ├── meta
+ │ │ └── out
+ │ ├── module.dnn_layer.linear_layers.9.weight
+ │ │ ├── meta
+ │ │ └── out
+ │ ├── module.embedding_layer.one_embedding.shadow
+ │ │ ├── meta
+ │ │ └── out
+ │ └── one_embedding_options.json
+ └── config.pbtxt
+ ```
+#### 3.3.2 确认`config.pbtxt`中的配置正确。
+ ```
+name: "embedding"
+backend: "oneflow"
+max_batch_size: 10000
+
+input [
+ {
+ name: "INPUT_0"
+ data_type: TYPE_INT64
+ dims: [ 41 ]
+ }
+]
+
+output [
+ {
+ name: "OUTPUT_0"
+ data_type: TYPE_FP32
+ dims: [ 1 ]
+ }
+]
+
+instance_group [
+ {
+ count: 1
+ kind: KIND_GPU
+ gpus: [ 0 ]
+ }
+]
+ ```
+ 其中`name`要和`config.pbtxt`所在目录的名字保持一致。
+
+#### 3.3.3 变更persistent路径
+变更`one_embedding_options.json`文件中的persistent table路径。将`embedding/kv_options/kv_store/persistent_table/path` 变更为映射到容器里面的persistent table的位置 `/root/demo/persistent`。
+```
+{
+ "embedding": [
+ {
+ "snapshot": "2022-09-29-03-27-44-953674",
+ "kv_options": {
+ "name": "sparse_embedding",
+ "key_type_size": 8,
+ "value_type_size": 4,
+ "value_type": "oneflow.float32",
+ "storage_dim": 51,
+ "kv_store": {
+ "caches": [
+ {
+ "policy": "lru",
+ "cache_memory_budget_mb": 1024,
+ "value_memory_kind": "device"
+ },
+ {
+ "policy": "full",
+ "capacity": 110477,
+ "value_memory_kind": "host"
+ }
+ ],
+ "persistent_table": {
+ "path": "/root/demo/persistent",
+ "physical_block_size": 4096,
+ "capacity_hint": 110477
+ }
+ },
+ "parallel_num": 1
+ }
+ }
+ ]
+}
+```
+
+### 3.4 启动推理服务
+#### 3.4.1 启动OneFlow推理服务
+```{note}
+注意,以下命令在 1.1 节中安装的OneFlow运行环境中运行。
+```
+使用以下命令启动OneFlow推理服务:
+```
+docker run --runtime=nvidia --rm --network=host \
+ -v $demodir/oneflow_process/model:/models \
+ -v $demodir/oneflow_process/persistent:/root/demo/persistent \
+ oneflowinc/oneflow-serving:nightly \
+ bash -c '/opt/tritonserver/bin/tritonserver --model-repository=/models'
+```
+若成功,将显示如下类似输出:
+```
+...
+I0929 07:28:34.281655 1 grpc_server.cc:4117] Started GRPCInferenceService at 0.0.0.0:8001
+I0929 07:28:34.282343 1 http_server.cc:2815] Started HTTPService at 0.0.0.0:8000
+I0929 07:28:34.324662 1 http_server.cc:167] Started Metrics Service at 0.0.0.0:8002
+
+```
+#### 3.4.2 启动OpenMLDB推理服务
+```{note}
+注意,以下命令在demo docker中运行。
+```
+OpenMLDB 的在线特征计算服务已通过 SQL 上线完成,OneFlow 推理服务也已经启动。这个 demo 将串联两者,在收到实时请求后,访问 OpenMLDB 进行特征抽取,再访问 OneFlow 推理服务,进行在线推理,最后返回推理结果。
+1. 如果尚未退出 OpenMLDB CLI,请使用 `quit` 命令退出 OpenMLDB CLI。
+2. 在普通命令行下启动预估服务:
+```bash
+cd /root/project/serving/openmldb_serving
+./start_predict_server.sh 0.0.0.0:9080
+```
+
+### 3.5 发送预估请求
+预估请求可在OpenMLDB的容器外执行。容器外部访问的具体信息可参见[IP 配置](https://openmldb.ai/docs/zh/main/reference/ip_tips.html)。
+在普通命令行下执行内置的 `predict.py` 脚本。该脚本发送一行请求数据到预估服务,接收返回的预估结果,并打印出来。
+```bash
+python $demodir/serving/predict.py
+```
+范例输出:
+```
+----------------ins---------------
+['200001_80005_2016-03-31 18:11:20' 1459419080000
+ '200001_80005_2016-03-31 18:11:20' '200001_80005' '80005' '200001' 1 1.0
+ 1.0 1 1 5 1 '200001_80005_2016-03-31 18:11:20' None None None None None
+ None None None None None None '200001_80005_2016-03-31 18:11:20'
+ 0.019200000911951065 0.0 0.0 2 2 '1,,NULL' '4,0,NULL'
+ '200001_80005_2016-03-31 18:11:20' ',NULL,NULL' ',NULL,NULL' ',NULL,NULL'
+ 1 1 1 ',NULL,NULL' ',NULL,NULL']
+---------------predict change of purchase -------------
+[[b'0.006222:0']]
+```
diff --git a/docs/zh/use_case/OpenMLDB_Byzer_taxi.md b/docs/zh/use_case/OpenMLDB_Byzer_taxi.md
new file mode 100644
index 00000000000..16499b95868
--- /dev/null
+++ b/docs/zh/use_case/OpenMLDB_Byzer_taxi.md
@@ -0,0 +1,275 @@
+# OpenMLDB + Byzer: 基于 SQL 打造端到端机器学习应用
+
+本文示范如何使用[OpenMLDB](https://github.com/4paradigm/OpenMLDB)和 [Byzer](https://www.byzer.org/home) 联合完成一个完整的机器学习应用。OpenMLDB在本例中接收Byzer发送的指令和数据,完成数据的实时特征计算,并将经特征工程处理后的数据集返回给Byzer,供其进行后续的机器学习训练和预测。
+
+## 1. 准备工作
+
+### 1.1 安装 OpenMLDB 引擎
+
+1. 本例使用的是运行在Docker容器中的OpenMLDB集群版。安装步骤详见[OpenMLDB快速上手](../quickstart/openmldb_quickstart.md)。
+2. 本例中,Byzer引擎需要从容器外部访问OpenMLDB服务,需要修改OpenMLDB的原始IP配置,修改方式详见[IP配置文档](../reference/ip_tips.md)。
+
+### 1.2 安装 Byzer 引擎和Byzer Notebook
+
+1. Byzer 引擎的安装步骤详见[Byzer Language官方文档](https://docs.byzer.org/#/byzer-lang/zh-cn/)
+
+2. 本例需要使用 Byzer 提供的[OpenMLDB 插件](https://github.com/byzer-org/byzer-extension/tree/master/byzer-openmldb)完成与 OpenMLDB 的消息传递。在Byzer中使用插件必须配置`streaming.datalake.path`项,详见[Byzer引擎配置说明-常用参数](https://docs.byzer.org/#/byzer-lang/zh-cn/installation/configuration/byzer-lang-configuration)。
+
+3. 本文使用 Byzer Notebook 进行演示,Byzer 引擎安装完成后,请安装Byzer Notebook(您也可以使用[VSCode中的Byzer插件](https://docs.byzer.org/#/byzer-lang/zh-cn/installation/vscode/byzer-vscode-extension-installation)连接您的Byzer 引擎)。关于Byzer Notebook,详见[Byzer Notebook官方文档](https://docs.byzer.org/#/byzer-notebook/zh-cn/)。其界面如下。
+
+ ![Byzer_Notebook](images/Byzer_Notebook.jpg)
+
+### 1.3 准备数据集
+
+本文使用的是Kaggle出租车行车时间数据集,若您的Byzer数据湖中没有该数据集,可以从以下网址获得:[Kaggle出租车行车时间预测问题](https://www.kaggle.com/c/nyc-taxi-trip-duration/overview)。将数据集下载到本地后,需要将其导入Byzer Notebook。
+
+## 2. 机器学习全流程
+
+### 2.1 加载原始数据集
+
+将原始数据集导入到 Byzer Notebook 数据目录的File System后,自动生成了`tmp/upload`存储路径。使用Byzer Lang的`load`命令加载该数据集。
+
+```sql
+load csv.`tmp/upload/train.csv` where delimiter=","
+and header = "true"
+as taxi_tour_table_train_simple;
+```
+
+### 2.2 将数据导入 OpenMLDB
+
+安装 OpenMLDB 插件
+
+```sql
+!plugin app add - "byzer-openmldb-3.0";
+```
+
+使用该插件连接 OpenMLDB 引擎。在Byzer Notebook中运行该代码块前,请确保OpenMLDB引擎已启动,并创建了名为`db1`的数据库。
+
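+如果尚未创建 `db1`,可以先在 OpenMLDB CLI 中执行以下示例 SQL(数据库名 `db1` 需与下文配置中的 `db` 参数保持一致):
+
+```sql
+-- 创建本例后续步骤所使用的数据库
+CREATE DATABASE db1;
+```
+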
+```sql
+run command as FeatureStoreExt.`` where
+zkAddress="172.17.0.2:7527"
+and `sql-0`='''
+SET @@execute_mode='offline';
+'''
+and `sql-1`='''
+SET @@job_timeout=20000000;
+'''
+and `sql-2`='''
+CREATE TABLE t1(id string, vendor_id int, pickup_datetime timestamp, dropoff_datetime timestamp, passenger_count int, pickup_longitude double, pickup_latitude double, dropoff_longitude double, dropoff_latitude double, store_and_fwd_flag string, trip_duration int);
+'''
+and `sql-3`='''
+LOAD DATA INFILE 'tmp/upload/train.csv'
+INTO TABLE t1 options(format='csv',header=true,mode='append');
+'''
+and db="db1"
+and action="ddl";
+```
+
+```{note}
+1. zkAddress的端口号应与配置IP时的conf文件夹下各相关文件保持一致
+2. 可以通过 $BYZER_HOME\conf 路径下的 \byzer.properties.override 文件中的属性`streaming.plugin.clzznames`检查byzer-openmldb-3.0插件是否成功安装。如果成功安装了该插件,可以看到主类名`tech.mlsql.plugins.openmldb.ByzerApp`。
+3. 若未成功安装,可以手动下载jar包再以[离线方式](https://docs.byzer.org/#/byzer-lang/zh-cn/extension/installation/offline_install)安装配置。
+```
+
+
+
+### 2.3 进行实时特征计算
+
+本例借用[OpenMLDB + LightGBM:出租车行程时间预测](./taxi_tour_duration_prediction.md)2.3节中设计的特征进行特征计算,并将处理后的数据集导出为本地csv文件。
+
+```sql
+run command as FeatureStoreExt.`` where
+zkAddress="172.17.0.2:7527"
+and `sql-0`='''
+SET @@execute_mode='offline';
+'''
+and `sql-1`='''
+SET @@job_timeout=20000000;
+'''
+and `sql-2`='''
+SELECT trip_duration, passenger_count,
+sum(pickup_latitude) OVER w AS vendor_sum_pl,
+max(pickup_latitude) OVER w AS vendor_max_pl,
+min(pickup_latitude) OVER w AS vendor_min_pl,
+avg(pickup_latitude) OVER W AS vendor_avg_pl,
+sum(pickup_latitude) OVER w2 AS pc_sum_pl,
+max(pickup_latitude) OVER w2 AS pc_max_pl,
+min(pickup_latitude) OVER w2 AS pc_min_pl,
+avg(pickup_latitude) OVER w2 AS pc_avg_pl,
+count(vendor_id) OVER w2 AS pc_cnt,
+count(vendor_id) OVER w AS vendor_cnt
+FROM t1
+WINDOW w AS(PARTITION BY vendor_id ORDER BY pickup_datetime ROWS_RANGE BETWEEN 1d PRECEDING AND CURRENT ROW),
+w2 AS(PARTITION BY passenger_count ORDER BY pickup_datetime ROWS_RANGE BETWEEN 1d PRECEDING AND CURRENT ROW) INTO OUTFILE '/tmp/feature_data';
+'''
+and db="db1"
+and action="ddl";
+```
+
+
+
+### 2.4 数据向量化
+
+在Byzer Notebook中将所有int 类型字段都转化为 double。
+
+```sql
+select *,
+cast(passenger_count as double) as passenger_count_d,
+cast(pc_cnt as double) as pc_cnt_d,
+cast(vendor_cnt as double) as vendor_cnt_d
+from feature_data
+as new_feature_data;
+```
+
+接着把所有字段合并成一个向量。
+
+```sql
+select vec_dense(array(
+passenger_count_d,
+vendor_sum_pl,
+vendor_max_pl,
+vendor_min_pl,
+vendor_avg_pl,
+pc_sum_pl,
+pc_max_pl,
+pc_min_pl,
+pc_avg_pl,
+pc_cnt_d,
+vendor_cnt
+)) as features, cast(trip_duration as double) as label
+from new_feature_data
+as trainning_table;
+
+```
+
+
+
+### 2.5 模型训练
+
+使用Byzer Lang的`train`命令和其[内置的线性回归算法](https://docs.byzer.org/#/byzer-lang/zh-cn/ml/algs/linear_regression)训练模型,并将训练好的模型保存到/model/tax-trip路径下。
+
+```sql
+train trainning_table as LinearRegression.`/model/tax-trip` where
+
+keepVersion="true"
+
+and evaluateTable="trainning_table"
+and `fitParam.0.labelCol`="label"
+and `fitParam.0.featuresCol`= "features"
+and `fitParam.0.maxIter`="50";
+
+```
+
+```{note}
+可以使用`!show et/params/LinearRegression;`命令查看Byzer内置的线性回归模型的相关参数。
+```
+
+### 2.6 特征部署
+
+将特征计算逻辑部署到OpenMLDB上:将最满意的一次特征计算的代码拷贝后修改执行模式为online即可。本例使用的是前文展示的特征工程中的代码,仅作展示,或许并非表现最优。
+
+```sql
+run command as FeatureStoreExt.`` where
+zkAddress="172.17.0.2:7527"
+and `sql-0`='''
+SET @@execute_mode='online';
+'''
+and `sql-1`='''
+SET @@job_timeout=20000000;
+'''
+and `sql-2`='''
+SELECT trip_duration, passenger_count,
+sum(pickup_latitude) OVER w AS vendor_sum_pl,
+max(pickup_latitude) OVER w AS vendor_max_pl,
+min(pickup_latitude) OVER w AS vendor_min_pl,
+avg(pickup_latitude) OVER W AS vendor_avg_pl,
+sum(pickup_latitude) OVER w2 AS pc_sum_pl,
+max(pickup_latitude) OVER w2 AS pc_max_pl,
+min(pickup_latitude) OVER w2 AS pc_min_pl,
+avg(pickup_latitude) OVER w2 AS pc_avg_pl,
+count(vendor_id) OVER w2 AS pc_cnt,
+count(vendor_id) OVER w AS vendor_cnt
+FROM t1
+WINDOW w AS(PARTITION BY vendor_id ORDER BY pickup_datetime ROWS_RANGE BETWEEN 1d PRECEDING AND CURRENT ROW),
+w2 AS(PARTITION BY passenger_count ORDER BY pickup_datetime ROWS_RANGE BETWEEN 1d PRECEDING AND CURRENT ROW) INTO OUTFILE '/tmp/feature_data_test';
+'''
+and db="db1"
+and action="ddl";
+
+```
+
+导入在线数据,本例使用的是原始数据集中的test集。生产环境中可以接入实时数据源。
+
+```sql
+run command as FeatureStoreExt.`` where
+zkAddress="172.17.0.2:7527"
+and `sql-0`='''
+SET @@execute_mode='online';
+'''
+and `sql-1`='''
+SET @@job_timeout=20000000;
+'''
+and `sql-2`='''
+CREATE TABLE t1(id string, vendor_id int, pickup_datetime timestamp, dropoff_datetime timestamp, passenger_count int, pickup_longitude double, pickup_latitude double, dropoff_longitude double, dropoff_latitude double, store_and_fwd_flag string, trip_duration int);
+'''
+and `sql-3`='''
+LOAD DATA INFILE 'tmp/upload/test.csv'
+INTO TABLE t1 options(format='csv',header=true,mode='append');
+'''
+and db="db1"
+and action="ddl";
+```
+
+
+
+### 2.7 模型部署
+
+在Byzer Notebook中将之前保存的、训练好的模型注册为一个可以直接使用的函数。
+
+```sql
+register LinearRegression.`/model/tax-trip` as tax_trip_model_predict;
+```
+
+### 2.8 预测
+
+将经OpenMLDB处理后的在线数据集的所有int类型字段转成double。
+
+```sql
+select *,
+cast(passenger_count as double) as passenger_count_d,
+cast(pc_cnt as double) as pc_cnt_d,
+cast(vendor_cnt as double) as vendor_cnt_d
+from feature_data_test
+as new_feature_data_test;
+```
+
+再进行向量化。
+
+```sql
+select vec_dense(array(
+passenger_count_d,
+vendor_sum_pl,
+vendor_max_pl,
+vendor_min_pl,
+vendor_avg_pl,
+pc_sum_pl,
+pc_max_pl,
+pc_min_pl,
+pc_avg_pl,
+pc_cnt_d,
+vendor_cnt
+)) as features
+from new_feature_data_test
+as testing_table;
+```
+
+使用处理后的测试集进行预测。
+
+```sql
+select tax_trip_model_predict(testing_table) as predict_label;
+```
+
diff --git a/docs/zh/use_case/airflow_provider_demo.md b/docs/zh/use_case/airflow_provider_demo.md
new file mode 100644
index 00000000000..a204a36faee
--- /dev/null
+++ b/docs/zh/use_case/airflow_provider_demo.md
@@ -0,0 +1,123 @@
+# Airflow OpenMLDB Provider 使用案例
+我们提供了[Airflow OpenMLDB Provider](https://github.com/4paradigm/OpenMLDB/tree/main/extensions/airflow-provider-openmldb),使得在Airflow DAG中能更容易地使用OpenMLDB。
+
+本案例将通过Airflow编排[TalkingData](talkingdata_demo)的训练与上线过程。
+
+## TalkingData DAG
+
+Airflow中需要编写DAG文件,本案例使用example中的[example_openmldb_complex.py](https://github.com/4paradigm/OpenMLDB/blob/main/extensions/airflow-provider-openmldb/openmldb_provider/example_dags/example_openmldb_complex.py)。
+
+![airflow dag](images/airflow_dag.png)
+
+DAG流程如上图所示,首先建表,然后进行离线数据导入与特征抽取,如果效果良好(auc>=99.0),就进行SQL和模型的上线。反之,则报告失败。
+
+在接下来的演示中,可以将这个DAG直接导入Airflow并运行。
+
+## 演示
+
+我们导入上述的DAG完成TalkingData Demo中的特征计算与上线,并使用TalkingData Demo的predict server来进行上线后的实时推理测试。
+
+### 准备
+
+#### 下载DAG
+
+除了DAG文件,还需要训练的脚本,所以我们提供了[下载包](https://openmldb.ai/download/airflow_demo/airflow_demo_files.tar.gz),可以直接下载。如果想要使用最新版本,请在[github example_dags](https://github.com/4paradigm/OpenMLDB/tree/main/extensions/airflow-provider-openmldb/openmldb_provider/example_dags)中获取。
+
+```
+wget https://openmldb.ai/download/airflow_demo/airflow_demo_files.tar.gz
+tar zxf airflow_demo_files.tar.gz
+ls airflow_demo_files
+```
+#### 启动镜像
+
+我们推荐使用docker镜像直接启动OpenMLDB,并在docker内部安装启动Airflow。
+
+登录Airflow Web需要对外端口,所以此处暴露容器的端口。并且直接将上一步下载的文件映射到`/work/airflow/dags`,接下来Airflow将加载此文件夹的DAG。
+
+```
+docker run -p 8080:8080 -v `pwd`/airflow_demo_files:/work/airflow/dags -it 4pdosc/openmldb:0.6.3 bash
+```
+
+#### 下载安装Airflow与Airflow OpenMLDB Provider
+在docker容器中,执行:
+```
+pip3 install airflow-provider-openmldb
+```
+由于airflow-provider-openmldb依赖airflow,所以会一起下载。
+
+#### 源数据准备
+由于在DAG中导入数据用的文件为`/tmp/train_sample.csv`,所以我们需要将sample数据文件拷贝到tmp目录。
+```
+cp /work/talkingdata/train_sample.csv /tmp/
+```
+
+### 步骤1: 启动OpenMLDB与Airflow
+以下命令,将启动OpenMLDB cluster,支持上线并测试的predict server,与Airflow standalone。
+```
+/work/init.sh
+python3 /work/talkingdata/predict_server.py --no-init > predict.log 2>&1 &
+export AIRFLOW_HOME=/work/airflow
+cd /work/airflow
+airflow standalone
+```
+
+Airflow standalone运行输出将提示登录用户名和密码,如下图所示。
+
+![airflow login](images/airflow_login.png)
+
+登录Airflow Web界面 `http://localhost:8080`,并输入用户名和密码。
+
+```{caution}
+`airflow standalone`为前台程序,退出即airflow退出。你可以在dag运行完成后再退出airflow进行步骤3的测试,或者将airflow进程放入后台。
+```
+
+### 步骤2: 运行DAG
+在Airflow Web中点击DAG example_openmldb_complex,可以点击`Code`查看DAG的详情,见下图。
+
+![dag home](images/dag_home.png)
+
+在Code中可以看到使用的`openmldb_conn_id`,如下图所示。DAG不是直接使用OpenMLDB的地址,而是使用connection,所以我们需要新建一个同名的connection。
+
+![dag code](images/dag_code.png)
+
+#### 创建connection
+在管理界面中点击connection。
+![connection](images/connection.png)
+
+再添加connection。
+![add connection](images/add_connection.png)
+
+Airflow OpenMLDB Provider是连接OpenMLDB Api Server的,所以此处配置中填入OpenMLDB Api Server的地址,而不是zookeeper地址。
+
+![connection settings](images/connection_settings.png)
+
+创建完成后的connection如下图所示。
+![display](images/connection_display.png)
+
+#### Run the DAG
+Running the DAG completes one round of model training, SQL deployment, and model deployment. A successful run looks like the figure below.
+![dag run](images/dag_run.png)
+
+### Step 3: Test
+
+If Airflow is running in the foreground in the container, you can exit it now; the following tests do not depend on Airflow.
+
+#### Online Data Import
+The Airflow DAG has deployed the SQL and the model, but the online storage has no data yet, so we need to do one online data import.
+```
+curl -X POST http://127.0.0.1:9080/dbs/example_db -d'{"mode":"online", "sql":"load data infile \"file:///tmp/train_sample.csv\" into table example_table options(mode=\"append\");"}'
+```
+
+This is an asynchronous operation, but since the data set is small it finishes quickly. The status of the import job can also be checked with `SHOW JOBS`.
+```
+curl -X POST http://127.0.0.1:9080/dbs/example_db -d'{"mode":"online", "sql":"show jobs"}'
+```
+
+#### Prediction Test
+Run the prediction script to make a prediction; it will use the newly deployed SQL and model.
+```
+python3 /work/talkingdata/predict.py
+```
+The result is shown below.
+![result](images/airflow_test_result.png)
+
diff --git a/docs/zh/use_case/dolphinscheduler_task_demo.md b/docs/zh/use_case/dolphinscheduler_task_demo.md
index 1e4e62818a5..838f1416536 100644
--- a/docs/zh/use_case/dolphinscheduler_task_demo.md
+++ b/docs/zh/use_case/dolphinscheduler_task_demo.md
@@ -29,35 +29,47 @@ OpenMLDB 希望能达成开发即上线的目标,让开发回归本质,而
**运行 OpenMLDB 镜像**
-推荐在我们提供的 OpenMLDB 镜像内进行演示测试:
+The test can be run on macOS or Linux; we recommend running the demo inside the OpenMLDB image we provide. We will start OpenMLDB and DolphinScheduler in this container and expose DolphinScheduler's web port:
```
-docker run -it 4pdosc/openmldb:0.5.2 bash
+docker run -it -p 12345:12345 4pdosc/openmldb:0.6.3 bash
```
```{attention}
-DolphinScheduler 需要操作系统的用户,并且该用户需要有 sudo 权限。所以推荐在 OpenMLDB 容器内下载并启动 DolphinScheduler。否则,请准备有sudo权限的操作系统用户。
+DolphinScheduler requires a tenant to be configured, which is an operating-system user, and that user needs sudo privileges. We therefore recommend downloading and starting DolphinScheduler inside the OpenMLDB container. Otherwise, prepare an operating-system user with sudo privileges.
```
-在容器中,可以直接运行以下命令启动 OpenMLDB cluster。
+Our docker image does not currently have sudo installed, and DolphinScheduler uses sudo when running workflows, so install it in the container first:
```
-./init.sh
+apt update && apt install sudo
```
-**运行 Predict Server**
+DolphinScheduler runs tasks with `sh`, but the default `sh` in our docker image is `dash`; change it to `bash`:
+```
+dpkg-reconfigure dash
+```
+Answer `no` when prompted.
-我们将完成一个导入数据,离线训练,训练成功后模型上线的工作流。模型上线的部分,可以使用简单的predict server,见[predict server source](https://raw.githubusercontent.com/4paradigm/OpenMLDB/main/demo/talkingdata-adtracking-fraud-detection/predict_server.py)。你可以将它下载至本地,并运行至后台:
+**Run the OpenMLDB Cluster and the Predict Server**
+
+Run the following command in the container to start the OpenMLDB cluster:
```
-python3 predict_server.py --no-init > predict.log 2>&1 &
+/work/init.sh
+```
+
+We will build a workflow that imports data, trains offline, and deploys the model online once training succeeds. The model deployment part can be done with the predict server in `/work/talkingdata`. Run it in the background:
+```
+python3 /work/talkingdata/predict_server.py --no-init > predict.log 2>&1 &
```
**运行 DolphinScheduler**
-DolphinScheduler 支持 OpenMLDB Task 的版本,请下载[dolphinscheduler-bin](https://github.com/4paradigm/OpenMLDB/releases/download/v0.5.1/apache-dolphinscheduler-dev-SNAPSHOT-bin.tar.gz)。注意,由于目前 DolphinScheduler 官方尚未发布最新的包含 OpenMLDB Task 的 release 版本(仅有 `dev` 版本),所以我们直接提供了一个可供下载版本。稍后 DolphinScheduler 更新发布以后则无须分开下载。
+We provide a downloadable build of DolphinScheduler that supports the OpenMLDB Task; click [dolphinscheduler-bin](http://openmldb.ai/download/dolphinschduler-task/apache-dolphinscheduler-dev-SNAPSHOT-bin.tar.gz) to download it. (DolphinScheduler has not yet published an official release that includes the OpenMLDB Task, only the `dev` branch; once the official release is out, download it directly instead.)
启动 DolphinScheduler standalone,步骤如下,更多请参考[官方文档](https://dolphinscheduler.apache.org/en-us/docs/3.0.0/user_doc/guide/installation/standalone.html)。
```
tar -xvzf apache-dolphinscheduler-*-bin.tar.gz
cd apache-dolphinscheduler-*-bin
-sh ./bin/dolphinscheduler-daemon.sh start standalone-server
+sed -i s#/opt/soft/python#/usr/bin/python3#g bin/env/dolphinscheduler_env.sh
+./bin/dolphinscheduler-daemon.sh start standalone-server
```
浏览器访问地址 http://localhost:12345/dolphinscheduler/ui 即可登录系统UI。默认的用户名和密码是 admin/dolphinScheduler123。
@@ -65,31 +77,34 @@ sh ./bin/dolphinscheduler-daemon.sh start standalone-server
DolphinScheduler 的 worker server 需要 OpenMLDB Python SDK, DolphinScheduler standalone 的 worker 即本机,所以只需在本机安装OpenMLDB Python SDK。我们的OpenMLDB镜像中已经安装了。如果你在别的环境中,请运行:
```
-pip3 install openmldb
+
+```{note}
+The DolphinScheduler worker server needs the OpenMLDB Python SDK. For DolphinScheduler standalone the worker is the local machine, so the SDK only needs to be installed locally. It is already installed in our OpenMLDB image. If you are in a different environment, install it with `pip3 install openmldb`.
```
-**下载工作流配置并配置 Python 环境**
+**Download the Workflow Configuration**
-工作流可以手动创建,为了简化演示,我们直接提供了 json 工作流文件,[点击下载](https://github.com/4paradigm/OpenMLDB/releases/download/v0.5.1/workflow_openmldb_demo.json),稍后可以直接导入到 DolphinScheduler 环境中,并做简单的修改,即可完成全工作流。
+The workflow can be created manually. To simplify the demo, we provide a JSON workflow file directly; [click to download](http://openmldb.ai/download/dolphinschduler-task/workflow_openmldb_demo.json). You can import it into the DolphinScheduler environment later and, after a few small modifications (see the demo below), get the complete workflow.
-Python task 需要显式设置 Python 环境,最简单的办法是在bin/env/dolphinscheduler_env.sh中修改`PYTHON_HOME`,再启动 DolphinScheduler 。请填写python3的绝对路径,而不是相对路径。
-```{caution}
-注意,在 DolphinScheduler standalone 运行前,配置的临时环境变量`PYTHON_HOME`不会影响work server中的环境。
-```
-如果你已经启动 DolphinScheduler ,也可以在启动后的web页面中进行环境设置,设置方法如下。**注意,这样的情况下,需要确认工作流中的task都使用该环境。**
-![ds env setting](images/ds_env_setting.png)
+**Source Data**
-![set python env](images/set_python_env.png)
+The workflow imports data into OpenMLDB from `/tmp/train_sample.csv`, so prepare the source data:
+```
+cp /work/talkingdata/train_sample.csv /tmp
+```
### Demo 演示
#### 1. 初始配置
-![tenant manage](images/ds_tenant_manage.png)
-在 DolphinScheduler Web中创建租户,进入租户管理界面,填写有 sudo 权限的操作系统用户,queue 可以使用 default。docker容器内可直接使用root用户。
+Create a tenant in the DolphinScheduler Web UI: go to the tenant management page and fill in **an operating-system user with sudo privileges**; the queue can be left as default. Inside the docker container the root user can be used directly.
+
+![create tenant](images/ds_create_tenant.png)
再绑定租户到用户,简单起见,我们直接绑定到 admin 用户。进入用户管理页面,点击编辑admin用户。
+
![bind tenant](images/ds_bind_tenant.png)
+
绑定后,用户状态类似下图。
![bind status](images/ds_bind_status.png)
@@ -97,16 +112,21 @@ Python task 需要显式设置 Python 环境,最简单的办法是在bin/env/d
DolphinScheduler 中,需要先创建项目,再在项目中创建工作流。
所以,首先创建一个test项目,如下图所示,点击创建项目并进入项目。
+
![create project](images/ds_create_project.png)
+
![project](images/ds_project.png)
进入项目后,导入下载好的工作流文件。如下图所示,在工作流定义界面点击导入工作流。
+
![import workflow](images/ds_import_workflow.png)
导入后,工作流列表中将出现该工作流,类似下图。
+
![workflow list](images/ds_workflow_list.png)
点击该工作流名字,可查看工作流的详细内容,如下图所示。
+
![workflow detail](images/ds_workflow_detail.png)
**注意**,此处需要一点修改,因为导入工作流后task 的 ID 会有变化。特别的,switch task 中的上游和下游 id 都不会存在,需要手动改一下。
@@ -119,11 +139,13 @@ DolphinScheduler 中,需要先创建项目,再在项目中创建工作流。
![right](images/ds_switch_right.png)
修改完成后,直接保存该工作流。导入的工作流中 tenant 默认会是 default,也是**可以运行**的。如果你想指定自己的租户,请在保存工作流时选择租户,如下图所示。
+
![set tenant](images/ds_set_tenant.png)
#### 3. 上线运行
工作流保存后,需要先上线再运行。上线后,运行按钮才会点亮。如下图所示。
+
![run](images/ds_run.png)
点击运行后,等待工作流完成。可在工作流实例(Workflow Instance)界面,查看工作流运行详情,如下图所示。
@@ -142,4 +164,17 @@ curl -X POST 127.0.0.1:8881/predict -d '{"ip": 114904,
"is_attributed": 0}'
```
返回结果如下:
+
![predict](images/ds_predict.png)
+
+#### Notes
+
+If the workflow is run repeatedly, the `deploy sql` task may fail because the deployment `demo` already exists. Before running the workflow again, delete that deployment in the docker container:
+```
+/work/openmldb/bin/openmldb --zk_cluster=127.0.0.1:2181 --zk_root_path=/openmldb --role=sql_client --database=demo_db --interactive=false --cmd="drop deployment demo;"
+```
+
+You can confirm the deployment has been deleted with:
+```
+/work/openmldb/bin/openmldb --zk_cluster=127.0.0.1:2181 --zk_root_path=/openmldb --role=sql_client --database=demo_db --interactive=false --cmd="show deployment demo;"
+```
\ No newline at end of file
diff --git a/docs/zh/use_case/images/Byzer_Notebook.jpg b/docs/zh/use_case/images/Byzer_Notebook.jpg
new file mode 100644
index 00000000000..18ae0f85739
Binary files /dev/null and b/docs/zh/use_case/images/Byzer_Notebook.jpg differ
diff --git a/docs/zh/use_case/images/add_connection.png b/docs/zh/use_case/images/add_connection.png
new file mode 100644
index 00000000000..50cd41d16ff
Binary files /dev/null and b/docs/zh/use_case/images/add_connection.png differ
diff --git a/docs/zh/use_case/images/airflow_dag.png b/docs/zh/use_case/images/airflow_dag.png
new file mode 100644
index 00000000000..ad2bd6193e2
Binary files /dev/null and b/docs/zh/use_case/images/airflow_dag.png differ
diff --git a/docs/zh/use_case/images/airflow_login.png b/docs/zh/use_case/images/airflow_login.png
new file mode 100644
index 00000000000..03d58db49a9
Binary files /dev/null and b/docs/zh/use_case/images/airflow_login.png differ
diff --git a/docs/zh/use_case/images/airflow_test_result.png b/docs/zh/use_case/images/airflow_test_result.png
new file mode 100644
index 00000000000..75d4efc9c66
Binary files /dev/null and b/docs/zh/use_case/images/airflow_test_result.png differ
diff --git a/docs/zh/use_case/images/connection.png b/docs/zh/use_case/images/connection.png
new file mode 100644
index 00000000000..d0383aef2dc
Binary files /dev/null and b/docs/zh/use_case/images/connection.png differ
diff --git a/docs/zh/use_case/images/connection_display.png b/docs/zh/use_case/images/connection_display.png
new file mode 100644
index 00000000000..05726e821a4
Binary files /dev/null and b/docs/zh/use_case/images/connection_display.png differ
diff --git a/docs/zh/use_case/images/connection_settings.png b/docs/zh/use_case/images/connection_settings.png
new file mode 100644
index 00000000000..c739c61f71e
Binary files /dev/null and b/docs/zh/use_case/images/connection_settings.png differ
diff --git a/docs/zh/use_case/images/dag_code.png b/docs/zh/use_case/images/dag_code.png
new file mode 100644
index 00000000000..86f2289a0a5
Binary files /dev/null and b/docs/zh/use_case/images/dag_code.png differ
diff --git a/docs/zh/use_case/images/dag_home.png b/docs/zh/use_case/images/dag_home.png
new file mode 100644
index 00000000000..00a6ed33c53
Binary files /dev/null and b/docs/zh/use_case/images/dag_home.png differ
diff --git a/docs/zh/use_case/images/dag_run.png b/docs/zh/use_case/images/dag_run.png
new file mode 100644
index 00000000000..d072e4f8792
Binary files /dev/null and b/docs/zh/use_case/images/dag_run.png differ
diff --git a/docs/zh/use_case/images/ds_bind_status.png b/docs/zh/use_case/images/ds_bind_status.png
index 2023247c12f..42ebeea6c90 100644
Binary files a/docs/zh/use_case/images/ds_bind_status.png and b/docs/zh/use_case/images/ds_bind_status.png differ
diff --git a/docs/zh/use_case/images/ds_create_tenant.png b/docs/zh/use_case/images/ds_create_tenant.png
new file mode 100644
index 00000000000..88a56fd58c0
Binary files /dev/null and b/docs/zh/use_case/images/ds_create_tenant.png differ
diff --git a/docs/zh/use_case/images/ds_set_tenant.png b/docs/zh/use_case/images/ds_set_tenant.png
index 08388d4e9d8..d6f94bd6b08 100644
Binary files a/docs/zh/use_case/images/ds_set_tenant.png and b/docs/zh/use_case/images/ds_set_tenant.png differ
diff --git a/docs/zh/use_case/images/ds_tenant_manage.png b/docs/zh/use_case/images/ds_tenant_manage.png
deleted file mode 100644
index 0f221e6e048..00000000000
Binary files a/docs/zh/use_case/images/ds_tenant_manage.png and /dev/null differ
diff --git a/docs/zh/use_case/index.rst b/docs/zh/use_case/index.rst
index 0025fba281b..66faefd89f4 100644
--- a/docs/zh/use_case/index.rst
+++ b/docs/zh/use_case/index.rst
@@ -10,3 +10,6 @@
kafka_connector_demo
dolphinscheduler_task_demo
talkingdata_demo
+ OpenMLDB_Byzer_taxi
+ airflow_provider_demo
+ JD_recommendation
diff --git a/docs/zh/use_case/kafka_connector_demo.md b/docs/zh/use_case/kafka_connector_demo.md
index 43ddfa1f036..68b03fe4c81 100644
--- a/docs/zh/use_case/kafka_connector_demo.md
+++ b/docs/zh/use_case/kafka_connector_demo.md
@@ -21,7 +21,7 @@ OpenMLDB Kafka Connector实现见[extensions/kafka-connect-jdbc](https://github.
我们推荐你将下载的三个文件包都绑定到文件目录`kafka`。当然,也可以在启动容器后,再进行文件包的下载。我们假设文件包都在`/work/kafka`目录中。
```
-docker run -it -v `pwd`:/work/kafka --name openmldb 4pdosc/openmldb:0.5.2 bash
+docker run -it -v `pwd`:/work/kafka --name openmldb 4pdosc/openmldb:0.6.3 bash
```
### 流程
diff --git a/docs/zh/use_case/pulsar_connector_demo.md b/docs/zh/use_case/pulsar_connector_demo.md
index 42b931ba827..f843b6a46f1 100644
--- a/docs/zh/use_case/pulsar_connector_demo.md
+++ b/docs/zh/use_case/pulsar_connector_demo.md
@@ -13,7 +13,7 @@ Apache Pulsar是一个云原生的,分布式消息流平台。它可以作为O
### 下载
-- 你需要下载本文中所需要的所有文件,请点击[files](https://github.com/vagetablechicken/pulsar-openmldb-connector-demo/releases/download/v0.1/files.tar.gz)下载。文件包括connector包,schema文件,配置文件等等。
+- You need to download all the files used in this article; click [files](https://openmldb.ai/download/pulsar-connector/files.tar.gz) to download them. The package includes the connector package, schema files, configuration files, and so on.
- 如果你只想要下载connector包用于自己的项目,请点击[connector snapshot](https://github.com/4paradigm/OpenMLDB/releases/download/v0.4.4/pulsar-io-jdbc-openmldb-2.11.0-SNAPSHOT.nar)。
### 流程
@@ -35,7 +35,7 @@ Apache Pulsar是一个云原生的,分布式消息流平台。它可以作为O
```
我们更推荐你使用‘host network’模式运行docker,以及绑定文件目录‘files’,sql脚本在该目录中。
```
-docker run -dit --network host -v `pwd`/files:/work/taxi-trip/files --name openmldb 4pdosc/openmldb:0.5.2 bash
+docker run -dit --network host -v `pwd`/files:/work/pulsar_files --name openmldb 4pdosc/openmldb:0.6.3 bash
docker exec -it openmldb bash
```
@@ -57,7 +57,7 @@ desc connector_test;
```
执行脚本:
```
-../openmldb/bin/openmldb --zk_cluster=127.0.0.1:2181 --zk_root_path=/openmldb --role=sql_client < files/create.sql
+/work/openmldb/bin/openmldb --zk_cluster=127.0.0.1:2181 --zk_root_path=/openmldb --role=sql_client < /work/pulsar_files/create.sql
```
![table desc](images/table.png)
@@ -206,6 +206,6 @@ select *, string(timestamp(pickup_datetime)), string(timestamp(dropoff_datetime)
```
在OpenMLDB容器中执行脚本:
```
-../openmldb/bin/openmldb --zk_cluster=127.0.0.1:2181 --zk_root_path=/openmldb --role=sql_client < files/select.sql
+/work/openmldb/bin/openmldb --zk_cluster=127.0.0.1:2181 --zk_root_path=/openmldb --role=sql_client < /work/pulsar_files/select.sql
```
![openmldb result](images/openmldb_result.png)
diff --git a/docs/zh/use_case/talkingdata_demo.md b/docs/zh/use_case/talkingdata_demo.md
index c6af4681cf4..dbd76dc6e58 100755
--- a/docs/zh/use_case/talkingdata_demo.md
+++ b/docs/zh/use_case/talkingdata_demo.md
@@ -13,7 +13,7 @@
**启动 Docker**
```
-docker run -it 4pdosc/openmldb:0.5.2 bash
+docker run -it 4pdosc/openmldb:0.6.3 bash
```
#### 在本地运行
diff --git a/docs/zh/use_case/taxi_tour_duration_prediction.md b/docs/zh/use_case/taxi_tour_duration_prediction.md
index 69d34c60e73..c7d35bc33ac 100644
--- a/docs/zh/use_case/taxi_tour_duration_prediction.md
+++ b/docs/zh/use_case/taxi_tour_duration_prediction.md
@@ -12,7 +12,7 @@
- 拉取 OpenMLDB docker 镜像,并且运行相应容器:
```bash
-docker run -it 4pdosc/openmldb:0.5.2 bash
+docker run -it 4pdosc/openmldb:0.6.3 bash
```
该镜像预装了OpenMLDB,并预置了本案例所需要的所有脚本、三方库、开源工具以及训练数据。
diff --git a/extensions/airflow-provider-openmldb/.gitignore b/extensions/airflow-provider-openmldb/.gitignore
new file mode 100644
index 00000000000..5fef95f5b38
--- /dev/null
+++ b/extensions/airflow-provider-openmldb/.gitignore
@@ -0,0 +1,144 @@
+# Created by .ignore support plugin (hsz.mobi)
+### Python template
+# Byte-compiled / optimized / DLL files
+__pycache__/
+*.py[cod]
+*$py.class
+.vscode/
+
+# C extensions
+*.so
+
+# Distribution / packaging
+.Python
+build/
+develop-eggs/
+dist/
+downloads/
+eggs/
+.eggs/
+lib/
+lib64/
+parts/
+sdist/
+var/
+wheels/
+share/python-wheels/
+*.egg-info/
+.installed.cfg
+*.egg
+MANIFEST
+
+# PyInstaller
+# Usually these files are written by a python script from a template
+# before PyInstaller builds the exe, so as to inject date/other infos into it.
+*.manifest
+*.spec
+
+# Installer logs
+pip-log.txt
+pip-delete-this-directory.txt
+
+# Unit test / coverage reports
+htmlcov/
+.tox/
+.nox/
+.coverage
+.coverage.*
+.cache
+nosetests.xml
+coverage.xml
+*.cover
+*.py,cover
+.hypothesis/
+.pytest_cache/
+cover/
+
+# Translations
+*.mo
+*.pot
+
+# Django stuff:
+*.log
+local_settings.py
+db.sqlite3
+db.sqlite3-journal
+
+# Flask stuff:
+instance/
+.webassets-cache
+
+# Scrapy stuff:
+.scrapy
+
+# Sphinx documentation
+docs/_build/
+
+# PyBuilder
+.pybuilder/
+target/
+
+# Jupyter Notebook
+.ipynb_checkpoints
+
+# IPython
+profile_default/
+ipython_config.py
+
+# pyenv
+# For a library or package, you might want to ignore these files since the code is
+# intended to run in multiple environments; otherwise, check them in:
+# .python-version
+
+# pipenv
+# According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
+# However, in case of collaboration, if having platform-specific dependencies or dependencies
+# having no cross-platform support, pipenv may install dependencies that don't work, or not
+# install all needed dependencies.
+#Pipfile.lock
+
+# PEP 582; used by e.g. github.com/David-OConnor/pyflow
+__pypackages__/
+
+# Celery stuff
+celerybeat-schedule
+celerybeat.pid
+
+# SageMath parsed files
+*.sage.py
+
+# Environments
+.env
+.venv
+env/
+venv/
+ENV/
+env.bak/
+venv.bak/
+
+# Spyder project settings
+.spyderproject
+.spyproject
+
+# Rope project settings
+.ropeproject
+
+# mkdocs documentation
+/site
+
+# mypy
+.mypy_cache/
+.dmypy.json
+dmypy.json
+
+# Pyre type checker
+.pyre/
+
+# pytype static type analyzer
+.pytype/
+
+# Cython debug symbols
+cython_debug/
+
+# PyCharm
+.idea
diff --git a/extensions/airflow-provider-openmldb/README.md b/extensions/airflow-provider-openmldb/README.md
new file mode 100644
index 00000000000..e9480ba25b8
--- /dev/null
+++ b/extensions/airflow-provider-openmldb/README.md
@@ -0,0 +1,47 @@
+# Airflow OpenMLDB Provider
+
+# Overview
+
+The Airflow OpenMLDB Provider supports connecting to OpenMLDB; specifically, it connects to the OpenMLDB API Server.
+
+Operators:
+- OpenMLDBLoadDataOperator
+- OpenMLDBSelectIntoOperator
+- OpenMLDBDeployOperator
+- OpenMLDBSQLOperator: the underlying implementation of the operators above; supports arbitrary SQL.
+
+The provider ships only operators and a hook; there are no sensors.
+
+# Build
+
+To build the openmldb provider, follow the steps below (a command sketch follows the list):
+
+1. Clone the repo.
+2. `cd` into the provider directory.
+3. Run `python3 -m pip install build`.
+4. Run `python3 -m build` to build the wheel.
+5. Find the .whl file in `dist/*.whl`.
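+
+For example (a sketch, assuming the provider directory inside the OpenMLDB repo):
+
+```
+git clone https://github.com/4paradigm/OpenMLDB.git
+cd OpenMLDB/extensions/airflow-provider-openmldb
+python3 -m pip install build
+python3 -m build
+ls dist/*.whl
+```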
+
+# How to use
+
+Write the DAG using the openmldb operators; see the [simple openmldb operator dag example](https://github.com/4paradigm/OpenMLDB/blob/main/extensions/airflow-provider-openmldb/openmldb_provider/example_dags/example_openmldb.py) for reference.
+
+Create the connection in Airflow; its name must match the `openmldb_conn_id` you set in the DAG.
+
+Trigger the DAG. A minimal DAG sketch is shown below.
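+
+A minimal sketch, condensed from `example_openmldb.py` in `example_dags` (the database, table, and file path are placeholders):
+
+```python
+from datetime import datetime
+
+from airflow.models.dag import DAG
+from openmldb_provider.operators.openmldb_operator import Mode, OpenMLDBLoadDataOperator
+
+with DAG(
+    dag_id="openmldb_minimal",
+    start_date=datetime(2021, 1, 1),
+    default_args={'openmldb_conn_id': 'openmldb_conn_id'},
+    catchup=False,
+) as dag:
+    # Load a local csv into an OpenMLDB table through the API Server (offline, synchronous).
+    load_data = OpenMLDBLoadDataOperator(
+        task_id='load-data',
+        db='example_db',
+        mode=Mode.OFFSYNC,
+        table='example_table',
+        file='/tmp/train_sample.csv',
+        options="mode='overwrite'",
+    )
+```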
+
+## Testing
+
+Add connection:
+```
+airflow connections add openmldb_conn_id --conn-uri http://127.0.0.1:9080
+airflow connections list --conn-id openmldb_conn_id
+```
+DAG test:
+```
+airflow dags test example_openmldb_complex 2022-08-25
+```
diff --git a/extensions/airflow-provider-openmldb/openmldb_provider/__init__.py b/extensions/airflow-provider-openmldb/openmldb_provider/__init__.py
new file mode 100644
index 00000000000..a43a6754756
--- /dev/null
+++ b/extensions/airflow-provider-openmldb/openmldb_provider/__init__.py
@@ -0,0 +1,13 @@
+# This is needed to allow Airflow to pick up specific metadata fields it needs for certain features. We recognize
+# it's a bit unclean to define these in multiple places, but at this point it's the only workaround if you'd like
+# your custom conn type to show up in the Airflow UI.
+def get_provider_info():
+ return {
+ "package-name": "airflow-provider-openmldb", # Required
+ "name": "OpenMLDB Airflow Provider", # Required
+ "description": "an airflow provider to connect OpenMLDB", # Required
+ "hook-class-names": ["openmldb_provider.hooks.openmldb_hook.OpenMLDBHook"], # for airflow<2.2
+ # "connection-types"
+ "extra-links": ["openmldb_provider.operators.openmldb_operator.ExtraLink"], # unused
+ "versions": ["0.0.1"] # Required
+ }
diff --git a/extensions/airflow-provider-openmldb/openmldb_provider/example_dags/__init__.py b/extensions/airflow-provider-openmldb/openmldb_provider/example_dags/__init__.py
new file mode 100644
index 00000000000..835f9218b72
--- /dev/null
+++ b/extensions/airflow-provider-openmldb/openmldb_provider/example_dags/__init__.py
@@ -0,0 +1,13 @@
+# Copyright 2021 4Paradigm
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
diff --git a/extensions/airflow-provider-openmldb/openmldb_provider/example_dags/example_openmldb.py b/extensions/airflow-provider-openmldb/openmldb_provider/example_dags/example_openmldb.py
new file mode 100644
index 00000000000..7229b5ef796
--- /dev/null
+++ b/extensions/airflow-provider-openmldb/openmldb_provider/example_dags/example_openmldb.py
@@ -0,0 +1,64 @@
+# Copyright 2021 4Paradigm
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""
+Example use of OpenMLDB related operators.
+"""
+import os
+from datetime import datetime
+
+from airflow.models.dag import DAG
+
+from openmldb_provider.operators.openmldb_operator import (
+ Mode,
+ OpenMLDBLoadDataOperator,
+ OpenMLDBSelectIntoOperator,
+)
+
+PATH_TO_DATA_FILE = os.environ.get('OPENMLDB_PATH_TO_DATA_FILE', '/tmp/example-text.txt')
+ENV_ID = os.environ.get("SYSTEM_TESTS_ENV_ID")
+DAG_ID = "example_openmldb"
+
+with DAG(
+ dag_id=DAG_ID,
+ start_date=datetime(2021, 1, 1),
+ default_args={'openmldb_conn_id': 'openmldb_conn_id'},
+ max_active_runs=1,
+ tags=['example'],
+ catchup=False,
+) as dag:
+ database = "example_db"
+ table = "example_table"
+
+ # [START load_data_and_extract_feature_offline]
+ load_data = OpenMLDBLoadDataOperator(
+ task_id='load-data',
+ db=database,
+ mode=Mode.OFFSYNC,
+ table=table,
+ file=PATH_TO_DATA_FILE,
+ options="mode='overwrite'",
+ )
+
+ feature_extract = OpenMLDBSelectIntoOperator(
+ task_id='feature-extract',
+ db=database,
+ mode=Mode.OFFSYNC,
+ sql=f"select * from {table}",
+ file="/tmp/feature_data",
+ options="mode='overwrite'",
+ )
+ # [END load_data_and_extract_feature_offline]
+
+ load_data >> feature_extract
diff --git a/extensions/airflow-provider-openmldb/openmldb_provider/example_dags/example_openmldb_complex.py b/extensions/airflow-provider-openmldb/openmldb_provider/example_dags/example_openmldb_complex.py
new file mode 100644
index 00000000000..09e280169d3
--- /dev/null
+++ b/extensions/airflow-provider-openmldb/openmldb_provider/example_dags/example_openmldb_complex.py
@@ -0,0 +1,131 @@
+# Copyright 2021 4Paradigm
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""
+Example use of OpenMLDB related operators.
+"""
+import os
+from datetime import datetime
+
+from airflow.models.dag import DAG
+from airflow.operators.python import PythonOperator, BranchPythonOperator
+from openmldb_provider.operators.openmldb_operator import (
+ Mode,
+ OpenMLDBLoadDataOperator,
+ OpenMLDBSelectIntoOperator, OpenMLDBSQLOperator, OpenMLDBDeployOperator,
+)
+
+import xgboost_train_sample
+
+# cp example_dags/train_sample.csv to /tmp first
+PATH_TO_DATA_FILE = os.environ.get('OPENMLDB_PATH_TO_DATA_FILE', '/tmp/train_sample.csv')
+ENV_ID = os.environ.get("SYSTEM_TESTS_ENV_ID")
+DAG_ID = "example_openmldb_complex"
+
+with DAG(
+ dag_id=DAG_ID,
+ start_date=datetime(2021, 1, 1),
+ default_args={'openmldb_conn_id': 'openmldb_conn_id'},
+ max_active_runs=1,
+ tags=['example'],
+ catchup=False,
+) as dag:
+ database = "example_db"
+ table = "example_table"
+
+ create_database = OpenMLDBSQLOperator(
+ task_id='create-db',
+ db=database, mode=Mode.OFFSYNC,
+ sql=f'create database if not exists {database}'
+ )
+
+ create_table = OpenMLDBSQLOperator(
+ task_id='create-table',
+ db=database, mode=Mode.OFFSYNC,
+ sql=f'create table if not exists {table}(ip int, app int, device int, os int, channel int, '
+ f'click_time timestamp, is_attributed int)'
+ )
+
+ # [START load_data_and_extract_feature_offline]
+ load_data = OpenMLDBLoadDataOperator(
+ task_id='load-data',
+ db=database,
+ mode=Mode.OFFSYNC,
+ table=table,
+ file=PATH_TO_DATA_FILE,
+ options="mode='overwrite'",
+ )
+
+ sql = f"SELECT is_attributed, app, device, os, channel, hour(click_time) as hour, day(click_time) as day, " \
+ f"count(channel) over w1 as qty " \
+ f"FROM {table} " \
+ f"WINDOW " \
+ f"w1 as(partition by ip order by click_time ROWS_RANGE BETWEEN 1h PRECEDING AND CURRENT ROW)"
+
+ feature_path = "/tmp/feature_data"
+ feature_extract = OpenMLDBSelectIntoOperator(
+ task_id='feature-extract',
+ db=database,
+ mode=Mode.OFFSYNC,
+ sql=sql,
+ file=feature_path,
+ options="mode='overwrite'",
+ )
+ # [END load_data_and_extract_feature_offline]
+
+ model_path = "/tmp/model.json"
+ # return auc
+ train = PythonOperator(task_id="train",
+ python_callable=xgboost_train_sample.train_task,
+ op_args=[f"{feature_path}/*.csv", model_path], )
+
+
+ def branch_func(**kwargs):
+ ti = kwargs['ti']
+ xcom_value = int(ti.xcom_pull(task_ids='train'))
+ if xcom_value >= 99.0:
+ return "deploy-sql"
+ else:
+ return "fail-report"
+
+
+ branching = BranchPythonOperator(
+ task_id="branching",
+ python_callable=branch_func,
+ )
+
+ predict_server = "127.0.0.1:8881"
+ deploy_name = "demo"
+
+ # success: deploy sql and model
+ deploy_sql = OpenMLDBDeployOperator(task_id="deploy-sql", db=database, deploy_name=deploy_name, sql=sql, )
+
+
+ def update_req():
+ import requests
+ requests.post('http://' + predict_server + '/update', json={
+ 'database': database,
+ 'deployment': deploy_name, 'model_path': model_path
+ })
+
+
+ deploy = PythonOperator(task_id="deploy", python_callable=update_req)
+
+ deploy_sql >> deploy
+
+ # fail: report
+ fail_report = PythonOperator(task_id="fail-report", python_callable=lambda: print('fail'))
+
+ create_database >> create_table >> load_data >> feature_extract >> train >> branching >> [deploy_sql,
+ fail_report]
diff --git a/extensions/airflow-provider-openmldb/openmldb_provider/example_dags/train_sample.csv b/extensions/airflow-provider-openmldb/openmldb_provider/example_dags/train_sample.csv
new file mode 100644
index 00000000000..ab6b78b42e1
--- /dev/null
+++ b/extensions/airflow-provider-openmldb/openmldb_provider/example_dags/train_sample.csv
@@ -0,0 +1,10001 @@
+ip,app,device,os,channel,click_time,is_attributed
+106385,12,1,19,178,2017-11-08 06:30:54,0
+114066,3,1,13,489,2017-11-07 07:51:29,0
+189805,12,1,23,105,2017-11-07 12:30:07,0
+107100,18,1,20,107,2017-11-08 04:31:19,0
+5314,3,1,19,409,2017-11-07 11:21:29,0
+48996,3,1,19,205,2017-11-07 03:38:20,0
+83698,26,1,19,266,2017-11-09 05:01:42,0
+5147,22,1,15,116,2017-11-09 08:38:30,0
+145086,12,1,22,242,2017-11-07 06:30:30,0
+61400,3,1,18,137,2017-11-08 23:58:43,0
+66371,23,1,19,153,2017-11-07 14:29:15,0
+73339,11,1,19,487,2017-11-09 02:44:56,0
+208618,14,1,13,439,2017-11-08 20:34:03,0
+83260,12,1,13,265,2017-11-08 08:33:44,0
+7567,1,1,53,150,2017-11-07 16:02:32,0
+3994,9,1,19,244,2017-11-09 10:59:09,0
+45362,15,1,28,315,2017-11-07 16:38:09,0
+85172,15,1,13,315,2017-11-08 23:12:06,0
+137258,21,1,8,232,2017-11-09 01:49:02,0
+103132,1,1,22,377,2017-11-07 01:10:54,0
+25039,1,1,32,452,2017-11-09 12:12:18,0
+19537,3,1,47,442,2017-11-07 03:20:32,0
+98380,25,1,9,259,2017-11-07 14:45:33,0
+13963,18,1,3,134,2017-11-07 02:39:45,0
+86767,9,1,8,244,2017-11-08 04:00:35,0
+18994,15,1,19,245,2017-11-06 23:37:20,0
+211425,2,1,41,219,2017-11-07 02:58:40,0
+38300,12,1,19,178,2017-11-07 23:29:34,0
+75504,2,1,19,237,2017-11-08 07:25:23,0
+90607,14,1,13,379,2017-11-09 05:01:14,0
+120361,14,1,14,401,2017-11-07 07:21:39,0
+100408,12,1,19,140,2017-11-09 05:05:13,0
+196790,3,1,19,137,2017-11-08 08:44:16,0
+77048,12,1,36,265,2017-11-08 02:34:34,0
+87653,18,1,19,121,2017-11-09 00:34:39,0
+112442,14,1,18,379,2017-11-07 20:15:17,0
+4573,2,1,19,236,2017-11-09 06:52:43,0
+112215,14,1,13,401,2017-11-07 03:41:09,0
+17149,12,1,22,140,2017-11-07 13:24:16,0
+8964,18,2,18,30,2017-11-09 07:12:37,0
+79827,15,1,18,245,2017-11-07 14:48:18,0
+37161,18,1,13,121,2017-11-08 10:36:43,0
+118930,94,1,19,361,2017-11-08 02:38:37,0
+70551,21,1,53,128,2017-11-07 00:53:50,0
+280687,3,1,17,135,2017-11-09 08:10:36,0
+24951,9,1,37,466,2017-11-08 12:30:06,0
+17077,26,1,31,477,2017-11-08 23:49:50,0
+88772,14,1,22,489,2017-11-07 04:25:40,0
+127628,12,1,13,259,2017-11-09 08:33:05,0
+166206,9,1,13,232,2017-11-09 12:46:34,0
+59763,20,1,19,478,2017-11-09 02:38:37,0
+31463,12,1,16,328,2017-11-09 10:12:29,0
+87349,12,1,13,328,2017-11-07 21:04:49,0
+7664,3,1,47,280,2017-11-07 04:20:35,0
+32130,3,1,53,280,2017-11-07 01:11:53,0
+116785,26,1,13,121,2017-11-07 12:44:13,0
+213380,29,1,19,347,2017-11-09 12:58:44,0
+7210,3,1,10,280,2017-11-08 11:33:38,0
+120753,9,1,8,334,2017-11-08 05:20:28,0
+31572,26,1,19,266,2017-11-09 14:14:48,0
+120962,12,1,13,245,2017-11-06 17:04:32,0
+24067,2,1,16,237,2017-11-09 02:47:07,0
+53964,3,1,18,280,2017-11-08 02:40:50,0
+112816,13,1,13,477,2017-11-09 01:42:38,0
+11852,1,1,19,125,2017-11-07 10:52:55,0
+71093,3,1,30,173,2017-11-07 12:15:50,0
+65716,23,1,13,153,2017-11-09 03:05:33,0
+95962,3,1,41,409,2017-11-08 19:37:22,0
+24278,15,1,19,430,2017-11-07 06:41:57,0
+107513,22,1,13,496,2017-11-07 08:04:53,0
+109537,26,1,19,266,2017-11-07 05:49:11,0
+107954,9,1,70,234,2017-11-06 16:54:21,0
+5314,3,1,6,424,2017-11-09 00:57:56,0
+89680,9,1,34,244,2017-11-07 03:20:35,0
+176923,3,1,19,205,2017-11-07 00:37:21,0
+73487,12,1,12,326,2017-11-08 05:21:43,0
+5314,2,1,2,477,2017-11-09 00:14:56,0
+97773,12,1,25,140,2017-11-09 08:07:01,0
+112291,15,1,13,412,2017-11-07 01:45:21,0
+74013,18,1,19,121,2017-11-09 12:59:53,0
+45275,2,1,13,469,2017-11-08 07:50:57,0
+83659,15,1,19,278,2017-11-07 04:11:17,0
+12505,2,1,19,205,2017-11-07 02:45:04,0
+203175,2,1,19,435,2017-11-07 15:04:32,0
+66033,18,1,18,107,2017-11-09 12:03:17,0
+20215,12,2,17,409,2017-11-09 01:58:38,0
+88295,14,1,6,439,2017-11-09 04:04:21,0
+75885,8,1,10,145,2017-11-08 11:48:36,0
+23035,9,1,13,445,2017-11-07 04:17:06,0
+46677,9,1,19,107,2017-11-09 06:43:19,0
+123596,1,1,2,153,2017-11-09 09:43:28,0
+179026,18,1,13,107,2017-11-07 03:10:50,0
+258385,11,1,19,137,2017-11-09 07:17:26,0
+80140,12,1,27,145,2017-11-08 23:36:31,0
+41725,9,1,19,466,2017-11-08 09:35:36,0
+242240,15,1,13,480,2017-11-07 23:33:12,0
+84916,12,1,13,259,2017-11-08 08:17:14,0
+66767,1,1,13,17,2017-11-07 00:57:08,0
+332563,9,1,19,334,2017-11-08 16:56:04,0
+73516,9,2,13,453,2017-11-09 11:11:28,0
+108103,2,1,13,477,2017-11-07 07:43:45,0
+195332,25,1,15,259,2017-11-07 12:46:10,0
+53418,2,1,12,377,2017-11-06 21:54:17,0
+104512,9,1,10,258,2017-11-08 10:00:17,0
+114276,9,1,19,442,2017-11-08 09:56:56,0
+74816,27,1,13,153,2017-11-08 02:30:13,0
+98521,15,1,16,315,2017-11-08 11:28:36,0
+119930,2,1,19,219,2017-11-07 18:25:23,0
+144687,21,1,19,128,2017-11-07 15:37:12,0
+60348,9,1,13,445,2017-11-07 02:47:47,0
+112302,7,1,35,101,2017-11-09 12:54:07,0
+120883,15,1,8,430,2017-11-08 08:19:05,0
+44744,3,1,13,421,2017-11-09 11:13:33,0
+209929,25,1,13,259,2017-11-07 12:15:41,0
+55569,1,1,13,150,2017-11-09 07:00:08,0
+141432,9,1,19,334,2017-11-09 03:28:29,0
+267177,2,1,13,219,2017-11-08 09:32:57,0
+60083,3,1,9,424,2017-11-09 03:47:14,0
+193346,18,1,18,107,2017-11-08 00:48:47,0
+93021,12,1,19,265,2017-11-07 01:23:56,0
+65253,13,2,9,400,2017-11-07 08:07:07,0
+37752,18,1,28,439,2017-11-08 11:14:02,0
+26927,9,1,13,134,2017-11-07 00:23:44,0
+5185,9,1,32,466,2017-11-09 08:23:48,0
+106294,55,1,13,453,2017-11-07 02:09:04,0
+27090,3,1,15,442,2017-11-08 09:26:50,0
+171274,26,1,15,266,2017-11-09 12:12:39,0
+64609,14,1,17,463,2017-11-09 09:45:50,0
+208347,15,1,13,111,2017-11-09 12:27:57,0
+126786,15,1,53,245,2017-11-07 15:16:22,0
+69503,12,1,19,265,2017-11-08 11:19:01,0
+41025,14,1,17,489,2017-11-07 10:06:56,0
+47168,12,1,14,265,2017-11-08 06:30:13,0
+9844,9,2,18,258,2017-11-09 14:12:32,0
+43243,12,1,18,328,2017-11-07 15:26:52,0
+7304,3,1,19,135,2017-11-09 14:28:48,0
+118903,28,1,4,135,2017-11-08 03:11:58,0
+173678,29,1,13,343,2017-11-07 10:38:47,0
+118647,26,1,18,266,2017-11-09 15:01:35,0
+28089,12,1,14,178,2017-11-08 12:29:02,0
+38233,18,1,19,107,2017-11-09 02:29:18,0
+9849,15,1,19,315,2017-11-07 12:57:41,0
+91536,2,2,6,205,2017-11-08 15:21:56,0
+77112,2,1,17,236,2017-11-09 08:28:34,0
+44926,18,1,19,134,2017-11-07 04:11:24,0
+201786,12,1,13,497,2017-11-09 04:09:26,0
+86231,11,1,14,481,2017-11-09 09:27:29,0
+6395,3,1,13,379,2017-11-06 23:26:20,0
+79851,12,1,19,328,2017-11-06 19:55:20,0
+72936,151,0,76,347,2017-11-07 13:40:45,0
+114379,25,1,8,259,2017-11-08 13:03:53,0
+112880,18,1,19,107,2017-11-09 03:55:33,0
+231915,13,1,17,400,2017-11-08 13:06:39,0
+8292,9,1,22,334,2017-11-07 08:26:05,0
+313388,3,1,13,280,2017-11-09 00:18:12,0
+57191,23,1,19,153,2017-11-09 10:28:43,0
+5314,12,1,17,497,2017-11-07 11:10:39,0
+137652,3,1,19,115,2017-11-07 13:11:43,0
+32937,18,1,6,107,2017-11-06 16:24:27,0
+83306,14,1,19,467,2017-11-09 08:43:26,0
+73839,3,1,22,480,2017-11-09 06:12:29,0
+37628,15,1,13,245,2017-11-08 15:08:10,0
+34947,12,1,19,265,2017-11-07 12:10:09,0
+41369,12,1,25,178,2017-11-07 11:12:14,0
+106385,3,1,13,280,2017-11-09 03:01:25,0
+109634,28,1,6,135,2017-11-09 14:44:36,0
+82050,12,1,13,328,2017-11-07 02:45:33,0
+7645,3,1,19,280,2017-11-06 16:17:12,0
+107594,64,1,18,459,2017-11-07 15:45:42,0
+122777,3,1,58,424,2017-11-09 07:40:53,0
+205644,18,1,13,107,2017-11-07 02:40:35,0
+138245,25,1,25,259,2017-11-07 11:46:43,0
+118229,12,2,2,265,2017-11-09 14:02:06,0
+114904,2,1,19,205,2017-11-07 08:57:54,0
+181182,18,1,13,107,2017-11-07 15:53:01,0
+7815,3,1,19,442,2017-11-08 10:22:55,0
+55910,2,1,17,364,2017-11-06 23:25:59,0
+173196,13,1,23,477,2017-11-07 03:08:10,0
+332197,12,2,19,135,2017-11-09 14:57:36,0
+66985,3,1,20,137,2017-11-06 17:48:39,0
+164865,24,1,10,105,2017-11-07 12:32:38,0
+54481,9,1,30,232,2017-11-06 22:49:38,0
+194117,15,1,18,245,2017-11-07 21:24:05,0
+58556,2,2,22,237,2017-11-07 02:25:55,0
+235903,26,1,10,121,2017-11-07 17:25:00,0
+109676,9,1,15,127,2017-11-09 13:42:10,0
+110095,12,1,19,245,2017-11-07 16:21:34,0
+91336,3,1,13,280,2017-11-09 04:55:48,0
+76853,2,1,18,435,2017-11-08 02:16:25,0
+5509,9,1,19,442,2017-11-09 08:58:05,0
+33777,27,1,15,122,2017-11-08 11:54:07,0
+26826,12,1,13,259,2017-11-09 11:47:27,0
+80560,13,1,22,477,2017-11-09 05:43:49,0
+96266,8,1,13,145,2017-11-09 10:48:45,0
+78910,12,1,13,245,2017-11-06 19:26:47,0
+10265,15,1,9,245,2017-11-07 08:58:45,0
+23733,2,1,32,219,2017-11-06 17:52:32,0
+141490,5,1,13,377,2017-11-07 14:39:19,0
+88541,21,1,25,128,2017-11-06 18:51:40,0
+92289,9,1,16,258,2017-11-09 12:49:58,0
+80571,14,1,19,401,2017-11-07 03:54:44,0
+15218,12,1,6,265,2017-11-08 08:34:05,0
+67577,26,1,11,266,2017-11-08 09:52:54,0
+22349,12,1,32,219,2017-11-07 03:42:58,0
+67409,15,1,6,430,2017-11-09 03:14:27,0
+39453,3,1,19,280,2017-11-07 08:02:10,0
+88255,2,1,13,122,2017-11-07 14:01:59,0
+28272,18,1,19,439,2017-11-09 01:41:16,0
+71579,3,1,9,130,2017-11-06 17:24:18,0
+46939,15,1,13,379,2017-11-09 00:58:52,0
+46740,2,1,9,236,2017-11-08 13:28:37,0
+146536,23,1,37,153,2017-11-08 13:13:03,0
+27303,15,1,18,245,2017-11-08 23:36:18,0
+101445,9,1,4,127,2017-11-09 15:41:07,0
+30636,12,1,13,265,2017-11-06 22:21:30,0
+18122,2,1,13,122,2017-11-08 04:48:08,0
+11448,15,1,25,315,2017-11-07 13:46:35,0
+109776,15,1,19,140,2017-11-08 11:47:20,0
+49383,12,1,22,265,2017-11-08 11:04:15,0
+76503,150,1,10,110,2017-11-07 04:27:36,0
+170136,2,1,25,237,2017-11-09 04:53:45,0
+32432,5,1,41,377,2017-11-07 16:08:45,0
+5178,14,1,18,401,2017-11-08 23:43:29,0
+16993,15,1,11,245,2017-11-08 06:01:53,0
+146904,18,1,11,107,2017-11-08 08:44:36,0
+109735,9,1,6,134,2017-11-09 03:19:10,0
+212602,95,3032,607,347,2017-11-07 13:49:47,0
+77048,15,1,19,245,2017-11-07 22:20:57,0
+77610,17,1,37,134,2017-11-09 02:08:09,0
+39884,18,1,3,107,2017-11-09 02:30:01,0
+188452,12,1,13,245,2017-11-08 15:42:34,0
+103598,3,1,13,280,2017-11-08 07:08:04,0
+126105,3,1,8,135,2017-11-08 12:17:10,0
+63408,2,1,3,435,2017-11-08 00:27:38,0
+100275,1,1,3,115,2017-11-08 16:33:59,0
+262468,3,1,19,280,2017-11-08 07:39:20,0
+60191,2,1,19,205,2017-11-09 04:37:12,0
+201182,3,1,6,489,2017-11-08 03:08:30,0
+111153,15,1,20,3,2017-11-09 14:36:58,0
+32453,3,2,9,280,2017-11-08 02:19:07,0
+27714,3,1,15,173,2017-11-07 14:49:29,0
+15843,14,1,19,480,2017-11-08 09:31:17,0
+149535,8,1,25,259,2017-11-07 01:04:29,0
+45785,105,1,10,282,2017-11-07 02:04:06,0
+41181,2,1,19,469,2017-11-07 22:04:10,0
+59426,23,1,19,153,2017-11-08 10:35:34,0
+56313,2,1,25,469,2017-11-07 09:33:47,0
+74034,22,1,12,116,2017-11-08 13:32:55,0
+67628,2,1,13,452,2017-11-07 18:03:38,0
+18676,11,1,16,487,2017-11-07 08:40:07,0
+184702,2,1,19,435,2017-11-07 04:13:27,0
+973,12,1,13,178,2017-11-09 02:35:38,0
+276450,3,1,41,211,2017-11-08 13:21:23,0
+102581,2,1,13,212,2017-11-08 08:33:10,0
+73997,2,1,16,219,2017-11-08 02:39:13,0
+202464,14,1,9,439,2017-11-06 17:47:11,0
+91128,9,1,11,145,2017-11-08 11:09:41,0
+69185,14,1,19,349,2017-11-07 23:34:20,0
+117651,2,1,13,477,2017-11-07 07:31:17,0
+119818,3,1,19,280,2017-11-09 00:07:23,0
+38300,11,1,8,481,2017-11-08 10:44:12,0
+105475,3,1,13,409,2017-11-09 05:00:20,0
+178873,12,1,9,424,2017-11-06 16:57:06,0
+53341,28,1,22,135,2017-11-08 03:01:00,0
+59847,3,1,8,280,2017-11-08 14:28:36,0
+61120,21,1,17,128,2017-11-08 23:41:06,0
+139490,21,1,19,128,2017-11-07 03:38:12,0
+37437,2,1,19,236,2017-11-08 12:42:09,0
+89141,1,1,25,134,2017-11-09 12:28:35,0
+125288,3,1,19,280,2017-11-08 15:23:26,0
+137030,2,1,13,122,2017-11-07 05:49:12,0
+67868,12,1,13,245,2017-11-08 20:32:48,0
+60017,14,1,18,442,2017-11-07 04:11:20,0
+114276,9,1,2,442,2017-11-08 14:40:53,0
+186799,64,2,13,459,2017-11-07 17:46:01,0
+172498,18,1,19,439,2017-11-07 05:10:29,0
+8292,15,1,27,412,2017-11-09 04:04:45,0
+24876,7,1,15,101,2017-11-09 14:36:00,0
+66437,24,1,8,105,2017-11-07 09:29:47,0
+58978,23,1,19,30,2017-11-08 02:56:52,0
+90814,13,1,19,477,2017-11-07 12:51:22,0
+49479,1,1,37,153,2017-11-08 09:15:57,0
+42596,2,1,18,477,2017-11-09 05:44:58,0
+103090,15,1,22,3,2017-11-08 15:46:12,0
+44692,12,1,8,409,2017-11-07 06:39:54,0
+52024,2,1,19,212,2017-11-09 01:48:26,0
+37919,13,1,13,477,2017-11-09 00:31:27,0
+85085,15,1,19,278,2017-11-09 11:01:01,0
+47422,13,1,13,477,2017-11-07 03:57:15,0
+36150,12,1,19,178,2017-11-08 10:47:19,0
+78021,14,1,19,439,2017-11-08 06:29:18,0
+2538,20,1,22,259,2017-11-08 15:25:05,0
+101487,20,1,19,478,2017-11-09 02:24:06,0
+160734,12,1,41,245,2017-11-09 06:31:38,0
+75794,10,1,13,317,2017-11-09 07:46:35,0
+89661,2,1,10,122,2017-11-06 16:49:33,0
+89722,3,1,19,173,2017-11-07 13:14:35,0
+38546,28,1,13,135,2017-11-08 06:44:13,0
+239540,12,1,13,245,2017-11-08 05:57:29,0
+27526,2,1,105,452,2017-11-09 10:45:24,0
+69197,22,1,4,496,2017-11-07 06:02:44,0
+355971,12,2,13,178,2017-11-09 03:48:35,0
+69960,3,1,19,280,2017-11-09 06:03:04,0
+37855,2,1,19,435,2017-11-08 07:56:58,0
+175592,9,1,25,134,2017-11-08 10:09:54,0
+273298,3,1,9,280,2017-11-08 09:03:46,0
+38406,8,1,13,145,2017-11-07 01:25:45,0
+84680,3,1,17,280,2017-11-09 00:39:05,0
+36862,6,1,19,125,2017-11-08 06:47:53,0
+33402,1,1,9,135,2017-11-09 04:02:23,0
+77041,2,1,18,477,2017-11-07 16:29:34,0
+153621,3,1,17,115,2017-11-08 01:10:47,0
+306237,11,1,17,469,2017-11-09 00:01:09,0
+73487,9,2,9,134,2017-11-06 23:43:59,0
+45745,11,1,47,122,2017-11-08 12:08:20,0
+47227,21,1,13,128,2017-11-07 10:38:25,0
+114314,9,1,20,232,2017-11-09 04:29:47,0
+87620,18,1,19,134,2017-11-08 11:15:29,0
+26807,13,1,6,477,2017-11-08 11:02:22,0
+62671,8,1,19,145,2017-11-09 03:57:42,0
+40125,21,1,19,128,2017-11-07 14:22:46,0
+98881,3,1,13,280,2017-11-08 13:57:27,0
+70247,21,1,14,128,2017-11-07 01:58:35,0
+108260,2,1,15,219,2017-11-07 10:42:33,0
+201269,2,1,12,435,2017-11-07 04:05:32,0
+200154,12,1,17,178,2017-11-07 13:26:59,0
+26995,9,1,10,334,2017-11-09 12:38:54,0
+53454,2,1,13,477,2017-11-07 16:33:07,0
+171503,9,1,26,445,2017-11-09 06:18:37,0
+110009,7,1,13,101,2017-11-07 09:55:18,0
+271126,3,1,6,371,2017-11-08 01:09:16,0
+130629,12,1,17,178,2017-11-08 02:02:35,0
+95388,3,1,3,280,2017-11-09 05:27:33,0
+75436,25,1,13,259,2017-11-08 01:46:42,0
+119289,12,2,27,145,2017-11-08 11:20:25,0
+98622,15,1,18,265,2017-11-07 07:56:22,0
+58934,14,1,20,489,2017-11-09 01:12:35,0
+189183,1,1,19,153,2017-11-08 21:32:20,0
+79180,18,1,4,107,2017-11-07 02:52:43,0
+57732,10,2,100,113,2017-11-07 06:27:24,0
+270168,1,1,46,153,2017-11-08 09:25:41,0
+80223,28,1,19,135,2017-11-09 04:27:37,0
+66520,19,3371,24,213,2017-11-08 21:37:57,0
+62009,12,1,17,245,2017-11-07 06:31:59,0
+25373,26,1,13,266,2017-11-07 14:41:19,0
+159857,28,1,8,135,2017-11-09 06:20:52,0
+22006,3,1,22,19,2017-11-09 07:02:19,0
+124096,3,1,22,280,2017-11-09 00:04:18,0
+134957,27,1,19,122,2017-11-07 00:20:05,0
+54960,15,1,13,265,2017-11-09 02:49:56,0
+26573,15,1,13,130,2017-11-08 05:10:46,0
+121332,9,1,19,134,2017-11-08 01:06:15,0
+79717,12,1,19,178,2017-11-08 02:38:25,0
+107164,9,1,17,107,2017-11-09 00:07:12,0
+132492,7,1,19,101,2017-11-07 08:42:16,0
+123040,18,1,13,107,2017-11-08 14:56:32,0
+107809,11,1,37,173,2017-11-09 12:56:30,0
+53964,18,1,13,134,2017-11-07 09:13:22,0
+25041,3,1,32,280,2017-11-07 12:12:55,0
+56166,18,1,14,317,2017-11-07 04:34:05,0
+19264,13,1,19,400,2017-11-08 14:22:04,0
+193061,15,1,13,315,2017-11-08 06:35:42,0
+197093,15,1,15,245,2017-11-07 02:31:51,0
+16050,1,1,3,134,2017-11-07 00:01:16,0
+67754,12,2,9,326,2017-11-07 14:57:47,0
+67439,12,2,9,245,2017-11-09 04:12:11,0
+88172,14,1,19,480,2017-11-07 09:04:34,0
+73487,3,2,41,153,2017-11-07 09:42:22,0
+80827,12,1,3,19,2017-11-09 14:51:01,0
+58973,15,1,18,130,2017-11-08 03:15:36,0
+249632,1,1,17,124,2017-11-08 15:58:08,0
+114878,9,1,13,442,2017-11-07 15:31:43,0
+50752,12,2,13,245,2017-11-09 05:22:57,0
+39180,18,1,866,107,2017-11-09 14:08:12,0
+188345,3,1,18,280,2017-11-07 04:42:21,0
+7572,2,1,10,122,2017-11-07 21:38:32,0
+17988,15,1,6,245,2017-11-06 23:49:15,0
+27793,18,1,19,107,2017-11-08 14:07:50,0
+212485,22,1,19,116,2017-11-09 01:30:34,0
+315062,3,1,13,280,2017-11-09 07:05:32,0
+121209,14,1,13,489,2017-11-07 23:16:20,0
+169058,20,1,37,478,2017-11-07 08:47:31,0
+79916,26,1,17,477,2017-11-08 22:16:16,0
+79827,3,1,8,280,2017-11-07 06:43:06,0
+125432,26,1,19,266,2017-11-07 08:47:18,0
+17677,2,1,9,469,2017-11-07 01:09:04,0
+112617,14,1,22,123,2017-11-07 09:53:25,0
+75329,21,1,16,232,2017-11-08 09:06:54,0
+236269,27,1,2,153,2017-11-07 23:53:04,0
+136279,13,1,19,477,2017-11-07 05:26:57,0
+12091,3,1,27,280,2017-11-08 18:58:23,0
+181311,7,1,37,101,2017-11-07 03:08:12,0
+76727,23,1,19,153,2017-11-09 09:08:59,0
+82039,11,1,20,325,2017-11-08 16:19:07,0
+48240,2,1,12,237,2017-11-07 13:23:14,0
+14877,12,1,19,178,2017-11-08 09:20:16,0
+92916,3,1,18,280,2017-11-08 22:19:12,0
+119929,12,1,19,245,2017-11-07 12:05:42,0
+55047,9,1,26,134,2017-11-07 00:47:21,0
+97982,3,1,19,280,2017-11-08 05:58:21,0
+50238,15,1,53,265,2017-11-07 14:20:34,0
+29489,3,1,9,173,2017-11-07 12:48:36,0
+72357,25,1,17,259,2017-11-09 11:46:21,0
+117898,15,2,13,245,2017-11-08 23:28:38,0
+73516,9,1,13,127,2017-11-09 15:20:26,0
+109413,3,1,19,280,2017-11-08 04:40:01,0
+47962,12,1,13,265,2017-11-06 21:00:22,0
+13076,27,1,13,122,2017-11-09 11:38:38,0
+38422,12,1,20,140,2017-11-08 09:08:38,0
+36183,14,1,20,401,2017-11-07 21:43:49,0
+92890,14,1,19,442,2017-11-07 06:12:35,0
+120444,3,1,9,130,2017-11-09 08:11:04,0
+67439,12,1,17,326,2017-11-06 16:09:08,0
+193097,2,2,3,364,2017-11-07 15:00:49,0
+88935,2,1,19,469,2017-11-09 08:56:47,0
+53408,2,1,13,258,2017-11-08 04:46:38,0
+203084,18,1,13,107,2017-11-09 10:00:09,0
+167094,3,1,9,280,2017-11-07 04:24:41,0
+119349,12,1,41,259,2017-11-09 12:49:27,0
+70677,11,1,13,122,2017-11-07 01:36:04,0
+167482,21,1,18,232,2017-11-07 17:11:38,0
+85631,3,1,18,205,2017-11-09 07:53:06,0
+77840,14,1,19,442,2017-11-09 14:02:53,0
+73467,12,1,13,328,2017-11-07 07:57:58,0
+122991,64,1,8,459,2017-11-07 09:19:21,0
+159834,18,1,4,134,2017-11-09 11:51:38,0
+107041,1,1,13,377,2017-11-07 05:41:59,0
+12689,3,1,19,135,2017-11-08 08:30:27,0
+44527,3,1,19,153,2017-11-08 17:52:03,0
+73839,15,1,19,245,2017-11-07 14:06:53,0
+90224,13,1,13,477,2017-11-07 00:44:39,0
+125295,3,1,13,317,2017-11-08 07:05:37,0
+119349,14,1,28,446,2017-11-09 11:03:57,0
+171263,12,1,19,328,2017-11-09 12:36:11,0
+41203,15,1,19,245,2017-11-08 04:10:33,0
+114597,9,1,9,134,2017-11-08 00:08:16,0
+65352,15,1,19,278,2017-11-09 12:39:31,0
+53715,2,1,47,469,2017-11-09 11:26:57,0
+137052,12,2,13,145,2017-11-07 11:25:33,0
+99945,9,1,15,445,2017-11-07 04:39:17,0
+125551,14,1,13,467,2017-11-06 23:20:45,0
+47243,12,1,18,259,2017-11-07 09:20:57,0
+6581,3,1,19,280,2017-11-08 11:44:32,0
+37506,1,1,20,125,2017-11-07 18:23:54,0
+93021,12,1,19,259,2017-11-08 13:45:25,0
+67944,23,1,19,153,2017-11-08 23:36:46,0
+185236,12,1,13,178,2017-11-08 11:33:54,0
+69775,3,1,15,424,2017-11-08 05:58:40,0
+94778,3,1,13,280,2017-11-07 07:41:14,0
+76855,2,1,19,401,2017-11-08 12:26:22,0
+32335,15,1,32,140,2017-11-07 19:12:35,0
+96224,9,1,19,215,2017-11-09 05:01:08,0
+358741,10,1,19,377,2017-11-09 08:33:04,0
+13974,26,1,13,121,2017-11-09 03:20:13,0
+70956,15,1,13,430,2017-11-07 08:42:09,0
+80743,12,1,19,328,2017-11-08 08:45:21,0
+110589,25,1,13,259,2017-11-08 13:57:23,0
+128210,6,1,13,125,2017-11-09 01:46:09,0
+214842,27,1,10,153,2017-11-08 00:12:55,0
+119798,15,1,25,245,2017-11-08 16:59:07,0
+84587,3,1,17,280,2017-11-07 12:10:35,0
+163326,3,1,37,280,2017-11-07 03:18:15,0
+5348,2,1,15,219,2017-11-07 11:12:53,0
+1471,26,1,25,266,2017-11-07 10:41:25,0
+92766,18,1,13,107,2017-11-07 09:26:38,0
+35703,18,1,1,107,2017-11-07 05:42:18,0
+352095,15,1,37,379,2017-11-08 18:56:01,0
+74617,25,1,12,259,2017-11-07 06:03:41,0
+40631,14,1,19,349,2017-11-09 00:22:26,0
+75786,2,1,19,205,2017-11-07 09:25:10,0
+30203,13,1,19,477,2017-11-07 03:22:34,0
+12506,32,1,41,376,2017-11-08 14:17:28,0
+88923,14,1,35,480,2017-11-08 04:05:18,0
+18589,26,1,19,121,2017-11-06 23:12:07,0
+60045,11,1,1,122,2017-11-07 03:37:08,0
+94906,12,1,41,259,2017-11-08 14:01:07,0
+221041,2,1,37,435,2017-11-07 16:59:23,0
+114276,12,1,14,178,2017-11-09 13:03:12,0
+78881,18,1,20,134,2017-11-07 14:34:42,0
+220427,9,1,22,466,2017-11-08 04:43:48,0
+5178,9,2,78,244,2017-11-09 10:53:33,0
+85482,9,1,41,232,2017-11-09 06:29:53,0
+86767,13,1,8,469,2017-11-08 02:23:22,0
+112302,14,1,19,371,2017-11-09 08:53:03,0
+114220,15,1,19,278,2017-11-08 10:02:28,0
+124540,2,2,9,477,2017-11-07 11:59:43,0
+63262,9,1,19,232,2017-11-09 03:58:07,0
+30647,3,1,8,173,2017-11-08 06:58:43,0
+116272,2,1,19,469,2017-11-07 03:53:55,0
+5348,14,1,47,379,2017-11-07 13:23:40,0
+271020,15,1,19,315,2017-11-09 08:03:03,0
+68900,9,1,10,466,2017-11-09 05:28:03,0
+81550,12,1,13,178,2017-11-07 17:17:47,0
+114965,12,1,19,409,2017-11-07 13:34:16,0
+40440,9,1,19,232,2017-11-09 11:16:48,0
+48490,3,1,13,211,2017-11-09 05:44:24,0
+8179,12,1,41,245,2017-11-08 16:50:51,0
+145934,12,1,28,178,2017-11-07 09:00:00,0
+92673,26,1,13,266,2017-11-09 10:36:03,0
+69691,12,1,13,19,2017-11-08 09:05:10,0
+4062,2,2,13,237,2017-11-09 01:35:39,0
+105433,2,1,31,205,2017-11-08 13:58:13,0
+45870,15,1,13,245,2017-11-08 17:24:54,0
+62955,15,1,17,315,2017-11-09 07:20:50,0
+100393,14,1,2,439,2017-11-09 14:50:32,0
+85644,24,2,19,105,2017-11-06 16:06:36,0
+39135,1,1,6,153,2017-11-07 10:24:50,0
+3363,3,1,18,280,2017-11-07 06:44:23,0
+114904,2,1,19,205,2017-11-08 07:57:47,0
+64325,14,1,13,480,2017-11-09 00:54:18,0
+77314,43,1,18,330,2017-11-09 10:41:35,0
+5314,3,1,47,280,2017-11-08 13:33:18,0
+202529,9,1,13,466,2017-11-08 15:17:14,0
+46371,24,2,30,105,2017-11-08 23:52:25,0
+116344,9,1,13,107,2017-11-09 00:24:39,0
+184489,13,1,19,477,2017-11-06 23:27:55,0
+18439,18,1,19,107,2017-11-08 15:51:34,0
+55100,3,1,14,135,2017-11-07 08:16:45,0
+69973,18,1,13,107,2017-11-08 23:38:26,0
+123994,9,1,19,244,2017-11-07 14:41:17,0
+67530,3,1,49,211,2017-11-08 10:24:05,0
+92735,12,2,36,326,2017-11-08 02:51:49,0
+137052,3,1,1,442,2017-11-07 01:28:01,0
+105475,3,1,19,409,2017-11-07 13:39:47,0
+110476,14,1,19,442,2017-11-08 05:54:29,0
+114276,18,1,17,107,2017-11-08 11:42:03,0
+70532,9,1,13,258,2017-11-08 15:15:47,0
+5222,3,1,18,280,2017-11-08 11:40:13,0
+113326,2,1,25,219,2017-11-07 01:31:38,0
+159671,12,1,19,178,2017-11-08 02:53:28,0
+106723,2,1,8,477,2017-11-06 17:17:46,0
+208468,21,1,19,128,2017-11-08 08:16:20,0
+42289,3,2,49,130,2017-11-08 13:16:42,0
+35188,3,1,13,205,2017-11-09 12:52:29,0
+41691,12,1,17,409,2017-11-09 05:26:05,0
+48212,3,1,22,137,2017-11-09 12:00:40,0
+149030,2,1,19,377,2017-11-08 10:49:03,0
+37462,3,1,19,452,2017-11-07 19:01:54,0
+63993,64,1,13,459,2017-11-07 23:36:59,0
+121475,12,1,13,245,2017-11-07 14:37:27,0
+157590,14,1,47,463,2017-11-08 08:16:09,0
+103377,2,2,28,237,2017-11-09 09:47:19,0
+284996,2,1,12,364,2017-11-08 00:56:47,0
+48240,18,1,19,439,2017-11-08 05:01:12,0
+125241,3,1,22,173,2017-11-07 00:34:09,0
+2387,15,1,6,153,2017-11-08 06:01:44,0
+69510,20,1,19,478,2017-11-07 05:15:30,0
+81013,7,1,19,101,2017-11-07 11:29:34,0
+123703,14,1,37,480,2017-11-09 13:31:13,0
+46745,22,1,17,116,2017-11-07 08:05:24,0
+74006,3,1,19,137,2017-11-07 07:28:42,0
+149097,2,1,22,435,2017-11-07 01:35:29,0
+114314,2,1,13,401,2017-11-08 13:36:06,0
+100959,3,1,17,409,2017-11-08 14:04:26,0
+163593,27,1,19,153,2017-11-08 02:28:04,0
+121710,3,1,6,130,2017-11-07 06:10:50,0
+31090,55,1,25,453,2017-11-08 04:08:41,0
+220834,19,0,24,213,2017-11-08 15:05:53,0
+55080,9,1,13,334,2017-11-08 02:24:53,0
+94385,13,1,19,477,2017-11-09 14:26:32,0
+105855,2,1,13,364,2017-11-07 04:25:17,0
+69117,18,1,47,121,2017-11-06 16:33:40,0
+100933,21,1,19,232,2017-11-08 03:59:27,0
+217833,19,0,24,347,2017-11-08 08:04:12,0
+209,3,1,16,280,2017-11-08 15:13:44,0
+90521,12,1,19,140,2017-11-09 09:24:29,0
+36407,2,2,10,237,2017-11-07 03:30:37,0
+1074,3,1,11,211,2017-11-08 22:33:22,0
+28950,14,1,19,113,2017-11-08 02:41:55,0
+111153,15,2,19,3,2017-11-08 17:14:36,0
+48679,64,1,14,459,2017-11-07 07:14:08,0
+14116,2,1,10,237,2017-11-09 07:33:33,0
+120425,15,1,19,480,2017-11-08 03:44:43,0
+184583,3,1,13,135,2017-11-07 04:37:35,0
+95669,3,1,22,409,2017-11-07 06:56:22,0
+28011,28,1,19,135,2017-11-09 14:17:42,0
+55963,1,1,17,135,2017-11-08 01:00:33,0
+105475,7,1,3,101,2017-11-09 15:43:42,0
+77523,23,1,12,153,2017-11-09 06:51:23,0
+106749,12,1,15,178,2017-11-08 12:01:54,0
+68758,15,1,19,315,2017-11-09 04:30:19,0
+58363,21,1,13,128,2017-11-09 03:18:41,0
+79909,12,1,8,205,2017-11-08 15:36:23,0
+137052,14,1,3,442,2017-11-07 14:43:19,0
+73516,18,1,19,121,2017-11-08 16:26:00,0
+658,3,1,13,280,2017-11-09 02:44:34,0
+101074,2,1,18,237,2017-11-08 04:23:23,0
+116708,12,1,19,340,2017-11-09 06:23:58,0
+61718,3,1,15,280,2017-11-08 10:46:32,0
+25553,2,1,3,205,2017-11-07 17:38:04,0
+66066,7,1,27,101,2017-11-07 11:16:40,0
+38227,18,1,13,121,2017-11-08 04:30:36,0
+106655,27,1,10,122,2017-11-08 13:03:58,0
+67754,2,2,19,258,2017-11-08 12:44:50,0
+53454,9,1,12,234,2017-11-07 20:08:49,0
+172724,3,1,13,409,2017-11-07 06:13:28,0
+190464,8,1,10,145,2017-11-07 08:26:48,0
+162437,3,1,13,130,2017-11-06 22:55:44,0
+3262,18,1,17,107,2017-11-06 16:43:26,0
+203386,11,1,18,319,2017-11-08 06:10:30,0
+43827,20,1,19,259,2017-11-08 11:18:27,0
+114547,14,1,13,439,2017-11-08 00:42:44,0
+234955,9,1,41,234,2017-11-09 07:11:24,0
+80058,29,1,27,210,2017-11-09 10:37:00,0
+119125,9,1,25,215,2017-11-08 15:43:37,0
+116001,28,1,13,135,2017-11-07 12:01:19,0
+38413,18,1,31,107,2017-11-07 13:36:43,0
+208036,18,1,6,121,2017-11-08 06:40:51,0
+40190,12,1,19,265,2017-11-08 04:25:49,0
+297731,3,1,39,211,2017-11-08 16:09:49,0
+57953,11,1,12,122,2017-11-09 14:07:27,0
+31959,12,1,19,259,2017-11-09 15:50:23,0
+9704,9,1,1,127,2017-11-08 15:01:43,0
+105433,2,1,10,205,2017-11-07 02:31:08,0
+13643,2,1,13,237,2017-11-08 07:29:11,0
+84644,15,1,8,480,2017-11-09 13:28:09,0
+143837,12,1,12,178,2017-11-07 09:35:43,0
+144643,12,1,30,265,2017-11-07 10:20:09,0
+14969,15,1,6,315,2017-11-07 02:03:49,0
+88168,3,1,19,280,2017-11-09 01:08:40,0
+126371,3,1,13,130,2017-11-08 03:56:40,0
+106337,3,1,9,280,2017-11-09 05:10:09,0
+209663,15,1,19,245,2017-11-06 16:32:34,0
+75312,9,1,1,244,2017-11-07 03:57:42,0
+33746,14,1,18,467,2017-11-08 21:06:58,0
+71483,14,1,13,442,2017-11-09 02:45:07,0
+144353,21,1,20,128,2017-11-09 00:32:00,0
+43827,12,1,17,265,2017-11-07 09:19:03,0
+102879,9,1,13,127,2017-11-09 13:56:50,0
+45657,3,1,13,280,2017-11-07 06:37:08,0
+5314,27,1,22,153,2017-11-09 15:56:11,0
+8582,18,1,22,439,2017-11-07 09:35:50,0
+8109,9,1,18,215,2017-11-09 12:26:53,0
+73610,12,1,19,140,2017-11-08 01:14:27,0
+19125,22,1,10,116,2017-11-07 07:52:28,0
+92825,9,1,12,334,2017-11-09 10:14:38,0
+84105,6,1,18,459,2017-11-08 06:19:10,0
+93592,109,0,24,347,2017-11-09 05:15:17,0
+130163,9,1,19,442,2017-11-09 01:12:00,0
+119028,3,1,17,424,2017-11-07 00:47:58,0
+39756,2,2,97,205,2017-11-07 13:16:54,0
+119349,13,1,22,477,2017-11-09 11:57:37,0
+56619,15,1,27,480,2017-11-07 22:17:27,0
+119031,2,1,19,435,2017-11-07 07:20:13,0
+116882,12,1,19,265,2017-11-08 12:21:02,0
+123994,12,1,19,178,2017-11-07 04:06:17,0
+100099,12,1,13,328,2017-11-07 14:28:11,0
+110354,58,3866,866,347,2017-11-08 22:37:05,0
+5314,19,0,0,347,2017-11-09 13:13:59,0
+146575,12,1,13,245,2017-11-07 14:38:16,0
+57519,12,1,12,265,2017-11-08 20:59:29,0
+22387,12,1,10,265,2017-11-07 06:30:45,0
+24039,12,1,19,178,2017-11-09 05:51:29,0
+95766,3,1,8,130,2017-11-07 06:45:08,0
+114719,11,1,10,487,2017-11-09 14:47:11,0
+209663,15,1,13,245,2017-11-07 22:59:40,0
+79671,27,1,15,153,2017-11-08 12:58:23,0
+78833,18,1,25,107,2017-11-08 12:14:22,0
+179344,3,1,27,409,2017-11-09 12:04:17,0
+141925,2,1,15,212,2017-11-08 15:56:45,0
+204884,18,1,10,121,2017-11-06 18:25:21,0
+118367,12,1,15,259,2017-11-06 22:49:01,0
+40914,2,1,35,122,2017-11-08 06:28:42,0
+1745,15,1,19,379,2017-11-07 11:26:40,0
+15990,3,1,6,115,2017-11-08 01:52:37,0
+56246,7,2,10,101,2017-11-09 07:52:10,0
+157047,12,1,25,245,2017-11-07 11:20:39,0
+201370,14,1,13,379,2017-11-06 23:11:27,0
+21633,9,2,9,215,2017-11-09 05:09:33,0
+5314,8,2,19,145,2017-11-07 19:12:12,0
+47273,8,2,25,140,2017-11-08 14:32:38,0
+72022,3,1,17,371,2017-11-09 07:34:23,0
+88541,12,1,47,212,2017-11-07 05:41:26,0
+61877,3,1,19,137,2017-11-08 12:23:16,0
+84488,20,1,17,259,2017-11-07 07:40:46,0
+14085,2,1,12,237,2017-11-08 02:40:33,0
+170063,15,1,3,245,2017-11-07 05:48:00,0
+65555,12,1,13,245,2017-11-08 05:19:03,0
+62339,2,1,53,401,2017-11-07 06:18:11,0
+200198,12,1,22,178,2017-11-07 05:27:00,0
+31403,3,1,8,211,2017-11-09 08:20:44,0
+172003,14,1,19,446,2017-11-08 02:36:24,0
+114276,9,1,20,120,2017-11-08 03:05:49,0
+108913,12,1,13,219,2017-11-07 17:00:01,0
+3196,15,1,13,315,2017-11-08 12:23:04,0
+66948,3,1,13,480,2017-11-07 01:08:16,0
+38900,14,1,19,379,2017-11-06 23:04:55,0
+13186,26,1,37,121,2017-11-09 04:05:13,0
+180298,2,1,25,212,2017-11-07 04:17:08,0
+14611,13,1,9,477,2017-11-09 14:44:37,0
+19743,18,1,3,121,2017-11-09 14:05:10,0
+118094,9,1,8,232,2017-11-09 14:05:07,0
+50169,18,1,23,121,2017-11-08 08:46:29,0
+59361,12,1,19,259,2017-11-09 03:12:22,0
+3279,12,1,19,245,2017-11-08 05:20:07,0
+114446,7,1,28,101,2017-11-09 08:13:06,0
+37763,3,1,25,489,2017-11-07 02:59:26,0
+53454,26,1,19,121,2017-11-08 04:04:10,0
+123978,3,1,19,280,2017-11-07 08:58:56,0
+111025,9,1,3,466,2017-11-08 16:23:44,0
+222163,3,1,18,280,2017-11-08 06:38:21,0
+129623,24,1,19,105,2017-11-07 08:28:50,0
+275692,9,2,9,215,2017-11-08 23:58:15,0
+113475,3,1,17,317,2017-11-07 12:52:16,0
+147,15,1,19,265,2017-11-08 02:02:46,0
+43537,20,1,19,478,2017-11-08 05:39:06,0
+63619,3,1,17,280,2017-11-07 01:33:28,0
+7645,3,1,19,130,2017-11-09 05:16:13,0
+105128,12,1,13,481,2017-11-08 13:34:48,0
+284210,13,1,19,477,2017-11-08 12:37:08,0
+48282,12,1,19,328,2017-11-09 01:02:08,0
+54841,13,1,3,477,2017-11-07 09:37:57,0
+110589,11,1,16,319,2017-11-09 12:33:17,0
+18311,3,1,17,280,2017-11-09 15:11:03,0
+156538,3,1,13,115,2017-11-06 18:46:16,0
+139710,15,1,17,412,2017-11-09 11:21:12,0
+3133,12,1,3,328,2017-11-07 10:13:25,0
+271799,12,1,13,265,2017-11-09 07:32:17,0
+31047,27,1,13,153,2017-11-08 04:44:10,0
+109147,15,1,19,245,2017-11-08 00:33:31,0
+300702,18,1,19,107,2017-11-09 12:07:33,0
+140192,18,1,17,121,2017-11-08 01:13:34,0
+37253,19,0,21,213,2017-11-07 19:33:43,0
+34691,9,1,13,442,2017-11-07 06:32:59,0
+81167,15,1,8,379,2017-11-09 08:40:47,0
+239518,15,1,18,245,2017-11-08 08:31:36,0
+91885,12,1,10,178,2017-11-08 10:19:37,0
+92900,12,1,18,265,2017-11-08 02:02:56,0
+47148,3,2,9,137,2017-11-09 05:54:19,0
+53651,207,1,13,488,2017-11-07 01:36:07,0
+24700,15,1,19,3,2017-11-07 18:15:16,0
+34208,14,1,10,439,2017-11-07 09:52:32,0
+81714,12,1,37,265,2017-11-07 04:50:35,0
+46558,3,1,13,280,2017-11-09 04:31:38,0
+58905,3,1,48,480,2017-11-08 12:59:31,0
+22975,2,1,19,477,2017-11-07 22:15:18,0
+194151,15,1,22,315,2017-11-09 12:51:08,0
+80000,18,1,17,439,2017-11-08 08:18:45,0
+74268,2,1,19,243,2017-11-09 03:47:06,0
+14692,3,1,23,489,2017-11-09 12:34:05,0
+102235,14,1,19,401,2017-11-07 03:55:24,0
+185465,3,1,13,280,2017-11-09 01:11:29,0
+123635,18,1,18,107,2017-11-08 16:25:25,0
+83723,18,1,15,107,2017-11-07 00:15:57,0
+115273,26,1,13,266,2017-11-09 12:33:14,0
+210444,12,1,47,497,2017-11-07 12:02:50,0
+5168,12,1,15,145,2017-11-08 16:33:37,0
+115634,1,1,13,125,2017-11-08 15:20:10,0
+68600,3,1,22,137,2017-11-07 17:00:22,0
+78374,21,1,19,232,2017-11-09 10:33:29,0
+16426,64,1,20,459,2017-11-09 04:46:14,0
+67237,2,1,19,452,2017-11-07 13:15:47,0
+8356,19,0,50,213,2017-11-08 09:51:10,0
+114904,2,2,11,205,2017-11-08 05:23:54,0
+88696,6,1,15,459,2017-11-07 09:31:32,0
+125736,3,1,25,280,2017-11-08 18:56:14,0
+56707,18,1,13,107,2017-11-09 05:14:52,0
+78943,6,1,18,101,2017-11-08 10:27:02,0
+24951,19,37,24,213,2017-11-08 09:36:24,0
+93587,25,1,19,259,2017-11-09 05:04:19,0
+153621,15,1,22,412,2017-11-08 00:22:43,0
+49234,3,1,13,480,2017-11-07 04:37:58,0
+121224,12,1,13,122,2017-11-07 16:26:26,0
+124763,15,1,8,430,2017-11-07 09:05:11,0
+175237,3,1,19,280,2017-11-07 06:38:46,0
+37417,3,1,22,280,2017-11-08 10:23:54,0
+37563,14,1,53,379,2017-11-06 23:41:49,0
+220616,15,1,18,480,2017-11-08 00:09:26,0
+105323,2,1,19,205,2017-11-08 17:01:43,0
+177567,14,1,20,439,2017-11-07 03:57:23,0
+116472,14,1,22,113,2017-11-09 10:39:38,0
+98424,23,1,13,153,2017-11-06 23:34:03,0
+321222,15,1,19,245,2017-11-09 00:54:04,0
+30564,15,2,19,315,2017-11-08 10:03:58,0
+53454,3,2,28,137,2017-11-07 11:21:29,0
+350870,3,1,13,211,2017-11-09 11:54:51,0
+81076,27,1,20,153,2017-11-07 23:33:04,0
+78281,3,1,19,379,2017-11-08 14:02:00,0
+124423,3,1,18,424,2017-11-07 16:52:46,0
+2996,3,1,13,280,2017-11-08 11:47:08,0
+58172,3,1,20,135,2017-11-08 08:10:34,0
+106537,18,1,9,121,2017-11-07 08:04:40,0
+42812,13,1,55,477,2017-11-09 04:44:03,0
+178618,18,1,17,107,2017-11-09 02:47:44,0
+49431,12,1,13,245,2017-11-07 23:41:04,0
+39546,12,1,19,259,2017-11-09 05:31:28,0
+106279,7,1,13,101,2017-11-09 06:48:27,0
+44067,12,1,19,328,2017-11-07 11:12:25,0
+193551,19,0,24,213,2017-11-07 11:02:17,1
+90655,25,1,3,259,2017-11-08 17:54:23,0
+973,18,1,19,439,2017-11-08 07:41:28,0
+31387,18,1,1,107,2017-11-07 06:49:27,0
+35984,13,1,8,477,2017-11-07 09:00:20,0
+45738,15,1,20,245,2017-11-07 01:03:28,0
+50482,2,1,15,477,2017-11-09 06:06:56,0
+83481,2,1,13,212,2017-11-07 15:51:23,0
+75634,8,1,17,145,2017-11-07 18:52:56,0
+70749,3,1,17,280,2017-11-07 09:42:01,0
+254857,18,1,20,107,2017-11-08 15:07:26,0
+133827,12,1,13,178,2017-11-07 03:08:04,0
+176997,3,1,12,424,2017-11-07 20:30:42,0
+51973,26,1,32,121,2017-11-09 13:57:17,0
+70896,15,1,30,111,2017-11-09 06:01:14,0
+152945,13,1,30,477,2017-11-07 07:56:59,0
+48581,2,1,13,236,2017-11-07 06:31:52,0
+197748,12,1,46,265,2017-11-07 11:23:28,0
+93632,18,1,19,121,2017-11-09 13:45:04,0
+59125,7,1,13,101,2017-11-09 06:12:54,0
+40241,1,1,18,115,2017-11-09 06:44:38,0
+105560,8,1,19,145,2017-11-07 23:45:31,0
+99542,3,1,6,280,2017-11-09 00:58:49,0
+109619,32,1,13,376,2017-11-08 16:15:36,0
+78564,3,1,13,115,2017-11-09 00:25:28,0
+5348,46,0,38,347,2017-11-07 08:39:03,0
+69735,23,1,26,153,2017-11-08 04:29:23,0
+100073,14,1,18,463,2017-11-08 10:21:28,0
+158274,23,1,31,153,2017-11-07 05:17:01,0
+248066,9,1,19,134,2017-11-08 00:08:50,0
+259330,12,1,19,409,2017-11-08 05:16:59,0
+85154,12,1,35,178,2017-11-08 23:07:19,0
+50510,21,1,25,128,2017-11-09 08:01:45,0
+90053,1,1,25,349,2017-11-08 15:40:03,0
+98738,2,1,53,469,2017-11-07 12:47:12,0
+171221,15,1,32,245,2017-11-07 15:07:11,0
+109734,6,1,19,125,2017-11-07 12:14:24,0
+95089,15,1,26,140,2017-11-07 17:53:14,0
+13034,3,1,19,135,2017-11-09 13:32:12,0
+107571,12,1,19,245,2017-11-07 11:01:30,0
+113357,13,1,25,477,2017-11-07 21:31:44,0
+63550,12,2,13,265,2017-11-08 15:57:42,0
+90588,12,1,9,259,2017-11-08 06:20:23,0
+37855,3,1,19,173,2017-11-07 01:02:44,0
+110031,15,1,9,278,2017-11-09 12:52:59,0
+35787,3,1,1,280,2017-11-09 04:14:17,0
+114904,2,1,18,205,2017-11-09 14:55:42,0
+7707,15,1,13,245,2017-11-08 15:53:26,0
+16453,29,2,35,343,2017-11-08 11:38:02,0
+40387,15,1,22,245,2017-11-09 05:14:27,0
+153056,15,1,19,315,2017-11-08 07:04:57,0
+34401,12,1,13,265,2017-11-08 02:58:23,0
+189065,3,1,25,280,2017-11-09 00:22:09,0
+8391,14,1,13,442,2017-11-09 14:05:10,0
+29372,93,1,19,371,2017-11-09 15:18:38,0
+15117,6,1,19,459,2017-11-07 12:22:26,0
+130629,9,1,13,258,2017-11-07 16:21:30,0
+323993,2,1,16,237,2017-11-09 01:49:35,0
+53715,9,1,30,466,2017-11-09 09:46:28,0
+123316,2,1,17,237,2017-11-07 07:22:14,0
+102919,12,1,19,178,2017-11-09 08:23:24,0
+5147,3,1,17,280,2017-11-09 06:34:37,0
+43881,18,1,13,107,2017-11-09 03:51:07,0
+67291,22,1,22,116,2017-11-09 00:21:39,0
+48383,3,1,14,280,2017-11-09 02:59:26,0
+88089,2,1,19,364,2017-11-08 01:06:45,0
+107739,14,1,16,439,2017-11-08 14:28:56,0
+159945,12,1,8,259,2017-11-07 03:17:00,0
+64435,12,1,19,178,2017-11-07 10:23:04,0
+37148,3,1,9,424,2017-11-09 03:14:47,0
+102768,21,1,19,128,2017-11-09 05:58:28,0
+9886,12,1,13,19,2017-11-08 10:44:32,0
+156753,2,1,19,219,2017-11-07 03:41:22,0
+49538,2,1,6,477,2017-11-06 23:42:13,0
+3093,8,1,25,259,2017-11-07 11:00:58,0
+54332,12,2,43,140,2017-11-07 09:11:40,0
+329098,21,2,22,128,2017-11-09 12:03:27,0
+13331,3,1,18,280,2017-11-09 04:34:25,0
+125954,12,1,19,497,2017-11-08 10:39:59,0
+44907,21,1,19,128,2017-11-07 10:06:37,0
+91734,2,1,13,205,2017-11-08 01:10:15,0
+22037,18,1,13,107,2017-11-08 14:04:28,0
+202156,9,1,13,334,2017-11-07 06:20:29,0
+22758,15,1,8,245,2017-11-07 11:44:53,0
+39083,12,1,8,259,2017-11-07 11:50:42,0
+60854,26,1,13,266,2017-11-09 15:14:22,0
+5348,9,2,13,258,2017-11-07 16:41:15,0
+212132,15,1,19,430,2017-11-09 10:01:51,0
+22978,23,1,22,153,2017-11-08 11:34:55,0
+30626,2,1,20,258,2017-11-08 06:13:05,0
+177174,21,1,13,128,2017-11-09 04:37:00,0
+102357,12,2,20,178,2017-11-09 14:57:44,0
+184601,3,1,28,480,2017-11-08 09:31:46,0
+88856,12,1,19,245,2017-11-07 13:56:07,0
+112064,12,1,13,259,2017-11-09 02:38:34,0
+122733,2,1,19,122,2017-11-08 09:08:17,0
+5178,22,1,3,496,2017-11-07 04:23:29,0
+5348,9,1,25,442,2017-11-07 16:14:52,0
+193454,14,1,13,489,2017-11-09 04:47:25,0
+90791,9,1,6,466,2017-11-09 01:02:09,0
+197864,15,1,13,430,2017-11-09 02:29:05,0
+152416,2,1,13,477,2017-11-07 00:37:11,0
+89946,3,1,11,280,2017-11-08 09:26:04,0
+64435,14,1,19,463,2017-11-07 04:13:10,0
+5348,21,2,13,128,2017-11-08 12:50:49,0
+107155,20,2,15,259,2017-11-09 14:21:06,0
+114276,9,1,8,244,2017-11-08 02:17:28,0
+201499,3,1,19,489,2017-11-09 09:56:34,0
+93054,12,1,8,259,2017-11-07 12:01:43,0
+45304,18,1,19,449,2017-11-08 02:12:16,0
+330543,93,1,19,371,2017-11-09 13:55:19,0
+26726,12,1,13,259,2017-11-08 00:31:31,0
+90866,27,1,28,153,2017-11-09 00:08:17,0
+43593,3,1,18,135,2017-11-09 08:36:44,0
+132620,12,1,19,265,2017-11-07 03:46:41,0
+335107,18,1,17,107,2017-11-08 23:48:07,0
+166770,3,1,22,424,2017-11-07 14:48:25,0
+197722,3,1,10,280,2017-11-07 22:49:15,0
+41666,18,1,13,134,2017-11-07 15:06:38,0
+46516,9,1,47,127,2017-11-09 08:57:57,0
+86419,2,1,20,401,2017-11-08 18:05:32,0
+4489,12,1,19,245,2017-11-07 16:34:56,0
+104132,2,1,20,237,2017-11-08 04:40:51,0
+26792,24,2,9,178,2017-11-09 11:10:13,0
+5348,12,1,13,259,2017-11-08 14:13:57,0
+108227,3,1,36,379,2017-11-07 00:57:55,0
+45723,64,1,23,459,2017-11-06 21:58:37,0
+5178,15,1,18,245,2017-11-09 05:51:17,0
+105475,2,1,14,401,2017-11-09 09:24:06,0
+209104,3,1,43,424,2017-11-07 14:42:34,0
+173226,12,1,19,178,2017-11-06 22:08:29,0
+93886,13,1,19,477,2017-11-08 09:33:30,0
+101810,3,1,27,280,2017-11-08 02:19:54,0
+351586,18,1,18,107,2017-11-09 12:45:00,0
+93207,3,1,13,280,2017-11-08 11:04:49,0
+117939,26,1,13,121,2017-11-08 07:01:21,0
+5314,151,0,0,347,2017-11-08 03:04:56,0
+30025,3,1,19,379,2017-11-08 22:29:51,0
+111400,10,1,17,113,2017-11-07 06:04:06,0
+30214,12,1,17,497,2017-11-09 08:42:00,0
+294714,18,1,13,107,2017-11-08 23:02:05,0
+201181,3,1,22,205,2017-11-06 19:42:08,0
+91168,18,1,19,134,2017-11-07 12:04:57,0
+113700,14,1,19,489,2017-11-09 04:36:11,0
+87673,2,1,15,469,2017-11-08 04:48:20,0
+15759,3,1,10,489,2017-11-08 09:13:04,0
+116469,18,1,35,439,2017-11-09 13:32:42,0
+3167,12,1,7,242,2017-11-07 10:51:40,0
+48575,6,1,19,459,2017-11-07 16:14:40,0
+45730,11,1,19,360,2017-11-07 07:45:24,0
+83238,3,1,18,280,2017-11-08 00:12:31,0
+162427,19,114,0,213,2017-11-07 06:41:55,0
+74803,1,1,22,349,2017-11-08 00:29:47,0
+10826,24,1,17,105,2017-11-08 03:05:10,0
+53454,15,1,10,140,2017-11-09 14:18:04,0
+16067,12,1,19,245,2017-11-07 17:59:27,0
+66016,110,3866,866,347,2017-11-08 22:57:05,0
+13080,18,1,26,107,2017-11-09 02:35:28,0
+108803,2,1,13,219,2017-11-08 05:40:32,0
+91611,21,1,13,128,2017-11-08 14:39:55,0
+27627,2,1,19,477,2017-11-07 14:19:28,0
+7944,1,1,15,153,2017-11-07 02:37:09,0
+20967,2,1,19,258,2017-11-08 06:59:48,0
+18219,14,1,19,478,2017-11-07 17:03:21,0
+42139,14,1,8,416,2017-11-07 01:08:55,0
+142590,3,1,19,417,2017-11-09 00:35:10,0
+42167,18,1,20,439,2017-11-07 11:43:06,0
+300702,23,1,37,153,2017-11-09 12:19:13,0
+73516,2,1,13,477,2017-11-09 05:57:05,0
+74252,12,1,53,259,2017-11-09 10:13:25,0
+88710,14,1,22,134,2017-11-08 23:43:38,0
+58705,3,1,20,371,2017-11-07 00:51:13,0
+73516,12,2,49,326,2017-11-09 03:58:14,0
+183151,2,1,8,435,2017-11-09 06:03:20,0
+146963,64,1,19,459,2017-11-07 02:41:40,0
+154111,15,1,13,245,2017-11-06 19:55:52,0
+7612,2,1,35,477,2017-11-08 06:45:51,0
+73100,14,1,13,349,2017-11-07 13:31:20,0
+67097,2,2,49,477,2017-11-07 15:44:19,0
+114678,7,59,6,101,2017-11-08 05:53:37,0
+97684,15,1,13,3,2017-11-09 14:29:36,0
+123908,12,2,17,265,2017-11-09 09:54:40,0
+31653,11,1,18,469,2017-11-07 05:28:59,0
+109386,3,1,19,280,2017-11-08 02:17:08,0
+65826,2,1,19,401,2017-11-08 05:02:30,0
+31278,14,1,19,489,2017-11-08 01:29:36,0
+134011,18,1,20,107,2017-11-07 03:41:18,0
+69029,8,1,20,145,2017-11-08 04:56:10,0
+119356,2,1,19,237,2017-11-09 00:34:04,0
+25614,2,1,19,205,2017-11-07 15:09:33,0
+4759,18,1,19,107,2017-11-07 21:52:53,1
+124024,12,1,32,259,2017-11-08 15:03:36,0
+198559,2,1,12,401,2017-11-08 13:10:12,0
+100182,7,2,19,101,2017-11-07 23:40:45,0
+50397,12,1,13,265,2017-11-09 06:57:08,0
+191290,21,1,17,128,2017-11-07 10:55:55,0
+68089,3,1,60,402,2017-11-08 17:52:42,0
+127583,12,1,16,178,2017-11-07 07:30:06,0
+181838,2,1,25,435,2017-11-07 01:55:10,0
+19014,14,1,19,379,2017-11-08 11:20:31,0
+69489,15,1,19,3,2017-11-06 22:00:25,0
+77825,6,1,13,459,2017-11-07 11:29:29,0
+49610,15,1,13,140,2017-11-07 00:59:10,0
+31065,2,1,13,377,2017-11-07 13:03:13,0
+59081,23,1,20,153,2017-11-07 14:50:45,0
+86767,14,1,13,401,2017-11-08 07:51:07,0
+51512,3,1,20,280,2017-11-08 05:48:20,0
+193994,15,1,28,265,2017-11-06 23:43:31,0
+148879,2,1,30,435,2017-11-07 11:16:53,0
+21911,3,1,19,280,2017-11-07 10:47:56,0
+104954,9,1,16,466,2017-11-09 01:44:44,0
+116066,15,1,13,245,2017-11-09 07:03:07,0
+49604,3,1,13,409,2017-11-09 06:05:44,0
+284751,12,1,19,178,2017-11-08 08:58:27,0
+75794,18,1,19,121,2017-11-06 17:17:18,0
+105649,2,2,10,205,2017-11-08 05:04:38,0
+137933,2,1,19,219,2017-11-07 02:29:10,0
+22593,3,1,13,173,2017-11-08 11:02:16,0
+48418,26,1,19,121,2017-11-08 10:32:59,0
+18781,183,3543,748,347,2017-11-07 16:23:39,0
+72986,10,1,13,113,2017-11-09 15:44:15,0
+204285,24,1,8,105,2017-11-07 14:40:17,0
+42207,3,1,22,280,2017-11-08 14:19:57,0
+7528,3,1,3,205,2017-11-07 14:28:12,0
+24592,3,1,19,19,2017-11-07 10:30:37,0
+228982,15,1,4,245,2017-11-08 03:43:48,0
+169013,15,1,13,245,2017-11-07 12:39:25,0
+157743,9,1,19,445,2017-11-09 01:34:18,0
+122145,37,1,19,21,2017-11-07 04:49:49,0
+8318,18,1,13,107,2017-11-07 16:05:00,0
+74276,14,1,17,480,2017-11-08 04:37:36,0
+86662,9,1,18,442,2017-11-08 04:51:14,0
+79155,2,1,19,452,2017-11-09 03:25:01,0
+4123,15,1,19,245,2017-11-06 17:16:02,0
+85329,12,2,20,265,2017-11-08 11:29:38,0
+1104,12,1,53,245,2017-11-07 15:43:38,0
+44458,1,1,8,371,2017-11-08 02:22:30,0
+5596,15,1,6,245,2017-11-06 16:05:58,0
+342017,9,1,18,449,2017-11-09 14:17:25,0
+136031,32,1,23,376,2017-11-08 14:43:20,0
+5147,15,1,3,245,2017-11-09 05:59:46,0
+55142,18,1,17,107,2017-11-09 10:59:29,0
+20215,24,1,44,178,2017-11-08 11:52:26,0
+54503,2,1,13,477,2017-11-09 02:25:56,0
+84040,12,1,22,328,2017-11-09 10:28:31,0
+110574,12,1,15,259,2017-11-07 11:46:02,0
+73516,3,1,14,153,2017-11-09 00:26:50,0
+34409,14,1,15,371,2017-11-07 01:17:02,0
+113848,3,1,13,442,2017-11-08 23:11:30,0
+77695,18,1,18,121,2017-11-06 23:20:11,0
+97773,3,1,22,211,2017-11-09 10:02:35,0
+34944,17,1,19,280,2017-11-08 12:47:50,0
+95468,3,1,41,115,2017-11-08 02:06:28,0
+77756,12,1,20,409,2017-11-07 14:49:12,0
+26814,12,1,6,178,2017-11-07 07:30:29,0
+4869,2,1,13,477,2017-11-07 07:38:40,0
+43793,1,1,3,134,2017-11-07 03:27:42,0
+319710,15,1,41,430,2017-11-08 21:48:46,0
+61970,23,1,18,153,2017-11-07 05:13:12,0
+69196,15,1,25,259,2017-11-06 23:47:40,0
+102280,3,2,10,137,2017-11-07 13:12:03,0
+106460,2,1,25,477,2017-11-09 06:10:50,0
+79827,12,1,13,245,2017-11-09 01:04:03,0
+15179,3,1,3,280,2017-11-07 04:16:35,0
+111121,11,1,32,319,2017-11-09 02:22:14,0
+73916,3,1,19,280,2017-11-07 07:59:23,0
+72287,8,1,13,145,2017-11-07 16:33:55,0
+6640,12,1,8,277,2017-11-08 01:15:41,0
+87399,2,1,10,477,2017-11-07 21:02:04,0
+190767,3,1,19,19,2017-11-07 07:02:28,0
+256393,18,1,17,439,2017-11-08 12:16:12,0
+106598,17,1,13,280,2017-11-08 20:19:20,0
+960,15,1,19,430,2017-11-08 04:32:39,0
+193113,7,1,13,101,2017-11-07 05:17:55,0
+175075,14,1,55,401,2017-11-07 07:18:02,0
+5314,12,1,20,245,2017-11-08 17:00:11,0
+42297,15,1,19,265,2017-11-07 15:29:42,0
+5543,9,1,6,334,2017-11-08 08:14:45,0
+130325,1,1,13,134,2017-11-09 03:01:21,0
+73487,15,1,46,265,2017-11-07 12:40:16,0
+196678,9,1,19,215,2017-11-07 02:35:19,0
+75634,9,1,19,442,2017-11-07 19:40:39,0
+71671,9,1,19,445,2017-11-08 07:33:50,0
+79095,2,1,19,205,2017-11-09 05:20:37,0
+163744,19,0,24,213,2017-11-07 11:10:43,0
+115481,12,1,6,178,2017-11-07 08:21:11,0
+49178,18,1,19,439,2017-11-08 00:26:21,0
+67751,14,1,13,349,2017-11-08 03:08:54,0
+48523,2,1,19,237,2017-11-09 04:22:28,0
+87879,18,1,18,107,2017-11-08 22:08:56,0
+3133,3,1,18,173,2017-11-08 03:16:37,0
+268701,15,1,19,138,2017-11-09 04:24:30,0
+86767,9,1,17,244,2017-11-08 05:58:26,0
+30869,1,1,25,439,2017-11-08 13:18:36,0
+34139,15,1,26,130,2017-11-08 05:44:11,0
+108858,13,1,13,400,2017-11-07 04:33:26,0
+95766,12,1,1,245,2017-11-07 05:45:04,0
+38866,2,1,27,237,2017-11-09 03:48:25,0
+53960,21,2,9,128,2017-11-06 22:03:49,0
+68271,18,1,18,107,2017-11-09 05:00:59,0
+72723,18,1,19,449,2017-11-09 02:50:20,0
+173808,3,1,13,135,2017-11-07 05:47:06,0
+347004,3,1,18,442,2017-11-09 08:55:13,0
+5314,3,1,28,280,2017-11-07 06:08:44,0
+111078,18,1,12,107,2017-11-07 10:02:08,0
+66769,15,1,25,412,2017-11-08 00:21:03,0
+84395,15,1,13,130,2017-11-09 04:26:46,0
+16010,9,1,13,127,2017-11-09 04:37:12,0
+55960,13,1,22,477,2017-11-07 04:26:31,0
+245560,3,1,18,480,2017-11-08 10:36:17,0
+116681,8,1,25,145,2017-11-09 09:56:46,0
+236755,2,1,13,477,2017-11-09 05:34:13,0
+209984,3,1,13,173,2017-11-07 00:53:55,0
+9099,18,1,10,107,2017-11-09 02:23:09,0
+92766,15,1,13,430,2017-11-07 02:39:28,0
+80568,12,1,18,140,2017-11-09 05:05:41,0
+62906,9,1,13,445,2017-11-08 13:29:13,0
+32395,15,1,19,245,2017-11-07 15:29:43,0
+209944,12,1,19,178,2017-11-09 09:16:18,0
+159775,2,2,17,364,2017-11-06 19:05:04,0
+87879,3,1,13,442,2017-11-09 03:01:16,0
+182103,9,1,10,134,2017-11-06 22:09:28,0
+84817,11,1,13,173,2017-11-08 14:38:14,0
+49431,18,1,22,317,2017-11-07 05:46:36,0
+109644,14,1,6,442,2017-11-07 23:44:04,0
+32526,14,1,13,480,2017-11-09 15:29:23,0
+109540,19,0,24,213,2017-11-07 15:05:51,0
+75644,21,1,35,128,2017-11-08 07:26:20,0
+14301,3,1,41,280,2017-11-08 15:45:39,0
+27388,3,2,77,402,2017-11-07 02:55:25,0
+94028,9,1,16,134,2017-11-09 00:39:50,0
+7800,2,1,14,243,2017-11-07 15:26:56,0
+97952,2,1,53,122,2017-11-08 04:50:51,0
+22528,12,1,3,497,2017-11-09 02:25:20,0
+98568,15,1,26,111,2017-11-09 13:43:10,0
+71272,15,1,22,386,2017-11-07 15:38:18,0
+127531,8,1,41,145,2017-11-07 21:53:28,0
+40398,9,1,19,334,2017-11-09 06:11:27,0
+37001,9,1,1,134,2017-11-08 01:07:54,0
+169337,26,1,13,266,2017-11-07 02:47:23,0
+50136,3,1,19,130,2017-11-07 04:19:42,0
+95702,3,1,6,317,2017-11-08 07:33:19,0
+141511,18,1,16,107,2017-11-07 12:00:30,0
+26995,9,2,77,215,2017-11-07 14:24:00,0
+179199,18,2,3,134,2017-11-09 08:05:57,0
+75634,12,1,27,105,2017-11-09 02:48:07,0
+127233,3,1,13,280,2017-11-07 05:14:21,0
+124985,14,1,18,349,2017-11-07 11:33:53,0
+235526,14,1,13,349,2017-11-08 03:26:44,0
+80510,3,1,18,280,2017-11-09 02:23:27,0
+6242,12,1,25,205,2017-11-07 04:54:50,0
+39158,3,1,20,452,2017-11-08 06:11:31,0
+43827,13,1,18,477,2017-11-07 00:45:41,0
+67708,9,1,10,489,2017-11-07 14:36:29,0
+18996,3,1,19,489,2017-11-09 07:47:20,0
+182643,6,1,19,459,2017-11-07 07:19:00,0
+265034,3,1,13,280,2017-11-08 02:02:03,0
+120775,9,1,13,442,2017-11-08 22:23:47,0
+73487,12,2,49,326,2017-11-09 05:07:07,0
+71238,14,1,19,489,2017-11-07 05:37:08,0
+814,2,1,30,377,2017-11-09 11:05:22,0
+99897,12,1,42,259,2017-11-07 07:32:33,0
+124198,12,1,23,340,2017-11-09 11:50:24,0
+5348,15,1,13,245,2017-11-09 04:49:25,0
+73671,9,2,77,244,2017-11-08 08:51:31,0
+185712,9,1,22,490,2017-11-09 05:48:34,0
+51745,12,1,17,205,2017-11-08 21:50:05,0
+38888,3,1,31,280,2017-11-08 02:05:56,0
+59899,14,1,25,379,2017-11-09 11:34:30,0
+8230,3,1,19,280,2017-11-07 05:09:28,0
+50482,2,1,18,477,2017-11-09 03:14:29,0
+105475,3,1,19,205,2017-11-07 21:34:31,0
+173141,12,2,17,265,2017-11-08 09:32:57,0
+39782,18,1,19,107,2017-11-08 10:49:27,0
+187662,3,1,13,280,2017-11-08 05:49:41,0
+81598,6,1,25,125,2017-11-08 14:57:34,0
+3262,2,1,13,212,2017-11-08 00:32:01,0
+84644,9,1,13,442,2017-11-07 12:03:55,0
+114326,12,1,13,245,2017-11-07 00:15:30,0
+73503,3,1,8,466,2017-11-07 06:20:56,0
+13634,9,1,19,442,2017-11-09 11:44:46,0
+275129,9,1,13,134,2017-11-07 16:30:16,0
+136291,3,1,19,280,2017-11-09 05:57:48,0
+121679,2,1,13,469,2017-11-09 05:48:33,0
+23289,18,1,6,107,2017-11-09 05:44:58,0
+145769,26,1,22,266,2017-11-08 23:41:22,0
+92642,18,1,26,121,2017-11-08 02:01:53,0
+86261,3,1,6,137,2017-11-09 12:03:14,0
+83659,3,1,18,424,2017-11-07 12:16:23,0
+121384,15,1,15,315,2017-11-07 23:54:15,0
+126331,9,1,19,127,2017-11-09 15:42:16,0
+59263,14,1,3,463,2017-11-07 05:50:13,0
+44067,18,1,19,107,2017-11-09 06:31:15,0
+8352,15,1,19,245,2017-11-09 01:35:16,0
+4680,15,1,14,430,2017-11-07 15:48:23,0
+55642,10,1,19,317,2017-11-07 12:01:18,0
+12506,3,2,13,211,2017-11-08 14:24:02,0
+59441,18,1,19,376,2017-11-09 06:52:06,0
+6424,15,1,19,140,2017-11-09 11:48:54,0
+82234,9,1,27,107,2017-11-08 22:03:01,0
+149535,2,1,13,469,2017-11-08 05:09:19,0
+31732,3,1,19,280,2017-11-09 06:15:06,0
+23761,9,1,13,442,2017-11-07 13:35:15,0
+37972,3,1,44,489,2017-11-08 22:58:59,0
+208952,1,2,9,125,2017-11-07 06:58:48,0
+89411,18,1,22,107,2017-11-07 06:02:00,0
+64079,13,1,13,400,2017-11-08 00:23:26,0
+65207,9,1,19,244,2017-11-07 13:21:52,0
+90837,13,1,32,477,2017-11-07 07:02:36,0
+43855,1,1,12,153,2017-11-07 01:00:36,0
+151109,13,1,19,477,2017-11-07 03:42:26,0
+59361,12,1,17,481,2017-11-07 10:34:34,0
+192096,2,1,35,122,2017-11-09 11:56:59,0
+72666,20,1,8,259,2017-11-07 12:10:46,0
+105456,2,1,26,205,2017-11-09 12:47:41,0
+118292,12,1,22,265,2017-11-09 04:46:32,0
+48212,2,1,14,401,2017-11-07 13:53:20,0
+103396,12,1,13,178,2017-11-07 04:47:54,0
+123788,3,1,13,452,2017-11-08 14:18:19,0
+32453,12,1,10,259,2017-11-07 09:43:44,0
+37109,2,1,17,236,2017-11-06 22:46:12,0
+119375,18,3866,866,107,2017-11-09 10:18:34,0
+80949,9,1,19,334,2017-11-09 11:17:23,0
+111755,1,1,37,134,2017-11-09 14:34:20,0
+250443,26,1,13,121,2017-11-07 16:08:18,0
+28471,3,1,62,480,2017-11-09 08:51:20,0
+81107,1,1,18,377,2017-11-07 12:03:02,0
+84954,3,1,13,280,2017-11-09 07:23:38,0
+332627,3,1,13,489,2017-11-09 03:35:38,0
+119289,3,1,19,205,2017-11-08 19:04:23,0
+119885,3,1,10,402,2017-11-08 11:57:06,0
+111025,15,1,13,245,2017-11-08 16:59:25,0
+35282,8,1,13,145,2017-11-09 09:12:20,0
+66525,3,1,19,280,2017-11-08 00:25:48,0
+185787,3,1,19,280,2017-11-08 15:17:19,0
+215008,14,1,19,134,2017-11-08 06:24:30,0
+31542,10,1,11,113,2017-11-07 12:41:22,0
+126686,12,1,16,265,2017-11-07 15:40:20,0
+136665,18,1,25,439,2017-11-07 07:20:41,0
+28136,14,1,32,379,2017-11-07 02:47:58,0
+114276,21,1,3,128,2017-11-08 12:44:39,0
+49383,3,2,17,280,2017-11-08 03:01:36,0
+60314,1,1,10,153,2017-11-09 00:39:37,0
+60037,36,1,17,110,2017-11-06 23:07:34,1
+104512,25,1,19,259,2017-11-06 23:20:58,0
+100393,2,1,36,219,2017-11-08 15:41:28,0
+211313,12,1,9,481,2017-11-07 22:40:22,0
+118991,2,1,13,435,2017-11-08 13:33:14,0
+30433,12,1,32,328,2017-11-09 15:15:51,0
+106437,18,1,22,121,2017-11-08 16:25:18,0
+5348,18,1,19,121,2017-11-09 14:19:52,0
+104968,15,1,19,130,2017-11-09 07:45:57,0
+76184,6,1,19,459,2017-11-08 13:45:37,0
+5348,2,1,30,469,2017-11-07 13:05:27,0
+247238,32,1,53,376,2017-11-08 02:00:45,0
+88971,18,3032,607,107,2017-11-07 08:58:37,0
+109242,3,1,19,409,2017-11-08 13:12:15,0
+14290,3,1,19,424,2017-11-09 13:41:50,0
+90966,2,1,58,435,2017-11-07 15:51:53,0
+105560,3,1,17,280,2017-11-08 12:48:43,0
+123907,18,1,18,107,2017-11-09 12:31:41,0
+79779,51,0,38,203,2017-11-06 22:53:51,0
+53665,1,1,18,452,2017-11-07 23:12:54,0
+73487,2,1,13,236,2017-11-08 22:05:45,0
+167094,15,1,19,480,2017-11-09 01:42:41,0
+17289,18,1,27,121,2017-11-07 05:41:27,0
+50033,9,1,15,232,2017-11-08 12:30:35,0
+75595,15,1,15,265,2017-11-08 15:02:08,0
+113875,15,1,18,265,2017-11-08 13:39:41,0
+6965,14,1,20,463,2017-11-07 11:09:04,0
+127537,15,1,3,153,2017-11-08 06:26:05,0
+40204,2,1,13,477,2017-11-08 15:12:28,0
+44498,26,1,19,266,2017-11-09 03:02:12,0
+119375,3,1,19,379,2017-11-09 06:02:01,0
+101919,3,1,19,113,2017-11-08 02:29:54,0
+55893,3,1,18,280,2017-11-08 16:05:39,0
+111191,14,1,15,442,2017-11-06 16:01:41,0
+189149,3,1,18,280,2017-11-08 10:58:10,0
+44527,12,2,8,140,2017-11-08 12:59:29,0
+78942,3,1,19,442,2017-11-09 15:03:12,0
+31240,2,2,19,364,2017-11-08 14:47:11,0
+191691,25,1,19,259,2017-11-07 05:47:27,0
+143888,14,1,3,401,2017-11-07 00:15:44,0
+81571,15,1,3,3,2017-11-07 23:06:39,0
+133298,9,1,13,334,2017-11-07 07:42:37,0
+71575,9,1,13,215,2017-11-09 07:12:13,0
+50390,18,1,19,121,2017-11-06 19:23:39,0
+62397,9,1,13,134,2017-11-07 00:33:08,0
+106223,9,1,13,134,2017-11-06 16:20:49,0
+84953,14,1,13,401,2017-11-07 14:37:06,0
+92511,2,2,32,364,2017-11-08 00:11:45,0
+18126,12,1,25,178,2017-11-09 03:23:37,0
+121759,64,1,25,459,2017-11-07 03:00:26,0
+99226,14,1,10,442,2017-11-08 15:15:33,0
+1235,12,1,13,328,2017-11-09 09:23:16,0
+10595,9,1,37,334,2017-11-08 00:19:10,0
+118597,3,1,19,489,2017-11-06 16:15:57,0
+148849,3,1,19,280,2017-11-07 10:29:37,0
+75794,9,1,16,466,2017-11-08 12:46:57,0
+105560,13,1,25,477,2017-11-08 04:57:59,0
+119236,23,1,6,153,2017-11-07 06:57:36,0
+71128,8,1,19,145,2017-11-07 09:18:54,0
+110426,15,1,13,245,2017-11-07 13:18:33,0
+70056,3,1,31,137,2017-11-08 12:37:26,0
+50631,26,1,13,121,2017-11-08 08:59:26,0
+34295,15,1,19,245,2017-11-08 17:16:38,0
+126043,2,1,19,469,2017-11-09 08:58:53,0
+26785,2,1,16,219,2017-11-08 07:31:28,0
+8383,3,1,4,135,2017-11-09 09:04:59,0
+35335,3,1,19,489,2017-11-09 04:07:09,0
+162182,15,1,6,430,2017-11-08 07:36:54,0
+204788,7,1,13,101,2017-11-09 06:05:57,0
+37002,12,1,19,178,2017-11-09 02:08:27,0
+5348,183,3032,607,347,2017-11-07 15:31:57,0
+2805,64,1,19,459,2017-11-07 15:40:33,0
+97491,2,1,6,219,2017-11-09 00:54:45,0
+166529,22,1,13,116,2017-11-07 11:14:19,0
+117045,18,1,13,107,2017-11-09 11:04:51,0
+89565,9,1,13,334,2017-11-08 03:38:42,0
+319397,9,1,58,466,2017-11-09 12:02:27,0
+59118,2,1,13,205,2017-11-08 16:56:37,0
+92904,3,1,13,417,2017-11-09 14:39:34,0
+33691,7,1,8,101,2017-11-09 06:06:41,0
+251446,26,1,19,121,2017-11-07 21:31:40,0
+72831,3,1,19,115,2017-11-07 12:34:54,0
+160620,2,1,6,435,2017-11-08 03:23:37,0
+112086,14,1,19,463,2017-11-07 22:41:47,0
+63212,3,1,18,137,2017-11-08 14:51:40,0
+167719,2,1,6,477,2017-11-07 01:08:32,0
+53454,6,1,30,459,2017-11-08 10:49:19,0
+178873,6,1,22,459,2017-11-06 18:04:13,0
+125288,12,1,20,245,2017-11-09 00:46:45,0
+199680,12,1,19,245,2017-11-06 19:00:44,0
+46810,12,1,17,424,2017-11-07 00:13:05,0
+111025,1,1,19,349,2017-11-07 05:37:33,0
+165439,9,1,13,107,2017-11-09 09:47:02,0
+198525,18,1,20,107,2017-11-09 02:36:59,0
+20451,18,1,19,134,2017-11-08 08:53:35,0
+18837,12,1,15,245,2017-11-07 10:39:51,0
+48245,3,1,13,480,2017-11-08 06:48:52,0
+5147,15,1,19,265,2017-11-09 09:12:09,0
+80048,7,1,6,101,2017-11-09 08:15:53,0
+95631,15,1,12,480,2017-11-07 03:57:44,0
+39762,11,1,15,487,2017-11-08 01:40:31,0
+89047,6,1,13,459,2017-11-08 01:26:56,0
+53479,2,1,13,236,2017-11-07 09:05:44,0
+90557,12,1,19,409,2017-11-09 06:41:12,0
+54524,2,2,53,205,2017-11-09 04:28:25,0
+119349,27,1,19,153,2017-11-07 06:27:14,0
+11245,14,1,19,349,2017-11-07 00:00:08,0
+127081,6,1,13,459,2017-11-07 23:12:10,0
+9631,2,1,19,122,2017-11-07 06:10:10,0
+14737,3,1,13,480,2017-11-09 13:30:09,0
+233808,1,1,3,125,2017-11-08 15:05:28,0
+5178,3,1,19,452,2017-11-09 06:13:38,0
+58097,15,1,13,245,2017-11-08 03:53:29,0
+68724,2,1,19,237,2017-11-09 04:33:26,0
+5449,25,1,17,259,2017-11-09 13:06:43,0
+307556,9,1,22,466,2017-11-09 08:44:17,0
+50164,11,1,18,137,2017-11-09 01:12:43,0
+132992,9,1,19,334,2017-11-07 10:54:12,0
+102225,18,1,12,107,2017-11-08 03:53:13,0
+74122,24,1,13,178,2017-11-08 12:57:07,0
+205353,21,1,13,128,2017-11-09 04:48:13,0
+190786,23,1,13,153,2017-11-09 03:52:14,0
+11185,1,1,13,134,2017-11-06 16:01:00,0
+63812,15,1,19,315,2017-11-08 11:49:13,0
+251823,20,1,18,259,2017-11-09 11:43:59,0
+104839,14,1,8,489,2017-11-08 00:31:13,0
+213012,2,1,16,435,2017-11-09 10:00:57,0
+1821,9,1,19,334,2017-11-07 12:36:55,0
+8536,9,1,8,334,2017-11-06 23:27:58,0
+16401,14,1,13,401,2017-11-09 03:49:35,0
+96083,6,1,22,101,2017-11-08 10:58:39,0
+82716,1,1,13,134,2017-11-09 06:22:29,0
+64435,6,1,19,125,2017-11-07 21:44:00,0
+66258,7,1,13,101,2017-11-09 08:35:26,0
+50979,2,1,14,364,2017-11-07 17:12:41,0
+167895,12,1,37,259,2017-11-07 11:47:22,0
+27294,3,1,13,280,2017-11-08 12:17:36,0
+60136,2,1,17,205,2017-11-06 23:59:31,0
+50512,15,1,13,245,2017-11-08 17:35:53,0
+111493,14,1,18,349,2017-11-09 11:53:15,0
+77445,2,1,13,237,2017-11-08 04:57:58,0
+63840,3,1,17,135,2017-11-08 05:16:33,0
+221767,27,1,26,153,2017-11-08 03:37:48,0
+169081,2,1,13,243,2017-11-07 07:29:43,0
+283472,26,1,18,121,2017-11-08 09:19:51,0
+15474,3,1,19,211,2017-11-06 20:47:47,0
+76800,3,1,132,280,2017-11-08 02:55:44,0
+98001,6,1,19,459,2017-11-07 00:54:24,0
+51701,18,1,1,107,2017-11-08 01:18:48,0
+21013,14,1,19,480,2017-11-09 02:42:31,0
+104405,3,1,13,280,2017-11-09 02:27:04,0
+91584,9,1,13,466,2017-11-08 14:38:28,0
+25158,35,1,31,21,2017-11-07 13:46:17,0
+48170,2,1,19,237,2017-11-08 13:52:14,0
+84445,15,1,1,430,2017-11-08 09:20:42,0
+93226,7,1,19,101,2017-11-09 12:13:09,0
+140344,28,1,19,135,2017-11-07 20:30:31,0
+66184,13,1,14,469,2017-11-09 01:16:41,0
+125217,14,1,8,463,2017-11-07 06:19:15,0
+100602,2,1,18,236,2017-11-08 06:53:50,0
+84896,2,1,19,469,2017-11-09 12:42:40,0
+24039,14,1,17,463,2017-11-08 13:51:20,0
+351685,9,1,18,215,2017-11-09 08:05:31,0
+102515,3,1,19,379,2017-11-07 01:03:30,0
+89572,18,1,17,107,2017-11-07 22:58:38,0
+98321,9,1,13,215,2017-11-08 12:40:41,0
+80743,14,1,6,480,2017-11-09 00:12:58,0
+62863,23,1,20,153,2017-11-07 00:04:09,0
+287448,3,1,20,280,2017-11-08 04:26:22,0
+86078,2,1,19,469,2017-11-09 04:14:46,0
+62915,3,1,13,480,2017-11-09 00:37:19,0
+48855,9,1,13,450,2017-11-09 11:04:40,0
+159761,2,1,13,364,2017-11-08 14:53:35,0
+59043,9,1,13,127,2017-11-08 23:43:14,0
+625,18,1,15,107,2017-11-08 21:15:32,0
+46220,18,1,12,134,2017-11-08 19:35:33,0
+32639,23,1,19,153,2017-11-09 03:07:04,0
+27983,3,1,12,280,2017-11-09 05:08:29,0
+106774,32,1,19,376,2017-11-09 10:36:15,0
+170153,9,1,25,134,2017-11-09 02:34:03,0
+108341,12,1,19,409,2017-11-07 13:17:04,0
+102065,3,1,3,280,2017-11-08 13:22:52,0
+1524,2,1,13,477,2017-11-08 12:35:34,0
+151507,12,1,23,245,2017-11-07 06:47:15,0
+241308,18,1,9,107,2017-11-09 02:32:33,0
+111298,18,1,19,121,2017-11-09 13:44:59,0
+68963,2,1,19,469,2017-11-09 07:53:55,0
+39209,3,1,15,280,2017-11-07 06:35:37,0
+22593,18,1,13,134,2017-11-08 11:01:55,0
+112049,15,1,9,430,2017-11-09 00:50:38,0
+60527,22,1,19,116,2017-11-09 09:05:25,0
+109734,13,1,19,400,2017-11-07 05:12:43,0
+2228,12,1,6,265,2017-11-09 00:31:14,0
+66594,15,1,10,430,2017-11-07 16:12:12,0
+192452,15,1,35,130,2017-11-08 05:00:14,0
+94240,74,1,19,21,2017-11-07 09:14:26,0
+52143,18,1,40,107,2017-11-08 08:28:12,0
+136987,14,1,19,489,2017-11-08 14:07:05,0
+173467,8,1,35,145,2017-11-09 08:40:04,0
+137678,12,2,18,178,2017-11-07 23:24:08,0
+181082,3,1,13,280,2017-11-08 03:34:52,0
+59125,27,2,19,122,2017-11-08 22:11:29,0
+117651,1,1,13,135,2017-11-07 00:25:33,0
+4919,9,1,19,215,2017-11-07 00:10:18,0
+203637,2,1,23,477,2017-11-07 01:46:40,0
+241058,26,1,13,266,2017-11-08 06:09:03,0
+18788,12,1,19,265,2017-11-07 04:21:27,0
+100623,18,3032,607,107,2017-11-07 11:59:16,0
+91574,2,1,19,205,2017-11-09 02:28:56,0
+18584,9,1,19,334,2017-11-09 10:04:36,0
+147678,20,1,19,259,2017-11-07 06:37:23,0
+93150,12,1,25,497,2017-11-08 06:45:33,0
+203223,27,1,36,122,2017-11-07 09:52:42,0
+103337,9,1,3,445,2017-11-09 08:01:12,0
+48836,9,1,13,107,2017-11-09 12:10:53,0
+162293,28,1,19,135,2017-11-08 16:25:00,0
+171257,14,1,19,379,2017-11-09 15:01:59,0
+7645,3,1,13,280,2017-11-07 01:32:08,0
+35040,12,1,23,19,2017-11-07 10:20:02,0
+76718,9,1,13,258,2017-11-06 16:50:41,0
+83699,12,1,41,340,2017-11-08 14:41:53,0
+5348,18,1,25,439,2017-11-07 23:03:19,0
+342481,3,1,11,489,2017-11-09 09:47:23,0
+48231,9,1,19,334,2017-11-06 23:30:50,0
+9521,3,1,16,480,2017-11-07 23:39:12,0
+84428,21,1,18,232,2017-11-07 13:41:57,0
+108942,15,1,37,153,2017-11-07 13:31:21,0
+35300,19,0,0,213,2017-11-09 09:26:14,0
+8632,12,1,17,178,2017-11-08 05:05:39,0
+163523,15,1,13,153,2017-11-09 00:08:59,0
+36801,26,1,8,266,2017-11-06 22:50:12,0
+23286,23,1,22,153,2017-11-08 09:20:36,0
+71417,26,1,19,477,2017-11-08 09:54:14,0
+52225,3,1,13,442,2017-11-07 13:30:04,0
+130620,15,1,19,111,2017-11-06 16:48:41,0
+52024,12,1,15,245,2017-11-08 03:51:18,0
+32526,9,1,19,232,2017-11-09 05:19:32,0
+188957,3,1,12,280,2017-11-08 09:01:41,0
+7373,64,1,19,459,2017-11-07 16:54:42,0
+335227,12,1,19,265,2017-11-09 09:15:49,0
+50307,6,1,18,459,2017-11-08 05:21:05,0
+35658,3,1,13,442,2017-11-08 22:24:26,0
+114663,3,1,19,280,2017-11-08 00:54:43,0
+28498,15,1,47,386,2017-11-09 03:12:14,0
+110172,7,1,19,101,2017-11-09 05:36:02,0
+100275,3,1,19,280,2017-11-08 13:03:17,0
+248105,3,1,19,280,2017-11-08 04:39:41,0
+119138,15,1,19,3,2017-11-09 00:39:35,0
+50938,27,1,19,122,2017-11-08 07:13:41,0
+57646,3,1,19,280,2017-11-08 13:03:12,0
+140338,15,1,8,245,2017-11-08 14:07:34,0
+67439,24,2,19,105,2017-11-06 21:37:16,0
+288578,3,1,55,211,2017-11-09 01:57:42,0
+25327,2,1,13,364,2017-11-07 11:07:13,0
+90922,14,1,13,463,2017-11-07 04:55:20,0
+220613,13,1,17,477,2017-11-08 13:52:16,0
+82015,3,1,9,280,2017-11-08 11:38:59,0
+3332,12,1,25,265,2017-11-08 00:12:25,0
+15026,2,1,1,477,2017-11-07 15:32:13,0
+75422,3,1,18,442,2017-11-09 04:18:15,0
+93587,13,1,12,477,2017-11-09 08:13:43,0
+117979,12,1,23,245,2017-11-07 05:22:12,0
+344579,9,1,25,489,2017-11-09 05:06:41,0
+96815,2,1,41,401,2017-11-08 08:43:23,0
+96105,2,1,18,122,2017-11-08 00:01:37,0
+20961,3,1,19,280,2017-11-09 02:03:18,0
+41265,3,1,18,205,2017-11-08 12:27:06,0
+127804,12,1,13,178,2017-11-09 15:16:50,0
+95766,2,1,13,237,2017-11-07 13:19:27,0
+65710,3,1,16,280,2017-11-08 11:01:52,0
+157880,12,1,16,340,2017-11-09 06:28:15,0
+61894,12,2,9,140,2017-11-09 00:07:32,0
+29161,3,1,13,280,2017-11-07 02:57:11,0
+91799,12,1,13,19,2017-11-08 10:53:14,0
+93057,18,1,13,134,2017-11-07 14:46:17,0
+336917,9,1,19,134,2017-11-08 17:22:37,0
+210561,18,1,13,121,2017-11-07 00:15:55,0
+207144,9,1,13,215,2017-11-08 08:54:43,0
+81211,9,1,35,232,2017-11-09 04:48:08,0
+30564,25,2,13,259,2017-11-08 12:15:12,0
+9140,21,1,13,128,2017-11-07 22:29:41,0
+75504,15,1,3,379,2017-11-08 00:19:26,0
+190191,9,1,25,244,2017-11-07 11:54:37,0
+183964,9,1,19,334,2017-11-07 10:59:49,0
+122199,9,1,13,334,2017-11-07 15:16:01,0
+153706,3,1,19,173,2017-11-09 06:03:52,0
+118157,12,1,11,265,2017-11-08 01:14:01,0
+42343,18,1,13,439,2017-11-08 04:48:26,0
+53585,12,1,37,265,2017-11-08 08:05:44,0
+31544,12,1,8,242,2017-11-09 06:16:08,0
+53964,11,1,13,481,2017-11-07 15:32:43,0
+174080,3,1,1,280,2017-11-07 13:46:06,0
+53964,1,1,46,13,2017-11-09 13:25:56,0
+166017,3,1,13,173,2017-11-07 08:26:05,0
+71641,3,1,19,480,2017-11-08 08:01:22,0
+44527,14,1,19,349,2017-11-07 11:41:29,0
+115663,2,1,22,236,2017-11-09 03:04:43,0
+28956,3,1,19,280,2017-11-07 04:14:14,0
+40194,2,1,53,401,2017-11-08 10:48:29,0
+44051,14,1,3,349,2017-11-07 11:41:53,0
+23878,12,1,18,259,2017-11-07 14:27:04,0
+101951,15,1,53,245,2017-11-07 16:26:16,0
+116270,18,1,20,107,2017-11-09 09:37:41,0
+63248,12,1,11,124,2017-11-08 04:31:08,0
+93780,12,1,15,328,2017-11-08 10:18:35,0
+158980,18,1,13,134,2017-11-07 12:37:24,0
+50059,15,1,3,278,2017-11-07 00:28:30,0
+109463,7,1,19,101,2017-11-09 09:31:33,0
+65598,12,1,19,245,2017-11-08 10:44:50,0
+55415,3,1,20,280,2017-11-08 16:14:39,0
+79745,27,1,14,153,2017-11-08 12:18:54,0
+43855,3,1,19,137,2017-11-09 09:11:13,0
+22792,1,1,3,178,2017-11-07 01:01:39,0
+109890,3,1,35,280,2017-11-08 07:18:25,0
+7944,3,1,22,379,2017-11-08 00:44:10,0
+141999,15,1,1,412,2017-11-07 02:04:58,0
+28176,12,1,20,178,2017-11-07 09:17:01,0
+5348,21,1,13,128,2017-11-09 04:51:39,0
+186145,2,1,35,377,2017-11-07 12:00:01,0
+53715,18,1,18,107,2017-11-09 10:27:49,0
+108075,27,1,13,153,2017-11-09 13:32:13,0
+78648,2,1,17,477,2017-11-07 17:37:25,0
+9876,2,1,18,212,2017-11-08 20:05:46,0
+5348,12,2,13,265,2017-11-08 00:26:38,0
+10428,9,1,23,127,2017-11-09 10:26:32,0
+83366,18,1,8,107,2017-11-09 15:48:04,0
+55569,3,1,3,173,2017-11-08 17:44:47,0
+33012,3,1,12,173,2017-11-09 14:43:55,0
+39059,14,1,13,489,2017-11-07 04:09:23,0
+153181,12,1,18,409,2017-11-08 03:32:04,0
+40207,2,1,13,477,2017-11-09 06:43:05,0
+116209,12,1,22,178,2017-11-07 10:40:18,0
+161544,12,1,13,178,2017-11-08 02:14:00,0
+14374,3,1,19,280,2017-11-08 15:46:06,0
+43255,3,1,22,442,2017-11-07 23:37:24,0
+105606,15,1,13,245,2017-11-08 20:59:02,0
+38265,15,1,11,153,2017-11-07 19:10:24,0
+178099,3,1,25,130,2017-11-07 04:19:37,0
+177243,18,1,20,107,2017-11-07 00:58:46,0
+107155,3,1,13,480,2017-11-08 13:30:58,0
+21162,1,1,19,13,2017-11-08 02:23:49,0
+182003,15,1,19,245,2017-11-06 16:59:31,0
+22244,12,1,16,259,2017-11-08 06:43:58,0
+104366,13,1,19,400,2017-11-07 09:25:50,0
+7840,3,1,8,280,2017-11-09 00:37:36,0
+6750,2,1,19,258,2017-11-07 09:14:21,0
+64341,2,1,19,237,2017-11-07 05:31:15,0
+173141,14,1,19,463,2017-11-07 15:17:26,0
+92735,9,1,15,442,2017-11-09 06:11:12,0
+97341,15,1,15,130,2017-11-07 22:15:24,0
+271500,20,1,19,259,2017-11-07 22:40:13,0
+109333,3,1,13,280,2017-11-07 04:59:28,0
+195048,9,1,19,489,2017-11-07 12:21:54,0
+190496,3,1,17,280,2017-11-07 08:16:55,0
+88358,64,1,13,459,2017-11-06 16:02:04,0
+211889,18,1,40,107,2017-11-07 09:30:52,0
+76727,13,1,19,477,2017-11-06 22:48:46,0
+7243,9,1,37,489,2017-11-08 09:03:21,0
+33525,3,1,19,280,2017-11-07 00:01:21,0
+52333,12,1,14,245,2017-11-08 17:42:43,0
+89792,3,1,1,424,2017-11-09 01:46:02,0
+88727,18,1,12,107,2017-11-09 09:52:55,0
+6123,9,1,19,215,2017-11-09 04:36:52,0
+109480,18,1,27,376,2017-11-07 14:33:24,0
+236755,3,1,41,115,2017-11-08 16:34:56,0
+77540,3,1,22,130,2017-11-09 07:01:10,0
+127254,12,1,20,265,2017-11-07 06:06:12,0
+250288,8,1,6,145,2017-11-08 22:52:05,0
+64741,18,1,19,134,2017-11-09 01:44:32,0
+26454,3,1,19,409,2017-11-09 12:13:59,0
+4630,17,1,10,280,2017-11-07 05:43:17,0
+104621,18,1,37,107,2017-11-09 09:56:11,0
+204001,3,1,19,371,2017-11-08 01:08:05,0
+67806,12,2,13,178,2017-11-09 11:26:21,0
+80388,25,1,9,259,2017-11-07 06:37:03,0
+9795,12,1,37,259,2017-11-08 13:00:05,0
+55722,3,1,17,30,2017-11-09 07:19:52,0
+160220,12,1,26,245,2017-11-07 14:16:55,0
+113786,9,1,13,442,2017-11-08 01:50:48,0
+120546,12,1,20,178,2017-11-09 09:07:07,0
+175837,15,1,18,412,2017-11-08 13:08:48,0
+108697,26,1,17,121,2017-11-07 23:55:20,0
+49541,15,1,13,379,2017-11-07 08:29:00,0
+7308,1,1,9,124,2017-11-09 01:57:32,0
+184702,18,1,14,134,2017-11-08 00:33:37,0
+52549,11,1,19,137,2017-11-08 15:17:55,0
+11170,9,1,19,334,2017-11-07 08:37:16,0
+43793,2,1,13,477,2017-11-09 10:23:54,0
+5314,18,1,15,121,2017-11-09 15:55:06,0
+5348,12,1,19,145,2017-11-09 05:55:55,0
+86405,3,1,17,280,2017-11-08 05:46:24,0
+178851,2,1,15,205,2017-11-08 10:31:28,0
+104397,3,1,19,424,2017-11-09 14:21:31,0
+110176,8,1,13,145,2017-11-08 23:25:34,0
+107204,3,1,19,153,2017-11-08 09:35:06,0
+79332,6,1,13,459,2017-11-09 14:03:31,0
+114878,3,1,19,280,2017-11-08 14:38:36,0
+18165,12,1,9,265,2017-11-09 13:48:31,0
+163712,18,1,22,317,2017-11-07 04:24:12,0
+163785,9,1,13,466,2017-11-09 06:54:02,0
+22567,14,1,19,379,2017-11-08 04:27:37,0
+149500,20,2,42,259,2017-11-07 14:20:44,0
+114878,2,2,49,205,2017-11-08 13:02:30,0
+125222,3,1,13,442,2017-11-09 12:22:23,0
+45275,15,1,17,140,2017-11-06 16:32:06,0
+37502,12,1,9,178,2017-11-07 10:17:14,0
+178851,2,1,19,205,2017-11-08 00:18:17,0
+162574,6,1,10,125,2017-11-08 13:33:32,0
+271903,12,1,18,178,2017-11-08 10:49:07,0
+100339,2,1,18,477,2017-11-08 13:03:05,0
+22804,18,1,19,134,2017-11-07 14:45:33,0
+120709,14,1,19,349,2017-11-09 13:06:06,0
+69595,8,1,13,259,2017-11-07 07:13:50,0
+184271,12,1,19,178,2017-11-08 08:41:39,0
+21274,3,1,6,173,2017-11-09 06:48:31,0
+101367,2,1,13,469,2017-11-07 05:46:31,0
+118410,26,1,53,121,2017-11-08 05:39:38,0
+67628,2,1,19,122,2017-11-09 14:32:04,0
+116407,14,1,17,489,2017-11-08 02:31:05,0
+22006,12,1,14,328,2017-11-07 11:42:18,0
+33643,18,1,37,107,2017-11-09 09:37:53,0
+40535,9,1,3,334,2017-11-09 00:19:47,0
+112377,3,1,17,442,2017-11-08 00:51:12,0
+41725,3,1,8,280,2017-11-08 04:10:47,0
+37375,18,1,13,107,2017-11-06 23:49:35,0
+73516,12,2,13,178,2017-11-07 03:03:26,0
+116198,8,1,25,145,2017-11-09 08:04:51,0
+8019,3,1,13,452,2017-11-08 08:19:36,0
+23130,12,1,19,265,2017-11-06 17:19:17,0
+26995,2,1,19,219,2017-11-09 14:10:35,0
+6860,3,1,22,466,2017-11-07 11:11:40,0
+109776,18,1,37,107,2017-11-08 12:31:31,0
+73487,9,2,13,234,2017-11-08 09:57:54,0
+64268,15,1,13,153,2017-11-08 17:57:49,0
+93587,12,1,32,259,2017-11-09 10:54:16,0
+54992,2,1,17,377,2017-11-09 03:50:43,0
+184822,2,1,13,435,2017-11-09 12:10:04,0
+193464,12,1,22,178,2017-11-07 13:03:21,0
+106598,6,2,9,125,2017-11-08 10:43:14,0
+74013,12,1,13,265,2017-11-08 09:10:59,0
+78739,18,1,23,107,2017-11-09 03:07:12,0
+43855,13,1,12,477,2017-11-08 13:59:37,0
+23358,1,1,17,134,2017-11-07 23:06:40,0
+67197,3,1,25,379,2017-11-07 13:57:39,0
+47902,18,1,19,439,2017-11-08 14:23:09,0
+124526,15,1,17,245,2017-11-08 01:15:38,0
+42869,25,1,16,259,2017-11-07 10:51:09,0
+96038,18,1,10,439,2017-11-08 15:13:08,0
+15343,18,1,19,107,2017-11-06 17:47:27,0
+54868,64,1,37,459,2017-11-08 02:14:46,0
+95140,3,1,13,280,2017-11-08 02:43:36,0
+254077,14,1,6,401,2017-11-09 11:26:19,0
+48337,12,1,13,245,2017-11-07 07:49:10,0
+22978,12,1,19,178,2017-11-06 17:44:37,0
+38725,3,1,1,280,2017-11-08 03:05:41,0
+73278,12,1,53,205,2017-11-08 14:56:57,0
+139498,3,1,20,424,2017-11-09 04:09:07,0
+163168,2,1,13,237,2017-11-07 04:36:15,0
+162293,12,1,16,242,2017-11-07 23:44:08,0
+80908,15,1,19,265,2017-11-07 13:00:02,0
+106136,2,1,19,237,2017-11-07 05:15:34,0
+73210,20,1,19,259,2017-11-08 15:13:27,0
+79092,12,1,32,145,2017-11-07 22:59:21,0
+88120,18,1,19,107,2017-11-09 09:22:00,0
+34499,3,1,15,115,2017-11-08 13:19:09,0
+123572,14,1,19,134,2017-11-07 00:25:09,0
+52692,26,1,15,477,2017-11-09 14:59:06,0
+63812,1,1,17,178,2017-11-07 02:10:02,0
+1634,15,1,18,412,2017-11-07 13:05:00,0
+43805,6,1,23,459,2017-11-07 13:20:49,0
+6713,9,1,13,215,2017-11-06 17:51:41,0
+2306,18,1,32,439,2017-11-09 04:27:26,0
+111324,2,1,19,237,2017-11-08 02:32:11,0
+7567,3,1,19,424,2017-11-08 08:44:32,0
+92712,12,1,15,326,2017-11-07 22:58:44,0
+119626,20,1,15,259,2017-11-09 14:06:33,0
+16473,13,1,13,477,2017-11-08 13:19:54,0
+53765,3,1,18,480,2017-11-06 23:17:41,0
+196072,2,1,13,243,2017-11-07 08:13:26,0
+58700,15,1,13,315,2017-11-09 13:10:17,0
+175250,3,1,17,173,2017-11-06 22:38:41,0
+99311,15,1,1,245,2017-11-06 16:56:28,0
+211579,14,1,13,489,2017-11-09 03:34:10,0
+45256,23,1,16,153,2017-11-09 04:39:47,0
+155689,3,1,32,280,2017-11-08 07:19:17,0
+625,3,1,20,280,2017-11-07 12:46:04,0
+73238,14,1,19,489,2017-11-07 08:10:44,0
+231612,12,1,13,259,2017-11-09 13:50:29,0
+125362,7,1,19,101,2017-11-09 06:11:16,0
+36311,12,1,19,265,2017-11-07 11:32:23,0
+16741,2,1,13,243,2017-11-09 03:59:23,0
+9513,8,1,19,145,2017-11-07 14:13:56,0
+17034,2,1,10,122,2017-11-09 12:06:03,0
+15968,23,2,19,153,2017-11-09 03:16:44,0
+10613,3,1,22,452,2017-11-08 12:04:03,0
+48451,1,1,13,153,2017-11-06 19:34:32,0
+40372,9,1,19,334,2017-11-08 01:01:25,0
+9314,64,1,15,459,2017-11-08 06:11:14,0
+95820,12,1,19,265,2017-11-09 14:33:11,0
+48919,9,1,22,244,2017-11-09 04:12:31,0
+48170,2,1,17,237,2017-11-08 14:00:03,0
+51299,18,1,22,107,2017-11-09 06:59:18,0
+17930,8,1,19,259,2017-11-07 15:22:43,0
+106770,27,1,13,122,2017-11-08 23:17:47,0
+151678,14,1,13,442,2017-11-09 08:02:54,0
+36445,3,1,13,115,2017-11-07 04:42:54,0
+74020,12,1,13,265,2017-11-07 12:33:56,0
+43837,9,1,37,244,2017-11-07 06:04:33,0
+64609,6,1,18,459,2017-11-07 06:19:01,0
+84610,15,1,13,153,2017-11-09 14:56:34,0
+1074,12,1,10,245,2017-11-07 06:59:03,0
+8401,9,2,36,258,2017-11-08 22:14:47,0
+91588,2,1,13,237,2017-11-09 00:37:52,0
+211889,15,1,15,278,2017-11-08 16:40:23,0
+7350,14,1,13,349,2017-11-07 03:05:44,0
+66218,3,1,20,280,2017-11-09 15:02:52,0
+110999,2,1,13,435,2017-11-08 04:21:06,0
+16290,3,1,15,280,2017-11-08 22:32:41,0
+240057,2,1,10,452,2017-11-09 03:23:15,0
+86383,2,1,19,469,2017-11-08 15:32:25,0
+2896,18,1,13,121,2017-11-07 09:43:53,0
+48170,18,1,13,107,2017-11-09 01:09:50,0
+206125,2,1,41,236,2017-11-06 22:31:04,0
+99810,2,1,6,237,2017-11-09 07:48:14,0
+32612,12,1,13,265,2017-11-07 15:13:10,0
+36183,151,0,24,347,2017-11-08 08:12:08,0
+48170,3,1,27,110,2017-11-09 12:25:59,0
+52961,11,1,41,137,2017-11-08 13:52:19,0
+111484,12,1,15,340,2017-11-09 10:48:05,0
+8259,12,1,14,265,2017-11-08 00:43:01,0
+43793,9,1,18,244,2017-11-09 12:56:18,0
+111438,15,1,13,245,2017-11-07 08:41:05,0
+49293,3,1,13,280,2017-11-07 06:03:16,0
+21508,15,1,13,111,2017-11-09 13:52:31,0
+106723,3,1,17,280,2017-11-09 01:56:03,0
+159862,2,1,70,477,2017-11-08 15:50:14,0
+89038,2,1,13,212,2017-11-08 17:33:36,0
+77844,20,1,19,478,2017-11-08 14:07:38,0
+70144,1,1,8,377,2017-11-07 03:25:55,0
+42530,64,1,13,459,2017-11-07 10:17:19,0
+35017,1,1,19,150,2017-11-09 00:06:36,0
+269538,3,1,22,371,2017-11-09 12:46:55,0
+42190,12,1,25,140,2017-11-07 12:20:54,0
+14737,18,1,22,379,2017-11-09 10:54:38,0
+104400,15,1,13,130,2017-11-07 23:40:33,0
+104386,13,1,17,477,2017-11-08 10:20:25,0
+93587,12,1,37,140,2017-11-08 04:00:33,0
+318097,3,1,18,130,2017-11-09 13:47:11,0
+101603,2,1,15,236,2017-11-09 05:00:46,0
+242813,2,1,40,469,2017-11-08 22:56:51,0
+19216,1,2,10,134,2017-11-07 07:38:50,0
+209477,151,0,38,347,2017-11-06 21:23:59,0
+144955,14,1,13,379,2017-11-08 00:23:37,0
+100042,15,1,19,245,2017-11-08 02:20:12,0
+4544,3,1,13,115,2017-11-08 05:56:47,0
+103335,13,1,47,477,2017-11-06 23:55:50,0
+99638,14,1,18,416,2017-11-09 07:20:52,0
+52961,12,1,8,178,2017-11-07 10:20:55,0
+118607,3,1,10,280,2017-11-07 05:23:15,0
+125222,18,1,19,121,2017-11-08 05:34:17,0
+46672,3,1,13,280,2017-11-08 14:27:04,0
+2936,3,1,8,442,2017-11-09 09:07:41,0
+77406,8,1,6,140,2017-11-09 09:09:42,0
+24404,12,1,19,340,2017-11-09 05:59:41,0
+82068,15,1,19,386,2017-11-07 16:19:19,0
+269733,3,1,25,280,2017-11-09 14:10:00,0
+116696,2,2,13,364,2017-11-08 05:06:56,0
+32675,12,1,19,265,2017-11-08 05:09:48,0
+96066,18,1,6,107,2017-11-09 09:55:14,0
+108913,25,1,37,259,2017-11-07 15:59:54,0
+75644,2,1,19,477,2017-11-07 05:31:37,0
+183147,3,1,18,211,2017-11-07 11:11:23,0
+68930,26,1,13,266,2017-11-07 22:52:55,0
+179333,11,1,32,487,2017-11-07 07:23:46,0
+63925,3,1,37,480,2017-11-08 01:47:19,0
+183314,12,1,35,265,2017-11-08 21:44:57,0
+130760,3,1,18,442,2017-11-08 03:25:32,0
+23484,15,1,41,265,2017-11-07 15:55:42,0
+184260,27,1,11,153,2017-11-09 11:40:57,0
+119289,15,1,17,315,2017-11-08 12:47:46,0
+48212,12,2,13,178,2017-11-07 16:49:12,0
+73487,12,1,19,259,2017-11-08 14:28:18,0
+265877,9,1,6,442,2017-11-08 01:29:15,0
+170860,18,1,1,107,2017-11-07 10:24:39,0
+193539,12,1,27,265,2017-11-09 03:08:33,0
+84866,12,1,37,265,2017-11-08 10:59:36,0
+114314,2,1,19,435,2017-11-07 05:18:53,0
+106492,2,1,8,237,2017-11-08 06:12:30,0
+36434,29,1,13,343,2017-11-08 03:26:31,0
+15459,23,1,17,153,2017-11-07 06:27:12,0
+26995,2,1,13,435,2017-11-07 06:32:17,0
+101267,15,1,17,153,2017-11-07 08:36:15,0
+89845,12,1,10,178,2017-11-07 12:19:40,0
+108481,6,1,8,459,2017-11-07 13:02:23,0
+40838,23,1,13,153,2017-11-09 15:04:59,0
+119531,12,2,17,265,2017-11-09 05:25:47,0
+17149,3,1,13,280,2017-11-08 00:32:28,0
+50169,64,1,23,459,2017-11-07 18:09:45,0
+103051,14,1,14,134,2017-11-07 01:41:18,0
+131336,18,1,19,107,2017-11-08 09:37:03,0
+105475,18,1,17,107,2017-11-06 17:49:48,0
+68724,9,1,14,107,2017-11-09 04:40:12,0
+18703,3,1,14,402,2017-11-09 11:02:58,0
+288439,19,0,0,333,2017-11-09 01:49:49,1
+129783,6,2,19,125,2017-11-08 12:55:54,0
+289892,2,1,20,477,2017-11-09 15:57:17,0
+234670,15,1,13,140,2017-11-08 13:30:47,0
+102163,3,1,19,442,2017-11-07 05:01:11,0
+119163,8,1,19,145,2017-11-08 23:27:21,0
+8352,12,1,13,140,2017-11-09 03:58:58,0
+193073,9,1,36,215,2017-11-07 01:34:13,0
+27437,12,1,10,481,2017-11-07 03:24:14,0
+56563,15,1,13,265,2017-11-09 11:54:11,0
+50489,11,1,19,330,2017-11-08 02:32:38,0
+123945,8,1,17,140,2017-11-08 07:31:29,0
+5648,17,1,13,280,2017-11-07 11:32:06,0
+115093,2,1,8,212,2017-11-09 09:40:10,0
+24537,2,1,13,477,2017-11-08 08:07:39,0
+191780,3,1,13,280,2017-11-08 11:27:37,0
+45413,3,1,13,115,2017-11-07 09:37:47,0
+147164,15,1,10,3,2017-11-07 01:36:20,0
+49652,24,2,5,105,2017-11-06 16:48:37,0
+149300,21,1,13,128,2017-11-06 16:24:28,0
+34624,14,1,17,463,2017-11-09 02:43:34,0
+257266,18,1,3,107,2017-11-08 10:21:29,0
+111414,3,1,31,280,2017-11-07 02:02:34,0
+83974,18,1,3,107,2017-11-09 13:05:19,0
+105606,15,1,16,245,2017-11-07 19:28:41,0
+5449,21,1,19,128,2017-11-06 23:47:35,0
+97018,9,1,17,134,2017-11-08 23:26:21,0
+17380,12,1,32,245,2017-11-07 15:31:54,0
+126461,2,1,15,477,2017-11-06 16:32:07,0
+225640,12,1,47,245,2017-11-08 14:16:25,0
+48282,12,1,25,178,2017-11-07 03:40:22,0
+24896,3,1,44,130,2017-11-09 00:49:34,0
+31061,2,1,13,364,2017-11-08 15:20:20,0
+24271,12,1,19,259,2017-11-07 15:14:00,0
+23674,1,2,18,13,2017-11-08 02:56:01,0
+14973,15,1,13,245,2017-11-07 13:20:40,0
+14271,18,1,1,107,2017-11-07 10:27:54,0
+100896,12,1,13,178,2017-11-08 07:07:02,0
+98995,3,1,18,379,2017-11-09 11:56:12,0
+18429,3,1,13,280,2017-11-08 06:46:37,0
+27918,18,1,17,107,2017-11-09 01:19:03,0
+92766,3,2,31,153,2017-11-07 19:19:06,0
+77209,3,1,32,452,2017-11-09 04:07:57,0
+7057,2,1,10,212,2017-11-08 06:42:07,0
+95662,21,1,19,128,2017-11-07 05:07:34,0
+78599,2,1,19,258,2017-11-08 23:38:17,0
+117269,14,1,11,118,2017-11-09 08:01:57,0
+73814,12,1,13,160,2017-11-07 23:41:27,0
+6196,14,1,19,401,2017-11-07 14:33:34,0
+327479,12,1,19,178,2017-11-09 10:03:38,0
+175576,15,1,13,130,2017-11-08 06:32:52,0
+75489,15,1,58,259,2017-11-07 12:27:04,0
+103611,18,1,17,121,2017-11-08 08:38:08,0
+93314,3,1,27,205,2017-11-08 07:28:20,0
+34208,25,1,9,259,2017-11-07 15:08:07,0
+48862,14,1,19,442,2017-11-08 11:49:28,0
+96777,12,1,17,265,2017-11-08 05:03:24,0
+120440,6,1,14,459,2017-11-09 06:19:50,0
+18246,3,1,12,280,2017-11-07 04:37:53,0
+55047,3,1,32,280,2017-11-08 01:13:46,0
+184921,3,1,22,466,2017-11-07 14:33:17,0
+85150,12,2,73,259,2017-11-09 10:21:26,0
+89083,9,1,12,215,2017-11-07 16:01:31,0
+108859,3,1,18,280,2017-11-09 01:37:58,0
+93263,3,1,10,409,2017-11-09 15:08:15,0
+49219,14,1,18,480,2017-11-09 09:50:54,0
+98635,12,1,35,245,2017-11-07 23:52:33,0
+34046,1,1,17,134,2017-11-08 00:13:15,0
+208090,21,1,37,128,2017-11-07 00:14:04,0
+79881,3,1,30,371,2017-11-09 12:52:21,0
+176785,15,1,19,278,2017-11-07 13:49:16,0
+63188,15,1,19,245,2017-11-08 10:42:39,0
+67776,2,1,13,435,2017-11-09 14:54:12,0
+179955,3,1,6,379,2017-11-06 18:07:46,0
+71969,28,1,19,135,2017-11-09 12:23:32,0
+75539,2,1,19,477,2017-11-08 07:23:18,0
+75431,3,1,9,280,2017-11-08 00:17:05,0
+99769,3,1,13,280,2017-11-09 03:23:39,0
+29031,1,1,25,349,2017-11-07 00:27:55,0
+70451,3,1,16,409,2017-11-07 04:37:47,0
+50641,18,1,10,134,2017-11-09 00:59:05,0
+77065,8,1,13,145,2017-11-09 09:23:22,0
+59660,13,1,13,400,2017-11-07 11:08:29,0
+181542,3,1,13,137,2017-11-08 01:48:05,0
+84488,1,1,37,124,2017-11-09 09:23:37,0
+150712,18,1,15,107,2017-11-07 01:00:27,0
+3133,23,1,37,153,2017-11-07 02:33:54,0
+15866,9,1,6,215,2017-11-07 03:04:15,0
+39216,14,1,19,379,2017-11-09 06:38:29,0
+88474,8,1,13,145,2017-11-07 14:49:58,0
+16010,3,1,32,280,2017-11-08 02:01:03,0
+111409,13,1,10,469,2017-11-08 23:46:29,0
+78524,18,1,19,134,2017-11-07 02:40:11,0
+88217,18,1,13,107,2017-11-08 16:34:35,0
+106674,18,1,18,107,2017-11-08 11:27:51,0
+60626,14,1,5,134,2017-11-08 16:06:13,0
+51718,13,1,13,477,2017-11-08 11:37:07,0
+120597,3,1,19,442,2017-11-06 22:11:06,0
+85085,3,1,19,211,2017-11-08 07:57:42,0
+71149,18,1,41,439,2017-11-09 03:42:43,0
+92993,18,1,19,439,2017-11-07 09:49:02,0
+93959,3,1,22,489,2017-11-09 13:08:05,0
+100176,18,1,13,107,2017-11-09 06:44:43,0
+174947,3,1,13,280,2017-11-07 02:27:00,0
+71762,3,1,13,280,2017-11-08 12:03:52,0
+65552,9,1,19,244,2017-11-08 08:20:15,0
+207400,3,1,19,280,2017-11-07 05:35:34,0
+125312,8,1,13,145,2017-11-06 22:57:30,0
+185121,9,1,16,466,2017-11-09 00:08:04,0
+69070,15,1,13,245,2017-11-08 12:15:59,0
+41980,2,1,13,377,2017-11-08 19:08:04,0
+75813,2,1,25,477,2017-11-08 04:02:50,0
+142416,3,1,17,452,2017-11-08 01:28:55,0
+201182,2,1,13,435,2017-11-07 15:42:29,0
+133522,22,1,19,496,2017-11-07 11:20:49,0
+36339,29,1,11,343,2017-11-07 02:58:37,0
+82012,2,1,19,236,2017-11-07 00:20:55,0
+38773,9,1,20,334,2017-11-07 17:54:04,0
+142067,2,1,6,435,2017-11-09 03:38:20,0
+38050,12,1,19,245,2017-11-07 16:08:00,0
+105475,15,1,36,3,2017-11-07 09:35:12,0
+201438,12,1,19,245,2017-11-07 08:52:32,0
+81613,9,1,13,232,2017-11-08 10:26:27,0
+97151,12,1,25,328,2017-11-07 14:15:40,0
+42103,9,2,9,334,2017-11-09 15:42:00,0
+31518,1,1,8,153,2017-11-09 03:38:56,0
+27406,12,1,13,245,2017-11-07 15:37:00,0
+81776,12,1,13,328,2017-11-09 06:30:25,0
+125934,12,1,17,265,2017-11-08 05:18:41,0
+81909,12,1,19,19,2017-11-08 11:29:07,0
+156937,2,1,13,237,2017-11-09 05:45:57,0
+88551,12,2,9,178,2017-11-08 10:49:29,0
+176464,18,1,19,107,2017-11-07 01:04:12,0
+111159,2,1,13,243,2017-11-09 05:14:42,0
+113389,12,1,8,178,2017-11-09 15:28:43,0
+56019,14,1,13,442,2017-11-09 00:50:30,0
+279884,2,1,17,435,2017-11-08 08:19:28,0
+82038,3,1,20,442,2017-11-07 19:55:56,0
+195434,1,1,19,125,2017-11-07 23:39:04,0
+99944,3,1,43,137,2017-11-07 14:43:53,0
+185871,21,1,15,128,2017-11-07 10:07:12,0
+210429,14,1,25,349,2017-11-09 01:32:06,0
+66316,1,1,23,124,2017-11-08 23:22:30,0
+121310,14,1,19,489,2017-11-08 09:39:06,0
+64054,20,2,20,259,2017-11-07 16:41:38,0
+117407,15,1,6,379,2017-11-09 06:48:05,0
+208047,8,1,17,259,2017-11-07 21:20:41,0
+41097,18,1,41,107,2017-11-08 14:29:27,0
+7350,23,1,19,153,2017-11-09 02:49:54,0
+95064,12,1,15,19,2017-11-09 04:14:25,0
+41369,2,1,19,243,2017-11-09 15:17:38,0
+14063,15,1,9,386,2017-11-08 23:46:29,0
+75007,3,1,19,205,2017-11-08 13:25:14,0
+92610,18,1,6,121,2017-11-07 13:08:28,0
+75634,2,1,19,212,2017-11-08 13:25:17,0
+63158,2,1,37,219,2017-11-09 14:52:44,0
+105560,3,1,19,417,2017-11-07 01:55:58,0
+60136,2,1,6,205,2017-11-07 16:56:07,0
+97283,3,1,13,280,2017-11-08 05:41:22,0
+5328,18,1,18,107,2017-11-09 04:26:16,0
+78856,9,1,28,334,2017-11-08 04:12:17,0
+79757,24,1,13,178,2017-11-07 12:49:27,0
+39818,7,1,13,101,2017-11-07 10:08:37,0
+39861,15,1,19,315,2017-11-09 01:19:25,0
+55119,2,1,35,237,2017-11-09 11:02:46,0
+73487,12,1,19,178,2017-11-07 09:14:05,0
+98497,15,1,37,245,2017-11-09 05:10:06,0
+120419,20,1,19,478,2017-11-08 05:53:55,0
+53715,93,1,13,371,2017-11-09 13:49:52,0
+14792,12,1,19,259,2017-11-09 15:34:18,0
+14301,22,1,19,116,2017-11-09 06:18:13,0
+5348,15,2,19,140,2017-11-09 09:28:40,0
+107709,3,1,13,280,2017-11-08 08:23:31,0
+61326,13,1,19,477,2017-11-07 03:44:31,0
+106986,3,1,31,280,2017-11-08 09:57:19,0
+78420,6,1,16,125,2017-11-09 11:43:53,0
+5596,9,1,13,450,2017-11-07 00:09:38,0
+94059,3,1,41,135,2017-11-07 11:45:38,0
+676,3,1,19,280,2017-11-08 04:55:29,0
+41611,15,1,25,430,2017-11-08 23:25:53,0
+16649,14,2,2,480,2017-11-09 04:53:33,0
+81896,14,1,1,401,2017-11-08 15:47:47,0
+129400,11,1,26,325,2017-11-07 11:42:52,0
+106644,3,1,32,280,2017-11-08 07:11:37,0
+3564,14,1,17,401,2017-11-08 08:05:25,0
+278716,11,1,6,319,2017-11-08 04:28:46,0
+100571,15,1,19,245,2017-11-08 13:46:02,0
+110768,2,1,17,236,2017-11-08 14:00:50,0
+106171,2,1,19,452,2017-11-08 06:01:52,0
+13516,7,1,58,101,2017-11-07 10:10:16,0
+40125,9,1,13,334,2017-11-07 03:31:25,0
+81898,12,1,13,178,2017-11-08 03:07:20,0
+40631,9,1,19,442,2017-11-08 01:12:53,0
+114617,27,1,25,122,2017-11-08 02:04:38,0
+5314,6,1,20,459,2017-11-07 14:21:13,0
+141572,28,1,18,135,2017-11-09 05:18:32,0
+179295,3,1,19,280,2017-11-07 02:08:27,0
+339057,9,1,13,232,2017-11-09 09:19:41,0
+3454,12,1,19,481,2017-11-07 00:14:05,0
+38633,27,1,8,153,2017-11-09 00:57:19,0
+90557,3,1,10,280,2017-11-09 05:25:55,0
+124750,14,1,13,349,2017-11-07 15:40:48,0
+115673,14,1,19,480,2017-11-09 09:06:37,0
+111025,3,1,13,280,2017-11-08 16:12:57,0
+81695,2,1,13,477,2017-11-07 05:43:48,0
+8539,12,1,35,265,2017-11-09 03:16:33,0
+114291,28,1,13,135,2017-11-08 13:48:42,0
+16929,2,1,8,452,2017-11-09 09:31:19,0
+44527,3,1,18,280,2017-11-08 00:40:08,0
+90920,18,3543,748,107,2017-11-07 18:59:32,0
+60529,3,1,19,173,2017-11-07 02:25:55,0
+38352,3,1,17,280,2017-11-08 10:56:36,0
+101894,3,1,13,205,2017-11-06 23:15:47,0
+100494,15,1,19,245,2017-11-09 03:39:20,0
+53454,2,1,14,377,2017-11-08 02:29:50,0
+124715,12,1,18,178,2017-11-08 15:51:32,0
+94987,23,1,17,153,2017-11-09 10:13:56,0
+82068,14,1,13,463,2017-11-09 03:37:25,0
+31540,12,1,19,178,2017-11-09 07:48:33,0
+44476,18,1,19,134,2017-11-08 00:41:09,0
+46754,8,1,10,145,2017-11-09 04:59:32,0
+93021,18,1,13,134,2017-11-08 02:50:38,0
+5314,2,1,6,477,2017-11-07 10:20:03,0
+23370,3,1,32,280,2017-11-08 15:30:14,0
+49520,1,1,13,153,2017-11-09 14:26:39,0
+138010,9,1,10,334,2017-11-06 23:55:02,0
+100896,9,1,12,334,2017-11-09 03:07:41,0
+41151,2,1,19,237,2017-11-07 00:49:54,0
+34632,15,1,19,130,2017-11-07 14:32:40,0
+103831,6,1,13,459,2017-11-08 12:55:37,0
+110189,21,1,18,128,2017-11-09 07:10:04,0
+109286,14,1,18,379,2017-11-08 06:41:23,0
+34388,20,1,13,478,2017-11-07 23:39:11,0
+84634,7,1,13,101,2017-11-08 09:56:15,0
+792,2,1,18,122,2017-11-09 01:36:42,0
+48859,12,1,19,219,2017-11-07 15:56:28,0
+50169,12,2,13,178,2017-11-07 10:53:29,0
+88180,15,1,19,245,2017-11-06 17:17:56,0
+97500,3,1,9,137,2017-11-08 14:00:23,0
+41106,15,1,13,245,2017-11-07 15:44:13,0
+44744,18,2,13,379,2017-11-09 11:11:02,0
+81012,12,1,18,145,2017-11-09 03:07:04,0
+81550,9,1,13,445,2017-11-08 14:24:42,0
+132837,151,0,24,347,2017-11-09 08:11:40,0
+31784,12,1,13,178,2017-11-07 00:14:15,0
+52052,3,1,13,280,2017-11-08 11:16:50,0
+36183,2,2,9,205,2017-11-08 16:31:24,0
+29490,3,1,13,489,2017-11-07 08:08:44,0
+110628,15,1,13,412,2017-11-08 18:01:15,0
+88914,14,1,15,463,2017-11-08 06:12:36,0
+197271,12,1,19,245,2017-11-07 16:35:55,0
+48288,14,1,20,360,2017-11-07 00:19:17,0
+16171,9,1,13,445,2017-11-06 17:31:03,0
+21845,24,1,19,105,2017-11-08 04:10:44,0
+55213,18,1,6,439,2017-11-08 04:03:01,0
+118094,13,1,8,477,2017-11-09 06:40:50,0
+100519,3,1,20,280,2017-11-09 07:29:36,0
+13597,12,1,22,245,2017-11-07 18:05:37,0
+75634,2,1,19,205,2017-11-07 14:54:03,0
+73144,2,1,13,317,2017-11-08 10:17:55,0
+124065,13,1,6,477,2017-11-08 11:32:25,0
+304680,21,1,19,128,2017-11-09 08:11:07,0
+28463,9,1,18,334,2017-11-08 21:25:07,0
+113682,3,1,19,280,2017-11-08 01:11:16,0
+315679,2,1,19,237,2017-11-09 04:20:35,0
+5314,12,2,13,178,2017-11-09 14:37:45,0
+120533,3,1,14,211,2017-11-08 12:08:41,0
+31158,2,1,19,469,2017-11-09 01:15:48,0
+92873,15,2,15,140,2017-11-07 12:16:16,0
+83928,12,1,13,409,2017-11-09 02:40:20,0
+100275,2,1,13,243,2017-11-08 15:06:21,0
+196454,6,1,13,125,2017-11-07 04:54:58,0
+19247,64,1,13,459,2017-11-07 12:41:27,0
+9647,8,1,17,145,2017-11-07 01:35:55,0
+63033,20,1,10,259,2017-11-08 08:03:49,0
+120221,14,1,13,442,2017-11-09 06:50:50,0
+19410,18,1,13,439,2017-11-08 05:24:46,0
+71597,13,1,13,477,2017-11-08 10:14:05,0
+191249,2,1,53,469,2017-11-07 02:27:05,0
+167801,6,1,23,459,2017-11-07 12:18:00,0
+9224,18,1,19,107,2017-11-08 00:41:20,0
+7715,150,1,48,110,2017-11-07 06:10:36,0
+64137,3,1,18,115,2017-11-06 16:16:39,0
+101074,15,1,13,245,2017-11-08 19:32:45,0
+14087,12,1,6,178,2017-11-09 12:25:10,0
+5201,12,1,18,245,2017-11-08 15:29:29,0
+10618,14,1,19,401,2017-11-07 06:30:18,0
+116708,12,1,19,259,2017-11-09 14:12:40,0
+113654,1,1,8,153,2017-11-09 09:46:29,0
+88730,6,1,15,459,2017-11-09 08:59:03,0
+135457,26,1,17,477,2017-11-09 02:29:52,0
+107520,28,1,37,135,2017-11-09 11:17:04,0
+125726,20,1,22,259,2017-11-06 16:04:49,0
+208308,12,1,19,245,2017-11-08 04:20:02,0
+37506,14,1,13,416,2017-11-07 01:17:41,0
+48240,7,2,9,101,2017-11-07 11:21:04,0
+69358,3,1,19,280,2017-11-07 05:40:50,0
+39742,9,1,13,234,2017-11-07 11:58:53,0
+170616,2,1,18,212,2017-11-09 04:42:21,0
+34419,2,1,17,435,2017-11-07 14:36:00,0
+50361,2,1,15,122,2017-11-09 11:42:43,0
+67586,2,1,19,236,2017-11-08 05:32:12,0
+18496,18,1,9,121,2017-11-08 16:27:13,0
+193464,6,1,15,459,2017-11-07 11:33:17,0
+18332,2,1,19,237,2017-11-07 14:41:54,0
+103295,2,1,1,435,2017-11-06 23:48:11,0
+12340,2,1,13,435,2017-11-08 09:35:59,0
+5812,2,1,15,477,2017-11-07 22:39:40,0
+37763,8,2,53,259,2017-11-09 13:41:16,0
+144957,12,1,19,265,2017-11-07 15:32:55,0
+220483,2,1,37,219,2017-11-07 23:59:13,0
+73487,3,1,22,409,2017-11-07 23:22:21,0
+66218,3,1,13,280,2017-11-06 17:07:41,0
+5314,3,1,3,115,2017-11-08 17:04:52,0
+4052,2,1,37,469,2017-11-09 13:34:36,0
+119349,3,1,13,280,2017-11-09 12:30:13,0
+65053,18,1,18,134,2017-11-08 08:04:01,0
+81714,15,1,13,111,2017-11-09 13:45:41,0
+124198,18,1,13,439,2017-11-08 06:38:19,0
+74621,21,1,12,128,2017-11-07 13:49:39,0
+15080,3,1,13,280,2017-11-07 01:57:19,0
+93542,9,1,19,445,2017-11-07 06:55:53,0
+317412,9,1,4,258,2017-11-09 11:05:04,0
+192648,26,1,11,121,2017-11-07 04:31:34,0
+66821,3,1,16,115,2017-11-09 10:33:30,0
+117582,10,1,19,113,2017-11-07 12:39:10,0
+160103,2,1,12,435,2017-11-08 06:23:36,0
+32930,12,1,17,409,2017-11-06 16:12:48,0
+63793,2,1,41,237,2017-11-09 04:55:13,0
+110211,3,1,16,130,2017-11-08 09:37:36,0
+120194,15,1,10,245,2017-11-08 08:36:45,0
+96165,64,1,13,459,2017-11-08 06:49:10,0
+63787,3,1,19,280,2017-11-08 07:17:24,0
+2321,9,1,13,232,2017-11-08 23:21:47,0
+22321,2,1,40,122,2017-11-07 09:59:05,0
+50397,12,1,19,497,2017-11-08 15:15:24,0
+210580,9,1,11,107,2017-11-09 09:31:21,0
+52805,2,1,34,236,2017-11-07 02:17:44,0
+40190,12,1,13,245,2017-11-06 17:46:50,0
+85072,15,1,10,245,2017-11-07 15:33:31,0
+46351,3,1,13,280,2017-11-08 13:12:49,0
+193104,64,1,19,459,2017-11-06 16:59:20,0
+65785,15,2,13,245,2017-11-08 05:01:29,0
+5314,28,1,4,135,2017-11-08 01:57:12,0
+56448,15,1,18,245,2017-11-07 08:51:09,0
+13634,11,1,3,481,2017-11-09 10:18:29,0
+172506,20,1,19,259,2017-11-06 17:28:57,0
+110032,3,1,6,137,2017-11-09 10:31:03,0
+198017,9,1,19,232,2017-11-07 07:25:11,0
+29836,3,1,8,19,2017-11-09 13:59:51,0
+26995,14,1,47,349,2017-11-09 02:13:40,0
+119688,12,1,22,409,2017-11-07 13:08:47,0
+71579,15,1,16,140,2017-11-09 01:46:35,0
+201420,3,1,16,280,2017-11-07 07:59:50,0
+123733,13,1,8,400,2017-11-09 02:52:07,0
+142432,11,1,25,219,2017-11-09 07:12:42,0
+16859,14,1,11,349,2017-11-09 03:21:11,0
+81973,9,1,13,466,2017-11-08 14:28:05,0
+111153,3,1,19,424,2017-11-06 16:00:14,0
+75057,2,1,25,469,2017-11-09 05:37:06,0
+77356,18,1,47,439,2017-11-07 18:25:39,0
+62930,6,1,13,459,2017-11-08 08:57:21,0
+59199,2,1,8,377,2017-11-07 16:32:00,0
+79925,3,1,19,280,2017-11-09 06:39:22,0
+28237,2,1,19,477,2017-11-09 06:46:23,0
+119531,26,1,13,477,2017-11-08 15:12:40,0
+12840,15,1,13,315,2017-11-09 09:19:38,0
+6330,3,2,19,153,2017-11-08 13:57:34,0
+101250,2,1,13,205,2017-11-09 04:28:49,0
+40028,3,1,22,480,2017-11-09 04:58:16,0
+94874,11,1,8,325,2017-11-07 08:08:30,0
+69135,12,1,8,140,2017-11-07 15:09:14,0
+124384,3,1,17,280,2017-11-08 15:29:02,0
+52024,3,1,19,153,2017-11-07 09:40:16,0
+71687,36,1,19,110,2017-11-09 15:56:36,0
+42851,3,1,18,442,2017-11-07 07:40:41,0
+166201,15,1,18,430,2017-11-07 01:12:18,0
+152307,2,1,15,219,2017-11-07 00:02:29,0
+101074,9,2,55,258,2017-11-09 12:50:10,0
+60735,9,1,20,244,2017-11-07 05:38:45,0
+17149,9,1,13,450,2017-11-09 06:42:40,0
+20116,2,1,13,477,2017-11-07 05:47:29,0
+85321,11,1,20,122,2017-11-08 15:13:09,0
+89960,3,1,6,137,2017-11-09 00:46:42,0
+66177,26,1,15,266,2017-11-09 14:10:27,0
+2254,3,1,13,173,2017-11-08 17:50:13,0
+641,18,1,13,134,2017-11-09 05:07:19,0
+231557,12,1,10,140,2017-11-08 06:31:01,0
+121419,2,1,19,452,2017-11-06 16:39:26,0
+83713,12,1,17,245,2017-11-08 11:55:39,0
+6396,24,1,19,105,2017-11-08 11:31:47,0
+55275,22,1,18,116,2017-11-07 05:04:18,0
+119531,9,2,1,107,2017-11-09 11:54:26,0
+35414,3,1,15,424,2017-11-08 06:33:02,0
+74478,9,1,17,334,2017-11-09 10:14:59,0
+64022,26,1,10,477,2017-11-09 09:27:12,0
+237101,2,2,13,122,2017-11-08 04:51:50,0
+83699,18,1,35,121,2017-11-08 08:29:34,0
+63205,3,1,19,173,2017-11-08 00:57:15,0
+35261,2,1,23,477,2017-11-09 06:35:22,0
+343722,14,1,41,118,2017-11-09 12:15:57,0
+114276,2,1,8,477,2017-11-07 06:36:53,0
+4869,9,1,19,466,2017-11-09 15:53:17,0
+6871,21,1,19,128,2017-11-08 00:20:58,0
+92766,3,1,19,280,2017-11-08 15:41:55,0
+65629,3,1,19,480,2017-11-08 09:42:07,0
+92108,15,1,13,265,2017-11-07 13:41:57,0
+123843,8,1,6,140,2017-11-09 11:42:04,0
+22266,1,1,22,452,2017-11-09 04:46:41,0
+23200,3,1,47,280,2017-11-09 06:00:08,0
+105116,14,1,13,463,2017-11-08 04:09:25,0
+52024,12,1,13,326,2017-11-07 10:24:11,0
+41573,3,1,48,409,2017-11-08 14:11:19,0
+71272,18,1,40,107,2017-11-08 05:48:51,0
+99954,10,1,19,317,2017-11-08 04:55:55,0
+86231,2,1,11,122,2017-11-07 07:44:01,0
+338551,19,0,24,213,2017-11-09 14:48:33,0
+123977,4,1,37,101,2017-11-08 02:29:06,0
+82137,2,1,19,435,2017-11-07 02:48:33,0
+108942,3,1,19,137,2017-11-08 14:02:46,0
+42297,2,1,17,212,2017-11-07 14:24:12,0
+3332,12,1,22,245,2017-11-08 14:22:00,0
+351763,18,1,8,134,2017-11-09 02:26:42,0
+106437,12,1,23,259,2017-11-09 07:19:06,0
+336720,12,1,19,19,2017-11-09 03:41:42,0
+238978,12,1,25,178,2017-11-08 23:58:58,0
+17149,9,2,17,442,2017-11-08 14:16:17,0
+58982,2,1,30,236,2017-11-09 02:50:49,0
+62616,14,1,12,379,2017-11-06 18:53:55,0
+80446,9,1,19,258,2017-11-08 23:00:50,0
+16453,25,2,9,259,2017-11-07 12:04:12,0
+26995,18,1,49,107,2017-11-09 07:00:15,0
+33841,3,1,17,137,2017-11-08 00:28:05,0
+37118,9,1,13,258,2017-11-08 17:17:27,0
+45745,9,1,6,466,2017-11-08 13:04:38,0
+134074,19,0,0,213,2017-11-07 15:33:23,0
+155347,18,1,9,107,2017-11-07 02:44:48,0
+65762,13,1,19,477,2017-11-07 20:12:16,0
+104569,6,1,13,459,2017-11-06 22:42:58,0
+83087,3,1,19,466,2017-11-08 23:24:57,0
+72346,15,1,19,140,2017-11-08 02:10:33,0
+43833,9,1,19,322,2017-11-09 15:15:28,0
+55032,15,1,36,245,2017-11-07 18:55:15,0
+115586,3,1,19,280,2017-11-08 13:52:17,0
+84819,21,1,13,128,2017-11-07 04:12:03,0
+185146,9,1,22,489,2017-11-08 06:57:51,0
+91611,19,0,0,213,2017-11-08 15:52:12,0
+27038,9,1,10,466,2017-11-08 07:26:48,0
+149790,18,1,10,107,2017-11-09 04:35:57,0
+119818,15,1,13,430,2017-11-09 10:46:12,0
+118146,9,1,18,466,2017-11-08 01:42:41,0
+18058,3,1,16,452,2017-11-08 08:44:12,0
+27445,8,1,20,145,2017-11-07 15:48:05,0
+242383,20,1,13,259,2017-11-08 13:14:58,0
+6908,18,1,19,439,2017-11-09 08:22:58,0
+39075,14,1,13,463,2017-11-07 07:59:58,0
+149367,9,1,12,215,2017-11-06 22:44:20,0
+43322,3,1,19,280,2017-11-07 08:08:17,0
+112302,2,2,10,477,2017-11-09 07:49:53,0
+12524,14,1,17,208,2017-11-08 01:06:20,0
+85625,9,1,19,466,2017-11-08 18:04:30,0
+109425,6,1,19,459,2017-11-07 03:04:11,0
+127181,21,1,19,128,2017-11-06 17:00:27,0
+88186,14,1,13,371,2017-11-08 00:25:25,0
+348142,9,2,19,244,2017-11-09 13:59:59,0
+154226,21,1,13,128,2017-11-08 04:13:00,0
+147957,27,1,19,153,2017-11-07 13:26:44,0
+124128,3,1,35,19,2017-11-08 11:08:56,0
+83472,3,1,6,280,2017-11-07 04:34:18,0
+121039,24,1,19,105,2017-11-07 16:43:52,0
+77869,9,1,19,232,2017-11-09 15:56:56,0
+70404,14,1,13,480,2017-11-08 22:40:41,0
+14737,18,1,17,134,2017-11-08 13:02:22,0
+120503,12,1,10,245,2017-11-07 05:17:50,0
+207815,18,1,20,107,2017-11-08 07:54:23,0
+165542,9,1,19,466,2017-11-09 00:27:16,0
+11607,1,2,2,153,2017-11-08 14:45:19,0
+26990,18,1,31,439,2017-11-07 00:22:44,0
+83045,2,1,15,237,2017-11-08 00:29:18,0
+121561,9,1,13,244,2017-11-09 09:09:35,0
+35929,15,1,19,245,2017-11-09 03:07:37,0
+26814,3,1,18,280,2017-11-08 13:14:14,0
+69886,12,1,6,265,2017-11-06 17:19:46,0
+59182,15,1,22,265,2017-11-07 04:32:54,0
+50168,18,1,27,107,2017-11-07 04:44:18,0
+106537,2,1,13,477,2017-11-08 19:11:41,0
+115407,21,1,9,232,2017-11-09 09:21:55,0
+36286,2,1,41,377,2017-11-09 01:15:37,0
+25289,21,1,25,128,2017-11-09 06:51:39,0
+92227,12,1,13,105,2017-11-07 14:29:33,0
+101818,11,1,19,137,2017-11-09 04:53:45,0
+75912,3,1,19,130,2017-11-09 15:02:36,0
+83824,3,1,13,130,2017-11-08 12:46:12,0
+1268,15,1,13,245,2017-11-08 19:56:40,0
+121278,21,1,19,128,2017-11-09 14:28:34,0
+191817,18,1,19,107,2017-11-07 05:17:28,0
+315991,3,1,13,280,2017-11-09 03:04:45,0
+132538,2,1,13,477,2017-11-07 01:52:46,0
+118469,9,1,13,134,2017-11-07 00:28:28,0
+73487,18,1,9,107,2017-11-08 08:15:50,0
+69710,3,1,25,280,2017-11-08 06:17:39,0
+105475,2,1,18,477,2017-11-07 17:36:05,0
+23733,1,1,19,124,2017-11-09 12:23:09,0
+26204,2,1,17,122,2017-11-09 03:28:30,0
+89210,1,1,17,153,2017-11-07 03:07:59,0
+133825,2,1,17,243,2017-11-06 20:48:51,0
+43167,18,1,27,107,2017-11-09 07:11:09,0
+6481,15,1,17,111,2017-11-06 17:03:45,0
+90782,13,1,16,469,2017-11-07 06:56:12,0
+48170,2,1,13,237,2017-11-09 13:00:51,0
+27782,15,1,16,412,2017-11-08 00:20:10,0
+53964,9,1,13,215,2017-11-07 09:19:30,0
+182483,12,1,13,259,2017-11-07 13:04:59,0
+121163,9,1,22,442,2017-11-09 14:35:36,0
+206498,9,1,10,448,2017-11-09 04:06:30,0
+22189,2,1,36,364,2017-11-08 16:11:17,0
+99150,12,1,3,205,2017-11-09 13:13:59,0
+42726,14,1,35,379,2017-11-09 01:13:47,0
+78881,3,1,17,489,2017-11-06 21:19:29,0
+84635,12,1,13,259,2017-11-06 20:07:47,0
+121419,12,1,19,265,2017-11-09 15:30:54,0
+110354,21,2,37,232,2017-11-08 22:29:48,0
+8179,13,1,53,477,2017-11-06 17:43:55,0
+40045,12,1,8,328,2017-11-07 05:41:00,0
+285017,3,2,9,442,2017-11-07 18:26:04,0
+74324,11,1,16,481,2017-11-06 18:04:33,0
+112295,19,0,24,213,2017-11-07 08:09:42,1
+41277,3,1,19,480,2017-11-08 08:03:04,0
+80926,14,1,19,442,2017-11-08 23:41:10,0
+75177,2,1,41,435,2017-11-09 06:53:07,0
+31055,3,1,13,115,2017-11-08 04:33:06,0
+56863,3,1,37,280,2017-11-07 08:10:41,0
+63994,3,1,19,489,2017-11-08 07:58:36,0
+69266,18,1,19,121,2017-11-07 17:54:07,0
+167200,12,1,32,140,2017-11-06 21:00:11,0
+106537,9,1,41,334,2017-11-07 14:04:42,0
+27751,13,1,16,477,2017-11-07 15:39:36,0
+105603,2,1,16,205,2017-11-08 14:08:24,0
+66948,12,1,41,265,2017-11-08 13:34:22,0
+92993,26,1,23,477,2017-11-08 11:30:46,0
+109425,18,1,18,107,2017-11-09 11:34:05,0
+226237,12,1,13,245,2017-11-07 18:24:01,0
+198160,26,1,22,477,2017-11-09 09:42:33,0
+16426,12,1,41,265,2017-11-09 02:49:05,0
+118229,3,1,22,402,2017-11-07 10:21:29,0
+50489,9,1,25,232,2017-11-07 11:55:57,0
+35841,8,1,13,145,2017-11-06 23:52:06,0
+120299,183,3543,748,347,2017-11-08 14:47:01,0
+114276,18,1,17,107,2017-11-09 05:46:17,0
+73516,12,1,35,326,2017-11-08 01:32:04,0
+162981,18,1,19,439,2017-11-06 16:21:04,0
+207864,12,1,49,481,2017-11-07 01:10:04,0
+38219,18,1,17,134,2017-11-09 02:39:36,0
+120413,3,1,28,280,2017-11-07 04:43:33,0
+125004,2,1,19,477,2017-11-08 03:39:09,0
+57276,12,1,10,340,2017-11-08 14:50:03,0
+118728,2,1,10,477,2017-11-07 09:20:21,0
+76745,11,1,47,487,2017-11-08 18:09:03,0
+109630,22,1,36,116,2017-11-07 07:33:37,0
+69395,12,1,13,259,2017-11-08 11:25:44,0
+220589,1,1,10,124,2017-11-09 03:31:23,0
+64756,32,1,8,376,2017-11-08 11:49:05,0
+111668,3,1,19,205,2017-11-07 13:04:42,0
+96030,9,1,10,215,2017-11-08 06:22:34,0
+27045,24,1,27,178,2017-11-09 03:16:15,0
+81699,2,1,23,236,2017-11-09 05:09:18,0
+195270,12,1,41,424,2017-11-07 05:14:10,0
+35046,12,1,19,178,2017-11-07 10:02:07,0
+103276,64,1,15,459,2017-11-07 14:28:26,0
+111025,15,1,13,140,2017-11-09 10:46:13,0
+90053,18,1,13,107,2017-11-09 15:44:11,0
+75177,11,1,17,219,2017-11-07 10:50:21,0
+40067,20,1,13,259,2017-11-08 19:28:13,0
+114904,2,1,6,477,2017-11-07 05:34:01,0
+20989,3,1,18,280,2017-11-09 05:54:53,0
+78426,3,1,20,379,2017-11-08 09:16:43,0
+135939,3,1,19,280,2017-11-07 05:33:52,0
+20528,9,1,19,334,2017-11-08 08:08:05,0
+60752,28,1,19,135,2017-11-09 12:12:52,0
+61400,18,1,37,107,2017-11-09 12:06:15,0
+151800,12,1,22,265,2017-11-08 08:59:11,0
+81082,3,1,20,280,2017-11-09 06:08:55,0
+32238,14,1,13,446,2017-11-09 13:56:28,0
+6641,8,1,40,145,2017-11-07 17:23:45,0
+31065,2,1,15,237,2017-11-08 02:56:31,0
+44673,3,1,13,417,2017-11-08 06:36:34,0
+19170,23,1,19,153,2017-11-07 19:08:12,0
+92904,2,1,17,469,2017-11-08 17:40:44,0
+66576,55,1,19,453,2017-11-07 02:58:14,0
+29300,22,1,16,116,2017-11-08 06:05:20,0
+100186,18,1,37,121,2017-11-09 06:59:34,0
+79214,18,1,18,134,2017-11-09 10:28:42,0
+93523,18,1,22,439,2017-11-09 05:35:51,0
+12505,2,1,17,205,2017-11-07 14:10:37,0
+120709,18,1,19,107,2017-11-08 13:26:27,0
+105560,18,1,27,134,2017-11-07 07:48:21,0
+177760,12,1,12,205,2017-11-08 09:30:18,0
+182367,2,1,13,469,2017-11-07 23:50:18,0
+107335,12,1,8,140,2017-11-09 09:11:46,0
+55840,3,1,22,379,2017-11-07 17:24:53,0
+42447,3,1,20,409,2017-11-06 17:59:28,0
+39368,3,1,19,280,2017-11-08 02:27:29,0
+159971,12,1,19,219,2017-11-07 05:05:59,0
+35973,14,1,17,113,2017-11-09 07:29:26,0
+34911,9,1,19,258,2017-11-07 00:34:51,0
+105534,2,1,13,205,2017-11-07 00:22:46,0
+48615,3,1,17,115,2017-11-09 06:43:40,0
+79827,1,1,6,153,2017-11-08 10:09:57,0
+34189,8,1,15,145,2017-11-09 09:03:58,0
+71388,18,1,13,107,2017-11-07 00:32:13,0
+59125,2,1,13,205,2017-11-07 16:10:05,0
+104705,13,1,13,477,2017-11-07 05:44:49,0
+40179,9,1,20,466,2017-11-09 15:59:44,0
+33143,18,1,22,439,2017-11-07 04:47:17,0
+91310,3,1,19,379,2017-11-08 11:50:24,0
+73555,9,2,19,466,2017-11-09 03:29:55,0
+105215,9,1,13,244,2017-11-07 20:48:28,0
+93263,2,2,65,205,2017-11-09 09:23:16,0
+28131,15,1,13,245,2017-11-07 16:10:06,0
+40329,2,1,19,452,2017-11-08 09:02:44,0
+139511,15,1,19,480,2017-11-07 00:37:41,0
+202750,3,1,18,115,2017-11-08 03:40:45,0
+168869,15,1,19,480,2017-11-08 08:32:00,0
+60271,12,1,19,245,2017-11-08 06:48:56,0
+38527,21,1,11,128,2017-11-08 00:21:02,0
+145970,15,1,17,140,2017-11-08 09:02:46,0
+162208,2,1,13,236,2017-11-08 08:34:54,0
+121239,12,1,20,328,2017-11-08 14:51:43,0
+37774,12,1,19,259,2017-11-07 08:13:55,0
+112014,2,1,19,469,2017-11-07 08:50:26,0
+201182,2,1,25,477,2017-11-07 03:37:44,0
+95766,12,2,19,265,2017-11-09 00:06:01,0
+94521,12,1,19,124,2017-11-08 20:05:58,0
+75856,12,1,13,178,2017-11-07 09:44:13,0
+76473,23,1,17,153,2017-11-08 03:41:59,0
+55830,24,1,19,105,2017-11-06 18:31:15,0
+95766,6,1,19,459,2017-11-08 03:28:17,0
+160345,2,1,13,377,2017-11-07 10:14:40,0
+103036,15,1,6,3,2017-11-09 13:20:38,0
+176863,12,1,19,178,2017-11-08 07:15:14,0
+67694,47,1,22,484,2017-11-09 10:56:15,0
+73516,9,1,19,234,2017-11-07 16:07:00,0
+48251,12,1,19,340,2017-11-09 02:03:54,0
+46266,20,2,18,259,2017-11-08 23:17:48,0
+107798,18,3543,748,107,2017-11-07 17:02:57,0
+164132,18,1,13,121,2017-11-06 23:23:32,0
+88281,3,1,2,113,2017-11-09 10:47:10,0
+181458,12,1,16,259,2017-11-09 10:52:45,0
+41471,12,1,19,245,2017-11-06 16:16:58,0
+7754,3,1,13,466,2017-11-08 08:52:56,0
+100275,18,1,35,107,2017-11-07 13:56:45,0
+73516,12,1,13,326,2017-11-07 13:41:31,0
+23398,9,1,10,334,2017-11-07 19:32:18,0
+91166,12,1,36,212,2017-11-09 12:54:13,0
+178851,13,1,3,400,2017-11-07 10:00:09,0
+105654,6,2,19,459,2017-11-07 14:51:00,0
+138585,2,1,25,377,2017-11-08 10:19:10,0
+105649,12,2,4,145,2017-11-07 16:00:05,0
+3964,3,1,13,135,2017-11-07 14:45:31,0
+103147,3,1,19,280,2017-11-07 03:51:47,0
+37556,6,1,20,125,2017-11-07 02:36:53,0
+128221,13,1,19,477,2017-11-07 02:50:04,0
+36613,9,1,19,215,2017-11-08 02:17:26,0
+69550,2,1,19,236,2017-11-07 10:57:53,0
+103351,2,1,19,477,2017-11-07 01:17:33,0
+100161,2,1,17,317,2017-11-08 10:39:55,0
+291772,2,1,19,219,2017-11-08 23:30:46,0
+135306,15,1,19,245,2017-11-08 03:40:22,0
+95366,2,1,6,477,2017-11-09 05:25:48,0
+124446,21,1,13,128,2017-11-09 09:26:59,0
+44164,2,1,27,469,2017-11-08 13:52:09,0
+145896,26,1,13,266,2017-11-07 15:20:50,0
+66276,2,1,23,122,2017-11-08 23:30:41,0
+164557,14,1,18,123,2017-11-07 13:39:01,0
+37490,12,1,9,259,2017-11-08 06:15:33,0
+9073,3,1,19,442,2017-11-09 09:46:35,0
+40710,1,1,19,115,2017-11-08 06:03:18,0
+21336,18,1,37,134,2017-11-08 04:21:30,0
+170208,2,1,22,435,2017-11-08 00:20:20,0
+93289,12,1,13,178,2017-11-07 11:08:41,0
+32765,3,1,19,442,2017-11-07 03:30:21,0
+18753,26,1,13,477,2017-11-08 13:43:05,0
+41963,2,1,13,236,2017-11-07 10:07:14,0
+59951,21,1,13,128,2017-11-09 10:36:12,0
+61348,9,1,20,258,2017-11-07 16:01:12,0
+121806,2,1,8,236,2017-11-07 08:56:49,0
+25071,12,1,13,245,2017-11-08 05:03:00,0
+51936,3,1,37,280,2017-11-09 00:06:01,0
+47162,3,1,19,280,2017-11-07 09:32:28,0
+112418,10,1,18,317,2017-11-08 00:19:43,0
+106929,26,1,16,121,2017-11-08 17:08:16,0
+66948,2,1,17,435,2017-11-07 13:33:40,0
+100393,14,1,15,463,2017-11-07 13:54:32,0
+57803,8,1,19,145,2017-11-07 23:52:26,0
+213150,3,1,19,280,2017-11-08 03:15:35,0
+9400,8,1,13,145,2017-11-09 07:03:02,0
+15431,3,1,19,280,2017-11-07 13:20:08,0
+5348,13,1,17,477,2017-11-08 10:29:47,0
+125175,15,1,19,245,2017-11-08 05:24:54,0
+50883,26,1,11,121,2017-11-07 22:22:22,0
+29533,3,1,22,442,2017-11-09 11:03:20,0
+89345,8,1,13,145,2017-11-07 08:05:19,0
+38380,3,1,13,379,2017-11-08 05:41:55,0
+106200,9,1,1,334,2017-11-07 00:39:43,0
+39018,12,1,47,265,2017-11-07 11:35:24,0
+92727,64,1,15,459,2017-11-08 12:03:59,0
+37972,535,3032,607,347,2017-11-07 02:42:56,0
+286608,9,1,13,134,2017-11-07 23:16:35,0
+75422,3,1,9,130,2017-11-09 15:52:08,0
+39338,3,1,8,280,2017-11-08 09:10:32,0
+37503,9,1,19,334,2017-11-09 08:40:46,0
+121368,3,1,10,280,2017-11-08 09:00:20,0
+56166,14,1,9,463,2017-11-08 04:28:39,0
+353225,1,1,15,213,2017-11-09 05:01:51,0
+58453,2,1,12,243,2017-11-08 01:53:01,0
+74013,12,1,19,409,2017-11-09 06:12:54,0
+182286,26,1,47,477,2017-11-09 08:51:00,0
+95946,12,1,31,259,2017-11-07 05:22:28,0
+32226,2,1,15,258,2017-11-08 07:08:37,0
+119289,14,1,9,463,2017-11-06 22:45:23,0
+96105,12,1,20,178,2017-11-08 11:00:12,0
+871,22,1,19,116,2017-11-08 23:39:30,0
+71295,12,1,19,265,2017-11-07 00:48:02,0
+116795,3,1,13,30,2017-11-09 14:40:34,0
+124295,3,1,12,280,2017-11-08 07:58:31,0
+120335,13,1,18,400,2017-11-07 06:28:56,0
+19175,3,1,15,280,2017-11-08 00:00:46,0
+202856,1,1,19,349,2017-11-07 07:47:35,0
+34939,21,1,41,128,2017-11-07 06:10:51,0
+63149,12,1,17,174,2017-11-07 00:37:12,0
+302976,1,1,4,124,2017-11-09 15:17:05,0
+41040,3,1,13,280,2017-11-09 04:28:42,0
+74728,6,1,19,459,2017-11-07 01:55:15,0
+69753,3,1,19,442,2017-11-09 06:12:40,0
+131991,12,1,13,265,2017-11-07 06:49:56,0
+31502,2,1,3,377,2017-11-09 01:04:32,0
+76211,12,1,19,259,2017-11-07 09:05:04,0
+103026,12,1,47,481,2017-11-09 15:13:59,0
+116375,12,1,14,328,2017-11-07 11:08:13,0
+61623,3,1,17,205,2017-11-09 01:26:57,0
+126401,15,1,13,245,2017-11-08 21:55:03,0
+81897,24,2,13,178,2017-11-08 14:12:51,0
+1699,23,1,6,153,2017-11-07 07:03:47,0
+25737,2,1,13,205,2017-11-08 15:18:48,0
+48846,3,1,19,280,2017-11-09 05:08:08,0
+49321,13,1,19,400,2017-11-07 10:06:24,0
+74550,9,1,16,391,2017-11-09 09:23:21,0
+205408,18,1,35,134,2017-11-08 12:29:20,0
+103224,9,1,47,466,2017-11-07 05:26:29,0
+75898,1,1,19,153,2017-11-09 04:19:12,0
+178588,22,1,13,496,2017-11-07 07:29:37,0
+88212,9,1,13,244,2017-11-07 10:57:57,0
+32000,2,1,13,435,2017-11-08 13:20:36,0
+99906,10,1,13,317,2017-11-08 00:47:59,0
+138385,18,1,4,439,2017-11-07 05:29:09,0
+61168,9,1,31,215,2017-11-07 16:57:59,0
+222960,8,1,13,140,2017-11-08 23:20:34,0
+16955,3,1,13,424,2017-11-09 07:21:59,0
+48671,13,1,19,477,2017-11-09 13:47:43,0
+42164,18,1,27,134,2017-11-09 06:46:33,0
+118474,3,1,18,115,2017-11-08 09:47:10,0
+205644,26,1,15,266,2017-11-09 04:47:43,0
+5314,12,1,13,178,2017-11-07 11:53:01,0
+18204,12,1,19,178,2017-11-09 05:51:17,0
+44067,3,1,19,280,2017-11-07 02:52:17,0
+14325,6,1,12,459,2017-11-07 08:36:14,0
+16354,12,1,13,140,2017-11-06 23:45:00,0
+257704,15,1,17,315,2017-11-08 17:10:40,0
+60384,9,1,22,215,2017-11-09 10:30:05,0
+2076,15,1,19,3,2017-11-09 08:02:15,0
+53454,11,1,13,173,2017-11-09 12:31:03,0
+55145,3,1,25,480,2017-11-08 16:14:28,0
+212504,12,1,19,178,2017-11-07 04:33:40,0
+64209,13,1,20,477,2017-11-06 17:23:19,0
+81513,15,1,19,153,2017-11-09 12:52:09,0
+43959,13,1,13,477,2017-11-07 11:41:11,0
+101919,9,1,22,215,2017-11-07 00:39:31,0
+180740,3,1,13,280,2017-11-07 06:20:09,0
+159333,14,1,19,134,2017-11-08 01:04:14,0
+44005,2,1,14,477,2017-11-08 11:09:17,0
+54895,14,1,14,463,2017-11-08 01:00:09,0
+98054,26,1,13,266,2017-11-07 00:34:04,0
+123901,3,1,19,280,2017-11-07 02:47:29,0
+64485,15,1,19,245,2017-11-06 16:02:02,0
+174433,9,1,8,334,2017-11-09 09:32:58,0
+10120,21,1,18,128,2017-11-07 09:59:35,0
+93162,15,1,19,245,2017-11-07 07:34:07,0
+67597,18,1,26,121,2017-11-08 04:03:23,0
+36934,2,1,13,243,2017-11-07 00:45:42,0
+261898,12,1,19,212,2017-11-08 08:57:38,0
+66266,2,1,18,477,2017-11-06 16:21:46,0
+201786,2,1,10,469,2017-11-07 04:56:01,0
+53454,9,1,13,445,2017-11-07 12:39:21,0
+30022,23,1,19,153,2017-11-09 03:55:24,0
+57886,27,1,14,122,2017-11-07 04:54:49,0
+204007,15,1,19,315,2017-11-07 08:13:22,0
+120203,23,1,14,153,2017-11-08 04:20:25,0
+10675,2,1,22,237,2017-11-09 05:39:10,0
+89372,8,1,19,145,2017-11-09 10:08:21,0
+65575,15,1,13,265,2017-11-09 07:23:20,0
+17552,2,1,8,212,2017-11-09 14:44:56,0
+64071,14,1,19,379,2017-11-07 20:02:44,0
+79238,3,1,19,417,2017-11-08 00:46:20,0
+103036,12,2,22,178,2017-11-08 01:31:21,0
+123974,3,1,30,130,2017-11-09 13:41:01,0
+102880,15,1,19,245,2017-11-07 03:34:11,0
+108806,9,1,13,466,2017-11-09 05:24:50,0
+5348,14,1,19,379,2017-11-09 15:25:27,0
+4381,9,1,20,334,2017-11-08 23:17:12,0
+71793,3,1,13,280,2017-11-08 18:12:46,0
+206237,18,1,15,107,2017-11-07 15:02:27,0
+45208,3,1,6,280,2017-11-07 06:39:18,0
+105292,14,1,19,208,2017-11-08 00:49:06,0
+31959,94,1,79,361,2017-11-09 15:42:40,0
+109096,2,1,10,477,2017-11-09 09:16:42,0
+43803,18,1,22,107,2017-11-07 01:52:37,0
+80827,18,1,13,379,2017-11-09 14:17:02,0
+11039,3,1,22,466,2017-11-08 04:51:16,0
+41112,3,1,27,280,2017-11-09 04:32:00,0
+78477,3,1,13,280,2017-11-08 10:20:49,0
+5348,26,1,35,121,2017-11-09 12:32:20,0
+41203,64,1,19,459,2017-11-07 12:02:45,0
+104188,18,1,19,121,2017-11-06 16:54:44,0
+10953,2,1,25,317,2017-11-08 10:28:51,0
+67237,15,1,13,265,2017-11-07 15:11:43,0
+70416,12,1,8,328,2017-11-08 11:19:51,0
+6903,11,1,13,319,2017-11-08 03:22:17,0
+105532,3,1,17,489,2017-11-09 13:31:32,0
+37462,12,1,19,409,2017-11-06 23:48:10,0
+95766,9,1,47,232,2017-11-09 15:14:00,0
+16775,12,1,13,245,2017-11-08 14:23:25,0
+51719,12,1,13,245,2017-11-09 15:54:25,0
+93587,3,1,13,280,2017-11-08 03:48:23,0
+104621,13,1,17,477,2017-11-08 15:04:45,0
+26995,12,1,13,178,2017-11-09 13:28:33,0
+22866,18,1,15,107,2017-11-08 00:30:43,0
+43667,1,1,53,439,2017-11-09 07:40:13,0
+83730,3,1,13,452,2017-11-08 09:07:09,0
+133301,25,1,13,259,2017-11-07 21:00:02,0
+232047,2,1,13,219,2017-11-08 05:29:52,0
+12874,19,0,0,210,2017-11-09 15:06:17,0
+198652,3,1,9,280,2017-11-09 01:51:16,0
+56581,18,1,13,439,2017-11-08 14:52:38,0
+148429,2,1,20,469,2017-11-07 12:16:55,0
+55508,24,1,13,105,2017-11-09 04:59:53,0
+35696,1,1,13,153,2017-11-08 12:59:42,0
+31534,1,1,17,134,2017-11-07 01:50:27,0
+48783,18,1,17,376,2017-11-07 08:03:35,0
+54991,14,1,19,463,2017-11-09 03:11:30,0
+240704,3,1,6,280,2017-11-09 02:46:22,0
+60163,2,1,9,205,2017-11-08 03:55:36,0
+73516,15,1,35,245,2017-11-07 09:40:55,0
+102163,3,1,17,280,2017-11-09 06:00:02,0
+60752,18,1,35,107,2017-11-08 13:07:14,0
+218373,18,1,22,121,2017-11-07 18:30:24,0
+87879,14,1,14,379,2017-11-09 05:41:42,0
+116740,3,1,13,115,2017-11-07 06:36:01,0
+52043,2,1,19,237,2017-11-07 14:40:23,0
+43793,13,1,6,400,2017-11-07 14:43:44,0
+99681,6,1,6,459,2017-11-09 12:06:41,0
+99917,3,1,18,452,2017-11-07 05:15:07,0
+12505,20,1,17,259,2017-11-07 01:37:35,0
+77361,21,1,7,232,2017-11-08 19:10:57,0
+24021,20,1,19,478,2017-11-07 00:18:42,0
+37972,1,1,22,439,2017-11-08 13:04:38,0
+92327,12,1,13,245,2017-11-07 07:00:29,0
+100149,3,1,1,317,2017-11-08 07:02:03,0
+55979,15,1,19,153,2017-11-07 08:06:26,0
+7346,7,1,19,101,2017-11-07 10:21:38,0
+53073,9,1,18,334,2017-11-08 19:08:55,0
+38163,12,1,13,245,2017-11-07 23:08:09,0
+26384,2,1,13,364,2017-11-08 19:33:11,0
+125385,18,1,13,30,2017-11-07 03:20:16,0
+67589,2,1,12,469,2017-11-07 00:43:42,0
+148500,3,1,13,280,2017-11-08 02:16:11,0
+50136,3,1,19,280,2017-11-08 14:13:03,0
+44493,13,1,18,477,2017-11-08 18:30:02,0
+58369,3,1,18,280,2017-11-08 05:45:52,0
+60163,2,2,43,205,2017-11-08 04:23:45,0
+4019,3,1,17,211,2017-11-09 15:49:19,0
+119289,12,2,37,265,2017-11-09 12:11:47,0
+73144,6,1,19,125,2017-11-06 17:03:13,0
+53454,2,1,12,477,2017-11-07 06:06:31,0
+191378,18,1,19,107,2017-11-09 10:23:56,0
+7373,19,0,0,347,2017-11-09 04:20:25,0
+50588,25,1,3,259,2017-11-08 10:09:55,0
+307887,14,1,20,113,2017-11-09 02:32:52,0
+93486,23,1,18,153,2017-11-08 08:34:28,0
+27268,26,1,22,266,2017-11-08 12:54:35,0
+76872,18,1,10,107,2017-11-08 10:01:40,0
+53454,9,1,6,127,2017-11-09 09:38:40,0
+57154,18,1,14,134,2017-11-07 20:00:00,0
+50689,3,1,19,19,2017-11-07 01:19:53,0
+178716,12,1,1,178,2017-11-07 09:38:17,0
+121979,2,1,18,477,2017-11-09 02:34:46,0
+117115,13,1,19,400,2017-11-08 09:41:05,0
+5348,13,1,27,477,2017-11-06 23:08:35,0
+45467,12,1,19,19,2017-11-07 15:56:29,0
+125592,14,1,13,467,2017-11-08 19:39:31,0
+1089,17,1,16,280,2017-11-07 03:03:21,0
+73487,14,1,18,467,2017-11-09 09:17:09,0
+85150,2,1,6,236,2017-11-08 16:12:11,0
+79909,12,1,23,245,2017-11-07 07:59:49,0
+111931,25,1,22,259,2017-11-07 04:30:55,0
+207643,6,1,53,459,2017-11-08 08:33:19,0
+44744,6,1,13,459,2017-11-09 15:22:32,0
+46625,11,1,19,481,2017-11-09 05:47:50,0
+68930,9,1,19,134,2017-11-08 14:15:39,0
+85541,3,1,1,280,2017-11-08 14:43:07,0
+238143,12,1,17,265,2017-11-08 09:06:37,0
+28611,12,1,13,245,2017-11-09 05:20:19,0
+68418,3,1,9,489,2017-11-08 06:28:38,0
+18082,9,1,13,215,2017-11-08 02:17:48,0
+121209,9,1,32,232,2017-11-07 13:17:02,0
+89456,10,1,13,317,2017-11-08 06:31:04,0
+25097,3,1,10,402,2017-11-09 06:44:17,0
+194809,9,1,19,334,2017-11-08 02:49:10,0
+91366,9,1,17,244,2017-11-07 09:22:50,0
+21627,17,1,19,134,2017-11-08 08:15:36,0
+39361,3,1,8,280,2017-11-07 00:27:03,0
+56430,23,1,16,153,2017-11-08 19:19:51,0
+233477,3,1,22,424,2017-11-08 05:58:56,0
+301829,7,1,23,101,2017-11-09 06:56:16,0
+145490,12,1,36,497,2017-11-07 08:36:11,0
+319476,2,1,77,477,2017-11-08 22:26:41,0
+45432,3,1,18,424,2017-11-08 01:21:16,0
+48282,7,1,42,101,2017-11-09 14:09:28,0
+5348,14,2,27,123,2017-11-07 08:40:17,0
+105714,15,1,6,245,2017-11-07 22:41:25,0
+82012,8,1,10,145,2017-11-06 23:24:50,0
+5788,2,1,10,435,2017-11-07 01:04:57,0
+70298,47,1,6,484,2017-11-08 02:24:50,0
+106883,12,1,19,265,2017-11-07 03:22:09,0
+116366,12,1,19,178,2017-11-06 23:02:01,0
+169049,9,1,19,244,2017-11-07 05:35:01,0
+150739,2,1,15,477,2017-11-07 04:06:14,0
+8482,3,1,19,280,2017-11-08 01:35:50,0
+16464,3,1,13,115,2017-11-07 11:04:50,0
+128051,2,1,22,122,2017-11-07 07:24:59,0
+36434,2,1,41,477,2017-11-07 16:49:24,0
+113676,9,1,19,489,2017-11-09 14:25:03,0
+85900,18,1,22,107,2017-11-07 07:40:03,0
+123872,8,2,49,259,2017-11-07 03:12:27,0
+165108,21,1,13,128,2017-11-07 04:58:25,0
+116317,12,1,13,178,2017-11-08 06:03:43,0
+161797,12,1,23,265,2017-11-09 12:23:14,0
+73916,12,1,9,242,2017-11-08 10:47:22,0
+108594,1,1,22,439,2017-11-08 02:26:33,0
+20215,2,1,37,401,2017-11-09 08:30:38,0
+5178,26,1,17,477,2017-11-09 11:41:19,0
+107809,1,1,53,317,2017-11-09 13:41:28,0
+123623,7,2,19,101,2017-11-07 10:13:11,0
+259838,3,1,41,371,2017-11-09 10:04:34,0
+469,13,1,22,400,2017-11-08 09:34:15,0
+53454,3,1,19,280,2017-11-07 11:48:19,0
+835,15,2,97,245,2017-11-07 12:15:15,0
+37515,1,1,10,134,2017-11-07 13:33:22,0
+293896,9,1,6,107,2017-11-09 12:16:21,0
+94104,6,1,18,459,2017-11-07 03:19:34,0
+90997,22,1,13,116,2017-11-08 11:51:40,0
+15983,22,1,23,496,2017-11-07 08:32:37,0
+1413,24,1,17,178,2017-11-08 16:31:28,0
+5348,2,1,13,236,2017-11-09 14:01:36,0
+84596,14,1,46,489,2017-11-09 04:23:48,0
+73978,12,1,23,259,2017-11-08 12:49:17,0
+5348,3,1,41,442,2017-11-09 14:35:54,0
+177000,18,1,1,107,2017-11-07 01:29:48,0
+4345,1,1,13,134,2017-11-09 12:08:26,0
+107049,3,1,20,205,2017-11-07 22:30:02,0
+69741,3,1,14,280,2017-11-07 07:51:49,0
+165017,3,1,13,205,2017-11-08 09:51:56,0
+221639,19,0,50,213,2017-11-09 05:59:51,0
+279448,9,1,19,232,2017-11-08 13:01:32,0
+95906,18,1,19,107,2017-11-08 04:50:24,0
+118412,9,1,26,466,2017-11-07 03:31:28,0
+161406,15,1,19,386,2017-11-09 04:14:56,0
+86835,2,1,15,236,2017-11-09 04:45:20,0
+12506,18,2,56,107,2017-11-07 04:53:59,0
+28168,23,1,16,153,2017-11-07 03:10:40,0
+147609,9,1,13,442,2017-11-07 09:17:24,0
+54841,8,1,19,145,2017-11-07 02:47:51,0
+11287,3,1,19,280,2017-11-08 11:26:29,0
+89596,9,1,13,134,2017-11-07 22:29:19,0
+15171,13,1,14,477,2017-11-09 04:43:36,0
+120471,3,1,13,280,2017-11-07 01:56:48,0
+185233,151,0,76,347,2017-11-08 20:27:58,0
+55675,6,1,35,125,2017-11-07 10:58:25,0
+114276,15,1,13,153,2017-11-08 11:10:31,0
+81973,2,1,19,477,2017-11-07 04:31:36,0
+49696,28,1,19,135,2017-11-09 12:13:51,0
+197234,15,1,22,130,2017-11-07 18:16:14,0
+36213,2,1,19,205,2017-11-07 13:52:45,0
+163784,12,1,13,265,2017-11-09 06:17:11,0
+50397,11,1,48,487,2017-11-07 12:06:23,0
+30016,2,1,19,477,2017-11-07 05:09:39,0
+246484,10,1,36,113,2017-11-08 14:16:29,1
+34137,3,1,9,280,2017-11-08 05:40:09,0
+70552,14,1,13,463,2017-11-08 08:04:04,0
+41446,26,1,10,121,2017-11-08 05:16:19,0
+100203,12,1,25,245,2017-11-08 01:31:53,0
+103411,3,1,27,211,2017-11-06 17:49:26,0
+23908,25,1,41,259,2017-11-08 01:18:08,0
+18757,15,1,19,430,2017-11-07 13:34:05,0
+27879,3,1,19,442,2017-11-09 04:06:45,0
+122259,14,1,13,349,2017-11-08 00:03:16,0
+179458,3,1,19,452,2017-11-07 09:34:18,0
+250794,3,1,37,466,2017-11-09 14:55:16,0
+273363,9,1,17,466,2017-11-08 11:31:18,0
+107968,25,1,19,259,2017-11-08 09:02:16,0
+123907,12,1,19,265,2017-11-07 10:00:09,0
+8286,18,1,17,439,2017-11-07 02:27:26,0
+29436,25,1,3,259,2017-11-07 13:34:28,0
+22374,28,1,13,135,2017-11-08 14:11:52,0
+118525,9,1,13,215,2017-11-09 04:01:20,0
+63883,15,1,13,278,2017-11-08 23:40:45,0
+100182,5,1,13,377,2017-11-07 22:40:29,0
+80193,8,1,12,145,2017-11-08 17:22:43,0
+46320,8,2,9,145,2017-11-09 11:07:02,0
+154183,15,1,1,278,2017-11-07 03:14:28,0
+106929,15,1,19,480,2017-11-07 22:53:34,0
+27959,3,1,19,280,2017-11-08 01:18:29,0
+103463,15,1,22,3,2017-11-07 16:33:48,0
+62072,12,1,19,497,2017-11-09 03:02:25,0
+139090,9,1,19,466,2017-11-08 23:14:37,0
+72262,21,1,19,232,2017-11-09 00:06:30,0
+24404,2,2,49,205,2017-11-09 02:00:05,0
+123788,12,2,9,140,2017-11-07 00:01:02,0
+106712,9,1,13,244,2017-11-08 11:33:13,0
+159797,12,1,13,178,2017-11-07 04:12:35,0
+69034,12,1,13,265,2017-11-06 22:43:05,0
+157658,12,1,13,140,2017-11-09 15:42:38,0
+276985,11,1,6,173,2017-11-08 15:33:22,0
+219169,12,1,17,245,2017-11-09 01:15:50,0
+26814,12,1,13,178,2017-11-07 13:35:03,0
+210912,24,1,13,105,2017-11-07 09:01:49,0
+191759,1,1,13,134,2017-11-07 11:55:28,0
+46899,12,1,13,178,2017-11-08 10:31:37,0
+119688,3,1,19,480,2017-11-08 22:55:56,0
+105475,13,2,13,400,2017-11-08 04:52:03,0
+7838,14,1,47,480,2017-11-07 17:32:22,0
+49821,21,1,14,128,2017-11-07 18:45:07,0
+53929,18,1,13,107,2017-11-08 15:00:38,0
+136705,3,2,19,480,2017-11-07 05:15:01,0
+122046,25,1,20,259,2017-11-08 06:23:05,0
+62916,3,1,13,280,2017-11-09 01:12:30,0
+105603,2,1,19,205,2017-11-08 09:04:18,0
+143171,3,1,25,424,2017-11-07 22:16:45,0
+97744,2,1,17,477,2017-11-09 06:18:34,0
+75975,2,1,8,469,2017-11-08 01:55:50,0
+142268,3,1,8,442,2017-11-07 14:46:56,0
+123871,23,1,13,153,2017-11-06 23:52:21,0
+90521,9,2,14,334,2017-11-07 09:09:41,0
+69375,3,1,19,19,2017-11-08 00:49:22,0
+26589,2,1,12,364,2017-11-07 02:30:29,0
+49177,27,1,13,153,2017-11-07 02:42:19,0
+55819,3,1,35,442,2017-11-09 11:12:01,0
+39140,12,1,19,178,2017-11-09 00:14:30,0
+99887,25,1,22,259,2017-11-09 12:35:13,0
+84809,12,1,22,245,2017-11-09 01:56:13,0
+86767,12,1,22,242,2017-11-09 13:29:28,0
+269695,13,1,31,477,2017-11-08 22:23:13,0
+180894,14,1,83,442,2017-11-07 04:42:43,0
+190676,18,1,19,107,2017-11-06 16:50:52,0
+104705,14,1,13,379,2017-11-09 12:48:35,0
+32412,13,1,40,477,2017-11-09 13:15:12,0
+110141,3,1,18,280,2017-11-07 09:01:55,0
+127414,3,1,19,280,2017-11-08 04:23:46,0
+48212,14,1,15,118,2017-11-09 07:42:41,0
+19124,15,1,20,245,2017-11-07 09:04:26,0
+145777,14,1,6,134,2017-11-06 18:58:10,0
+73673,3,1,3,480,2017-11-09 01:53:36,0
+76764,12,2,1,178,2017-11-09 14:10:52,0
+11287,21,1,19,128,2017-11-08 04:50:15,0
+16011,15,1,17,379,2017-11-07 18:00:21,0
+105485,2,1,19,205,2017-11-08 12:00:21,0
+28950,12,1,13,135,2017-11-06 19:55:00,0
+52710,2,1,30,219,2017-11-09 13:18:11,0
+71789,14,1,13,467,2017-11-09 06:42:26,0
+11347,68,3032,607,347,2017-11-07 14:18:43,0
+18927,14,1,19,439,2017-11-08 03:01:00,0
+38732,3,1,13,173,2017-11-07 08:55:50,0
+117867,3,1,9,409,2017-11-06 23:13:47,0
+35984,7,1,1,101,2017-11-09 09:21:50,0
+1780,18,1,25,121,2017-11-06 16:40:55,0
+59791,15,1,13,245,2017-11-08 23:28:49,0
+70787,9,1,17,466,2017-11-09 09:53:02,0
+15781,93,1,19,371,2017-11-08 02:38:30,0
+234242,6,1,27,459,2017-11-09 10:44:02,0
+132086,14,1,19,416,2017-11-07 01:18:06,0
+263654,18,1,19,439,2017-11-08 04:45:02,0
+103097,20,1,19,259,2017-11-08 23:58:10,0
+5348,1,1,18,153,2017-11-08 15:00:16,0
+222789,14,1,19,379,2017-11-08 02:43:26,0
+27114,12,1,10,174,2017-11-07 00:45:35,0
+125261,14,1,18,134,2017-11-09 00:31:22,0
+65937,12,1,19,328,2017-11-08 14:32:38,0
+1319,18,1,22,439,2017-11-07 11:54:57,0
+123871,18,1,17,121,2017-11-09 07:09:40,0
+218035,19,21,24,101,2017-11-07 23:32:19,0
+91734,2,1,13,469,2017-11-07 05:27:08,0
+117930,9,1,9,258,2017-11-08 06:11:16,0
+68963,11,1,19,122,2017-11-07 09:31:38,0
+102099,3,1,19,480,2017-11-07 10:51:25,0
+23700,3,1,13,424,2017-11-07 04:09:33,0
+44410,12,1,23,245,2017-11-08 11:30:24,0
+77085,9,1,13,445,2017-11-09 14:07:30,0
+115729,26,1,13,121,2017-11-08 12:38:01,0
+99769,12,1,19,424,2017-11-08 05:06:24,0
+269733,1,1,27,377,2017-11-09 04:40:35,0
+95820,15,1,19,153,2017-11-08 10:57:43,0
+267941,26,1,19,266,2017-11-08 01:47:43,0
+26593,2,1,32,219,2017-11-08 16:09:25,0
+52554,2,1,6,212,2017-11-07 12:28:40,0
+62413,2,1,22,219,2017-11-08 00:14:34,0
+200605,12,1,18,328,2017-11-07 16:25:14,0
+75177,26,1,41,121,2017-11-07 06:44:14,0
+98510,12,2,13,245,2017-11-07 15:34:48,0
+238196,9,1,19,466,2017-11-08 12:51:06,0
+21249,18,3032,607,107,2017-11-06 22:43:24,0
+116625,18,1,13,439,2017-11-08 10:09:56,0
+98403,3,1,19,442,2017-11-08 04:44:18,0
+26995,11,2,25,137,2017-11-08 15:14:18,0
+71071,9,2,19,466,2017-11-09 09:34:22,0
+50055,12,1,15,178,2017-11-07 11:59:36,0
+116425,2,2,47,364,2017-11-09 14:19:08,0
+209663,3,1,18,402,2017-11-07 09:31:04,0
+66933,2,1,19,236,2017-11-08 09:06:56,0
+75670,3,2,18,115,2017-11-07 12:56:47,0
+67895,9,1,19,466,2017-11-09 12:09:09,0
+77107,1,1,19,101,2017-11-08 02:57:21,0
+77216,2,1,20,477,2017-11-06 22:29:40,0
+56498,11,1,32,325,2017-11-08 04:55:28,0
+108697,9,1,13,442,2017-11-08 08:12:04,0
+180757,3,1,37,371,2017-11-08 01:39:16,0
+284170,27,1,19,153,2017-11-07 22:16:57,0
+119542,15,1,18,391,2017-11-07 01:22:19,0
+111976,12,1,19,245,2017-11-07 23:06:00,0
+1673,12,1,17,178,2017-11-08 09:47:53,0
+144508,2,1,19,237,2017-11-08 02:55:48,0
+30483,202,1,20,421,2017-11-06 16:58:14,0
+127081,14,1,19,463,2017-11-07 13:51:41,0
+42143,14,1,13,123,2017-11-07 05:03:49,0
+45595,2,1,19,477,2017-11-08 23:21:52,0
+85642,12,1,13,178,2017-11-09 13:16:42,0
+108857,14,1,17,489,2017-11-07 09:56:12,0
+100182,1,2,19,134,2017-11-09 00:25:03,0
+58669,3,1,13,280,2017-11-09 05:53:17,0
+77318,20,1,30,478,2017-11-07 13:43:29,0
+13034,2,1,30,258,2017-11-06 17:51:32,0
+172301,14,1,3,134,2017-11-08 05:55:29,0
+80084,12,1,20,245,2017-11-08 04:21:39,0
+76765,15,1,19,245,2017-11-09 15:08:21,0
+108602,2,1,27,452,2017-11-08 10:25:00,0
+45362,11,1,23,219,2017-11-09 05:19:51,0
+73487,23,1,19,153,2017-11-07 05:23:37,0
+14010,28,1,13,135,2017-11-09 09:51:47,0
+95255,24,1,6,105,2017-11-09 02:37:37,0
+294087,9,2,47,134,2017-11-09 11:13:44,0
+27001,2,1,19,237,2017-11-09 09:02:11,0
+23554,1,1,13,377,2017-11-07 01:10:44,0
+66973,2,1,13,237,2017-11-08 23:49:32,0
+93021,9,1,18,334,2017-11-09 04:08:25,0
+59882,2,1,8,212,2017-11-08 14:26:10,0
+114854,2,1,19,469,2017-11-08 12:11:45,0
+108386,15,1,44,386,2017-11-09 14:27:17,0
+63762,12,2,9,178,2017-11-07 10:35:26,0
+182843,26,1,10,266,2017-11-09 05:39:32,0
+95991,15,1,19,480,2017-11-09 08:20:58,0
+45745,12,1,19,219,2017-11-09 14:13:08,0
+121851,9,1,37,232,2017-11-08 01:46:30,0
+207838,12,1,39,259,2017-11-09 10:44:25,0
+50924,9,1,13,466,2017-11-09 09:38:05,0
+116651,12,1,19,135,2017-11-08 15:14:17,0
+97463,7,1,22,101,2017-11-07 23:20:09,0
+102025,12,1,6,340,2017-11-09 15:51:11,0
+100088,22,1,19,116,2017-11-09 04:42:38,0
+66215,12,1,19,219,2017-11-09 15:07:43,0
+137762,2,1,19,236,2017-11-07 01:26:14,0
+42299,18,1,19,439,2017-11-09 03:25:42,0
+152714,13,1,15,477,2017-11-07 15:59:31,0
+267144,3,1,19,205,2017-11-08 00:58:42,0
+118284,13,1,17,477,2017-11-07 23:00:01,0
+66845,27,1,6,153,2017-11-09 12:16:21,0
+48671,12,1,6,245,2017-11-07 15:21:43,0
+8352,15,1,19,245,2017-11-09 00:47:48,0
+128368,23,1,19,153,2017-11-07 09:23:20,0
+10200,3,1,6,130,2017-11-07 14:24:59,0
+73933,3,1,13,280,2017-11-08 07:02:16,0
+121442,2,1,10,122,2017-11-07 00:46:42,0
+85506,14,1,20,442,2017-11-08 05:43:30,0
+89192,9,1,22,442,2017-11-08 17:25:08,0
+84587,2,1,13,212,2017-11-09 03:36:08,0
+294731,18,1,19,134,2017-11-08 23:12:31,0
+95766,15,1,41,245,2017-11-06 22:52:34,0
+112005,15,1,13,153,2017-11-08 09:45:38,0
+148952,15,1,9,265,2017-11-08 00:40:11,0
+8224,14,1,17,379,2017-11-08 10:27:25,0
+7146,6,1,27,125,2017-11-07 00:53:48,0
+107887,12,1,19,265,2017-11-09 02:25:57,0
+29569,2,1,6,219,2017-11-08 23:37:41,0
+23496,9,1,10,244,2017-11-08 03:52:08,0
+77906,18,3543,748,107,2017-11-07 17:44:19,0
+58485,3,1,8,424,2017-11-08 02:07:08,0
+107748,25,1,30,259,2017-11-09 01:14:07,0
+81837,18,1,15,134,2017-11-08 09:29:07,0
+78622,3,1,8,137,2017-11-07 14:42:48,0
+40494,12,1,13,259,2017-11-08 07:28:53,0
+98995,23,1,13,153,2017-11-07 02:28:26,0
+72773,14,1,17,379,2017-11-06 23:46:34,0
+176455,3,1,18,280,2017-11-07 11:37:31,0
+92183,14,1,19,489,2017-11-07 14:27:26,0
+27627,21,1,19,128,2017-11-09 04:01:25,0
+20411,9,1,22,234,2017-11-09 01:53:45,0
+53025,12,1,17,259,2017-11-07 10:50:38,0
+41963,2,1,19,122,2017-11-07 12:12:46,0
+118252,3,1,19,280,2017-11-08 03:00:06,0
+17149,50,0,24,213,2017-11-09 00:59:54,1
+17149,15,1,18,265,2017-11-09 00:54:02,0
+53664,3,1,13,280,2017-11-07 09:10:30,0
+25299,3,1,3,19,2017-11-09 06:06:08,0
+32391,7,1,19,101,2017-11-09 07:27:37,0
+42479,9,1,70,232,2017-11-06 23:49:24,0
+24896,2,1,27,237,2017-11-09 04:48:46,0
+92471,12,1,19,178,2017-11-07 12:24:49,0
+84774,3,1,8,280,2017-11-08 13:28:46,0
+205797,15,1,13,245,2017-11-07 02:50:52,0
+70641,3,1,11,135,2017-11-08 23:33:56,0
+110386,3,1,19,371,2017-11-09 07:54:53,0
+60603,8,1,18,140,2017-11-07 03:42:47,0
+124763,1,1,13,153,2017-11-08 14:36:28,0
+21097,11,1,13,137,2017-11-08 22:12:45,0
+114526,3,1,19,280,2017-11-08 01:16:37,0
+35180,14,1,11,379,2017-11-09 07:19:05,0
+1235,6,1,15,459,2017-11-09 12:33:43,0
+42139,3,2,13,211,2017-11-08 05:58:09,0
+114276,3,2,13,280,2017-11-07 03:17:55,0
+11593,2,1,16,477,2017-11-09 15:30:47,0
+46774,3,1,19,130,2017-11-07 02:06:06,0
+82041,3,1,37,280,2017-11-08 15:30:55,0
+67751,3,1,30,280,2017-11-07 04:38:28,0
+55032,14,1,11,401,2017-11-07 03:07:46,0
+174352,3,1,25,115,2017-11-08 14:32:45,0
+180823,12,1,16,245,2017-11-09 00:50:41,0
+20174,5,1,18,317,2017-11-09 13:18:57,0
+62001,22,1,14,116,2017-11-08 14:10:30,0
+15365,21,2,9,128,2017-11-07 21:41:28,0
+66478,2,1,8,219,2017-11-08 02:38:21,0
+60151,14,2,19,480,2017-11-09 01:01:14,0
+119349,9,1,15,442,2017-11-08 08:02:19,0
+40372,13,1,12,400,2017-11-09 08:56:36,0
+121767,9,1,13,466,2017-11-07 23:29:04,0
+55575,2,1,15,469,2017-11-07 06:37:22,0
+120163,1,1,17,439,2017-11-09 15:22:06,0
+53964,13,1,22,449,2017-11-09 14:03:56,0
+1881,9,1,13,215,2017-11-07 14:54:04,0
+41285,11,1,27,173,2017-11-07 12:56:11,0
+7813,12,1,14,259,2017-11-09 15:43:41,0
+163632,13,1,19,477,2017-11-07 02:56:49,0
+70815,3,1,3,280,2017-11-09 01:40:23,0
+166352,12,1,17,481,2017-11-09 11:12:43,0
+80476,14,1,34,379,2017-11-07 11:57:11,0
+33306,20,1,9,259,2017-11-07 03:15:24,0
+95766,3,1,18,280,2017-11-07 04:54:37,0
+89581,26,1,37,121,2017-11-08 10:06:52,0
+187941,3,1,25,115,2017-11-07 02:56:05,0
+261776,12,1,19,245,2017-11-07 16:38:47,0
+111153,29,1,13,343,2017-11-07 23:52:07,0
+75885,13,1,13,400,2017-11-09 14:24:09,0
+89062,3,1,20,379,2017-11-07 15:35:12,0
+100212,23,1,13,153,2017-11-06 17:13:37,0
+121879,14,1,13,379,2017-11-09 07:25:59,0
+101358,9,1,13,466,2017-11-08 22:52:54,0
+91232,15,1,19,315,2017-11-07 15:14:53,0
+1659,2,1,13,212,2017-11-06 18:39:21,0
+93587,15,1,22,245,2017-11-06 17:37:28,0
+45060,12,1,18,259,2017-11-06 16:16:18,0
+43842,18,1,18,107,2017-11-09 10:26:17,0
+111182,3,1,19,280,2017-11-09 04:39:00,0
+124478,15,1,10,245,2017-11-06 16:01:35,0
+119350,9,1,85,234,2017-11-08 15:17:11,0
+35707,10,1,19,317,2017-11-07 10:45:43,0
+89592,9,1,20,489,2017-11-08 10:58:36,0
+347608,3,1,19,280,2017-11-09 12:25:36,0
+77940,11,1,43,325,2017-11-07 07:43:25,0
+68022,12,1,18,424,2017-11-09 12:27:26,0
+4437,2,1,6,237,2017-11-09 10:47:10,0
+109776,13,1,19,400,2017-11-09 07:22:59,0
+51665,2,1,30,452,2017-11-07 06:17:23,0
+3196,3,1,25,280,2017-11-08 01:04:04,0
+89557,3,1,17,280,2017-11-09 00:43:08,0
+44456,35,1,17,21,2017-11-09 10:22:50,0
+114220,25,1,13,259,2017-11-08 01:04:53,0
+73487,12,1,16,326,2017-11-08 15:23:33,0
+149061,2,1,19,237,2017-11-09 02:51:02,0
+27705,15,1,19,245,2017-11-07 15:04:49,0
+79665,12,1,19,340,2017-11-08 07:57:11,0
+54986,12,1,19,265,2017-11-08 01:22:58,0
+120673,12,1,19,178,2017-11-08 09:04:15,0
+15769,18,1,19,121,2017-11-07 10:05:28,0
+56344,18,1,19,376,2017-11-08 15:22:40,0
+38066,18,1,13,134,2017-11-08 01:00:52,0
+30710,3,1,18,135,2017-11-07 16:15:46,0
+48240,3,1,19,379,2017-11-07 03:08:16,0
+5147,6,1,13,459,2017-11-09 05:17:55,0
+35588,23,1,13,153,2017-11-08 06:39:03,0
+91536,3,1,19,402,2017-11-09 14:37:36,0
+8569,2,1,19,469,2017-11-07 14:40:10,0
+59125,2,1,13,205,2017-11-08 03:39:46,0
+34551,15,1,19,265,2017-11-08 12:55:25,0
+48158,15,1,20,130,2017-11-09 14:50:43,0
+13177,15,1,13,245,2017-11-08 22:56:08,0
+121656,19,0,21,213,2017-11-07 14:43:11,0
+43827,11,1,10,487,2017-11-09 06:37:15,0
+107276,18,1,13,121,2017-11-07 23:38:13,0
+168365,12,1,15,124,2017-11-07 01:14:37,0
+179984,18,1,1,107,2017-11-07 00:47:20,0
+51609,11,1,17,330,2017-11-08 02:53:04,0
+113862,9,1,25,466,2017-11-08 10:52:35,0
+230325,12,1,13,178,2017-11-09 11:20:18,0
+22957,9,1,19,334,2017-11-07 01:33:18,0
+20905,12,1,14,205,2017-11-09 14:41:00,0
+26208,3,1,17,19,2017-11-08 14:26:21,0
+202750,25,1,41,259,2017-11-08 13:24:52,0
+67708,18,1,35,121,2017-11-09 14:59:49,0
+5314,1,1,19,134,2017-11-07 22:55:29,0
+49939,1,1,13,134,2017-11-08 14:26:17,0
+81698,9,1,41,107,2017-11-09 06:12:57,0
+12457,3,1,6,182,2017-11-08 02:27:47,0
+220589,3,2,19,137,2017-11-08 02:52:16,0
+155000,18,1,37,107,2017-11-09 00:59:43,0
+93198,15,1,13,430,2017-11-07 23:50:54,0
+181871,12,1,13,178,2017-11-08 16:51:22,0
+339238,7,1,20,101,2017-11-09 14:33:12,0
+141774,12,1,15,328,2017-11-07 09:35:38,0
+71514,2,1,6,237,2017-11-09 01:36:49,0
+85188,12,1,18,259,2017-11-09 13:14:05,0
+119289,28,1,19,135,2017-11-07 21:13:30,0
+80000,15,1,19,315,2017-11-07 05:27:58,0
+74698,18,3032,607,107,2017-11-07 03:03:44,0
+271118,18,1,19,134,2017-11-08 06:16:21,1
+67734,3,1,13,153,2017-11-08 15:11:42,0
+131859,14,1,18,379,2017-11-08 03:27:40,0
+98317,3,1,19,489,2017-11-08 13:10:20,0
+102082,18,1,13,107,2017-11-07 04:16:13,0
+103251,18,1,23,107,2017-11-07 23:04:44,0
+176539,12,1,6,265,2017-11-09 15:02:47,0
+59426,12,1,18,19,2017-11-07 11:31:15,0
+99427,3,1,19,137,2017-11-09 06:32:13,0
+118367,3,1,1,280,2017-11-07 14:43:36,0
+79187,1,1,19,178,2017-11-06 23:51:44,0
+55898,64,1,48,459,2017-11-08 13:23:32,0
+60453,2,1,13,237,2017-11-08 05:55:29,0
+75007,9,1,1,334,2017-11-07 11:50:05,0
+52628,9,1,18,466,2017-11-08 09:04:15,0
+263183,12,1,9,245,2017-11-08 01:08:51,0
+106541,3,1,13,280,2017-11-08 11:16:32,0
+59391,9,1,22,215,2017-11-09 12:36:19,0
+39480,17,1,20,134,2017-11-09 08:00:08,0
+51818,3,1,41,280,2017-11-09 05:24:41,0
+108783,15,1,20,480,2017-11-08 12:34:46,0
+177178,28,1,13,135,2017-11-08 15:58:04,0
+152977,8,1,37,140,2017-11-09 10:29:30,0
+112106,64,1,19,459,2017-11-07 13:10:43,0
+150844,9,1,13,234,2017-11-09 00:44:42,0
+130810,12,1,10,178,2017-11-09 09:23:04,0
+150625,11,1,13,319,2017-11-07 16:30:22,0
+191759,15,1,18,265,2017-11-09 04:35:23,0
+78910,19,0,24,213,2017-11-08 13:41:07,0
+81807,3,1,6,480,2017-11-06 22:27:50,0
+181243,12,1,18,205,2017-11-08 02:41:33,0
+37515,1,1,13,125,2017-11-07 11:25:56,0
+133598,2,1,22,237,2017-11-07 05:36:28,0
+105388,9,1,20,134,2017-11-07 23:20:58,0
+86402,12,1,1,259,2017-11-08 12:13:11,0
+5348,3,1,28,442,2017-11-06 23:59:36,0
+68491,2,1,16,212,2017-11-09 03:29:16,0
+196973,26,1,41,121,2017-11-07 06:19:25,0
+72543,15,1,13,245,2017-11-07 03:22:06,0
+15829,21,1,13,128,2017-11-08 16:02:16,0
+83525,37,1,19,21,2017-11-07 12:56:27,0
+108527,170,3032,607,347,2017-11-07 15:24:26,0
+74816,12,1,37,245,2017-11-07 05:15:01,0
+4118,21,1,9,128,2017-11-07 13:50:45,0
+95063,1,1,20,153,2017-11-08 14:27:37,0
+108341,12,1,18,245,2017-11-08 05:35:47,0
+298118,8,1,10,145,2017-11-09 04:20:16,0
+37776,21,2,13,128,2017-11-09 11:35:50,0
+43698,18,1,13,439,2017-11-07 16:31:20,0
+109643,3,1,19,280,2017-11-08 00:58:42,0
+124936,12,1,13,205,2017-11-08 10:11:02,0
+237358,18,1,19,449,2017-11-08 02:25:05,0
+28852,2,1,22,477,2017-11-08 11:07:14,0
+103023,9,1,19,334,2017-11-08 12:49:48,0
+193602,8,1,18,145,2017-11-06 16:22:54,0
+37467,14,1,15,480,2017-11-08 07:42:21,0
+93924,15,1,22,111,2017-11-07 00:59:01,0
+100212,2,1,19,435,2017-11-07 15:45:03,0
+95585,9,1,13,215,2017-11-08 15:25:02,0
+206925,3,1,16,173,2017-11-06 21:03:00,0
+25792,24,1,8,105,2017-11-08 21:43:50,0
+5314,21,1,18,128,2017-11-07 13:55:10,0
+108730,2,1,25,377,2017-11-08 12:09:53,0
+1763,3,1,19,205,2017-11-07 13:25:00,0
+64330,18,1,3,107,2017-11-08 10:43:12,0
+44595,15,1,6,3,2017-11-09 13:23:53,0
+73516,8,1,13,145,2017-11-06 16:29:09,0
+12479,2,1,18,205,2017-11-08 14:07:25,0
+26814,18,1,30,134,2017-11-09 13:06:42,0
+98944,12,1,13,178,2017-11-09 08:20:02,0
+136695,2,1,8,236,2017-11-07 02:21:21,0
+99728,9,1,19,232,2017-11-09 12:58:17,0
+51131,25,1,19,259,2017-11-07 05:05:21,0
+99895,12,1,19,259,2017-11-08 10:28:21,0
+96773,15,1,13,245,2017-11-07 07:12:52,0
+72539,12,1,19,265,2017-11-08 01:33:32,0
+88281,2,1,10,477,2017-11-08 04:35:00,0
+83509,3,1,66,442,2017-11-07 22:33:34,0
+75634,2,1,13,219,2017-11-08 11:14:41,0
+172738,3,1,18,211,2017-11-09 12:42:05,0
+92766,9,2,9,234,2017-11-08 19:33:10,0
+88,1,1,22,115,2017-11-08 23:27:25,0
+57519,9,1,30,107,2017-11-09 14:27:55,0
+72000,13,1,19,477,2017-11-09 08:08:46,0
+6750,14,1,18,379,2017-11-09 11:49:49,0
+175643,9,1,19,334,2017-11-09 02:42:06,0
+102511,2,1,18,237,2017-11-09 13:26:27,0
+4052,24,2,36,105,2017-11-07 09:58:35,0
+73839,9,1,9,334,2017-11-09 00:40:34,0
+158889,15,1,13,140,2017-11-09 09:30:47,0
+118190,18,1,10,107,2017-11-07 04:43:08,0
+74918,3,1,19,480,2017-11-08 15:20:05,0
+177121,3,1,22,489,2017-11-07 04:52:09,0
+161299,12,2,10,326,2017-11-07 11:57:35,0
+63624,12,1,16,245,2017-11-07 16:50:37,0
+41142,9,2,13,445,2017-11-09 06:00:16,0
+114409,3,1,13,489,2017-11-07 00:06:48,0
+24008,23,1,20,153,2017-11-06 16:36:01,0
+143423,21,1,19,128,2017-11-07 06:49:13,0
+105024,14,1,41,480,2017-11-07 04:27:20,0
+127896,2,2,41,122,2017-11-07 12:27:00,0
+2407,12,1,4,497,2017-11-08 13:49:00,0
+75463,12,1,19,178,2017-11-09 09:46:55,0
+39480,2,1,19,237,2017-11-07 18:03:11,0
+21069,2,1,3,236,2017-11-08 02:36:29,0
+42646,11,1,19,319,2017-11-09 06:07:02,0
+93587,13,1,9,477,2017-11-09 14:22:47,0
+91885,15,1,5,386,2017-11-06 20:41:49,0
+37763,18,1,6,107,2017-11-09 13:32:05,0
+287997,21,1,13,128,2017-11-08 20:09:07,0
+32575,3,1,8,137,2017-11-08 12:32:09,0
+114802,18,1,13,439,2017-11-07 22:25:20,0
+114620,24,1,17,105,2017-11-07 17:22:01,0
+18580,9,1,17,215,2017-11-07 07:00:30,0
+81416,18,1,13,107,2017-11-09 01:35:55,0
+109001,18,1,17,107,2017-11-07 11:59:54,0
+100176,15,1,19,245,2017-11-07 05:36:22,0
+156330,13,1,16,477,2017-11-07 01:18:24,0
+77799,3,1,19,205,2017-11-08 04:45:21,0
+140948,9,1,8,442,2017-11-07 04:27:05,0
+129121,9,1,19,442,2017-11-07 15:19:38,0
+229807,1,1,25,115,2017-11-08 02:57:43,0
+106279,12,1,22,245,2017-11-08 22:52:44,0
+98476,23,1,13,153,2017-11-08 15:26:29,0
+267945,14,1,8,489,2017-11-08 15:29:13,0
+68929,3,1,19,280,2017-11-08 11:03:30,0
+2728,26,1,13,266,2017-11-07 01:04:21,0
+23505,32,2,65,376,2017-11-09 08:19:50,0
+56516,10,1,32,377,2017-11-09 07:00:44,0
+82011,6,1,20,459,2017-11-07 06:43:27,0
+96987,3,1,13,424,2017-11-09 09:53:06,0
+88951,2,1,19,236,2017-11-09 00:14:11,0
+19140,15,1,19,386,2017-11-07 13:56:16,0
+183108,2,1,25,364,2017-11-08 18:58:58,0
+75830,12,1,16,265,2017-11-08 09:10:31,0
+112455,14,1,32,401,2017-11-08 03:39:19,0
+270863,15,1,8,315,2017-11-08 11:45:14,0
+12787,1,1,18,134,2017-11-09 03:12:43,0
+114276,21,2,13,128,2017-11-07 12:52:11,0
+117167,15,1,18,480,2017-11-07 15:36:42,0
+3363,9,1,19,232,2017-11-07 00:33:10,0
+2696,3,1,17,280,2017-11-08 02:28:54,0
+164157,12,1,15,212,2017-11-08 12:57:20,0
+52225,14,1,19,379,2017-11-09 03:01:23,0
+147482,15,1,27,265,2017-11-07 07:26:05,0
+16589,3,5,45,404,2017-11-09 14:44:45,0
+80416,21,1,13,232,2017-11-09 09:47:49,0
+81287,3,1,19,211,2017-11-08 16:08:52,0
+60925,2,1,20,477,2017-11-09 03:38:20,0
+108881,15,1,17,245,2017-11-08 16:33:52,0
+191902,3,1,12,280,2017-11-07 02:52:36,0
+116607,18,1,13,134,2017-11-07 04:06:21,0
+158804,13,1,19,477,2017-11-07 09:10:41,0
+273904,18,1,35,107,2017-11-08 15:28:00,0
+34714,18,1,17,439,2017-11-08 02:54:10,0
+34432,1,1,27,135,2017-11-07 15:06:08,0
+64531,18,1,40,134,2017-11-09 01:34:44,0
+37142,12,1,20,140,2017-11-09 10:00:27,0
+114054,12,1,27,409,2017-11-08 03:35:57,0
+125027,18,1,4,134,2017-11-07 11:16:20,0
+25131,14,1,19,463,2017-11-07 02:35:56,0
+39175,14,1,16,379,2017-11-07 10:18:36,0
+79493,18,1,13,107,2017-11-06 18:25:43,0
+25766,1,1,13,125,2017-11-07 17:50:22,0
+32441,18,1,17,107,2017-11-07 10:40:34,0
+130769,3,1,37,280,2017-11-08 01:24:06,0
+1091,7,1,37,101,2017-11-09 03:36:18,0
+6313,9,1,35,107,2017-11-09 14:59:43,0
+14381,3,1,22,442,2017-11-09 15:04:35,0
+276708,26,1,20,121,2017-11-09 08:12:01,0
+38247,27,1,41,153,2017-11-07 04:04:22,0
+81587,28,1,10,135,2017-11-07 13:52:11,0
+42149,12,1,19,259,2017-11-09 15:12:27,0
+217915,2,1,19,435,2017-11-07 22:42:15,0
+16999,1,1,19,135,2017-11-09 02:44:28,0
+124462,12,1,13,140,2017-11-07 06:00:55,0
+138815,3,1,37,442,2017-11-07 23:11:14,0
+17292,28,1,19,135,2017-11-07 07:35:18,0
+98781,7,1,19,101,2017-11-09 03:31:46,0
+67606,18,1,3,121,2017-11-07 00:00:32,0
+105239,12,1,44,178,2017-11-07 02:39:18,0
+42167,3,1,10,280,2017-11-08 01:55:47,0
+66015,15,1,3,412,2017-11-07 13:35:00,0
+73823,3,1,22,173,2017-11-09 00:22:23,0
+102206,1,1,22,134,2017-11-06 23:23:11,0
+67745,3,1,17,280,2017-11-08 08:12:23,0
+37183,2,1,19,237,2017-11-08 04:43:26,0
+55184,3,1,18,424,2017-11-08 11:27:04,0
+156715,3,1,13,442,2017-11-09 03:37:04,0
+108131,9,1,19,134,2017-11-08 00:57:01,0
+255,13,1,13,469,2017-11-09 14:01:54,0
+83730,18,1,3,107,2017-11-09 08:40:16,0
+92092,15,1,22,245,2017-11-08 03:49:52,0
+21894,26,1,17,121,2017-11-08 07:09:08,0
+137052,64,1,9,459,2017-11-07 09:58:35,0
+23260,12,1,19,124,2017-11-07 14:34:52,0
+137052,3,1,17,280,2017-11-09 00:15:24,0
+105475,3,2,37,137,2017-11-07 16:59:18,0
+5387,12,1,19,259,2017-11-07 06:18:10,0
+14961,9,1,13,445,2017-11-08 14:28:17,0
+95766,15,1,13,245,2017-11-08 19:21:12,0
+100543,7,1,13,101,2017-11-09 08:15:24,0
+25614,2,1,13,236,2017-11-08 23:14:03,0
+114276,3,1,22,452,2017-11-08 22:27:16,0
+78446,3,1,15,280,2017-11-07 01:53:37,0
+145679,8,1,13,145,2017-11-07 01:47:17,0
+200021,3,1,19,173,2017-11-07 08:36:26,0
+124170,9,1,58,466,2017-11-08 03:25:33,0
+95908,15,1,13,245,2017-11-07 02:58:07,0
+38376,12,1,15,178,2017-11-09 11:09:24,0
+26995,15,2,7,315,2017-11-08 03:49:54,0
+80571,12,1,19,178,2017-11-07 02:45:08,0
+44885,14,1,10,379,2017-11-07 04:57:32,0
+32457,13,1,22,477,2017-11-09 02:02:40,0
+4052,15,2,19,245,2017-11-08 04:04:02,0
+80228,32,1,10,376,2017-11-08 03:14:49,0
+38773,8,1,37,145,2017-11-07 10:47:06,0
+50058,1,1,13,118,2017-11-07 00:13:23,0
+161482,3,1,17,480,2017-11-09 00:11:06,0
+67779,3,1,11,409,2017-11-07 15:51:35,0
+35762,9,1,13,489,2017-11-09 07:59:56,0
+34450,26,1,25,121,2017-11-09 05:45:42,0
+8718,93,1,19,371,2017-11-09 07:33:23,0
+17816,18,1,3,107,2017-11-07 00:45:50,0
+12129,28,1,13,135,2017-11-07 13:17:51,0
+35047,15,1,25,412,2017-11-09 06:19:20,0
+183090,12,1,19,328,2017-11-07 02:49:56,0
+48285,3,1,13,280,2017-11-08 05:15:44,0
+114678,15,1,19,245,2017-11-06 16:42:14,0
+6750,3,1,13,280,2017-11-08 08:45:08,0
+42931,12,1,49,340,2017-11-09 00:03:03,0
+22702,18,1,13,439,2017-11-09 13:40:02,0
+51992,12,1,19,259,2017-11-08 23:09:24,0
+111086,23,1,32,153,2017-11-08 00:04:44,0
+116355,12,1,19,259,2017-11-07 22:19:48,0
+191021,12,2,19,178,2017-11-08 14:41:07,0
+214078,3,1,19,280,2017-11-08 03:41:36,0
+63028,18,1,19,376,2017-11-08 00:36:43,0
+162550,18,1,20,121,2017-11-08 02:33:30,0
+40022,64,1,19,459,2017-11-07 04:55:39,0
+32372,13,1,18,469,2017-11-09 09:02:52,0
+25158,13,1,19,469,2017-11-08 00:28:00,0
+77306,15,1,17,130,2017-11-07 07:54:55,0
+4942,2,1,19,219,2017-11-08 13:34:03,0
+3918,12,1,19,140,2017-11-07 10:09:41,0
+57479,15,1,13,265,2017-11-09 08:37:54,0
+333337,15,1,8,245,2017-11-08 22:16:15,0
+69056,15,1,25,111,2017-11-08 11:37:42,0
+191265,21,1,17,128,2017-11-07 04:41:59,0
+54868,2,1,19,212,2017-11-06 19:08:21,0
+252426,2,1,11,258,2017-11-07 23:18:07,0
+95145,9,1,13,442,2017-11-07 17:03:00,0
+65631,14,1,19,467,2017-11-09 01:19:31,0
+53570,37,1,15,21,2017-11-07 16:36:18,0
+121136,9,1,19,466,2017-11-09 00:19:13,0
+310375,3,1,13,205,2017-11-08 23:32:54,0
+53009,9,1,22,489,2017-11-09 04:28:25,0
+3994,9,2,86,258,2017-11-07 03:45:52,0
+12340,3,1,41,442,2017-11-08 09:20:09,0
+124726,3,1,13,280,2017-11-07 01:38:25,0
+117845,3,1,20,280,2017-11-08 15:06:08,0
+88118,3,1,15,115,2017-11-08 23:56:51,0
+223876,21,1,6,128,2017-11-08 01:42:17,0
+141640,14,1,26,134,2017-11-08 11:46:56,0
+25251,12,1,17,178,2017-11-07 12:26:48,0
+146548,2,1,13,435,2017-11-08 23:14:23,0
+87877,13,1,37,477,2017-11-07 03:23:16,0
+167881,23,1,6,153,2017-11-07 15:09:13,0
+42589,1,1,19,137,2017-11-08 21:29:08,0
+77809,9,1,13,258,2017-11-08 13:21:56,0
+44229,12,1,10,259,2017-11-07 13:21:25,0
+47162,9,1,12,489,2017-11-08 05:32:55,0
+91104,18,1,19,439,2017-11-09 01:43:25,0
+120385,9,1,9,215,2017-11-08 03:18:37,0
+113836,18,1,40,134,2017-11-07 13:21:34,0
+86869,2,1,17,377,2017-11-09 03:53:40,0
+109183,21,1,19,128,2017-11-07 01:17:56,0
+37301,9,1,27,258,2017-11-07 10:24:35,0
+37409,6,1,19,125,2017-11-08 05:23:54,0
+133331,12,1,37,140,2017-11-09 14:40:24,0
+64516,18,1,13,439,2017-11-07 13:37:05,0
+68735,3,1,19,280,2017-11-09 06:17:38,0
+106034,7,1,37,101,2017-11-09 06:49:44,0
+1801,9,1,19,445,2017-11-07 05:54:50,0
+4019,6,1,1,459,2017-11-07 08:30:29,0
+144920,12,1,10,178,2017-11-07 01:32:01,0
+68258,1,1,13,134,2017-11-07 23:37:00,0
+114276,1,1,9,153,2017-11-07 10:24:19,0
+32523,3,1,17,280,2017-11-08 15:11:13,0
+179458,14,1,17,442,2017-11-08 14:43:14,0
+119262,1,1,17,452,2017-11-07 05:20:56,0
+92673,12,1,19,259,2017-11-06 22:59:56,0
+195892,3,1,17,409,2017-11-07 00:03:18,0
+61900,9,1,19,466,2017-11-09 14:44:59,0
+26481,15,1,10,245,2017-11-06 21:16:25,0
+114468,9,1,11,489,2017-11-08 00:01:45,0
+34087,21,1,18,128,2017-11-09 07:44:44,0
+152307,28,1,13,317,2017-11-08 02:18:24,0
+101001,26,1,19,121,2017-11-09 07:01:52,0
+28183,12,1,19,265,2017-11-06 17:42:54,0
+149177,2,1,47,236,2017-11-06 22:57:02,0
+81896,12,1,23,259,2017-11-08 15:51:18,0
+85625,4,1,18,101,2017-11-09 13:28:33,0
+145896,27,1,19,153,2017-11-08 12:37:11,0
+209663,3,1,19,489,2017-11-08 12:58:12,0
+199067,9,1,17,442,2017-11-09 15:35:30,0
+168564,3,1,15,137,2017-11-08 14:21:32,0
+126200,15,1,13,430,2017-11-09 15:32:07,0
+41463,3,1,37,442,2017-11-09 04:49:07,0
+114220,21,1,13,232,2017-11-09 14:17:47,0
+17149,3,1,22,280,2017-11-09 05:18:22,0
+11907,12,1,1,265,2017-11-09 06:31:48,0
+40631,3,1,17,480,2017-11-09 00:13:16,0
+56400,3,1,13,424,2017-11-08 10:56:32,0
+137509,14,1,15,480,2017-11-09 03:39:27,0
+11232,3,1,19,280,2017-11-08 01:04:40,0
+146208,2,1,9,237,2017-11-08 10:11:34,0
+12340,26,1,6,477,2017-11-09 04:09:08,0
+7134,28,1,30,135,2017-11-07 11:47:19,0
+193045,3,1,13,115,2017-11-09 14:27:56,0
+11039,7,1,18,101,2017-11-09 03:31:06,0
+55430,7,1,10,101,2017-11-07 11:38:36,0
+69331,3,1,4,409,2017-11-07 04:08:30,0
+30404,18,1,19,439,2017-11-08 16:33:18,0
+29533,2,1,17,237,2017-11-09 05:02:08,0
+82846,9,1,6,489,2017-11-09 12:53:49,0
+112198,9,1,19,127,2017-11-09 12:29:03,0
+6037,3,1,37,424,2017-11-07 09:43:43,0
+68014,3,1,17,115,2017-11-09 02:04:26,0
+150099,3,1,13,280,2017-11-07 03:18:26,0
+77763,9,1,10,134,2017-11-07 16:27:20,0
+164701,5,1,47,377,2017-11-07 13:00:08,0
+66240,19,0,24,213,2017-11-09 08:33:04,0
+122110,2,1,3,237,2017-11-07 10:20:29,0
+83535,2,1,13,435,2017-11-07 03:13:09,0
+85107,20,1,19,259,2017-11-08 22:32:39,0
+25761,8,1,9,145,2017-11-09 06:14:31,0
+28222,26,1,20,121,2017-11-07 11:00:33,0
+14034,15,1,19,140,2017-11-09 07:06:40,0
+40056,94,1,22,361,2017-11-09 13:22:54,0
+31652,23,1,19,153,2017-11-08 03:02:38,0
+103079,18,1,27,107,2017-11-09 10:24:12,0
+100356,2,1,47,469,2017-11-08 15:50:24,0
+232067,21,1,19,232,2017-11-09 10:39:59,0
+42651,23,1,18,153,2017-11-07 23:11:16,0
+54868,3,1,16,130,2017-11-07 09:15:30,0
+112078,9,1,18,442,2017-11-08 03:27:02,0
+166433,3,1,19,130,2017-11-07 13:58:14,0
+49360,9,2,2,442,2017-11-08 12:13:27,0
+56412,2,1,13,237,2017-11-09 03:30:12,0
+50136,3,1,22,280,2017-11-07 08:07:55,0
+100929,18,1,47,121,2017-11-07 02:43:35,0
+68890,9,1,31,466,2017-11-08 12:48:46,0
+18812,9,1,6,442,2017-11-07 10:16:07,0
+39049,15,1,13,245,2017-11-07 18:25:33,0
+74515,3,1,19,489,2017-11-07 12:36:57,0
+15926,15,1,4,265,2017-11-07 04:45:10,0
+47456,18,1,6,439,2017-11-07 11:25:14,0
+108204,3,1,19,280,2017-11-08 03:01:54,0
+81456,7,1,8,101,2017-11-09 14:54:43,0
+81997,3,1,13,442,2017-11-08 10:06:44,0
+27036,14,1,19,489,2017-11-08 18:16:49,0
+8580,3,1,13,379,2017-11-08 07:36:29,0
+101981,22,1,13,116,2017-11-09 01:39:52,0
+86767,12,1,37,105,2017-11-07 07:54:46,0
+154432,3,1,19,280,2017-11-09 06:02:41,0
+131779,14,1,53,442,2017-11-08 04:53:59,0
+161807,36,1,13,373,2017-11-07 01:16:46,0
+279854,3,1,25,280,2017-11-09 06:04:09,0
+26409,18,1,19,439,2017-11-09 10:38:47,0
+135983,24,1,17,105,2017-11-07 03:20:15,0
+2210,15,1,13,265,2017-11-07 02:39:31,0
+26531,2,1,22,469,2017-11-07 05:11:00,0
+58472,9,1,13,258,2017-11-07 02:00:21,0
+73516,27,1,9,153,2017-11-07 23:45:33,0
+88977,14,1,22,463,2017-11-07 04:15:26,0
+53058,3,1,19,137,2017-11-08 21:48:51,0
+75595,10,1,13,317,2017-11-09 11:37:31,0
+115039,12,1,19,328,2017-11-07 01:12:34,0
+65937,8,1,13,145,2017-11-09 06:44:03,0
+114276,12,1,2,265,2017-11-08 07:46:58,0
+84860,1,1,19,150,2017-11-09 06:31:23,0
+43020,3,1,13,280,2017-11-07 03:54:26,0
+114341,9,1,41,232,2017-11-06 19:20:00,0
+91048,26,1,17,266,2017-11-06 17:31:08,0
+77147,15,1,19,278,2017-11-07 01:38:26,0
+211177,3,1,13,205,2017-11-09 06:06:31,0
+90528,15,1,41,245,2017-11-08 23:54:52,0
+157634,12,1,17,178,2017-11-07 07:36:37,0
+24905,15,1,1,245,2017-11-09 15:35:33,0
+103125,18,3032,607,107,2017-11-07 06:15:01,0
+7265,12,1,19,140,2017-11-07 06:38:19,0
+305086,18,1,20,449,2017-11-09 08:30:03,0
+10434,2,1,13,477,2017-11-07 19:36:17,0
+99836,9,1,19,215,2017-11-08 03:25:39,0
+150484,15,1,11,245,2017-11-07 11:19:48,0
+94267,23,1,13,153,2017-11-09 08:54:17,0
+49652,15,2,13,245,2017-11-07 12:23:22,0
+55179,12,1,17,178,2017-11-08 12:21:55,0
+114702,9,1,18,445,2017-11-09 00:33:40,0
+136702,3,1,19,280,2017-11-07 11:11:51,0
+146625,3,1,37,379,2017-11-07 00:32:25,0
+107775,15,1,19,278,2017-11-07 14:40:25,0
+37458,9,1,22,127,2017-11-09 12:11:35,0
+55957,14,2,9,401,2017-11-07 13:40:41,0
+38219,14,1,19,401,2017-11-08 12:34:59,0
+42103,12,1,8,265,2017-11-08 09:31:05,0
+53806,3,1,25,317,2017-11-08 07:28:18,0
+322543,2,1,16,477,2017-11-08 18:41:53,0
+83614,8,1,25,145,2017-11-09 10:12:15,0
+201285,2,1,19,243,2017-11-09 05:30:29,0
+4052,12,2,3,140,2017-11-07 10:02:34,0
+44256,2,1,10,219,2017-11-09 09:24:52,0
+105560,1,1,6,134,2017-11-08 18:04:29,0
+48212,15,2,6,140,2017-11-08 08:02:21,0
+204824,14,1,27,463,2017-11-09 00:39:56,0
+111324,3,1,17,489,2017-11-09 03:06:28,0
+110500,3,1,15,280,2017-11-08 11:45:07,0
+15967,11,1,37,173,2017-11-08 15:57:05,0
+37836,5,1,20,377,2017-11-08 06:01:36,0
+149813,18,1,14,376,2017-11-07 02:39:54,0
+48418,15,1,13,480,2017-11-06 16:51:40,0
+49034,9,1,25,234,2017-11-06 17:30:06,0
+63790,15,1,13,430,2017-11-08 11:34:19,0
+5314,3,1,13,280,2017-11-07 06:05:40,0
+94407,3,1,13,421,2017-11-09 12:23:15,0
+43650,5,1,13,377,2017-11-07 02:59:19,0
+80357,3,1,19,211,2017-11-08 08:04:15,0
+24361,6,1,13,459,2017-11-08 02:18:16,0
+14041,6,1,19,459,2017-11-09 04:56:26,0
+63527,3,1,49,137,2017-11-09 00:13:45,0
+190388,15,1,19,153,2017-11-07 01:20:56,0
+62874,14,1,16,467,2017-11-09 10:42:37,0
+106524,12,1,18,259,2017-11-07 19:39:56,0
+22978,3,1,13,280,2017-11-09 01:32:11,0
+37948,13,1,13,477,2017-11-09 00:51:51,0
+30781,15,1,17,412,2017-11-07 03:06:19,0
+79456,2,1,17,122,2017-11-09 08:59:42,0
+279,3,1,14,280,2017-11-08 13:57:25,0
+102275,9,1,19,134,2017-11-06 17:57:14,0
+40898,18,1,6,107,2017-11-06 23:17:41,0
+25792,3,1,11,280,2017-11-08 13:43:27,0
+43417,18,1,8,134,2017-11-09 06:12:36,0
+100393,12,1,3,277,2017-11-08 01:07:02,0
+563,18,3032,607,107,2017-11-07 12:25:56,0
+147610,6,1,16,125,2017-11-08 08:38:10,0
+178851,2,1,15,205,2017-11-07 13:35:44,0
+103502,18,3032,607,107,2017-11-07 14:45:20,0
+95324,18,1,18,121,2017-11-08 17:50:07,0
+129667,21,1,13,128,2017-11-06 16:05:30,0
+82449,2,1,13,122,2017-11-07 15:42:54,0
+44377,2,1,19,219,2017-11-08 06:56:18,0
+118190,25,1,22,259,2017-11-07 04:56:22,0
+62639,13,1,37,400,2017-11-07 05:53:57,0
+109776,8,1,6,145,2017-11-06 16:59:02,0
+119830,18,1,58,107,2017-11-09 01:23:14,0
+146001,7,1,19,101,2017-11-07 10:21:19,0
+41780,2,1,22,122,2017-11-09 05:06:36,0
+55385,17,1,19,280,2017-11-06 16:38:52,0
+118315,3,1,19,442,2017-11-09 15:35:26,0
+204953,2,1,32,477,2017-11-07 03:32:27,0
+106362,20,2,18,259,2017-11-06 16:16:14,0
+86552,9,1,20,466,2017-11-09 05:56:51,0
+15046,15,1,13,245,2017-11-07 04:01:27,0
+109851,9,1,31,127,2017-11-09 08:57:16,0
+43447,3,1,19,137,2017-11-08 00:21:55,0
+75991,12,2,19,245,2017-11-09 00:51:32,0
+12610,26,1,13,477,2017-11-09 04:09:53,0
+120056,12,1,8,245,2017-11-08 07:21:16,0
+48384,2,1,19,452,2017-11-08 08:01:19,0
+26889,18,1,1,439,2017-11-07 04:35:42,0
+200066,27,1,3,153,2017-11-07 06:38:09,0
+37140,18,1,41,134,2017-11-09 05:59:21,0
+73382,14,1,13,379,2017-11-09 00:59:37,0
+37770,3,1,18,280,2017-11-08 02:34:32,0
+85208,18,1,19,121,2017-11-06 22:10:41,0
+39684,1,1,1,115,2017-11-08 11:21:14,0
+19023,12,1,19,178,2017-11-09 02:52:47,0
+44673,18,1,47,439,2017-11-08 12:47:27,0
+65937,2,1,19,237,2017-11-09 06:00:35,0
+221694,9,2,13,134,2017-11-08 16:02:08,0
+103147,9,2,13,232,2017-11-09 10:51:36,0
+12031,2,1,18,237,2017-11-09 06:35:13,0
+5314,15,1,13,278,2017-11-07 17:09:01,0
+103899,18,1,19,107,2017-11-08 16:42:40,0
+18703,2,1,13,205,2017-11-07 05:10:23,0
+55161,9,1,19,232,2017-11-07 07:07:54,0
+123586,3,1,19,317,2017-11-08 08:33:07,0
+43349,21,1,19,232,2017-11-09 02:08:23,0
+104366,3,1,15,280,2017-11-09 06:51:36,0
+48212,12,1,19,265,2017-11-09 13:24:45,0
+81812,12,1,10,259,2017-11-07 11:35:10,0
+44327,13,1,10,477,2017-11-08 00:13:17,0
+63840,14,2,9,480,2017-11-07 05:59:53,0
+140993,18,1,8,121,2017-11-07 09:43:06,0
+17853,13,1,15,477,2017-11-07 06:24:03,0
+239518,15,1,19,245,2017-11-07 16:53:01,0
+95631,1,1,53,135,2017-11-07 12:14:24,0
+170171,3,1,1,280,2017-11-08 04:53:23,0
+275322,17,1,20,280,2017-11-08 17:56:03,0
+189640,15,1,19,153,2017-11-07 03:14:50,0
+197864,26,1,19,121,2017-11-09 10:53:53,0
+5348,2,1,13,258,2017-11-09 03:26:13,0
+925,15,1,13,245,2017-11-07 16:14:24,0
+100869,15,1,13,386,2017-11-08 08:10:20,0
+119040,15,1,13,412,2017-11-09 02:12:35,0
+83268,9,1,13,232,2017-11-09 08:05:46,0
+105587,15,1,13,245,2017-11-07 00:16:18,0
+86767,9,1,13,442,2017-11-08 05:00:19,0
+125222,9,1,41,442,2017-11-08 03:00:10,0
+35221,13,1,22,477,2017-11-08 03:10:14,0
+48796,3,1,22,442,2017-11-09 00:47:03,0
+30824,15,1,30,315,2017-11-08 05:42:27,0
+108568,12,1,20,178,2017-11-07 09:37:37,0
+92766,8,1,27,259,2017-11-07 20:28:13,0
+36045,12,1,8,497,2017-11-09 05:25:02,0
+108914,12,1,41,481,2017-11-07 01:47:36,0
+74068,3,1,13,280,2017-11-08 08:14:34,0
+300711,15,1,6,245,2017-11-09 05:07:46,0
+7595,12,1,10,265,2017-11-07 15:16:53,0
+34137,12,1,10,265,2017-11-08 21:05:51,0
+32286,3,1,19,480,2017-11-06 17:31:33,0
+91734,15,1,19,245,2017-11-09 05:02:17,0
+83616,18,1,20,134,2017-11-07 18:07:22,0
+100333,13,1,19,477,2017-11-09 07:57:03,0
+43793,10,1,19,377,2017-11-06 23:22:04,0
+259489,12,1,13,259,2017-11-08 10:48:24,0
+76979,13,1,22,477,2017-11-07 11:40:28,0
+1477,18,1,20,107,2017-11-08 13:48:02,0
+84640,18,1,19,121,2017-11-08 02:21:28,0
+77619,9,1,19,244,2017-11-08 09:58:26,0
+317869,12,1,13,178,2017-11-09 08:14:40,0
+134616,12,1,27,265,2017-11-08 06:03:32,0
+97571,3,1,36,371,2017-11-07 00:27:53,0
+59456,3,1,17,280,2017-11-08 00:37:03,0
+9592,9,1,13,466,2017-11-08 14:36:20,0
+76792,13,1,19,469,2017-11-08 04:48:51,0
+67384,3,1,13,379,2017-11-09 11:48:30,0
+69395,2,1,8,237,2017-11-09 03:08:52,0
+57891,21,1,18,128,2017-11-09 00:46:03,0
+121564,2,1,19,236,2017-11-08 02:09:28,0
+15769,18,1,13,107,2017-11-08 15:38:45,0
+16144,12,1,17,265,2017-11-09 09:26:53,0
+91250,8,1,17,145,2017-11-09 05:02:57,0
+81419,12,2,13,105,2017-11-08 10:43:48,0
+125436,1,1,18,24,2017-11-07 04:36:37,0
+88071,14,1,19,480,2017-11-09 04:45:30,0
+123759,14,1,10,480,2017-11-08 23:42:01,0
+43250,9,1,14,215,2017-11-08 05:47:55,0
+114314,21,1,19,232,2017-11-09 08:36:27,0
+216532,12,1,17,328,2017-11-08 10:01:11,0
+203749,3,1,19,466,2017-11-09 15:05:23,0
+125141,14,1,9,379,2017-11-08 02:25:34,0
+7690,9,2,17,466,2017-11-08 15:04:49,0
+75489,25,1,6,259,2017-11-06 16:41:13,0
+9592,9,1,17,489,2017-11-08 02:51:30,0
+85329,9,1,16,466,2017-11-09 08:56:43,0
+15290,13,1,8,477,2017-11-06 17:46:21,0
+33008,29,2,13,343,2017-11-06 22:48:04,0
+44744,3,1,19,280,2017-11-09 10:51:55,0
+114490,2,2,65,205,2017-11-07 05:30:22,0
+101300,2,1,19,469,2017-11-08 00:20:22,0
+183755,14,1,19,379,2017-11-07 05:32:50,0
+76178,12,1,28,245,2017-11-08 10:52:35,0
+25705,13,1,19,477,2017-11-09 10:32:16,0
+249138,11,1,35,481,2017-11-08 00:38:00,0
+16156,9,1,13,334,2017-11-07 11:41:17,0
+217578,23,1,19,153,2017-11-08 09:29:43,0
+119262,9,1,41,466,2017-11-08 12:36:52,0
+43827,9,1,13,466,2017-11-09 12:22:08,0
+86767,2,1,10,212,2017-11-07 14:19:16,0
+38602,3,1,19,280,2017-11-09 00:17:41,0
+99856,18,2,27,107,2017-11-09 10:42:46,0
+106279,18,1,3,107,2017-11-09 00:06:29,0
+32985,14,1,14,439,2017-11-09 00:34:55,0
+103715,9,1,13,334,2017-11-08 11:51:05,0
+37617,12,1,22,178,2017-11-07 00:52:39,0
+3513,14,1,19,379,2017-11-07 00:43:50,0
+5761,2,1,17,237,2017-11-09 14:37:26,0
+31240,2,1,1,364,2017-11-08 14:35:19,0
+103284,18,1,41,134,2017-11-08 07:06:54,0
+73487,2,1,6,122,2017-11-07 13:27:41,0
+33201,14,1,13,371,2017-11-08 00:35:55,0
+17191,2,1,22,469,2017-11-09 13:20:44,0
+37183,3,1,13,280,2017-11-08 16:14:17,0
+93230,2,1,13,205,2017-11-09 06:31:29,0
+186512,8,1,19,145,2017-11-06 16:00:48,0
+87459,20,1,22,259,2017-11-07 16:32:42,0
+116642,6,1,13,459,2017-11-08 19:38:41,0
+2564,12,1,7,409,2017-11-09 13:05:29,0
+84450,26,1,15,266,2017-11-09 12:47:03,0
+38648,18,1,53,107,2017-11-08 06:11:13,0
+41666,2,2,28,122,2017-11-09 14:44:45,0
+92673,18,1,19,121,2017-11-08 11:21:15,0
+145951,18,1,10,134,2017-11-07 15:29:41,0
+15815,26,1,19,121,2017-11-09 14:28:45,0
+1519,12,1,19,178,2017-11-08 11:52:35,0
+316348,20,1,10,259,2017-11-09 07:02:55,0
+162150,12,1,22,178,2017-11-09 01:58:15,0
+158703,12,1,13,340,2017-11-09 14:29:16,0
+44458,15,1,30,245,2017-11-09 05:33:08,0
+107025,18,1,19,121,2017-11-09 06:16:21,0
+45373,3,1,13,480,2017-11-09 09:05:06,0
+81792,13,1,47,469,2017-11-07 05:08:04,0
+123586,9,1,13,334,2017-11-08 06:09:30,0
+119844,9,1,18,334,2017-11-08 06:50:52,0
+108858,14,1,19,379,2017-11-09 09:14:28,0
+181770,12,1,35,124,2017-11-07 00:08:29,0
+111299,3,1,19,19,2017-11-08 06:31:05,0
+239385,9,1,13,127,2017-11-09 11:54:57,0
+64435,2,1,20,212,2017-11-08 03:35:41,0
+92846,9,1,22,466,2017-11-08 00:43:41,0
+165072,18,1,37,134,2017-11-07 11:01:05,0
+88744,20,1,19,259,2017-11-07 08:46:15,0
+232378,2,1,19,237,2017-11-07 22:51:40,0
+53479,3,1,10,19,2017-11-07 12:40:18,0
+246429,18,1,23,107,2017-11-07 23:32:32,0
+82943,3,1,19,211,2017-11-08 15:26:35,0
+207815,13,1,13,477,2017-11-07 05:45:47,0
+195346,12,1,13,178,2017-11-09 06:42:07,0
+141633,8,1,15,140,2017-11-07 13:06:09,0
+176799,12,1,3,409,2017-11-06 16:17:49,0
+92767,18,1,14,134,2017-11-06 16:01:34,0
+89959,1,2,9,134,2017-11-08 13:55:27,0
+102280,18,1,13,107,2017-11-08 08:42:01,0
+120378,3,1,35,137,2017-11-09 04:47:54,0
+116427,8,1,15,145,2017-11-09 07:25:13,0
+123763,2,1,19,469,2017-11-09 05:22:54,0
+43793,15,1,13,430,2017-11-09 09:51:14,0
+135049,18,1,19,121,2017-11-08 04:45:34,0
+305354,9,1,8,244,2017-11-09 08:37:10,0
+5314,18,1,9,134,2017-11-08 15:16:52,0
+109258,3,1,19,480,2017-11-09 11:09:14,0
+18942,6,1,19,459,2017-11-07 09:28:41,0
+128777,9,1,19,232,2017-11-09 09:54:43,0
+39027,3,1,19,280,2017-11-08 07:32:41,0
+66769,26,1,19,121,2017-11-07 00:25:49,0
+309771,3,1,8,280,2017-11-09 04:25:06,0
+105475,3,1,13,280,2017-11-08 12:06:35,0
+38602,3,1,22,280,2017-11-08 02:48:30,0
+226,15,1,15,245,2017-11-09 04:22:01,0
+69034,1,1,13,17,2017-11-07 22:06:49,0
+148745,2,1,19,219,2017-11-07 10:15:00,0
+40887,11,1,13,319,2017-11-08 00:13:47,0
+25905,18,1,10,107,2017-11-08 04:52:30,0
+120159,11,1,22,325,2017-11-08 00:08:40,0
+93920,3,1,13,137,2017-11-09 02:47:44,0
+96752,3,1,20,280,2017-11-07 03:47:50,0
+155890,15,1,17,245,2017-11-07 00:29:50,0
+62954,8,1,19,145,2017-11-06 18:24:44,0
+44299,11,1,19,137,2017-11-09 07:09:27,0
+90816,12,1,19,140,2017-11-09 14:06:53,0
+51361,9,1,16,244,2017-11-07 10:02:58,0
+136561,15,1,19,379,2017-11-06 23:43:16,0
+124024,9,1,13,466,2017-11-07 11:09:50,0
+104906,3,1,6,280,2017-11-09 05:10:34,0
+115634,2,1,19,205,2017-11-08 06:22:09,0
+95982,3,1,19,424,2017-11-07 00:03:22,0
+76749,14,1,3,489,2017-11-08 12:10:38,0
+105475,21,2,37,128,2017-11-07 16:06:25,0
+114314,9,2,13,442,2017-11-07 00:22:36,0
+56754,15,1,27,245,2017-11-07 18:29:41,0
+16462,14,1,10,480,2017-11-07 05:11:26,0
+240190,64,1,22,459,2017-11-08 00:33:55,0
+118722,14,1,25,439,2017-11-07 00:48:53,0
+83025,15,1,19,379,2017-11-07 05:50:28,0
+65724,3,1,17,452,2017-11-08 15:30:20,0
+83069,2,1,14,236,2017-11-08 02:52:35,0
+125962,11,1,31,137,2017-11-09 11:51:46,0
+18676,2,1,41,477,2017-11-08 14:24:11,0
+34751,9,1,19,127,2017-11-09 11:23:32,0
+26481,12,1,17,265,2017-11-07 10:50:56,0
+625,12,1,13,259,2017-11-08 09:19:24,0
+37207,12,1,41,340,2017-11-08 07:06:03,0
+93587,9,1,2,134,2017-11-08 21:10:10,0
+45432,15,1,19,265,2017-11-07 10:24:09,0
+32619,14,1,13,379,2017-11-09 07:19:27,0
+138781,18,1,41,107,2017-11-08 08:19:23,0
+17149,3,2,9,280,2017-11-08 11:31:08,0
+75415,12,1,19,265,2017-11-09 09:29:17,0
+98384,3,1,13,205,2017-11-07 01:12:54,0
+231958,18,1,53,439,2017-11-08 09:01:40,0
+45591,12,1,13,178,2017-11-07 04:40:47,0
+58813,11,1,19,487,2017-11-09 04:46:12,0
+37559,12,1,13,19,2017-11-07 02:38:10,0
+73233,2,1,13,401,2017-11-08 11:20:48,0
+109173,18,1,37,107,2017-11-08 15:09:14,0
+160887,15,1,13,245,2017-11-07 18:09:54,0
+64619,6,1,19,125,2017-11-08 13:10:22,0
+22978,18,1,19,107,2017-11-09 12:16:38,0
+98261,3,1,31,280,2017-11-07 04:47:32,0
+53665,14,1,19,439,2017-11-08 17:54:21,0
+108103,26,1,12,121,2017-11-07 06:34:57,0
+69449,21,1,10,232,2017-11-09 13:44:11,0
+16760,3,1,13,442,2017-11-09 13:49:15,0
+101878,15,1,19,245,2017-11-07 03:38:35,0
+105560,13,1,22,477,2017-11-07 17:11:09,0
+106287,3,1,22,442,2017-11-07 13:29:23,0
+74323,3,1,19,137,2017-11-07 05:47:49,0
+194354,12,1,13,105,2017-11-07 12:15:47,0
+276831,1,1,19,135,2017-11-08 08:59:16,0
+106537,2,1,13,477,2017-11-08 19:43:36,0
+72967,13,1,19,477,2017-11-08 10:21:03,0
+125062,21,1,17,128,2017-11-08 05:59:41,0
+108913,9,1,9,466,2017-11-08 12:11:01,0
+18666,3,1,16,280,2017-11-08 15:35:36,0
+80114,28,1,13,135,2017-11-08 15:17:34,0
+357260,2,1,13,122,2017-11-09 02:43:29,0
+12479,2,1,2,237,2017-11-07 21:30:56,0
+53479,1,2,9,134,2017-11-08 15:17:37,0
+150579,15,1,17,245,2017-11-08 06:17:52,0
+5178,21,1,2,232,2017-11-09 08:21:19,0
+151188,2,1,19,205,2017-11-06 17:14:32,0
+121278,26,1,28,121,2017-11-08 02:09:00,0
+3774,14,1,19,349,2017-11-07 08:43:14,0
+43233,26,1,13,121,2017-11-06 23:10:26,0
+85625,1,1,8,452,2017-11-06 22:36:38,0
+8391,12,1,28,245,2017-11-07 07:51:30,0
+159945,11,1,13,469,2017-11-08 22:35:38,0
+67494,3,1,17,153,2017-11-06 17:35:24,0
+48488,2,1,19,477,2017-11-08 16:36:50,0
+78423,9,1,13,334,2017-11-09 05:53:22,0
+50033,9,1,9,215,2017-11-07 11:59:55,0
+47148,15,1,19,153,2017-11-08 17:58:53,0
+44663,2,1,13,205,2017-11-09 14:00:20,0
+18869,15,1,13,315,2017-11-07 07:44:13,0
+51060,13,1,19,469,2017-11-06 18:16:27,0
+104991,12,1,23,259,2017-11-07 14:57:39,0
+24943,8,1,19,145,2017-11-09 12:08:26,0
+73516,18,1,15,121,2017-11-09 07:29:01,0
+235509,2,1,10,452,2017-11-09 09:11:43,0
+33060,21,1,41,232,2017-11-09 15:36:00,0
+73516,12,2,9,178,2017-11-09 01:11:52,0
+43044,3,1,13,130,2017-11-06 23:38:51,0
+111227,9,1,13,466,2017-11-09 03:51:58,0
+50055,2,1,41,469,2017-11-08 01:57:13,0
+5147,9,1,25,489,2017-11-09 10:24:32,0
+70260,8,1,19,145,2017-11-07 22:33:33,0
+124608,2,1,3,236,2017-11-08 01:03:53,0
+248917,2,1,13,477,2017-11-08 14:58:11,0
+137775,94,1,15,361,2017-11-08 02:46:04,0
+61120,8,1,20,145,2017-11-09 05:42:44,0
+57000,20,2,22,259,2017-11-06 23:26:57,0
+8171,11,1,13,319,2017-11-07 01:41:28,0
+45655,9,1,19,215,2017-11-09 15:46:51,0
+105560,15,1,22,480,2017-11-09 14:59:59,0
+66397,12,1,10,140,2017-11-07 23:43:55,0
+95766,12,1,19,245,2017-11-08 15:52:04,0
+109851,9,1,13,134,2017-11-09 00:38:16,0
+149458,25,1,13,259,2017-11-07 02:05:42,0
+93808,3,1,19,19,2017-11-08 11:32:57,0
+67169,9,1,13,127,2017-11-09 08:27:12,0
+86767,9,1,19,334,2017-11-07 00:26:54,0
+5314,21,1,13,128,2017-11-09 05:28:08,0
+74924,8,1,19,145,2017-11-09 06:25:22,0
+99482,15,1,17,386,2017-11-07 14:33:20,0
+1586,2,1,18,477,2017-11-09 03:34:29,0
+100065,18,1,13,439,2017-11-07 10:46:55,0
+99927,12,1,12,245,2017-11-08 14:23:37,0
+115671,23,1,13,153,2017-11-08 05:04:41,0
+89272,23,1,18,153,2017-11-09 00:17:10,0
+144934,28,1,19,135,2017-11-06 23:26:53,0
+125222,20,2,17,259,2017-11-09 10:50:22,0
+7481,11,1,6,319,2017-11-08 15:37:52,0
+5348,14,2,13,442,2017-11-07 10:14:22,0
+114122,18,1,19,107,2017-11-09 01:54:16,0
+4542,14,1,13,489,2017-11-07 03:37:08,0
+99181,12,1,22,245,2017-11-07 05:43:57,0
+80306,12,1,17,328,2017-11-08 13:15:54,0
+81698,18,1,13,121,2017-11-07 03:15:40,0
+47664,2,1,13,435,2017-11-08 09:25:51,0
+103164,2,1,8,219,2017-11-09 05:06:12,0
+242177,18,1,17,107,2017-11-09 13:36:23,0
+63893,2,1,9,237,2017-11-08 05:37:08,0
+360796,72,1,22,101,2017-11-08 23:30:10,1
+69173,3,1,13,280,2017-11-09 02:44:24,0
+62968,14,1,19,480,2017-11-09 01:17:02,0
+7709,9,1,19,489,2017-11-08 19:34:02,0
+157480,2,1,10,219,2017-11-09 12:14:29,0
+80273,15,1,10,379,2017-11-08 17:30:56,0
+6721,12,1,43,265,2017-11-09 10:51:20,0
+78355,23,1,22,153,2017-11-07 04:14:45,0
+127081,2,2,9,435,2017-11-06 20:42:24,0
+189134,2,1,19,205,2017-11-07 01:49:00,0
+87797,12,1,17,205,2017-11-07 05:19:41,0
+80046,12,1,13,245,2017-11-08 16:46:17,0
+51859,12,1,19,178,2017-11-09 10:33:36,0
+5135,3,1,18,371,2017-11-07 00:39:52,0
+69577,2,1,19,452,2017-11-09 13:38:37,0
+226319,64,1,23,459,2017-11-08 09:02:13,0
+141447,15,1,13,386,2017-11-07 05:03:26,0
+184453,2,1,19,452,2017-11-06 17:21:30,0
+50375,2,1,10,237,2017-11-09 07:57:08,0
+43793,12,2,13,145,2017-11-09 12:21:43,0
+191531,18,1,8,107,2017-11-07 03:28:03,0
+43395,12,1,19,19,2017-11-08 05:01:39,0
+43086,15,1,10,111,2017-11-07 09:21:05,0
+137667,26,1,2,121,2017-11-09 15:19:19,0
+17149,55,1,13,406,2017-11-09 08:26:08,0
+67350,2,1,27,469,2017-11-07 11:14:55,0
+104199,14,1,27,349,2017-11-09 12:42:09,0
+207271,1,1,55,24,2017-11-07 08:22:58,0
+3313,3,1,19,280,2017-11-08 10:55:35,0
+80485,3,1,8,280,2017-11-07 07:26:54,0
+74515,15,1,17,245,2017-11-08 10:55:44,0
+111897,64,1,23,459,2017-11-07 21:59:38,0
+8215,2,1,12,477,2017-11-09 07:49:51,0
+122253,2,1,19,477,2017-11-07 14:09:11,0
+2919,8,1,19,145,2017-11-09 03:43:49,0
+23674,7,1,22,101,2017-11-09 06:23:28,0
+111588,13,1,19,469,2017-11-08 14:26:34,0
+209663,2,1,22,477,2017-11-07 13:59:39,0
+210107,12,1,14,245,2017-11-06 18:17:22,0
+68891,21,1,19,128,2017-11-06 23:05:04,0
+32391,2,1,13,469,2017-11-08 11:08:44,0
+925,18,1,19,107,2017-11-08 02:30:35,0
+117712,27,1,19,153,2017-11-09 10:42:23,0
+297411,1,1,3,17,2017-11-09 12:52:35,0
+85256,3,1,13,211,2017-11-08 08:06:08,0
+359548,3,1,19,442,2017-11-08 22:58:26,0
+193958,3,1,28,280,2017-11-08 01:29:35,0
+27388,9,1,19,215,2017-11-07 14:44:15,0
+119303,19,0,38,213,2017-11-08 16:05:46,0
+22943,3,1,19,424,2017-11-09 06:46:16,0
+204888,3,1,15,280,2017-11-07 06:25:28,0
+46729,1,2,9,125,2017-11-07 09:19:25,0
+83268,3,1,10,137,2017-11-08 05:05:08,0
+96298,15,1,13,315,2017-11-08 15:43:22,0
+39834,2,2,19,205,2017-11-09 15:40:42,0
+69752,3,1,32,280,2017-11-08 14:59:06,0
+93320,7,1,35,101,2017-11-09 11:13:54,0
+44181,3,1,14,280,2017-11-09 01:52:38,0
+69701,3,1,18,280,2017-11-06 17:00:15,0
+62391,12,1,19,340,2017-11-08 04:30:10,0
+28795,8,1,32,145,2017-11-06 19:16:40,0
+40440,9,1,19,215,2017-11-07 00:48:27,0
+100042,12,1,13,265,2017-11-06 16:46:25,0
+201529,15,1,13,245,2017-11-06 23:48:54,0
+102435,1,1,10,135,2017-11-09 02:29:27,0
+99132,8,1,46,145,2017-11-07 02:53:07,0
+106776,14,1,13,379,2017-11-09 11:39:54,0
+66463,7,1,19,101,2017-11-07 10:08:34,0
+45137,14,1,19,442,2017-11-09 13:26:28,0
+151411,12,1,13,245,2017-11-06 17:06:17,0
+45793,15,1,19,245,2017-11-08 19:54:29,0
+69853,9,1,18,134,2017-11-08 06:55:06,0
+84530,3,1,13,115,2017-11-08 13:06:36,0
+357239,18,1,25,134,2017-11-09 06:41:35,0
+77355,15,1,19,245,2017-11-07 11:02:02,0
+93860,12,1,10,178,2017-11-08 05:07:08,0
+5348,3,1,19,442,2017-11-09 10:53:23,0
+32745,12,1,13,19,2017-11-09 07:47:44,0
+177614,3,1,40,280,2017-11-07 05:39:56,0
+60271,2,1,53,205,2017-11-08 15:43:24,0
+27678,9,1,19,334,2017-11-07 10:38:02,0
+106362,3,1,8,280,2017-11-09 01:23:53,0
+42127,14,1,17,371,2017-11-08 00:05:23,0
+5314,2,1,6,477,2017-11-09 12:43:59,0
+5314,9,1,19,134,2017-11-08 23:30:15,0
+237235,3,1,9,130,2017-11-08 04:17:46,0
+81776,12,1,12,140,2017-11-08 13:00:11,0
+42143,3,1,18,280,2017-11-09 03:48:32,0
+323330,3,1,15,424,2017-11-09 05:26:28,0
+102062,14,1,19,480,2017-11-08 06:33:10,0
+33860,2,1,41,401,2017-11-09 00:15:07,0
+29372,8,1,8,145,2017-11-08 02:03:07,0
+25485,26,1,13,477,2017-11-08 11:59:31,0
+159850,2,1,25,205,2017-11-08 06:02:24,0
+61178,12,1,9,259,2017-11-09 04:55:50,0
+123809,3,1,19,280,2017-11-08 15:57:06,0
+104836,26,1,53,121,2017-11-06 22:30:21,0
+89982,3,1,13,424,2017-11-08 07:17:12,0
+31119,14,1,22,134,2017-11-07 23:26:18,0
+159355,27,1,19,122,2017-11-07 23:38:26,0
+124166,3,1,19,280,2017-11-07 03:54:28,0
+44663,3,1,19,402,2017-11-07 11:31:20,0
+22240,2,1,3,377,2017-11-08 17:34:23,0
+73516,12,1,32,326,2017-11-09 08:48:45,0
+7124,11,1,15,487,2017-11-07 08:53:35,0
+186377,9,1,20,244,2017-11-07 01:51:37,0
+80058,2,1,13,122,2017-11-08 07:37:30,0
+105534,2,1,16,122,2017-11-09 14:15:45,0
+180820,18,1,19,107,2017-11-07 12:41:28,0
+200808,23,1,13,153,2017-11-07 23:38:04,0
+78150,25,1,19,259,2017-11-07 01:20:45,0
+8104,14,1,13,401,2017-11-07 05:08:29,0
+68530,18,1,19,121,2017-11-08 03:01:31,0
+144353,3,1,10,280,2017-11-08 15:44:12,0
+276071,15,1,3,111,2017-11-09 09:34:51,0
+164071,11,1,37,325,2017-11-07 01:22:38,0
+177032,3,1,14,452,2017-11-07 03:48:24,0
+147065,3,1,19,424,2017-11-07 04:10:18,0
+38935,23,1,13,153,2017-11-09 13:33:15,0
+3994,12,1,13,205,2017-11-08 23:33:44,0
+35951,3,1,13,19,2017-11-07 12:32:43,0
+20628,14,1,25,463,2017-11-07 14:53:26,0
+75007,3,1,37,442,2017-11-07 17:05:11,0
+2810,15,1,22,265,2017-11-09 09:17:19,0
+88785,3,1,37,19,2017-11-07 23:36:37,0
+100859,14,1,13,379,2017-11-07 07:16:24,0
+69710,25,1,37,259,2017-11-07 11:52:31,0
+83252,8,1,19,145,2017-11-09 02:08:51,0
+64615,7,1,13,101,2017-11-09 06:23:08,0
+75979,2,1,13,469,2017-11-08 23:03:18,0
+120594,3,1,13,280,2017-11-07 14:37:42,0
+103527,3,1,20,173,2017-11-08 02:13:50,0
+35774,18,1,17,107,2017-11-09 06:44:02,0
+32609,12,1,18,497,2017-11-08 13:50:45,0
+17572,6,1,58,125,2017-11-07 11:04:18,0
+5314,14,1,19,463,2017-11-08 17:12:15,0
+1755,15,1,19,315,2017-11-08 09:13:11,0
+113865,12,1,13,265,2017-11-07 00:42:08,0
+301290,1,2,100,13,2017-11-09 13:41:13,0
+166857,18,1,19,107,2017-11-08 11:22:06,0
+100212,13,1,13,400,2017-11-08 14:00:15,0
+5348,28,1,22,135,2017-11-08 05:00:25,0
+317757,18,1,49,376,2017-11-09 08:29:04,0
+16462,18,1,19,439,2017-11-07 10:29:17,0
+33181,3,1,13,280,2017-11-09 02:16:48,0
+161166,64,1,13,459,2017-11-06 20:17:15,0
+334558,12,1,13,19,2017-11-09 10:22:12,0
+34751,13,1,19,400,2017-11-09 01:54:09,0
+19014,9,1,19,334,2017-11-08 22:41:14,0
+67632,18,1,9,107,2017-11-09 09:15:54,0
+37892,2,1,19,469,2017-11-07 08:57:04,0
+66254,3,1,6,173,2017-11-08 09:36:05,0
+4177,9,1,13,127,2017-11-09 15:28:51,0
+57854,26,1,41,266,2017-11-07 14:38:58,0
+46720,13,1,19,477,2017-11-09 11:51:52,0
+25152,15,1,19,412,2017-11-07 08:40:26,0
+23878,17,2,17,280,2017-11-08 08:00:38,0
+31247,3,1,19,280,2017-11-09 02:20:26,0
+124574,21,2,19,128,2017-11-06 22:18:32,0
+55849,18,1,13,107,2017-11-07 00:19:44,0
+5314,12,1,17,481,2017-11-09 14:00:20,0
+30636,23,1,13,153,2017-11-08 15:16:41,0
+26801,23,1,11,153,2017-11-09 00:07:44,0
+80634,15,1,13,245,2017-11-09 05:31:54,0
+81571,1,1,6,134,2017-11-09 05:06:04,0
+43514,3,1,18,280,2017-11-09 06:36:35,0
+105519,12,2,13,178,2017-11-07 12:15:09,0
+90509,20,2,13,259,2017-11-09 00:50:26,0
+69100,18,1,17,107,2017-11-08 14:35:28,0
+439,15,1,32,245,2017-11-07 11:41:15,0
+4019,3,1,18,280,2017-11-08 11:23:04,0
+74497,22,1,16,116,2017-11-09 15:43:30,0
+71535,1,1,9,134,2017-11-07 07:08:28,0
+195475,14,1,8,442,2017-11-07 15:02:37,0
+188073,2,1,20,364,2017-11-09 03:55:04,0
+17077,3,1,19,280,2017-11-09 05:19:08,0
+31444,3,1,13,19,2017-11-07 11:18:16,0
+270249,3,1,1,280,2017-11-08 09:13:15,0
+36340,12,1,17,259,2017-11-07 02:08:08,0
+168896,11,1,19,319,2017-11-06 23:46:27,0
+59043,14,1,13,401,2017-11-09 02:20:35,0
+40700,6,1,25,459,2017-11-09 06:26:09,0
+50129,35,1,19,21,2017-11-08 23:40:16,1
+27499,18,1,10,107,2017-11-09 00:55:22,0
+140993,2,1,19,219,2017-11-07 00:22:08,0
+28417,3,1,19,280,2017-11-08 12:34:58,0
+108341,2,1,13,205,2017-11-08 09:04:21,0
+79176,2,1,13,469,2017-11-07 15:24:41,0
+63101,14,1,19,442,2017-11-08 08:43:18,0
+76822,6,1,19,459,2017-11-07 06:30:21,0
+137176,9,1,19,466,2017-11-07 08:50:42,0
+38604,15,1,13,245,2017-11-08 14:55:07,0
+5178,21,1,19,128,2017-11-09 02:54:16,0
+143216,12,1,16,328,2017-11-08 04:07:10,0
+71076,3,1,6,466,2017-11-07 07:02:30,0
+277902,11,1,12,319,2017-11-08 08:08:25,0
+93003,7,1,13,101,2017-11-08 10:27:12,0
+214064,18,1,13,439,2017-11-08 15:15:37,0
+7481,18,1,17,107,2017-11-07 01:44:03,0
+104868,3,1,13,205,2017-11-08 00:37:10,0
+288760,12,1,13,178,2017-11-09 14:06:43,0
+116562,3,1,19,280,2017-11-07 07:21:05,0
+16278,9,1,9,466,2017-11-09 06:49:20,0
+265385,9,1,13,232,2017-11-09 03:37:13,0
+122415,13,1,6,477,2017-11-09 13:00:30,0
+81935,94,1,1,361,2017-11-09 14:25:41,0
+228090,47,1,13,484,2017-11-09 10:50:09,0
+31512,1,1,19,377,2017-11-08 10:39:37,0
+65552,9,1,13,334,2017-11-09 05:45:16,0
+83723,11,1,19,319,2017-11-08 08:37:49,0
+178991,3,1,19,280,2017-11-08 03:00:58,0
+103833,3,1,13,424,2017-11-08 19:45:07,0
+57723,3,1,13,280,2017-11-07 00:53:51,0
+133811,12,1,18,178,2017-11-08 05:00:09,0
+44661,2,1,19,477,2017-11-07 16:25:51,0
+53874,3,1,16,280,2017-11-07 02:11:47,0
+135320,3,1,19,280,2017-11-08 01:39:18,0
+5704,17,1,13,280,2017-11-07 12:51:36,0
+216664,9,1,19,334,2017-11-08 04:08:28,0
+50512,9,2,9,215,2017-11-07 00:01:57,0
+124953,12,1,15,178,2017-11-09 03:09:13,0
+66658,11,1,13,487,2017-11-07 23:48:46,0
+170219,18,1,7,134,2017-11-08 12:18:13,0
+362472,12,1,12,265,2017-11-09 05:19:07,0
+29501,23,1,18,153,2017-11-07 07:50:48,0
+112666,12,1,6,265,2017-11-09 04:48:28,0
+299020,2,1,15,435,2017-11-08 17:56:05,0
+64428,9,1,13,334,2017-11-07 12:08:24,0
+27391,9,1,23,127,2017-11-08 13:42:40,0
+203230,14,1,22,446,2017-11-06 23:39:06,0
+107008,2,1,18,237,2017-11-08 03:24:22,0
+69852,12,1,17,326,2017-11-07 03:23:59,0
+79881,9,1,20,244,2017-11-07 01:33:24,0
+29140,28,1,19,135,2017-11-06 23:18:42,0
+15192,2,1,13,219,2017-11-09 13:50:00,0
+14872,12,1,35,265,2017-11-09 03:51:05,0
+209663,3,1,19,280,2017-11-08 12:52:37,0
+119531,9,1,11,442,2017-11-09 09:58:02,0
+44349,12,1,20,409,2017-11-09 02:00:53,0
+87764,12,1,22,481,2017-11-08 06:24:17,0
+362691,24,1,22,105,2017-11-09 15:54:58,0
+111324,11,1,19,173,2017-11-07 05:47:19,0
+36813,15,1,13,138,2017-11-09 03:50:39,0
+5348,12,2,8,178,2017-11-07 07:50:00,0
+60725,12,2,13,259,2017-11-07 21:51:29,0
+194238,3,1,19,137,2017-11-07 09:14:40,0
+94491,12,1,13,328,2017-11-09 12:42:34,0
+60580,18,1,6,134,2017-11-08 08:58:38,0
+15488,8,1,13,145,2017-11-07 05:22:41,0
+99944,3,2,22,280,2017-11-08 04:09:20,0
+91166,21,1,22,232,2017-11-07 17:12:37,0
+62803,8,1,13,145,2017-11-09 13:10:29,0
+11196,11,1,19,219,2017-11-09 12:22:37,0
+301394,2,1,16,237,2017-11-09 01:41:53,0
+18332,9,1,8,466,2017-11-09 12:21:02,0
+11616,14,1,6,379,2017-11-08 07:14:20,0
+25818,12,1,13,178,2017-11-09 01:48:10,0
+118844,3,1,13,211,2017-11-08 16:50:20,0
+215353,12,1,11,259,2017-11-09 11:11:27,0
+139907,3,1,13,19,2017-11-06 16:52:34,0
+106267,8,1,10,145,2017-11-08 08:05:14,0
+75539,12,1,26,245,2017-11-07 16:04:20,0
+114913,2,1,13,219,2017-11-09 15:49:12,0
+53910,11,1,25,481,2017-11-07 03:32:47,0
+62704,9,1,13,107,2017-11-08 19:38:11,0
+37892,3,1,19,280,2017-11-07 07:38:15,0
+123948,24,1,12,105,2017-11-07 02:51:32,0
+93953,9,1,19,134,2017-11-08 11:00:11,0
+5314,9,2,9,145,2017-11-08 10:34:14,0
+121228,12,1,19,328,2017-11-07 09:28:09,0
+89336,1,1,37,377,2017-11-08 06:14:54,0
+111967,3,1,1,113,2017-11-09 06:49:59,0
+196489,28,1,10,135,2017-11-07 14:14:25,0
+79190,18,1,15,107,2017-11-08 13:48:25,0
+85603,12,1,8,212,2017-11-08 09:12:05,0
+22117,2,1,30,364,2017-11-07 14:29:39,0
+237370,2,1,30,469,2017-11-08 06:51:46,0
+119289,19,0,24,347,2017-11-09 03:53:27,0
+119524,14,1,13,463,2017-11-07 11:55:20,0
+68382,14,1,19,489,2017-11-08 15:48:55,0
+146210,19,0,24,213,2017-11-08 22:58:10,0
+64595,15,1,13,278,2017-11-07 11:47:17,0
+67494,7,1,14,101,2017-11-09 06:22:46,0
+95245,3,1,19,130,2017-11-07 00:02:39,0
+6595,2,1,8,237,2017-11-07 03:52:24,0
+82427,3,1,13,280,2017-11-08 09:32:48,0
+8590,15,1,20,245,2017-11-07 00:48:01,0
+234613,64,1,19,459,2017-11-08 06:44:34,0
+206765,3,1,19,173,2017-11-09 06:13:49,0
+119901,6,1,19,459,2017-11-08 01:53:26,0
+44067,12,2,11,265,2017-11-08 16:32:33,0
+29347,11,1,15,469,2017-11-08 22:39:42,0
+98995,14,1,17,349,2017-11-07 08:47:23,0
+53479,2,1,6,477,2017-11-08 15:04:20,0
+95585,12,1,19,178,2017-11-07 16:05:53,0
+114220,6,1,15,459,2017-11-06 16:19:37,0
+115975,15,1,17,265,2017-11-07 04:22:38,0
+20813,14,1,13,379,2017-11-07 03:44:05,0
+55034,26,1,13,477,2017-11-08 19:49:55,0
+53454,2,2,17,469,2017-11-09 11:38:53,0
+73516,14,1,19,379,2017-11-07 16:03:58,0
+15572,18,1,53,107,2017-11-08 01:14:32,0
+97347,15,1,16,386,2017-11-08 23:38:30,0
+70576,2,1,19,477,2017-11-09 10:04:57,0
+337629,18,1,5,107,2017-11-08 19:44:00,0
+69017,9,1,13,215,2017-11-08 06:33:48,0
+124686,2,1,20,237,2017-11-07 01:15:14,0
+29453,28,1,19,135,2017-11-07 04:23:41,0
+73516,14,2,5,467,2017-11-08 22:49:15,0
+10749,12,1,15,409,2017-11-08 14:23:03,0
+118563,9,2,9,442,2017-11-08 10:57:37,0
+101435,18,1,13,134,2017-11-08 03:18:49,0
+94003,3,1,19,409,2017-11-07 07:20:48,0
+182081,15,1,19,245,2017-11-07 11:51:43,0
+37948,18,1,13,121,2017-11-09 02:10:41,0
+5314,2,1,66,469,2017-11-07 09:19:18,0
+105433,12,1,19,245,2017-11-08 14:32:21,0
+19235,9,1,19,215,2017-11-06 16:18:16,0
+96906,12,1,19,265,2017-11-09 00:01:21,0
+5348,9,1,3,244,2017-11-07 05:40:45,0
+124576,3,1,13,280,2017-11-09 04:13:40,0
+21274,12,1,18,219,2017-11-08 08:02:58,0
+75593,18,3032,607,107,2017-11-07 10:05:45,0
+37287,13,1,19,477,2017-11-07 12:48:06,0
+4714,25,1,26,259,2017-11-09 08:41:49,0
+20893,12,1,9,178,2017-11-07 03:14:00,0
+54688,3,1,13,135,2017-11-06 16:13:49,0
+25737,15,1,6,245,2017-11-06 16:36:29,0
+43872,13,1,13,477,2017-11-07 08:50:57,0
+72204,15,1,10,245,2017-11-08 14:50:30,0
+81448,3,1,10,280,2017-11-09 01:08:32,0
+67725,21,1,18,128,2017-11-09 00:10:40,0
+58666,14,1,9,489,2017-11-08 00:35:48,0
+47110,3,1,13,280,2017-11-08 12:02:57,0
+48384,3,1,19,409,2017-11-07 10:49:55,0
+78758,3,1,3,280,2017-11-09 04:34:33,0
+118190,27,1,41,153,2017-11-09 08:55:50,0
+62723,3,1,1,280,2017-11-08 09:25:21,0
+944,2,1,6,377,2017-11-08 02:35:30,0
+100212,14,1,13,349,2017-11-08 15:25:06,0
+121461,18,1,2,107,2017-11-09 09:32:28,0
+78809,15,1,13,245,2017-11-08 21:35:00,0
+44663,12,1,19,409,2017-11-08 16:48:32,0
+96832,12,1,7,19,2017-11-07 11:30:32,0
+47980,12,1,25,259,2017-11-08 02:53:56,0
+210972,20,1,13,259,2017-11-07 01:42:29,0
+102156,13,1,17,400,2017-11-06 23:20:08,0
+67734,3,1,13,442,2017-11-08 08:46:33,0
+21595,14,1,19,442,2017-11-08 01:12:21,0
+85041,9,1,23,445,2017-11-08 08:09:05,0
+39020,14,1,19,134,2017-11-08 10:32:48,0
+55726,1,1,13,452,2017-11-09 07:56:23,0
+90485,2,1,19,469,2017-11-08 15:03:10,0
+82843,14,1,8,489,2017-11-08 04:37:55,0
+224991,3,1,19,205,2017-11-09 05:28:51,0
+48919,6,1,17,459,2017-11-06 18:38:13,0
+202531,11,1,13,469,2017-11-07 02:57:41,0
+19934,2,1,18,237,2017-11-09 04:45:51,0
+1337,3,1,31,280,2017-11-08 03:51:48,0
+111299,3,1,19,280,2017-11-08 06:47:30,0
+182820,3,1,17,480,2017-11-07 05:24:01,0
+34223,1,1,13,178,2017-11-07 06:00:43,0
+111299,12,1,40,245,2017-11-06 18:02:58,0
+81935,14,1,19,489,2017-11-08 13:12:23,0
+59395,18,2,19,121,2017-11-08 05:14:47,0
+23776,9,1,22,215,2017-11-07 09:47:30,0
+83018,14,1,19,379,2017-11-07 04:22:49,0
+33549,9,1,20,442,2017-11-07 02:48:43,0
+53715,3,1,20,409,2017-11-06 17:04:28,0
+22572,3,1,13,135,2017-11-09 15:09:01,0
+118146,12,1,13,178,2017-11-07 16:10:34,0
+108858,9,1,19,215,2017-11-07 13:25:26,0
+208771,21,1,19,128,2017-11-07 01:34:21,0
+24703,13,1,13,477,2017-11-08 08:35:26,0
+230514,2,1,18,237,2017-11-08 05:11:50,0
+111277,12,1,19,242,2017-11-09 03:59:39,0
+85120,24,1,19,105,2017-11-08 14:15:50,0
+26050,18,1,41,107,2017-11-07 14:47:16,0
+5314,8,2,13,145,2017-11-07 22:52:32,0
+3313,9,1,19,127,2017-11-09 09:58:33,0
+200612,2,1,18,212,2017-11-07 21:12:53,0
+147142,18,1,13,134,2017-11-07 06:19:31,0
+130641,3,1,13,280,2017-11-07 02:51:33,0
+121839,2,1,18,236,2017-11-09 01:55:06,0
+292112,7,3866,866,101,2017-11-09 07:36:03,0
+81327,9,1,19,334,2017-11-06 21:55:53,0
+177104,9,1,13,232,2017-11-08 00:35:51,0
+116252,3,1,19,317,2017-11-08 08:57:33,0
+67776,15,1,16,245,2017-11-07 03:22:34,0
+79909,3,2,19,317,2017-11-09 15:09:54,0
+141399,9,1,14,490,2017-11-09 11:09:20,0
+234634,21,1,41,232,2017-11-08 03:00:17,0
+64054,3,1,13,280,2017-11-06 17:20:19,0
+47842,2,1,17,122,2017-11-08 03:17:32,0
+105181,15,1,31,265,2017-11-09 07:32:41,0
+247374,9,1,16,445,2017-11-08 13:30:24,0
+5729,9,1,35,215,2017-11-07 09:08:23,0
+29974,9,1,15,466,2017-11-08 12:30:19,0
+145896,15,1,19,386,2017-11-07 11:49:34,0
+27627,1,1,14,134,2017-11-08 03:55:34,0
+23641,6,2,42,125,2017-11-09 13:03:22,0
+136299,9,1,10,244,2017-11-09 04:01:11,0
+26350,15,1,22,130,2017-11-07 14:09:54,0
+154993,3,1,22,442,2017-11-08 22:57:33,0
+3218,26,1,40,121,2017-11-07 11:30:25,0
+113460,15,1,18,315,2017-11-07 00:29:15,0
+59176,12,2,13,277,2017-11-08 01:10:08,0
+114220,7,1,27,101,2017-11-09 06:20:53,0
+27475,64,1,20,459,2017-11-08 00:28:39,0
+105290,15,1,19,278,2017-11-08 07:05:01,0
+313428,9,1,19,234,2017-11-09 14:28:30,0
+202653,15,1,19,245,2017-11-07 06:45:26,0
+147605,2,1,19,377,2017-11-09 14:45:10,0
+21536,18,1,6,107,2017-11-07 09:28:22,0
+52810,26,1,9,266,2017-11-07 23:17:07,0
+43188,3,1,30,489,2017-11-08 07:16:42,0
+287355,7,1,31,101,2017-11-09 06:34:50,0
+64454,8,1,22,145,2017-11-07 14:02:08,0
+116190,12,1,17,265,2017-11-08 04:52:47,0
+16658,64,1,13,459,2017-11-07 14:28:48,0
+28176,2,1,37,212,2017-11-08 14:41:48,0
+172751,12,1,9,245,2017-11-08 19:27:36,0
+6022,9,2,19,322,2017-11-09 15:32:55,0
+23102,18,1,53,134,2017-11-08 05:02:44,0
+8794,2,1,36,435,2017-11-07 12:07:05,0
+102511,12,1,20,245,2017-11-08 09:27:28,0
+28980,12,1,19,497,2017-11-07 18:10:42,0
+114490,2,2,19,477,2017-11-08 12:00:46,0
+145260,2,1,28,477,2017-11-06 23:24:02,0
+121042,2,1,19,237,2017-11-07 00:29:48,0
+120662,2,1,32,212,2017-11-07 13:57:10,0
+100971,18,1,19,121,2017-11-09 03:52:17,0
+55369,9,1,13,489,2017-11-08 15:03:09,0
+5314,2,1,11,477,2017-11-07 11:31:48,0
+221137,15,1,19,278,2017-11-08 12:10:07,0
+120496,12,1,22,212,2017-11-08 04:09:24,0
+41232,3,1,13,424,2017-11-08 10:03:00,0
+114235,12,1,13,265,2017-11-07 15:05:27,0
+35884,12,1,8,178,2017-11-08 02:19:08,0
+26726,26,1,10,266,2017-11-08 04:14:38,0
+58127,3,1,13,280,2017-11-08 03:54:30,0
+86799,8,1,13,145,2017-11-08 10:40:47,0
+119558,9,1,19,215,2017-11-07 13:42:16,0
+190108,15,1,19,245,2017-11-07 07:21:30,0
+287359,2,1,47,477,2017-11-08 05:10:01,0
+109706,6,1,20,459,2017-11-08 15:20:24,0
+56330,8,1,19,145,2017-11-06 21:58:54,0
+41142,3,1,13,280,2017-11-07 05:28:05,0
+259470,2,1,19,122,2017-11-08 01:26:06,0
+33409,64,1,23,459,2017-11-07 12:38:08,0
+31597,14,1,20,379,2017-11-08 03:10:01,0
+83636,11,1,19,173,2017-11-06 23:36:18,0
+108913,15,1,19,245,2017-11-06 16:32:19,0
+48646,3,1,13,480,2017-11-07 01:19:47,0
+83321,3,1,10,409,2017-11-08 04:27:25,0
+149367,1,1,19,178,2017-11-07 07:50:48,0
+19934,9,1,22,442,2017-11-07 04:17:31,0
+25588,2,1,19,205,2017-11-07 13:17:37,0
+41963,2,1,17,236,2017-11-09 02:52:38,0
+95601,26,1,8,477,2017-11-08 05:52:20,0
+19452,12,1,16,245,2017-11-08 00:21:53,0
+14918,3,1,40,280,2017-11-08 16:23:15,0
+114816,13,1,19,477,2017-11-09 12:31:55,0
+36213,15,1,3,245,2017-11-08 17:25:04,0
+65773,3,1,13,173,2017-11-08 00:48:03,0
+75644,3,1,19,280,2017-11-08 12:50:02,0
+97670,56,1,19,406,2017-11-09 09:03:34,0
+837,21,1,25,128,2017-11-07 01:35:04,0
+52805,3,1,19,371,2017-11-07 00:16:34,0
+83252,12,1,13,178,2017-11-09 10:16:00,0
+132464,23,1,22,153,2017-11-08 10:58:16,0
+5314,18,1,12,439,2017-11-08 11:23:13,0
+23948,3,1,13,280,2017-11-08 05:04:36,0
+197864,15,1,19,153,2017-11-07 17:07:31,0
+178822,3,1,6,205,2017-11-07 04:18:01,0
+15970,14,1,16,442,2017-11-06 22:30:09,0
+186154,15,1,18,315,2017-11-07 14:04:21,0
+83886,12,1,53,409,2017-11-08 03:41:41,0
+86349,3,1,17,424,2017-11-07 15:36:37,0
+91066,14,1,30,439,2017-11-06 16:18:41,0
+220223,14,1,46,489,2017-11-08 10:40:02,0
+8081,14,1,6,480,2017-11-09 10:59:37,0
+111389,12,1,13,178,2017-11-09 10:36:29,0
+111078,9,1,19,134,2017-11-08 05:19:54,0
+109723,18,1,19,439,2017-11-08 12:52:49,0
+203358,3,1,22,137,2017-11-09 15:39:16,0
+15940,14,1,19,379,2017-11-08 05:40:53,0
+160610,18,1,13,107,2017-11-07 08:53:45,0
+29411,13,1,13,400,2017-11-07 04:53:45,0
+145970,2,1,19,243,2017-11-07 00:38:11,0
+112543,12,1,35,140,2017-11-08 13:26:21,0
+259980,12,1,19,265,2017-11-08 01:29:41,0
+50169,3,2,3,211,2017-11-09 12:36:50,0
+269083,9,1,18,134,2017-11-07 17:33:04,0
+93953,1,1,19,153,2017-11-09 04:01:17,0
+13597,18,1,19,107,2017-11-08 13:43:39,0
+86767,2,1,6,477,2017-11-07 11:33:46,0
+38633,2,1,13,212,2017-11-08 12:50:24,0
+50482,15,1,19,245,2017-11-07 16:03:03,0
+213380,14,1,19,489,2017-11-08 06:57:54,0
+295677,19,6,29,333,2017-11-09 13:54:28,1
+18667,21,1,18,128,2017-11-09 08:49:46,0
+111182,3,1,13,442,2017-11-07 00:21:31,0
+77475,9,1,6,334,2017-11-08 23:56:36,0
+116272,13,1,14,469,2017-11-09 15:10:56,0
+5348,9,1,19,334,2017-11-07 03:21:21,0
+177427,2,1,20,469,2017-11-09 12:14:12,0
+100224,25,1,20,259,2017-11-07 08:43:02,0
+76068,2,1,22,469,2017-11-09 01:56:23,0
+180644,14,1,28,480,2017-11-07 07:58:18,0
+293463,11,1,19,360,2017-11-09 15:42:53,0
+111324,3,1,13,19,2017-11-09 01:57:50,0
+100149,15,1,17,278,2017-11-07 02:23:09,0
+79190,1,1,19,137,2017-11-09 10:18:11,0
+16506,18,1,18,107,2017-11-08 11:43:25,0
+49602,15,1,19,245,2017-11-09 06:20:38,0
+322041,3,1,9,205,2017-11-09 09:03:08,0
+151267,14,1,1,480,2017-11-09 04:25:13,0
+750,9,1,2,134,2017-11-09 13:17:28,0
+19216,12,1,13,328,2017-11-08 23:56:22,0
+109979,8,1,13,145,2017-11-07 16:39:57,0
+80121,14,1,13,489,2017-11-09 07:47:57,0
+333876,12,1,19,245,2017-11-09 04:10:28,0
+1810,2,1,16,237,2017-11-07 17:29:11,0
+32323,3,1,19,452,2017-11-09 01:40:36,0
+15518,2,1,19,469,2017-11-07 13:05:22,0
+74784,13,1,12,400,2017-11-07 13:02:49,0
+5348,18,1,13,121,2017-11-09 01:24:00,0
+37972,2,1,19,122,2017-11-08 17:05:30,0
+223848,3,1,18,280,2017-11-08 12:28:42,0
+37948,21,2,20,128,2017-11-08 16:47:19,0
+107892,9,1,19,232,2017-11-09 11:14:03,0
+95574,3,1,16,280,2017-11-07 02:55:38,0
+82663,12,1,17,178,2017-11-08 03:10:22,0
+64079,18,1,36,107,2017-11-07 08:50:40,0
+123907,18,1,19,107,2017-11-07 15:54:17,0
+155293,18,1,19,107,2017-11-08 03:24:55,0
+171793,12,1,17,178,2017-11-07 04:01:40,0
+262668,3,1,10,417,2017-11-09 03:44:03,0
+113762,18,1,19,107,2017-11-07 12:29:30,0
+171108,2,1,53,377,2017-11-08 08:56:52,0
+120094,2,1,1,477,2017-11-08 04:12:23,0
+81501,14,1,13,442,2017-11-07 13:47:19,0
+117033,9,1,13,258,2017-11-09 04:44:46,0
+106524,151,0,50,347,2017-11-08 07:55:09,0
+106354,12,1,19,178,2017-11-09 10:45:24,0
+56659,12,1,37,265,2017-11-08 10:48:05,0
+36213,2,1,37,205,2017-11-09 04:30:18,0
+78901,12,1,13,205,2017-11-09 03:39:10,0
+22300,15,1,15,386,2017-11-09 03:46:02,0
+166874,3,1,6,135,2017-11-06 16:05:37,0
+114107,24,1,13,178,2017-11-07 17:09:06,0
+41497,21,1,13,232,2017-11-09 10:35:32,0
+85118,18,1,1,121,2017-11-08 13:07:33,0
+125796,3,2,13,280,2017-11-07 07:27:27,0
+56007,18,1,17,107,2017-11-09 10:05:41,0
+5348,2,1,53,435,2017-11-08 15:16:58,0
+52801,3,1,8,137,2017-11-07 00:28:09,0
+7471,24,1,19,105,2017-11-08 08:57:55,0
+108858,3,1,19,280,2017-11-08 09:40:59,0
+372,15,1,47,379,2017-11-09 03:52:24,0
+129243,3,1,37,211,2017-11-08 23:16:28,0
+277165,2,1,19,237,2017-11-08 02:34:28,0
+107155,9,1,12,215,2017-11-08 15:12:59,0
+99523,3,1,17,280,2017-11-08 10:10:11,0
+112198,6,1,10,459,2017-11-09 04:57:23,0
+116762,3,1,13,115,2017-11-09 15:45:29,0
+96198,2,1,19,219,2017-11-09 05:16:49,0
+42313,15,1,13,245,2017-11-07 15:30:00,0
+57400,13,1,19,477,2017-11-07 00:45:49,0
+43881,3,1,19,205,2017-11-09 04:31:31,0
+5348,3,1,15,280,2017-11-08 12:31:50,0
+3964,2,1,6,477,2017-11-09 09:54:12,0
+60182,8,1,19,145,2017-11-09 10:31:31,0
+79702,3,1,3,280,2017-11-08 12:57:18,0
+111547,11,1,13,469,2017-11-07 10:44:54,0
+56889,18,1,19,439,2017-11-09 01:30:10,0
+167401,3,1,13,280,2017-11-08 01:48:16,0
+2052,15,1,19,140,2017-11-08 17:17:30,0
+202193,13,1,18,477,2017-11-09 12:51:35,0
+256779,9,1,8,215,2017-11-08 09:39:35,0
+44169,3,1,13,480,2017-11-07 07:30:26,0
+68188,3,1,19,135,2017-11-07 15:41:35,0
+16520,9,1,37,334,2017-11-09 04:41:29,0
+77368,2,1,32,237,2017-11-09 07:04:58,0
+48679,23,1,19,153,2017-11-07 03:55:40,0
+101931,12,1,3,178,2017-11-09 05:18:46,0
+120055,26,1,19,121,2017-11-09 04:47:37,0
+16290,15,1,18,245,2017-11-08 22:29:57,0
+103526,12,1,18,328,2017-11-09 15:39:18,0
+17728,3,1,19,452,2017-11-08 15:35:58,0
+107954,12,1,18,245,2017-11-08 09:33:49,0
+32290,9,1,26,134,2017-11-09 14:13:48,0
+45745,3,1,17,173,2017-11-08 18:07:21,0
+2805,12,1,19,178,2017-11-09 07:30:42,0
+176799,3,2,17,280,2017-11-07 01:41:27,0
+128236,3,1,17,280,2017-11-07 01:49:02,0
+150112,15,1,19,140,2017-11-07 15:40:08,0
+133825,3,1,22,442,2017-11-06 23:06:53,0
+58637,64,1,13,459,2017-11-08 01:34:24,0
+121472,13,1,9,477,2017-11-09 13:31:26,0
+80336,12,2,9,178,2017-11-08 14:45:38,0
+62704,14,1,8,439,2017-11-08 22:37:42,0
+79909,12,1,53,424,2017-11-08 03:20:35,0
+205572,3,1,19,480,2017-11-07 06:14:53,0
+107263,2,1,53,212,2017-11-08 13:15:21,0
+23674,12,2,19,259,2017-11-06 23:44:02,0
+51992,12,1,17,140,2017-11-06 18:05:08,0
+27288,3,1,18,280,2017-11-07 07:31:46,0
+124156,18,1,19,107,2017-11-09 11:07:54,0
+101116,3,2,13,371,2017-11-08 00:19:36,0
+58760,6,1,13,125,2017-11-07 23:53:44,0
+106812,9,1,19,466,2017-11-09 08:10:56,0
+27561,2,1,13,219,2017-11-08 02:33:30,0
+100309,2,1,19,477,2017-11-07 05:34:43,0
+90201,64,1,19,459,2017-11-07 17:14:24,0
+266308,9,1,27,442,2017-11-07 16:00:52,0
+53557,15,1,47,315,2017-11-08 07:44:00,0
+5749,6,1,6,125,2017-11-07 02:45:46,0
+175504,3,1,15,280,2017-11-08 13:00:44,0
+174153,3,1,18,115,2017-11-09 13:35:11,0
+59170,27,1,17,122,2017-11-07 14:01:56,0
+105475,15,1,13,386,2017-11-09 14:24:20,0
+47682,12,1,8,265,2017-11-07 12:46:51,0
+38653,15,1,19,245,2017-11-07 07:48:07,0
+56708,3,1,13,480,2017-11-08 15:27:01,0
+122517,1,1,28,134,2017-11-07 01:48:54,0
+125896,18,1,6,134,2017-11-07 08:45:57,0
+28374,9,1,15,134,2017-11-07 18:28:22,0
+318762,3,2,47,113,2017-11-09 05:52:26,0
+74013,9,1,13,334,2017-11-08 07:20:30,0
+107748,15,1,8,386,2017-11-08 22:37:42,0
+56007,26,1,18,266,2017-11-08 14:00:52,0
+296258,29,1,19,213,2017-11-09 06:27:06,1
+43827,2,1,13,237,2017-11-08 06:59:14,0
+82284,18,1,15,107,2017-11-08 11:21:31,0
+38994,12,1,9,328,2017-11-09 01:23:49,0
+211188,12,1,22,328,2017-11-07 08:41:32,0
+123729,11,1,19,319,2017-11-08 13:16:51,0
+209663,2,1,12,205,2017-11-07 05:23:33,0
+261520,23,1,14,153,2017-11-07 22:45:10,0
+106883,18,3032,607,107,2017-11-07 09:05:42,0
+49649,3,1,53,280,2017-11-09 02:08:46,0
+7597,28,1,19,135,2017-11-07 10:06:07,0
+52646,9,1,13,334,2017-11-08 06:24:45,0
+123591,28,1,19,135,2017-11-06 23:41:11,0
+2228,14,1,13,439,2017-11-07 15:56:16,0
+10294,9,1,8,134,2017-11-09 04:31:57,0
+19023,18,1,19,107,2017-11-09 09:35:46,0
+275217,12,1,31,265,2017-11-09 04:15:00,0
+90509,9,1,13,215,2017-11-09 15:56:32,0
+90688,3,1,18,489,2017-11-08 12:27:08,0
+62320,15,1,18,111,2017-11-08 11:53:20,0
+111639,12,1,40,340,2017-11-08 16:03:48,0
+42712,3,1,19,442,2017-11-09 11:30:32,0
+4573,9,2,17,134,2017-11-09 12:36:49,0
+89762,15,1,27,245,2017-11-07 00:14:54,0
+45083,15,1,28,245,2017-11-08 13:48:24,0
+86767,3,1,32,466,2017-11-08 02:15:10,0
+34714,8,1,19,145,2017-11-08 06:23:23,0
+163448,6,1,13,459,2017-11-08 12:46:11,0
+97750,18,1,10,134,2017-11-09 13:34:47,0
+118146,12,1,22,259,2017-11-07 10:52:21,0
+165541,3,1,13,280,2017-11-09 01:16:29,0
+119349,3,1,19,409,2017-11-08 00:25:25,0
+105606,3,1,17,280,2017-11-08 11:49:25,0
+99972,3,1,18,409,2017-11-09 11:56:51,0
+92907,15,1,6,386,2017-11-07 14:09:38,0
+96827,2,1,19,477,2017-11-09 05:59:59,0
+85172,14,1,13,439,2017-11-08 00:37:37,0
+345285,14,1,19,489,2017-11-09 06:38:24,0
+93232,22,1,19,116,2017-11-09 00:50:39,0
+139542,13,1,42,477,2017-11-07 10:04:39,0
+67535,3,1,19,137,2017-11-07 12:29:55,0
+110450,8,1,13,145,2017-11-09 00:44:55,0
+9925,15,1,13,245,2017-11-07 16:02:02,0
+178701,3,1,41,137,2017-11-07 04:51:29,0
+36741,3,1,19,424,2017-11-07 04:16:03,0
+67291,9,1,19,334,2017-11-07 07:40:29,0
+925,24,2,13,178,2017-11-08 10:33:46,0
+7466,2,1,13,477,2017-11-07 05:59:05,0
+40898,9,1,18,134,2017-11-06 16:16:02,0
+171402,3,1,18,173,2017-11-06 22:57:53,0
+361422,24,1,2,178,2017-11-08 17:38:33,0
+211188,13,1,19,477,2017-11-07 06:57:45,0
+41313,1,1,13,17,2017-11-08 15:29:20,0
+76973,12,1,19,265,2017-11-08 18:27:44,0
+106524,2,1,13,237,2017-11-08 18:49:38,0
+51506,9,1,13,127,2017-11-09 09:15:41,0
+101941,21,1,6,232,2017-11-07 22:29:45,0
+120629,3,1,19,280,2017-11-08 16:05:33,0
+8109,15,1,13,245,2017-11-08 16:31:52,0
+42384,12,1,13,409,2017-11-07 06:21:18,0
+153499,12,1,13,259,2017-11-09 01:29:17,0
+71809,2,1,19,258,2017-11-08 03:55:15,0
+308666,11,1,18,137,2017-11-09 04:28:32,0
+105475,12,1,13,160,2017-11-09 15:46:23,0
+65971,25,1,13,259,2017-11-07 17:46:12,0
+86907,14,1,13,489,2017-11-06 17:56:37,0
+93808,2,1,17,236,2017-11-08 00:18:16,0
+167693,9,1,19,258,2017-11-09 10:15:07,0
+39684,1,1,20,349,2017-11-09 14:14:44,0
+68381,15,1,22,3,2017-11-06 23:00:52,0
+81610,32,1,19,376,2017-11-09 05:52:00,0
+55149,27,1,10,153,2017-11-07 08:57:29,0
+147957,3,1,6,205,2017-11-07 12:44:21,0
+15985,3,1,13,280,2017-11-08 11:58:20,0
+14323,3,1,10,280,2017-11-07 05:43:04,0
+114543,18,1,13,134,2017-11-09 00:48:22,0
+115388,8,1,19,145,2017-11-09 13:25:34,0
+138561,1,1,13,125,2017-11-07 12:06:23,0
+4475,8,1,19,145,2017-11-08 03:10:06,0
+14374,3,1,18,489,2017-11-09 15:57:12,0
+100115,3,1,13,424,2017-11-09 01:15:58,0
+95991,3,1,19,280,2017-11-09 02:49:36,0
+137520,18,1,58,134,2017-11-09 11:43:22,0
+280467,15,1,13,245,2017-11-08 02:54:54,0
+27838,15,1,19,265,2017-11-07 06:58:40,0
+107008,6,2,9,125,2017-11-08 08:55:43,0
+74013,3,1,13,280,2017-11-08 08:57:54,0
+121845,15,1,16,3,2017-11-06 17:21:53,0
+121697,3,1,8,280,2017-11-08 01:05:43,0
+83802,3,1,19,280,2017-11-08 07:40:18,0
+24850,23,1,17,153,2017-11-08 03:59:03,0
+43984,13,1,19,477,2017-11-07 23:48:56,0
+39780,14,1,17,489,2017-11-07 05:00:57,0
+100393,9,1,19,107,2017-11-09 14:37:40,0
+138561,15,1,19,245,2017-11-07 16:42:13,0
+48708,3,1,19,280,2017-11-09 01:37:40,0
+86767,12,1,13,178,2017-11-08 16:06:50,0
+93273,9,1,19,466,2017-11-09 15:47:53,0
+107828,18,1,18,121,2017-11-07 00:16:28,0
+27781,2,1,27,317,2017-11-08 10:42:50,0
+72467,21,1,13,128,2017-11-07 09:57:02,0
+52401,46,0,38,347,2017-11-08 03:47:57,0
+159280,3,1,13,280,2017-11-07 02:44:34,0
+58877,11,1,25,173,2017-11-07 00:41:36,0
+362224,9,1,14,490,2017-11-09 12:45:54,0
+234872,9,1,13,258,2017-11-08 11:02:48,0
+37919,15,1,25,3,2017-11-08 01:10:02,0
+76683,3,1,8,280,2017-11-07 09:31:39,0
+44663,9,1,13,334,2017-11-07 10:44:16,0
+124317,2,1,41,469,2017-11-07 03:49:35,0
+50217,1,1,13,134,2017-11-07 05:55:44,0
+67606,9,1,19,234,2017-11-07 15:56:38,0
+115986,1,1,19,134,2017-11-09 13:51:39,0
+10749,18,1,17,107,2017-11-09 02:17:47,0
+5348,1,1,3,134,2017-11-08 03:14:19,0
+4324,11,1,6,319,2017-11-06 23:56:55,0
+60691,2,1,19,452,2017-11-07 00:57:15,0
+100275,11,1,22,325,2017-11-09 15:31:17,0
+9330,28,1,19,135,2017-11-07 10:33:27,0
+44488,17,1,19,280,2017-11-08 02:52:24,0
+15696,12,1,20,245,2017-11-07 12:39:57,0
+21960,12,1,18,245,2017-11-06 18:06:28,0
+92878,12,1,15,259,2017-11-08 23:15:58,0
+99075,15,1,23,245,2017-11-07 06:57:48,0
+46516,18,3543,748,107,2017-11-07 23:28:59,0
+66964,2,1,19,237,2017-11-07 11:03:41,0
+26995,3,1,17,280,2017-11-09 00:38:12,0
+119167,18,1,13,439,2017-11-07 23:24:21,0
+77523,18,1,20,121,2017-11-07 23:46:30,0
+7528,3,1,19,280,2017-11-08 13:15:56,0
+166046,12,1,20,178,2017-11-07 03:28:15,0
+189032,14,1,8,439,2017-11-09 07:07:43,0
+14764,9,1,20,466,2017-11-09 00:21:20,0
+22368,12,1,18,178,2017-11-08 11:14:11,0
+106279,15,1,17,430,2017-11-09 00:39:13,0
+19225,15,1,13,245,2017-11-08 23:09:29,0
+21245,15,1,19,245,2017-11-08 11:37:11,0
+15719,9,1,13,127,2017-11-09 13:11:39,0
+64525,6,1,32,125,2017-11-08 10:58:09,0
+7435,3,1,13,280,2017-11-09 05:57:45,0
+64430,3,1,13,115,2017-11-07 12:34:40,0
+103131,9,1,58,134,2017-11-08 22:28:42,0
+55103,18,1,40,107,2017-11-07 00:12:51,0
+82870,3,1,3,115,2017-11-08 12:22:56,0
+129191,18,1,18,107,2017-11-07 18:40:02,0
+153711,3,1,13,442,2017-11-07 08:54:45,0
+157458,3,1,22,379,2017-11-08 10:00:43,0
+103147,15,1,13,245,2017-11-08 11:12:56,0
+73516,3,1,19,153,2017-11-07 03:12:53,0
+144133,3,1,18,371,2017-11-08 01:06:46,0
+70335,6,1,36,125,2017-11-07 08:35:12,0
+5314,12,2,35,265,2017-11-09 02:56:05,0
+67439,12,1,19,326,2017-11-08 16:36:12,0
+102208,2,1,12,477,2017-11-07 05:25:44,0
+100735,27,1,19,153,2017-11-07 00:59:06,0
+53454,1,1,19,178,2017-11-07 10:33:57,0
+116696,2,1,20,364,2017-11-09 01:17:12,0
+318717,1,1,10,125,2017-11-09 14:23:35,0
+60752,12,1,19,259,2017-11-09 13:57:20,0
+132782,64,1,9,459,2017-11-08 06:21:10,0
+50374,9,1,41,334,2017-11-06 23:15:21,0
+128138,1,1,13,377,2017-11-09 03:12:19,0
+203736,14,1,28,463,2017-11-08 07:07:15,0
+89426,3,1,13,280,2017-11-09 03:59:50,0
+212476,2,1,19,469,2017-11-06 23:42:29,0
+88284,3,1,32,452,2017-11-08 16:14:57,0
+75595,15,1,15,278,2017-11-09 12:22:02,0
+24671,3,1,13,379,2017-11-09 02:57:58,0
+40342,12,1,9,259,2017-11-08 00:25:49,0
+155518,14,1,49,480,2017-11-07 23:49:48,0
+88881,14,1,13,406,2017-11-08 00:50:31,0
+256958,14,1,10,442,2017-11-08 03:37:08,0
+98682,2,1,19,377,2017-11-08 00:34:49,0
+69575,2,1,8,236,2017-11-09 15:23:16,0
+5314,13,1,13,477,2017-11-08 10:28:05,0
+40423,12,1,23,259,2017-11-09 07:03:34,0
+161799,3,1,19,205,2017-11-07 00:38:17,0
+31415,3,1,17,280,2017-11-08 23:29:30,0
+8870,13,1,17,469,2017-11-08 07:20:19,0
+81476,12,1,13,265,2017-11-09 02:56:31,0
+46677,18,1,13,121,2017-11-08 11:15:01,0
+108881,15,1,22,130,2017-11-09 14:07:17,0
+46625,3,1,27,280,2017-11-07 03:43:26,0
+4466,2,1,4,237,2017-11-07 13:59:18,0
+99150,9,1,20,466,2017-11-08 09:28:11,0
+135450,13,1,13,477,2017-11-09 07:33:30,0
+106511,12,2,19,259,2017-11-07 11:12:49,0
+41240,15,1,15,278,2017-11-08 04:29:47,0
+198535,1,1,13,153,2017-11-07 01:47:41,0
+97444,7,1,13,101,2017-11-09 09:37:17,0
+108518,12,1,13,265,2017-11-08 16:15:48,0
+55873,29,1,10,343,2017-11-07 19:09:06,0
+34005,64,1,14,459,2017-11-08 06:11:35,0
+103104,9,1,10,244,2017-11-09 05:42:04,0
+10005,9,1,10,134,2017-11-07 00:30:21,0
+39314,3,1,16,280,2017-11-07 06:52:27,0
+119531,2,1,19,122,2017-11-08 09:06:24,0
+173874,12,1,13,265,2017-11-08 13:44:53,0
+215924,12,1,10,178,2017-11-08 08:46:01,0
+29377,3,1,9,280,2017-11-08 06:20:31,0
+63267,18,1,13,134,2017-11-09 03:35:09,0
+38845,22,1,19,496,2017-11-07 02:11:41,0
+48176,27,1,13,153,2017-11-09 10:49:37,0
+124574,15,1,19,430,2017-11-09 08:45:32,0
+118457,3,1,19,480,2017-11-07 06:57:25,0
+209609,1,1,13,452,2017-11-09 15:46:49,0
+18182,3,1,17,424,2017-11-08 07:04:03,0
+157074,3,1,34,280,2017-11-08 23:10:19,0
+41425,15,1,20,265,2017-11-09 00:29:37,0
+111172,22,1,25,496,2017-11-07 00:42:27,0
+4019,18,1,41,121,2017-11-09 11:19:11,0
+106178,2,1,13,477,2017-11-09 07:09:36,0
+123609,18,1,13,107,2017-11-08 03:39:41,0
+40372,3,1,19,280,2017-11-08 12:57:26,0
+107802,12,1,3,328,2017-11-08 04:27:56,0
+97151,12,1,19,259,2017-11-07 11:42:52,0
+119369,12,1,14,424,2017-11-07 13:08:57,0
+7448,9,1,6,489,2017-11-09 12:37:36,0
+55840,18,1,19,107,2017-11-08 15:47:52,0
+64555,9,1,19,215,2017-11-09 05:05:05,0
+88358,2,1,19,435,2017-11-09 04:59:05,0
+93248,15,1,9,315,2017-11-08 01:37:32,0
+117115,14,1,13,379,2017-11-09 07:54:41,0
+20143,12,1,20,205,2017-11-08 08:46:47,0
+95766,3,1,6,417,2017-11-07 15:14:50,0
+83763,9,1,16,466,2017-11-08 15:41:11,0
+357463,1,2,100,13,2017-11-09 14:32:18,0
+20425,9,1,19,215,2017-11-07 07:56:43,1
+31783,18,3032,607,107,2017-11-07 13:54:23,0
+26995,1,2,13,124,2017-11-09 04:52:39,0
+110078,64,1,19,459,2017-11-08 01:38:43,0
+83230,26,1,19,266,2017-11-08 03:16:08,0
+11829,14,1,53,463,2017-11-07 00:23:59,0
+15080,3,1,15,137,2017-11-08 10:54:32,0
+7635,21,1,17,128,2017-11-08 14:47:15,0
+44067,12,1,13,245,2017-11-07 15:54:08,0
+106200,18,1,15,107,2017-11-07 01:14:30,0
+64516,12,1,16,328,2017-11-07 12:27:13,0
+353099,9,1,13,134,2017-11-09 00:20:26,0
+92673,12,1,30,259,2017-11-07 13:24:30,0
+83280,12,1,13,328,2017-11-09 02:47:06,0
+356760,28,1,19,135,2017-11-09 08:48:54,0
+41227,7,1,1,101,2017-11-07 11:21:33,0
+30912,27,1,19,153,2017-11-09 03:03:16,0
+77468,2,1,10,243,2017-11-09 06:51:00,0
+75355,2,1,17,205,2017-11-08 15:10:36,0
+20449,13,1,19,477,2017-11-08 00:41:40,0
+157121,12,1,17,328,2017-11-07 23:30:57,0
+96922,15,1,19,245,2017-11-08 02:15:02,0
+63947,18,1,23,107,2017-11-07 10:18:49,0
+128558,3,1,34,371,2017-11-08 00:09:30,0
+14909,20,1,25,259,2017-11-07 10:53:11,0
+145883,64,1,19,459,2017-11-07 10:26:24,0
+9314,3,1,17,130,2017-11-08 06:44:50,0
+5314,19,0,21,210,2017-11-07 16:58:07,0
+55428,2,1,13,477,2017-11-07 05:07:54,0
+102896,12,1,13,340,2017-11-08 04:11:17,0
+60348,12,1,13,145,2017-11-07 15:44:46,0
+104454,8,1,19,145,2017-11-09 04:50:13,0
+1204,2,1,3,452,2017-11-07 07:22:33,0
+134211,9,1,19,466,2017-11-09 03:13:32,0
+7352,18,1,8,134,2017-11-08 06:01:34,0
+201800,3,1,19,115,2017-11-09 12:15:57,0
+152045,26,1,19,477,2017-11-09 07:30:00,0
+31158,26,1,13,266,2017-11-08 09:17:35,0
+42132,23,1,19,153,2017-11-08 10:07:25,0
+99692,9,1,28,442,2017-11-08 16:37:17,0
+196669,21,1,47,128,2017-11-07 13:27:13,0
+193749,9,1,32,334,2017-11-09 10:56:18,0
+24876,21,2,6,128,2017-11-06 23:37:34,0
+68824,3,1,22,130,2017-11-09 11:56:33,0
+64593,3,1,19,280,2017-11-08 00:44:35,0
+88063,9,1,19,442,2017-11-09 13:25:40,0
+207967,27,1,19,122,2017-11-07 18:42:45,0
+3247,3,1,18,280,2017-11-08 00:51:11,0
+77582,9,1,41,489,2017-11-08 06:48:41,0
+44744,3,1,19,280,2017-11-07 07:45:29,0
+163461,3,1,13,409,2017-11-09 03:42:39,0
+15365,9,1,19,449,2017-11-08 12:50:56,0
+3811,15,1,13,245,2017-11-07 06:01:49,0
+248754,14,1,22,439,2017-11-09 15:49:54,0
+79620,9,1,14,232,2017-11-08 15:12:56,0
+80142,2,1,13,122,2017-11-08 15:25:00,0
+265898,26,1,35,121,2017-11-08 11:25:08,0
+107115,2,1,9,477,2017-11-09 15:40:19,0
+107768,9,1,19,215,2017-11-08 05:24:52,0
+76187,12,1,10,178,2017-11-07 13:09:52,0
+64172,3,1,17,489,2017-11-09 00:26:06,0
+116472,2,1,43,364,2017-11-09 14:03:07,0
+169973,8,1,13,145,2017-11-07 10:15:53,0
+79187,3,1,19,205,2017-11-08 07:13:00,0
+73278,15,1,19,245,2017-11-08 01:45:45,0
+158203,2,1,16,469,2017-11-06 22:49:38,0
+245382,12,1,13,497,2017-11-08 02:57:03,0
+185906,9,1,13,134,2017-11-08 00:15:42,0
+25607,3,1,6,280,2017-11-07 01:14:28,0
+82866,6,1,19,459,2017-11-08 12:05:42,0
+68271,15,1,18,245,2017-11-07 06:42:29,0
+201182,20,2,32,259,2017-11-08 16:10:29,0
+102225,9,1,13,134,2017-11-07 00:08:01,0
+11911,12,1,13,328,2017-11-07 05:26:48,0
+67606,3,1,4,280,2017-11-09 00:53:31,0
+100393,2,1,13,205,2017-11-06 23:43:37,0
+1528,2,1,13,469,2017-11-09 13:40:09,0
+69605,3,1,6,280,2017-11-07 03:14:04,0
+10328,12,1,6,105,2017-11-07 21:08:31,0
+259617,15,1,18,245,2017-11-08 14:29:09,0
+47231,18,1,13,134,2017-11-09 07:37:11,0
+4903,9,1,6,466,2017-11-07 07:36:55,0
+269756,15,1,19,480,2017-11-08 05:45:19,0
+84725,12,1,32,265,2017-11-09 08:59:27,0
+79909,12,1,47,328,2017-11-08 10:46:36,0
+184205,12,1,19,178,2017-11-07 11:48:09,0
+103715,9,1,10,489,2017-11-08 11:19:20,0
+31009,17,1,14,280,2017-11-08 02:15:14,0
+361877,2,1,19,469,2017-11-08 16:37:11,0
+70522,9,2,36,232,2017-11-09 09:00:45,0
+92852,1,1,14,134,2017-11-07 13:34:58,0
+202064,18,1,19,121,2017-11-07 17:23:50,0
+88127,2,2,13,364,2017-11-07 13:43:18,0
+209663,12,2,19,245,2017-11-08 09:50:27,0
+20327,3,1,13,19,2017-11-08 10:05:57,0
+31231,3,2,13,115,2017-11-08 08:44:25,0
+72986,14,1,19,463,2017-11-07 02:35:38,0
+55397,24,1,22,105,2017-11-07 05:15:04,0
+3641,9,1,19,258,2017-11-07 15:29:13,0
+58077,3,1,19,280,2017-11-07 02:10:51,0
+119369,2,1,35,219,2017-11-08 08:03:08,0
+95006,12,1,6,265,2017-11-07 12:15:47,0
+64393,17,1,19,134,2017-11-09 10:46:20,0
+51945,15,1,19,245,2017-11-07 00:36:33,0
+108490,24,1,19,105,2017-11-08 21:14:52,0
+53454,12,1,19,178,2017-11-07 15:01:46,0
+31784,72,1,19,101,2017-11-07 11:11:16,0
+105603,2,1,18,205,2017-11-09 11:17:47,0
+83388,12,1,10,178,2017-11-09 06:55:16,0
+167607,12,1,16,265,2017-11-06 16:25:11,0
+75595,3,1,10,489,2017-11-09 12:47:57,0
+72387,12,1,22,178,2017-11-06 22:33:18,0
+5348,3,1,19,452,2017-11-08 20:52:42,0
+28476,27,1,14,153,2017-11-09 03:31:59,0
+95473,18,1,20,107,2017-11-09 10:23:24,0
+39248,9,1,1,134,2017-11-09 14:31:37,0
+95006,21,1,3,232,2017-11-09 01:18:57,0
+95766,15,1,34,245,2017-11-07 06:44:20,0
+67040,29,1,17,347,2017-11-09 08:19:46,0
+97773,9,1,17,232,2017-11-09 06:18:53,0
+26241,2,1,12,219,2017-11-09 14:19:54,0
+82100,3,1,32,280,2017-11-08 09:14:40,0
+56233,12,1,17,205,2017-11-08 07:32:30,0
+216309,2,1,19,237,2017-11-08 07:52:21,0
+138714,1,1,1,452,2017-11-07 05:05:39,0
+77400,18,1,13,121,2017-11-08 10:58:25,0
+114728,3,1,19,489,2017-11-08 08:59:02,0
+114276,12,1,3,328,2017-11-07 05:45:32,0
+62315,12,1,13,212,2017-11-07 04:50:45,0
+44410,3,1,27,489,2017-11-08 02:14:16,0
+109703,23,1,15,153,2017-11-08 22:30:52,0
+91006,15,1,25,245,2017-11-07 18:30:03,0
+140525,2,1,27,237,2017-11-07 01:32:32,0
+31387,3,1,8,280,2017-11-09 02:56:41,0
+75520,8,2,53,259,2017-11-08 15:06:05,0
+166884,14,1,10,439,2017-11-07 11:37:29,0
+101395,3,1,19,280,2017-11-09 01:13:05,0
+70621,3,1,13,130,2017-11-08 03:19:21,0
+42784,3,1,13,115,2017-11-07 23:09:07,0
+3363,2,1,41,122,2017-11-09 09:28:39,0
+145896,9,1,17,445,2017-11-07 13:46:54,0
+149030,20,1,9,259,2017-11-09 06:05:35,0
+2850,12,1,31,259,2017-11-08 02:22:05,0
+42159,3,1,20,115,2017-11-08 23:14:10,0
+9057,12,1,19,140,2017-11-09 02:22:06,0
+17077,45,1,16,411,2017-11-07 01:01:12,0
+17149,17,2,9,280,2017-11-08 13:35:07,0
+30203,12,1,22,328,2017-11-06 23:25:16,0
+18695,13,1,14,400,2017-11-08 03:42:28,0
+50251,9,1,19,445,2017-11-07 12:05:29,0
+153926,1,1,47,153,2017-11-08 04:12:09,0
+201483,3,1,13,280,2017-11-07 02:31:25,0
+28722,2,1,8,236,2017-11-08 04:20:12,0
+33443,18,1,47,121,2017-11-08 08:33:26,0
+116226,18,1,19,439,2017-11-09 07:47:46,0
+44527,2,1,9,477,2017-11-07 16:08:16,0
+256336,3,1,13,280,2017-11-08 15:20:51,0
+6662,9,1,19,134,2017-11-07 00:29:21,0
+95527,9,1,20,442,2017-11-09 01:00:18,0
+24932,1,1,13,13,2017-11-08 02:45:46,0
+80935,3,1,3,480,2017-11-08 10:49:22,0
+113602,26,1,3,121,2017-11-09 12:09:56,0
+23203,15,1,25,412,2017-11-09 02:53:33,0
+104437,9,1,19,215,2017-11-09 14:52:38,0
+3133,3,1,37,489,2017-11-07 07:03:15,0
+278482,13,1,8,477,2017-11-08 09:35:38,0
+125608,9,1,8,215,2017-11-07 02:34:10,0
+41993,9,1,13,127,2017-11-09 13:20:25,0
+171690,7,1,49,101,2017-11-09 15:27:39,0
+108816,9,1,13,134,2017-11-07 19:05:34,0
+102264,21,1,19,232,2017-11-08 11:26:23,0
+44498,9,1,17,334,2017-11-07 06:56:44,0
+112911,3,1,19,424,2017-11-09 11:16:46,0
+62083,15,1,13,245,2017-11-07 18:10:06,0
+119289,18,1,18,107,2017-11-09 09:59:20,0
+43550,3,1,3,452,2017-11-08 08:32:30,0
+64054,15,1,13,265,2017-11-07 22:56:04,0
+105148,12,1,14,19,2017-11-09 12:38:10,0
+87764,2,1,19,477,2017-11-09 08:09:32,0
+2770,3,1,13,137,2017-11-08 19:17:29,0
+14044,15,1,6,265,2017-11-08 13:41:08,0
+25705,18,1,10,107,2017-11-08 00:33:13,0
+125915,3,1,13,280,2017-11-08 00:22:41,0
+107643,3,1,15,409,2017-11-08 11:10:59,0
+50512,12,1,17,145,2017-11-07 03:11:56,0
+162963,1,1,37,377,2017-11-06 23:31:56,0
+99600,3,1,19,489,2017-11-09 02:07:08,0
+12087,21,1,17,128,2017-11-07 02:08:23,0
+52564,3,1,13,137,2017-11-06 23:24:29,0
+199611,12,1,17,178,2017-11-08 09:47:20,0
+17917,14,1,17,401,2017-11-07 01:01:15,0
+64049,3,1,13,173,2017-11-07 02:46:17,0
+67008,12,1,23,178,2017-11-07 04:23:10,0
+157612,1,1,3,134,2017-11-07 14:37:55,0
+138557,14,1,16,463,2017-11-09 00:07:44,0
+58529,25,1,32,259,2017-11-08 02:13:07,0
+57828,8,1,13,145,2017-11-09 08:00:57,0
+106332,64,1,19,459,2017-11-09 01:43:34,0
+159355,3,1,13,280,2017-11-07 23:57:32,0
+45386,2,1,13,205,2017-11-08 21:23:15,0
+38998,14,1,19,123,2017-11-07 15:01:21,0
+87879,15,1,25,111,2017-11-06 16:35:12,0
+90586,3,1,19,137,2017-11-09 07:46:42,0
+116499,23,1,19,153,2017-11-07 04:19:46,0
+77107,14,1,42,401,2017-11-06 20:07:37,0
+5912,9,1,18,134,2017-11-07 00:12:50,0
+220958,15,1,13,386,2017-11-07 23:37:56,0
+84808,3,1,4,280,2017-11-08 18:13:54,0
+5178,2,1,19,219,2017-11-08 13:24:45,0
+40022,3,1,42,130,2017-11-08 14:37:57,0
+91821,3,1,6,280,2017-11-07 03:13:49,0
+62906,18,1,27,107,2017-11-07 14:43:33,0
+89447,12,1,19,245,2017-11-08 14:57:27,0
+44494,3,1,1,153,2017-11-07 17:04:58,0
+152743,12,1,13,178,2017-11-07 03:05:17,0
+1661,15,1,19,265,2017-11-09 06:32:26,0
+88881,2,1,19,237,2017-11-07 04:39:35,0
+24943,23,1,13,153,2017-11-07 05:42:00,0
+51784,3,1,15,452,2017-11-09 06:25:50,0
+115938,14,1,18,134,2017-11-09 10:05:24,0
+148224,18,1,8,134,2017-11-07 12:31:21,0
+36150,2,1,8,205,2017-11-08 03:48:00,0
+8179,15,1,10,274,2017-11-07 17:00:45,0
+75895,9,1,13,232,2017-11-07 14:22:57,0
+141511,2,1,17,243,2017-11-07 11:55:05,0
+100401,3,1,17,211,2017-11-09 15:17:18,0
+7064,3,2,40,137,2017-11-09 09:32:47,0
+16464,15,1,13,245,2017-11-07 10:15:00,0
+12491,12,1,13,140,2017-11-09 13:20:55,0
+150129,12,1,13,245,2017-11-07 09:27:03,0
+41753,12,1,26,265,2017-11-08 09:29:10,0
+32432,18,1,13,121,2017-11-09 07:02:18,0
+69173,12,1,4,105,2017-11-08 02:23:20,0
+91040,1,1,19,134,2017-11-07 23:26:23,0
+151097,11,1,27,319,2017-11-08 12:07:57,0
+197009,12,1,10,328,2017-11-08 20:25:29,0
+139235,9,1,20,334,2017-11-07 05:03:05,0
+2965,3,1,16,280,2017-11-09 05:52:43,0
+1923,36,1,8,373,2017-11-07 23:28:59,0
+30903,3,1,19,280,2017-11-09 00:24:16,0
+150112,3,1,13,280,2017-11-09 01:06:25,0
+197271,12,2,22,140,2017-11-08 07:19:20,0
+13831,15,1,18,153,2017-11-07 13:50:23,0
+60556,18,1,41,107,2017-11-09 14:41:02,0
+43222,3,1,17,424,2017-11-09 09:48:21,0
+248138,15,1,19,130,2017-11-08 08:32:10,0
+101074,2,1,19,205,2017-11-09 14:12:33,0
+72538,15,1,13,245,2017-11-08 01:30:09,0
+106770,3,1,8,280,2017-11-09 05:36:44,0
+145963,3,1,22,442,2017-11-08 05:38:07,0
+8482,14,1,25,467,2017-11-08 14:29:54,0
+35757,15,1,13,245,2017-11-08 04:04:50,0
+95692,6,1,1,459,2017-11-09 08:45:41,0
+117722,12,1,19,259,2017-11-08 22:10:47,0
+32434,14,1,13,208,2017-11-07 01:46:45,0
+105140,15,1,18,245,2017-11-07 14:06:37,0
+77209,14,1,19,442,2017-11-08 01:06:12,0
+18774,2,1,10,237,2017-11-08 15:04:28,0
+60698,9,1,13,232,2017-11-08 14:46:39,0
+67611,12,2,14,326,2017-11-08 14:56:27,0
+257500,14,1,34,349,2017-11-08 07:28:31,0
+90509,2,1,22,469,2017-11-06 19:42:43,0
+65362,1,1,13,137,2017-11-09 04:37:16,0
+87865,3,1,13,371,2017-11-07 00:45:06,0
+28084,9,1,13,466,2017-11-08 15:50:02,0
+86767,9,1,14,466,2017-11-08 13:27:36,0
+123994,12,1,13,265,2017-11-07 09:51:11,0
+105485,3,1,19,402,2017-11-09 10:22:01,0
+219838,3,1,13,205,2017-11-09 04:27:36,0
+105239,1,1,18,153,2017-11-07 14:23:11,0
+34284,18,1,13,134,2017-11-07 06:56:44,0
+162805,3,1,19,442,2017-11-07 10:16:08,0
+90855,3,1,13,130,2017-11-09 15:42:03,0
+32745,3,1,15,442,2017-11-09 02:12:19,0
+53479,9,2,18,215,2017-11-07 12:58:33,0
+76333,3,1,3,424,2017-11-08 03:04:16,0
+13073,2,1,19,469,2017-11-07 14:36:25,0
+20242,1,1,12,134,2017-11-09 00:35:16,0
+95029,2,1,13,122,2017-11-07 19:48:13,0
+118284,3,1,17,409,2017-11-09 15:53:58,0
+95766,3,1,14,280,2017-11-08 12:32:23,0
+2348,1,1,19,135,2017-11-09 04:27:18,0
+8536,3,1,11,379,2017-11-08 01:10:07,0
+111324,12,1,18,178,2017-11-08 08:47:45,0
+333429,9,2,13,232,2017-11-09 14:35:25,0
+66745,2,1,13,219,2017-11-08 15:22:06,0
+31823,2,1,19,219,2017-11-09 05:53:55,0
+17477,3,1,22,489,2017-11-08 17:42:30,0
+62892,18,1,25,439,2017-11-08 00:03:42,0
+31403,9,1,19,244,2017-11-07 11:52:12,0
+131583,2,1,23,122,2017-11-07 10:41:36,0
+9783,3,1,15,280,2017-11-07 12:10:13,0
+105264,12,1,13,140,2017-11-08 09:22:50,0
+73671,2,1,37,205,2017-11-08 19:36:53,0
+97188,18,1,10,107,2017-11-09 15:32:52,0
+5348,18,1,11,107,2017-11-08 14:27:08,0
+9236,1,1,19,137,2017-11-07 14:25:30,0
+44222,15,1,19,245,2017-11-09 07:07:44,0
+41106,2,1,18,236,2017-11-08 05:14:00,0
+70280,15,1,20,245,2017-11-08 17:46:47,0
+90655,50,0,21,213,2017-11-09 08:52:32,0
+63523,12,1,10,140,2017-11-07 05:46:27,0
+103047,26,1,4,477,2017-11-08 13:13:32,0
+54125,2,1,25,401,2017-11-08 06:52:04,0
+188049,12,1,19,178,2017-11-08 06:23:54,0
+38385,1,1,17,134,2017-11-09 08:12:17,0
+41172,15,1,19,3,2017-11-09 13:02:29,0
+43793,2,1,41,435,2017-11-09 10:28:29,0
+60909,2,1,15,236,2017-11-08 13:26:25,0
+4784,25,1,14,259,2017-11-07 11:50:17,0
+16972,3,1,6,489,2017-11-07 14:34:17,0
+76199,3,1,10,280,2017-11-08 08:41:48,0
+27646,3,1,22,280,2017-11-07 04:10:53,0
+15683,12,1,19,178,2017-11-06 16:59:57,0
+2564,3,1,47,130,2017-11-08 04:53:43,0
+48240,7,1,17,101,2017-11-09 12:48:08,0
+113682,9,1,19,334,2017-11-09 09:30:58,0
+98474,14,1,43,379,2017-11-07 11:56:52,0
+121187,3,1,18,280,2017-11-08 04:48:30,0
+109145,24,1,19,178,2017-11-09 11:54:32,0
+59456,15,1,13,245,2017-11-07 05:41:24,0
+43750,14,1,41,489,2017-11-09 09:44:06,0
+63790,9,2,13,134,2017-11-07 13:30:33,0
+26454,2,1,17,469,2017-11-07 02:57:48,0
+2358,3,1,19,371,2017-11-07 00:30:05,0
+74725,12,1,17,178,2017-11-08 10:53:20,0
+103332,2,1,13,435,2017-11-09 05:02:19,0
+97522,11,1,6,319,2017-11-08 13:01:54,0
+163311,12,1,6,245,2017-11-07 15:08:38,0
+283541,14,1,20,379,2017-11-07 16:58:35,0
+105437,15,1,19,245,2017-11-07 03:36:19,0
+53479,2,1,47,469,2017-11-07 13:50:23,0
+69088,9,1,22,334,2017-11-08 01:00:49,0
+80736,15,1,17,386,2017-11-07 14:41:46,0
+131588,3,1,9,409,2017-11-07 07:08:55,0
+71449,3,1,22,280,2017-11-08 14:00:49,0
+38076,35,1,13,21,2017-11-07 01:22:23,1
+236339,15,1,17,245,2017-11-08 05:52:12,0
+161370,11,1,10,122,2017-11-06 23:38:35,0
+118847,14,1,17,379,2017-11-07 07:35:32,0
+55000,3,1,13,280,2017-11-07 02:55:30,0
+9867,13,1,19,477,2017-11-07 12:25:32,0
+302497,3,1,13,137,2017-11-09 03:18:00,0
+168878,3,1,18,173,2017-11-06 21:10:25,0
+132716,9,1,8,489,2017-11-09 11:49:03,0
+14416,12,1,13,265,2017-11-07 19:22:09,0
+47669,2,1,10,469,2017-11-07 00:42:32,0
+168931,2,1,19,435,2017-11-07 02:44:18,0
+115746,23,1,19,153,2017-11-08 22:51:29,0
+10053,2,1,6,477,2017-11-07 04:51:12,0
+112806,20,2,19,259,2017-11-07 17:28:32,0
+64619,15,1,3,245,2017-11-08 14:03:53,0
+195073,3,1,18,280,2017-11-09 07:09:34,0
+14872,9,2,17,466,2017-11-09 14:52:38,0
+98471,2,1,9,122,2017-11-07 22:03:37,0
+6595,3,1,14,489,2017-11-08 14:45:46,0
+112350,3,1,3,137,2017-11-08 04:49:42,0
+63725,18,1,19,439,2017-11-09 12:23:00,0
+48518,3,1,6,489,2017-11-09 05:54:01,0
+39546,3,1,25,409,2017-11-08 15:35:13,0
+64567,14,1,19,463,2017-11-06 22:45:04,0
+70522,2,1,8,237,2017-11-08 12:32:09,0
+191797,18,3032,607,107,2017-11-06 21:23:33,0
+211289,18,1,16,107,2017-11-07 14:42:10,0
+95651,3,1,18,489,2017-11-07 00:37:19,0
+78801,2,1,22,236,2017-11-09 01:24:39,0
+109938,10,1,19,377,2017-11-07 15:31:14,0
+26870,9,1,6,127,2017-11-09 02:30:00,0
+283408,25,1,23,259,2017-11-08 12:29:28,0
+79787,12,2,13,178,2017-11-07 01:53:14,0
+75595,2,1,19,205,2017-11-08 00:12:51,0
+88344,1,1,37,115,2017-11-08 03:22:41,0
+73144,1,1,9,134,2017-11-09 05:12:14,0
+41662,14,1,22,480,2017-11-08 11:24:27,0
+91104,3,1,19,135,2017-11-07 14:40:36,0
+211623,15,1,13,130,2017-11-07 23:30:01,0
+98738,9,1,17,334,2017-11-06 23:49:11,0
+5348,9,1,12,442,2017-11-07 04:49:10,0
+75959,13,1,25,477,2017-11-08 12:50:43,0
+5348,11,1,27,481,2017-11-08 12:59:12,0
+65028,2,1,11,122,2017-11-09 02:59:56,0
+105249,18,1,37,439,2017-11-07 16:34:22,0
+332261,12,1,6,178,2017-11-09 02:18:51,0
+89562,3,1,25,280,2017-11-08 14:26:52,0
+27884,3,1,8,280,2017-11-08 14:55:15,0
+225683,3,1,27,280,2017-11-08 02:41:36,0
+102435,24,1,8,105,2017-11-06 17:12:14,0
+97600,8,1,17,140,2017-11-07 08:52:06,0
+67441,12,1,19,497,2017-11-07 04:27:40,0
+267267,1,1,19,137,2017-11-08 04:50:14,0
+5314,9,1,13,232,2017-11-09 13:42:47,0
+6568,15,1,11,265,2017-11-07 05:51:11,0
+1810,7,1,13,101,2017-11-09 06:52:00,0
+106972,1,1,53,137,2017-11-07 13:38:20,0
+213595,9,1,20,489,2017-11-09 10:00:00,0
+100186,9,1,34,215,2017-11-09 04:12:38,0
+80740,9,1,22,489,2017-11-08 10:29:03,0
+17747,11,1,22,319,2017-11-09 02:19:43,0
+42038,15,1,19,386,2017-11-09 08:48:30,0
+4727,15,1,19,140,2017-11-09 01:38:11,0
+119193,1,1,19,377,2017-11-07 05:04:53,0
+138561,18,1,11,121,2017-11-07 11:32:22,0
+11281,26,1,6,121,2017-11-08 07:32:23,0
+221083,13,1,19,477,2017-11-07 23:59:17,0
+35747,3,1,13,135,2017-11-07 14:11:07,0
+17144,3,1,27,452,2017-11-09 02:49:27,0
+105435,14,1,19,463,2017-11-07 02:47:49,0
+81402,18,3543,748,107,2017-11-07 22:34:56,0
+93486,10,1,13,317,2017-11-07 15:43:27,0
+88479,2,1,13,243,2017-11-07 05:13:13,0
+105264,12,1,3,145,2017-11-07 18:14:11,0
+49193,15,1,13,245,2017-11-09 05:05:59,0
+27318,14,1,18,401,2017-11-09 06:12:50,0
+109693,2,1,53,212,2017-11-08 00:31:26,0
+6950,14,1,19,463,2017-11-07 13:29:33,0
+57559,14,1,22,467,2017-11-07 16:00:56,0
+48529,15,1,13,130,2017-11-08 04:48:27,0
+37118,27,1,13,122,2017-11-07 10:16:25,0
+17865,14,2,19,134,2017-11-07 22:39:22,0
+130629,11,1,22,319,2017-11-08 04:01:29,0
+75595,24,1,19,105,2017-11-07 10:50:06,0
+26406,14,1,18,439,2017-11-07 04:24:49,0
+25980,12,1,19,245,2017-11-08 01:40:01,0
+49600,12,1,13,265,2017-11-07 15:13:07,0
+2788,15,1,13,386,2017-11-09 03:20:53,0
+45466,15,1,19,245,2017-11-06 17:20:27,0
+105519,2,1,19,205,2017-11-06 19:11:17,0
+39958,9,1,13,391,2017-11-07 10:57:14,0
+79213,15,1,19,245,2017-11-09 05:37:40,0
+304646,12,1,19,265,2017-11-09 00:14:03,0
+161546,24,1,13,178,2017-11-07 04:25:34,0
+17551,9,1,19,215,2017-11-08 15:43:35,0
+48671,3,2,9,280,2017-11-08 01:08:50,0
+20215,3,1,19,379,2017-11-09 14:31:51,0
+189286,12,1,6,178,2017-11-07 15:08:47,0
+95766,2,1,13,237,2017-11-07 05:17:06,0
+109743,12,2,19,259,2017-11-08 03:59:20,0
+106350,17,1,13,134,2017-11-08 00:38:58,0
+183647,15,1,19,130,2017-11-09 08:58:51,0
+5348,3,1,6,115,2017-11-08 01:38:34,0
+80634,12,1,17,340,2017-11-09 11:07:30,0
+29290,2,1,19,452,2017-11-09 12:30:04,0
+225617,15,1,35,245,2017-11-07 22:41:58,0
+61132,19,0,0,213,2017-11-06 16:43:37,1
+37080,13,1,46,477,2017-11-08 16:58:31,0
+3382,3,1,28,115,2017-11-07 23:45:57,0
+22507,2,1,3,122,2017-11-09 00:34:01,0
+69873,2,1,35,219,2017-11-09 05:48:28,0
+48282,9,2,7,466,2017-11-08 20:26:15,0
+32472,9,1,35,442,2017-11-09 08:48:04,0
+109425,2,1,47,469,2017-11-07 12:48:03,0
+123100,12,1,19,328,2017-11-09 08:04:51,0
+53454,3,1,19,182,2017-11-08 03:00:55,0
+78473,25,1,13,259,2017-11-07 03:53:12,0
+121979,19,0,21,213,2017-11-08 06:47:13,0
+209611,1,1,16,150,2017-11-07 11:57:19,0
+177171,9,1,25,466,2017-11-09 15:09:36,0
+322335,7,1,1,101,2017-11-09 14:02:27,0
+196513,21,1,15,128,2017-11-08 11:31:58,0
+167733,12,1,16,245,2017-11-08 02:50:31,0
+203156,20,1,13,478,2017-11-09 09:53:42,0
+13058,1,1,19,377,2017-11-09 12:06:01,0
+114656,3,1,13,280,2017-11-08 03:25:43,0
+95509,2,1,14,122,2017-11-09 02:03:17,0
+263949,6,2,6,459,2017-11-08 02:09:17,0
+49602,151,0,24,347,2017-11-07 13:24:00,0
+56007,14,1,13,118,2017-11-09 11:57:47,0
+103036,15,1,19,3,2017-11-07 12:34:57,0
+87891,12,1,13,265,2017-11-07 07:36:23,0
+3994,3,1,9,489,2017-11-09 01:22:39,0
+122495,21,1,26,128,2017-11-07 07:27:53,0
+43881,2,1,44,205,2017-11-09 14:04:53,0
+106598,17,1,9,280,2017-11-07 01:04:45,0
+113252,3,1,13,280,2017-11-09 06:02:32,0
+2348,62,3866,866,21,2017-11-09 07:57:34,0
+90891,9,1,3,127,2017-11-09 12:09:24,0
+8492,15,1,22,245,2017-11-09 04:34:12,0
+78223,3,1,18,115,2017-11-09 07:03:37,0
+67658,3,1,13,280,2017-11-09 02:37:13,0
+55339,103,1,17,21,2017-11-08 23:22:36,0
+304120,9,1,8,490,2017-11-09 05:17:36,0
+105587,3,1,17,280,2017-11-09 02:10:05,0
+221787,13,1,13,477,2017-11-09 05:10:38,0
+213877,98,1,18,224,2017-11-07 18:57:19,1
+107622,2,1,19,237,2017-11-09 01:03:54,0
+84587,2,1,19,122,2017-11-08 11:42:23,0
+48552,15,1,17,430,2017-11-07 02:11:02,0
+130629,2,1,13,317,2017-11-08 10:48:01,0
+304608,9,1,25,244,2017-11-09 04:27:07,0
+240151,15,1,1,480,2017-11-08 11:10:47,0
+72831,2,1,18,401,2017-11-09 13:27:42,0
+34680,2,1,3,122,2017-11-08 15:42:00,0
+109735,18,1,41,107,2017-11-07 20:28:17,0
+112231,3,1,8,137,2017-11-07 22:21:07,0
+60207,8,1,3,145,2017-11-09 04:55:15,0
+123681,14,1,13,208,2017-11-07 00:33:32,0
+74284,2,1,13,236,2017-11-09 04:04:12,0
+95991,8,1,13,145,2017-11-09 09:22:57,0
+66172,3,1,19,137,2017-11-08 22:43:17,0
+71436,3,1,25,137,2017-11-07 17:30:26,0
+90991,14,1,19,439,2017-11-08 09:54:13,0
+8330,11,1,3,481,2017-11-07 11:48:00,0
+49602,18,1,19,134,2017-11-08 13:25:11,0
+119760,12,1,22,178,2017-11-06 16:28:58,0
+102025,9,2,17,134,2017-11-08 22:37:09,0
+102897,9,1,9,232,2017-11-09 12:47:29,0
+292121,6,1,1,459,2017-11-08 20:06:58,0
+90654,9,1,13,232,2017-11-07 04:58:07,0
+2600,8,1,17,145,2017-11-07 13:08:52,0
+61395,12,1,41,265,2017-11-08 04:17:29,0
+118625,14,1,18,134,2017-11-08 15:31:21,0
+117898,29,1,13,347,2017-11-09 02:51:19,0
+103946,21,1,17,232,2017-11-09 13:54:57,0
+164896,2,1,13,477,2017-11-07 03:13:36,0
+33008,15,1,19,245,2017-11-08 14:15:45,0
+20422,9,1,19,466,2017-11-09 15:21:59,0
+54125,3,1,18,135,2017-11-07 10:26:21,0
+98236,14,1,19,134,2017-11-09 07:00:06,0
+67450,2,1,13,212,2017-11-08 22:53:42,0
+73516,12,1,19,326,2017-11-08 22:15:55,0
+110000,2,1,19,477,2017-11-09 07:05:16,0
+88719,23,1,25,153,2017-11-09 10:40:28,0
+39755,3,1,8,130,2017-11-06 23:27:14,0
+32661,12,1,19,178,2017-11-09 05:20:57,0
+35984,2,1,13,452,2017-11-08 04:25:59,0
+93620,18,1,15,439,2017-11-06 19:09:14,0
+48282,15,1,18,130,2017-11-09 11:20:25,0
+102994,3,1,16,280,2017-11-08 13:24:51,0
+7749,15,1,13,265,2017-11-08 15:04:52,0
+66831,2,1,19,219,2017-11-07 14:59:36,0
+25041,12,1,18,259,2017-11-07 04:19:49,0
+103052,3,2,13,137,2017-11-07 13:17:14,0
+129931,3,1,13,130,2017-11-07 23:25:39,0
+164682,82,1,13,21,2017-11-08 06:06:21,0
+115525,8,1,14,145,2017-11-07 09:16:18,0
+70975,2,1,8,435,2017-11-09 06:52:37,0
+88312,23,1,35,153,2017-11-09 03:22:46,0
+123788,12,1,19,409,2017-11-06 20:23:47,0
+210555,14,1,14,480,2017-11-09 08:52:59,0
+25352,15,1,13,315,2017-11-08 08:19:42,0
+4989,2,1,42,477,2017-11-07 05:44:22,0
+90731,18,1,19,439,2017-11-07 15:59:56,0
+106507,15,1,19,430,2017-11-07 00:15:29,0
+169475,14,1,16,349,2017-11-07 13:38:00,0
+596,15,1,19,265,2017-11-08 18:28:25,0
+35905,18,1,36,107,2017-11-09 01:31:03,0
+45327,2,1,10,205,2017-11-08 07:21:17,0
+84703,28,1,37,135,2017-11-09 12:50:25,0
+5314,14,1,6,463,2017-11-07 14:16:30,0
+95434,7,1,19,101,2017-11-09 05:57:20,0
+186808,27,1,18,122,2017-11-08 23:24:33,0
+93230,15,1,19,315,2017-11-08 23:27:53,0
+123701,2,1,13,237,2017-11-09 05:45:24,0
+59125,2,2,19,205,2017-11-07 15:45:06,0
+20215,6,1,20,459,2017-11-06 23:10:55,0
+71286,3,1,23,280,2017-11-07 05:10:52,0
+36720,3,1,13,280,2017-11-08 02:21:35,0
+113812,2,1,19,219,2017-11-09 11:42:40,0
+109035,12,1,37,409,2017-11-09 05:44:45,0
+36691,10,1,22,377,2017-11-09 11:18:35,0
+106279,3,1,19,280,2017-11-09 07:14:39,0
+42956,21,1,10,128,2017-11-09 04:10:58,0
+116708,7,1,13,101,2017-11-07 10:27:44,0
+158006,2,1,22,477,2017-11-06 17:36:13,0
+115292,14,1,22,379,2017-11-08 02:35:27,0
+16032,2,1,13,435,2017-11-08 01:43:54,0
+64265,3,1,13,280,2017-11-08 08:54:27,0
+29540,9,1,13,258,2017-11-08 10:37:19,0
+44582,37,1,17,21,2017-11-07 10:23:30,0
+79902,3,1,19,205,2017-11-09 04:00:57,0
+67441,2,1,19,258,2017-11-07 01:03:22,0
+112086,3,1,17,280,2017-11-08 12:54:05,0
+183696,9,1,10,215,2017-11-07 05:33:06,0
+108913,14,1,19,379,2017-11-08 12:48:22,0
+94958,18,1,13,439,2017-11-09 10:40:28,0
+94448,12,1,19,245,2017-11-08 13:48:55,0
+46656,18,1,13,107,2017-11-07 07:30:51,0
+64341,2,1,22,237,2017-11-09 11:27:54,0
+15771,26,1,13,266,2017-11-07 01:20:28,0
+48043,15,1,19,315,2017-11-07 07:17:29,0
+56257,12,1,18,265,2017-11-08 04:05:03,0
+123878,10,1,17,317,2017-11-08 10:03:18,0
+44757,2,1,16,237,2017-11-08 03:00:42,0
+76788,2,1,20,435,2017-11-09 02:07:58,0
+18747,14,1,1,134,2017-11-07 03:21:13,0
+36557,20,1,19,259,2017-11-07 10:21:41,0
+69484,14,1,19,379,2017-11-07 11:11:16,0
+108925,3,1,19,280,2017-11-09 02:57:27,0
+107586,15,1,19,140,2017-11-07 00:03:42,0
+38852,13,1,15,477,2017-11-07 09:00:23,0
+18365,3,1,25,130,2017-11-09 03:13:27,0
+51428,9,1,18,334,2017-11-07 14:54:32,0
+5348,3,1,12,280,2017-11-08 14:12:22,0
+102135,18,1,19,121,2017-11-07 08:34:28,0
+95294,3,1,15,280,2017-11-07 13:38:10,0
+102174,3,1,19,280,2017-11-08 16:38:29,0
+177236,10,1,25,377,2017-11-08 14:28:06,0
+84896,20,1,19,478,2017-11-07 12:25:38,0
+119377,15,1,41,379,2017-11-09 14:59:54,0
+68988,2,1,13,212,2017-11-08 14:52:47,0
+80743,4,1,19,101,2017-11-09 08:47:08,0
+133461,2,1,19,435,2017-11-07 03:06:19,0
+28113,12,1,17,178,2017-11-09 12:58:11,0
+148745,12,1,25,140,2017-11-07 11:00:31,0
+111251,15,1,49,245,2017-11-07 14:22:30,0
+65589,8,1,18,145,2017-11-07 16:30:10,0
+111324,15,1,18,245,2017-11-08 06:56:49,0
+55874,6,1,13,459,2017-11-07 04:18:21,0
+34253,2,1,19,452,2017-11-07 05:35:02,0
+85028,18,1,22,121,2017-11-09 04:29:12,0
+1142,2,1,17,477,2017-11-09 09:06:21,0
+85192,24,2,19,105,2017-11-07 22:57:09,0
+25009,15,2,17,245,2017-11-09 03:26:28,0
+250546,23,1,9,153,2017-11-08 15:14:14,0
+125190,2,1,16,477,2017-11-08 07:31:27,0
+54984,15,1,41,278,2017-11-08 13:34:20,0
+12340,1,1,13,124,2017-11-07 09:03:22,0
+48282,9,1,15,215,2017-11-08 14:27:27,0
+79528,12,1,13,259,2017-11-08 11:32:26,0
+126168,9,1,22,334,2017-11-08 00:20:44,0
+101265,17,1,13,280,2017-11-07 00:56:53,0
+175111,3,1,18,280,2017-11-08 04:26:59,0
+49602,18,1,19,107,2017-11-07 14:06:28,0
+53058,9,1,19,134,2017-11-07 16:07:51,0
+20362,21,2,9,232,2017-11-08 14:30:50,0
+117944,15,1,13,130,2017-11-09 03:06:59,0
+228795,21,1,13,232,2017-11-09 13:19:13,0
+54524,28,1,15,135,2017-11-08 06:30:59,0
+85190,1,1,19,118,2017-11-06 23:51:11,0
+48255,3,1,13,135,2017-11-08 05:15:02,0
+208937,9,1,19,134,2017-11-07 21:44:43,0
+128799,110,3866,866,347,2017-11-09 09:42:16,0
+51347,12,1,32,245,2017-11-07 15:55:35,0
+118512,9,1,20,442,2017-11-09 11:45:33,0
+7255,9,1,19,489,2017-11-08 14:51:23,0
+22593,14,1,19,134,2017-11-08 09:52:57,0
+98424,9,1,1,107,2017-11-08 14:48:51,0
+38482,15,1,13,386,2017-11-07 08:01:46,0
+78420,15,1,25,130,2017-11-07 14:38:54,0
+98971,32,1,23,376,2017-11-09 00:24:41,0
+22237,7,1,22,101,2017-11-07 10:23:26,0
+64673,2,1,19,237,2017-11-08 05:07:07,0
+125042,22,1,6,116,2017-11-08 09:32:12,0
+53454,6,1,25,459,2017-11-07 05:52:33,0
+88281,18,1,19,107,2017-11-08 11:38:24,0
+119369,20,1,3,478,2017-11-09 11:35:29,0
+47377,6,1,17,459,2017-11-09 07:42:27,0
+2095,14,1,19,463,2017-11-08 07:33:51,0
+29085,15,1,13,480,2017-11-09 15:22:34,0
+132062,2,1,35,237,2017-11-09 03:30:00,0
+110938,2,1,13,237,2017-11-06 23:27:44,0
+116285,9,1,19,445,2017-11-08 02:32:10,0
+115092,18,1,19,107,2017-11-08 06:14:21,0
+95779,3,1,18,317,2017-11-08 07:31:19,0
+113854,3,1,18,280,2017-11-09 02:13:01,0
+94059,3,1,10,280,2017-11-09 00:05:24,0
+4052,12,1,6,19,2017-11-08 16:38:19,0
+162820,28,1,19,135,2017-11-09 04:58:02,0
+142547,15,1,18,111,2017-11-08 08:46:13,0
+48282,7,1,5,101,2017-11-08 05:03:46,0
+16859,15,1,10,430,2017-11-07 09:32:15,0
+111847,64,1,23,459,2017-11-07 23:44:45,0
+93291,12,1,19,340,2017-11-09 12:00:09,0
+101437,3,1,19,280,2017-11-08 03:15:31,0
+123012,21,2,13,128,2017-11-06 23:19:39,0
+13812,6,1,13,459,2017-11-07 16:35:57,0
+111025,14,1,16,371,2017-11-09 15:02:41,0
+97744,4,1,17,101,2017-11-08 02:37:04,0
+65582,9,1,20,466,2017-11-08 08:17:26,0
+2753,12,1,6,277,2017-11-08 01:04:08,0
+45318,8,1,13,140,2017-11-08 12:10:27,0
+77325,14,1,9,379,2017-11-09 05:04:48,0
+5348,3,1,10,280,2017-11-08 10:42:23,0
+44744,2,1,13,435,2017-11-07 00:14:00,0
+10239,3,1,19,280,2017-11-08 04:47:32,0
+168163,2,1,10,377,2017-11-09 02:44:50,0
+164229,2,1,19,236,2017-11-07 12:54:37,0
+95631,13,1,19,477,2017-11-09 01:25:30,0
+44744,21,1,18,232,2017-11-07 16:29:33,0
+37120,2,1,20,477,2017-11-09 07:19:09,0
+84510,1,1,19,153,2017-11-07 12:55:55,0
+54065,29,1,19,101,2017-11-08 12:12:52,0
+52766,2,1,13,122,2017-11-07 17:11:14,0
+104978,15,1,19,265,2017-11-07 14:25:50,0
+105485,2,1,8,205,2017-11-07 18:18:53,0
+65631,3,1,32,130,2017-11-07 10:43:55,0
+79881,3,1,19,280,2017-11-09 05:59:23,0
+29403,15,1,17,245,2017-11-07 12:26:59,0
+160869,15,1,41,245,2017-11-08 09:58:32,0
+103648,6,1,13,459,2017-11-08 05:41:48,0
+100275,2,1,19,477,2017-11-07 07:55:14,0
+188586,12,1,8,497,2017-11-07 01:52:00,0
+200987,2,1,13,219,2017-11-07 00:19:55,0
+93383,20,1,13,259,2017-11-08 13:46:36,0
+25321,3,1,13,280,2017-11-08 00:05:46,0
+201182,12,1,22,145,2017-11-06 20:48:37,0
+117423,3,1,19,280,2017-11-07 11:09:36,0
+23907,12,1,13,259,2017-11-06 19:59:35,0
+77048,2,1,13,205,2017-11-08 15:05:53,0
+86419,18,1,37,107,2017-11-09 00:34:17,0
+5381,12,1,19,328,2017-11-09 08:13:04,0
+57295,15,1,23,245,2017-11-09 03:40:18,0
+47083,2,1,19,236,2017-11-08 06:04:52,0
+53964,11,1,28,360,2017-11-09 14:35:19,0
+14921,2,1,18,435,2017-11-07 04:36:03,0
+92766,9,1,20,234,2017-11-08 22:08:33,0
+41978,12,1,40,178,2017-11-07 01:36:03,0
+141432,15,1,10,153,2017-11-08 09:49:11,0
+73487,12,1,19,245,2017-11-07 03:49:58,0
+118157,9,1,19,134,2017-11-07 22:33:48,0
+164737,56,1,13,406,2017-11-08 03:03:57,0
+246626,18,1,22,134,2017-11-08 01:53:28,0
+109826,3,1,20,280,2017-11-08 05:33:48,0
+32665,18,1,19,376,2017-11-09 06:00:03,0
+73487,18,1,53,134,2017-11-09 13:21:11,0
+13886,12,1,13,259,2017-11-08 15:17:21,0
+101300,3,1,20,280,2017-11-08 10:14:54,0
+64620,5,1,13,317,2017-11-09 07:22:32,0
+115585,3,1,49,280,2017-11-08 02:09:10,0
+60320,3,1,3,115,2017-11-06 22:44:12,0
+23073,3,1,13,280,2017-11-08 01:40:55,0
+41313,3,1,17,280,2017-11-09 02:34:50,0
+133103,3,1,13,280,2017-11-08 10:30:09,0
+47273,8,1,6,145,2017-11-09 11:40:32,0
+82137,3,1,23,115,2017-11-07 18:51:52,0
+156730,26,1,42,121,2017-11-07 08:29:06,0
+8924,1,1,13,118,2017-11-08 00:45:42,0
+105603,15,2,6,153,2017-11-09 05:29:06,0
+93234,2,1,27,469,2017-11-07 15:23:56,0
+122032,13,1,19,477,2017-11-08 03:15:04,0
+73487,14,1,3,489,2017-11-08 03:53:58,0
+147211,9,1,13,134,2017-11-07 00:11:22,0
+69904,22,1,47,116,2017-11-09 03:53:43,0
+56166,2,1,26,435,2017-11-08 06:12:13,0
+37794,15,1,47,315,2017-11-09 09:28:34,0
+83803,18,1,25,376,2017-11-08 13:44:23,0
+95766,18,1,70,134,2017-11-08 05:04:20,0
+50037,17,1,13,280,2017-11-08 07:43:24,0
+78447,9,1,13,134,2017-11-06 16:13:34,0
+57591,3,1,6,280,2017-11-08 14:36:00,0
+44926,12,1,16,205,2017-11-09 14:56:53,0
+33924,18,1,18,107,2017-11-09 06:59:52,0
+117867,3,1,19,280,2017-11-07 13:15:46,0
+68804,3,1,25,379,2017-11-08 03:22:04,0
+113357,15,1,23,245,2017-11-08 03:45:00,0
+16478,28,1,13,135,2017-11-06 16:20:07,0
+52660,11,1,1,219,2017-11-09 06:05:33,0
+155833,15,1,41,245,2017-11-06 19:57:54,0
+69944,14,1,19,401,2017-11-09 02:18:53,0
+16693,12,1,19,122,2017-11-09 01:00:26,0
+191236,18,1,41,121,2017-11-08 23:43:30,0
+41261,15,1,19,245,2017-11-08 23:54:24,0
+106493,12,1,13,259,2017-11-07 06:06:11,0
+111025,18,1,27,107,2017-11-09 00:54:26,0
+114276,3,1,3,280,2017-11-09 01:39:32,0
+63205,9,1,19,232,2017-11-06 23:03:12,0
+8499,8,1,13,145,2017-11-08 23:52:50,0
+129931,2,1,35,477,2017-11-08 23:03:51,0
+43537,7,1,19,101,2017-11-07 11:23:38,0
+16579,3,1,10,280,2017-11-08 13:07:01,0
+122417,12,1,19,19,2017-11-07 07:21:56,0
+125833,1,1,6,125,2017-11-08 12:02:05,0
+105753,18,1,37,107,2017-11-08 03:40:19,0
+90555,8,1,17,145,2017-11-07 23:19:29,0
+14751,12,1,20,178,2017-11-09 12:05:30,0
+152257,9,1,10,489,2017-11-09 09:01:22,0
+14130,18,1,13,107,2017-11-08 16:19:31,0
+9923,1,1,13,13,2017-11-09 11:07:58,0
+60554,3,1,19,280,2017-11-07 05:43:48,0
+4957,3,1,12,424,2017-11-07 03:41:17,0
+59745,22,1,13,496,2017-11-07 02:43:18,0
+52043,9,1,3,234,2017-11-07 09:50:58,0
+163168,12,1,13,245,2017-11-08 07:43:08,0
+132703,3,1,13,130,2017-11-07 05:12:37,0
+123577,2,1,27,469,2017-11-07 01:26:31,0
+42869,12,1,22,182,2017-11-08 02:26:27,0
+22187,2,1,13,435,2017-11-09 13:37:40,0
+92852,12,1,19,326,2017-11-08 08:11:59,0
+83306,21,1,19,128,2017-11-08 04:39:31,0
+173085,18,1,22,107,2017-11-08 13:40:37,0
+5314,18,1,19,121,2017-11-08 07:55:10,0
+121906,3,1,10,379,2017-11-09 00:00:09,0
+44051,2,1,15,237,2017-11-08 04:13:44,0
+97773,12,1,25,265,2017-11-07 08:14:57,0
+79157,3,1,19,280,2017-11-09 00:28:35,0
+117524,12,1,37,19,2017-11-08 00:20:11,0
+100896,17,1,25,134,2017-11-07 05:10:51,0
+95104,2,1,19,435,2017-11-08 14:08:55,0
+304196,12,1,13,140,2017-11-09 10:15:46,0
+8940,18,1,13,121,2017-11-07 03:03:28,0
+37565,3,1,13,280,2017-11-08 17:49:07,0
+110042,18,1,41,134,2017-11-08 07:46:14,0
+195573,9,1,1,107,2017-11-09 09:08:29,0
+25792,18,1,13,107,2017-11-07 16:21:11,0
+61168,9,2,9,466,2017-11-08 14:52:39,0
+45308,20,1,35,478,2017-11-09 11:38:16,0
+38185,28,1,13,135,2017-11-09 13:42:12,0
+42143,3,1,14,452,2017-11-07 00:07:06,0
+182403,15,1,19,111,2017-11-07 06:14:38,0
+155761,8,1,13,145,2017-11-07 04:18:53,0
+61763,3,1,14,442,2017-11-09 01:22:42,0
+85096,3,1,13,317,2017-11-08 07:51:21,0
+178581,12,1,13,178,2017-11-07 12:43:46,0
+145896,26,1,19,266,2017-11-06 22:59:01,0
+55040,15,1,13,480,2017-11-09 05:55:58,0
+270296,23,1,15,153,2017-11-09 08:03:55,0
+107921,2,1,19,122,2017-11-09 14:12:23,0
+39782,2,2,25,205,2017-11-09 15:30:58,0
+55663,9,1,1,215,2017-11-07 06:08:27,0
+224325,3,1,13,280,2017-11-09 04:47:30,0
+106883,18,1,19,134,2017-11-07 07:58:29,0
+172498,2,1,10,452,2017-11-07 01:36:33,0
+121505,8,2,13,145,2017-11-08 16:00:48,0
+153624,3,1,13,205,2017-11-09 14:35:34,0
+5348,2,1,41,477,2017-11-09 10:29:06,0
+84896,3,1,13,442,2017-11-07 05:04:40,0
+80413,22,1,17,496,2017-11-07 11:16:56,0
+90605,25,1,19,259,2017-11-07 04:59:30,0
+12634,3,1,10,489,2017-11-08 19:51:26,0
+93593,12,1,36,265,2017-11-07 13:50:20,0
+92712,3,1,13,153,2017-11-07 12:01:09,0
+7006,12,1,19,178,2017-11-07 11:01:51,0
+87998,3,1,13,280,2017-11-07 02:18:44,0
+5314,8,1,8,145,2017-11-09 12:28:57,0
+70795,27,1,13,153,2017-11-08 09:16:09,0
+167091,14,1,1,401,2017-11-09 10:57:52,0
+112775,3,1,19,115,2017-11-08 18:09:17,0
+100840,13,1,13,469,2017-11-08 05:59:09,0
+111162,2,1,19,122,2017-11-08 02:21:49,0
+1611,12,1,19,259,2017-11-07 17:51:51,0
+220134,3,1,13,280,2017-11-08 11:47:32,0
+114276,3,1,13,137,2017-11-07 12:50:40,0
+65687,6,1,19,125,2017-11-09 08:13:06,0
+79103,12,1,10,409,2017-11-09 09:07:22,0
+136291,18,1,19,107,2017-11-08 17:52:39,0
+37158,18,1,13,107,2017-11-08 09:23:56,0
+284,3,1,6,489,2017-11-09 13:06:53,0
+103396,3,1,13,130,2017-11-08 12:41:28,0
+70253,2,1,19,212,2017-11-08 15:06:44,0
+163326,26,1,19,266,2017-11-09 08:29:10,0
+111363,1,2,9,153,2017-11-08 01:37:47,0
+20576,6,1,20,459,2017-11-07 11:21:22,0
+45992,9,1,19,466,2017-11-07 09:59:14,0
+110064,3,1,1,280,2017-11-07 05:19:17,0
+46774,18,1,18,107,2017-11-09 08:03:00,0
+106598,3,1,37,211,2017-11-08 16:32:42,0
+10314,18,1,18,134,2017-11-08 03:07:55,0
+91661,2,1,13,477,2017-11-07 15:36:10,0
+119222,18,1,22,107,2017-11-07 08:27:19,0
+84934,3,1,6,280,2017-11-09 05:50:55,0
+100694,1,1,22,153,2017-11-07 23:35:05,0
+67393,18,1,19,134,2017-11-09 08:26:03,0
+3964,2,1,10,236,2017-11-08 07:42:12,0
+44744,15,1,19,130,2017-11-06 16:15:53,0
+196216,2,1,13,477,2017-11-08 00:50:19,0
+251954,12,1,17,265,2017-11-08 05:20:33,0
+104548,3,1,607,137,2017-11-07 13:47:46,0
+58237,2,1,14,236,2017-11-07 12:45:35,0
+102027,3,1,17,135,2017-11-09 01:14:03,0
+125222,14,1,13,489,2017-11-07 11:09:30,0
+63986,2,1,13,377,2017-11-08 13:17:29,0
+108690,9,1,18,489,2017-11-08 10:11:02,0
+180184,28,1,10,135,2017-11-09 04:43:38,0
+106162,3,1,19,280,2017-11-08 14:33:08,0
+63636,26,1,25,121,2017-11-07 15:38:48,0
+96083,23,1,22,153,2017-11-09 02:33:15,0
+63989,14,1,17,401,2017-11-09 06:57:24,0
+105448,3,1,17,280,2017-11-07 06:47:42,0
+3488,8,1,19,145,2017-11-09 07:07:32,0
+239935,22,1,19,116,2017-11-08 05:20:54,0
+8506,12,1,8,265,2017-11-08 08:47:24,0
+59719,12,1,19,178,2017-11-07 06:01:46,0
+9820,9,1,49,442,2017-11-09 11:14:37,0
+159958,15,1,13,379,2017-11-07 07:51:24,0
+72627,9,1,8,134,2017-11-09 01:52:51,0
+85328,9,1,22,442,2017-11-08 23:07:30,0
+73516,15,2,77,245,2017-11-08 11:58:25,0
+127902,18,1,10,107,2017-11-07 13:27:00,0
+178851,2,1,19,205,2017-11-07 04:55:19,0
+144617,8,1,25,145,2017-11-07 15:14:36,0
+50658,9,1,13,466,2017-11-09 06:36:55,0
+55142,3,1,23,211,2017-11-08 17:35:29,0
+113389,3,1,12,280,2017-11-09 00:58:09,0
+133441,3,1,13,489,2017-11-07 05:59:13,0
+164213,11,1,10,319,2017-11-08 11:50:07,0
+49553,15,1,16,480,2017-11-07 08:13:05,0
+78992,2,1,20,243,2017-11-09 04:15:51,0
+104622,9,1,19,134,2017-11-06 16:20:42,0
+67682,3,1,19,173,2017-11-09 05:11:48,0
+155145,3,1,18,280,2017-11-08 15:29:19,0
+88050,15,1,17,430,2017-11-08 10:55:33,0
+39711,6,1,3,459,2017-11-09 07:09:44,0
+3641,18,1,4,439,2017-11-07 02:15:18,0
+134524,9,1,17,466,2017-11-08 03:31:21,0
+169040,1,1,53,377,2017-11-07 11:48:13,0
+5314,18,1,19,107,2017-11-09 12:16:22,0
+5785,9,1,13,232,2017-11-06 23:04:11,0
+101074,151,0,38,347,2017-11-09 01:53:45,0
+25485,14,1,13,489,2017-11-06 23:02:07,0
+10511,21,1,13,128,2017-11-06 23:01:39,0
+63964,21,1,37,128,2017-11-09 13:39:16,0
+132723,2,1,13,237,2017-11-09 05:10:46,0
+100724,3,1,13,173,2017-11-09 05:22:57,0
+99503,27,1,17,122,2017-11-09 00:49:19,0
+98881,13,1,19,469,2017-11-08 03:58:08,0
+25009,22,1,13,116,2017-11-09 08:42:33,0
+36052,2,1,13,477,2017-11-09 05:26:25,0
+103077,9,1,4,244,2017-11-09 10:48:28,0
+353,6,2,77,125,2017-11-08 07:17:27,0
+93021,12,2,37,265,2017-11-08 08:51:28,0
+130827,14,1,27,401,2017-11-09 09:40:21,0
+68729,6,1,9,125,2017-11-06 22:48:03,0
+77403,18,1,23,376,2017-11-08 19:18:51,0
+86621,2,1,13,236,2017-11-06 16:44:44,0
+91661,11,1,13,325,2017-11-07 06:28:32,0
+127969,1,1,18,178,2017-11-07 06:55:07,0
+14764,15,1,19,3,2017-11-08 13:40:22,0
+34520,18,1,8,107,2017-11-08 05:13:14,0
+38219,2,1,3,122,2017-11-09 03:06:01,0
+206858,9,1,16,107,2017-11-09 14:02:50,0
+89426,12,1,9,265,2017-11-09 02:03:54,0
+233814,3,1,17,211,2017-11-08 05:01:10,0
+71762,12,1,10,145,2017-11-09 01:09:28,0
+93523,6,1,11,125,2017-11-08 12:51:20,0
+50128,13,1,19,477,2017-11-07 04:29:37,0
+280511,3,1,19,280,2017-11-08 07:29:54,0
+77399,21,1,3,232,2017-11-09 11:24:11,0
+1235,3,1,16,317,2017-11-08 07:47:20,0
+91574,2,2,9,205,2017-11-08 15:45:40,0
+5178,7,1,26,101,2017-11-09 04:37:27,0
+43166,1,1,37,124,2017-11-09 13:42:11,0
+99754,14,1,19,480,2017-11-07 06:38:40,0
+249343,2,1,19,364,2017-11-08 08:11:52,0
+96784,13,1,19,477,2017-11-09 06:08:02,0
+94167,9,1,10,127,2017-11-09 11:26:48,0
+71805,12,1,13,265,2017-11-09 03:34:36,0
+190183,12,1,19,259,2017-11-08 13:32:14,0
+105475,7,1,9,101,2017-11-09 11:22:23,0
+109703,14,1,6,379,2017-11-08 13:59:41,0
+74497,3,1,13,135,2017-11-08 21:51:49,0
+5348,21,2,19,128,2017-11-08 11:27:58,0
+4646,12,2,19,124,2017-11-07 13:28:03,0
+5314,64,1,41,459,2017-11-08 14:12:49,0
+105292,12,1,13,178,2017-11-07 11:13:49,0
+94921,3,1,19,280,2017-11-08 05:56:33,0
+124700,3,1,13,424,2017-11-09 01:41:04,0
+90855,15,1,6,245,2017-11-08 07:41:08,0
+156664,2,1,22,237,2017-11-08 05:39:48,0
+46053,12,1,53,328,2017-11-07 12:32:37,0
+59123,15,1,19,315,2017-11-08 02:44:59,0
+39248,12,1,13,245,2017-11-08 08:25:11,0
+48240,7,1,19,101,2017-11-09 08:21:05,0
+101957,15,1,18,111,2017-11-09 01:57:52,0
+82449,9,1,18,445,2017-11-09 08:27:28,0
+114276,1,1,13,134,2017-11-07 00:24:16,0
+17149,3,1,17,115,2017-11-07 04:12:38,0
+43803,18,1,13,107,2017-11-09 13:31:10,0
+124608,12,1,19,245,2017-11-07 05:34:24,0
+28379,2,1,9,212,2017-11-08 12:41:45,0
+21734,9,1,35,442,2017-11-08 17:18:44,0
+81456,94,1,17,361,2017-11-09 10:20:33,0
+60752,13,1,8,469,2017-11-07 12:08:43,0
+9704,18,1,37,107,2017-11-09 09:31:00,0
+93542,8,2,13,145,2017-11-09 13:37:06,0
+56319,15,1,18,379,2017-11-08 18:31:17,0
+55402,18,1,37,121,2017-11-07 20:31:08,0
+9602,8,1,19,145,2017-11-07 16:56:00,0
+100971,9,2,13,466,2017-11-08 13:05:48,0
+85150,15,1,9,430,2017-11-07 07:08:23,0
+66481,3,1,13,480,2017-11-09 11:45:21,0
+18546,3,1,13,280,2017-11-08 06:02:17,0
+93021,12,1,19,265,2017-11-09 00:42:06,0
+8292,15,1,13,111,2017-11-08 00:46:40,0
+5418,18,1,12,107,2017-11-08 13:36:40,0
+5147,3,1,41,409,2017-11-07 01:53:16,0
+49600,3,1,13,280,2017-11-08 00:34:09,0
+100929,2,1,13,435,2017-11-07 15:08:50,0
+90766,15,1,23,245,2017-11-08 03:00:54,0
+103241,18,1,19,439,2017-11-09 07:56:55,0
+255406,3,1,13,317,2017-11-08 08:29:32,0
+126080,28,1,19,135,2017-11-07 03:55:11,0
+338711,18,1,12,376,2017-11-09 00:45:36,0
+39782,24,1,58,105,2017-11-08 15:04:53,0
+195049,2,1,16,237,2017-11-09 07:32:24,0
+69716,9,1,19,244,2017-11-09 11:11:36,0
+32665,11,1,13,325,2017-11-08 01:11:55,0
+206399,26,1,19,266,2017-11-08 10:39:30,0
+90891,17,1,12,128,2017-11-09 14:32:50,0
+103358,3,1,13,424,2017-11-08 12:34:26,0
+58472,64,1,9,459,2017-11-08 17:13:22,0
+19014,3,1,19,137,2017-11-06 23:36:37,0
+102225,3,1,13,115,2017-11-08 01:13:15,0
+115265,2,1,13,237,2017-11-07 01:21:45,0
+124708,14,1,13,442,2017-11-09 11:00:53,0
+5348,3,1,17,424,2017-11-07 23:09:33,0
+47747,13,1,8,400,2017-11-08 17:48:38,0
+41082,1,1,19,134,2017-11-07 16:00:12,0
+133522,15,1,19,153,2017-11-07 13:24:33,0
+309581,9,1,13,232,2017-11-09 09:22:51,0
+121728,12,1,23,259,2017-11-07 08:45:03,0
+108075,9,1,13,244,2017-11-07 08:26:31,0
+46923,3,1,10,317,2017-11-08 08:42:48,0
+208265,14,1,16,439,2017-11-07 12:32:56,0
+103077,18,1,19,439,2017-11-07 14:36:49,0
+108056,13,1,16,400,2017-11-07 06:52:57,0
+79881,1,1,3,137,2017-11-07 14:37:21,0
+66305,12,1,13,245,2017-11-07 08:35:52,0
+124041,7,1,19,101,2017-11-09 06:50:09,0
+70253,9,1,19,134,2017-11-08 13:42:05,0
+173264,12,1,13,265,2017-11-07 00:48:10,0
+8215,12,1,19,178,2017-11-08 09:46:01,0
+31906,9,1,19,334,2017-11-08 04:10:27,0
+111340,3,1,20,442,2017-11-07 12:11:00,0
+152714,14,1,37,123,2017-11-07 22:08:58,0
+171690,18,1,13,439,2017-11-07 11:26:41,0
+4414,2,1,13,477,2017-11-09 11:38:42,0
+100212,15,1,20,480,2017-11-07 04:04:42,0
+62717,3,1,19,424,2017-11-06 23:30:57,0
+41307,9,1,17,466,2017-11-09 05:32:37,0
+39095,13,1,26,400,2017-11-07 13:12:39,0
+70532,3,1,20,280,2017-11-08 09:22:57,0
+66072,8,2,13,140,2017-11-07 05:33:39,0
+5348,11,1,13,325,2017-11-09 03:54:16,0
+235828,2,1,18,237,2017-11-09 01:31:34,0
+114133,3,1,19,409,2017-11-08 16:13:55,0
+34292,21,1,22,128,2017-11-09 13:58:50,0
+237039,3,1,22,442,2017-11-09 10:00:19,0
+33482,2,1,13,435,2017-11-08 01:37:45,0
+28140,15,1,19,245,2017-11-08 00:52:25,0
+108329,15,1,22,245,2017-11-07 19:08:42,0
+135218,23,1,18,153,2017-11-07 12:35:49,0
+124375,3,1,19,452,2017-11-08 04:57:16,0
+159228,12,1,13,140,2017-11-09 01:25:41,0
+122963,15,1,41,265,2017-11-09 03:33:46,0
+37490,29,2,17,343,2017-11-09 01:17:04,0
+73516,3,1,19,280,2017-11-08 12:42:25,0
+11110,150,1,13,110,2017-11-08 10:02:56,0
+7275,14,1,28,480,2017-11-08 17:35:46,0
+34380,15,1,13,245,2017-11-07 12:33:19,0
+66541,11,1,19,487,2017-11-07 09:30:52,0
+118252,19,0,24,347,2017-11-09 06:15:24,0
+73487,3,1,19,153,2017-11-07 22:30:06,0
+5314,15,1,19,245,2017-11-08 22:20:02,0
+30614,8,1,6,145,2017-11-09 09:55:06,0
+100383,11,1,13,173,2017-11-09 08:15:46,0
+5348,9,1,20,334,2017-11-09 06:52:14,0
+17149,14,1,19,439,2017-11-07 13:25:33,0
+32457,9,1,13,258,2017-11-08 05:18:48,0
+162678,18,1,13,121,2017-11-07 06:01:23,0
+23322,15,1,28,111,2017-11-08 01:01:28,0
+242551,12,1,17,140,2017-11-08 00:39:17,0
+116155,14,1,19,379,2017-11-07 22:26:37,0
+12109,2,1,8,469,2017-11-07 14:08:53,0
+47675,14,1,13,371,2017-11-08 00:13:15,0
+7813,3,1,19,205,2017-11-08 12:20:30,0
+22170,15,1,9,245,2017-11-08 02:34:51,0
+16444,8,1,13,145,2017-11-09 06:16:29,0
+104398,13,1,3,477,2017-11-09 03:53:09,0
+152137,2,1,35,237,2017-11-07 14:00:27,0
+75595,12,1,25,481,2017-11-09 08:04:15,0
+195548,1,1,25,115,2017-11-08 10:52:05,0
+102405,2,1,13,237,2017-11-08 03:51:48,0
+2254,15,1,13,315,2017-11-07 22:58:02,0
+554,13,1,19,477,2017-11-09 10:03:22,0
+74100,18,1,28,439,2017-11-07 04:26:25,0
+51992,9,1,19,234,2017-11-07 13:50:10,0
+119349,7,1,41,101,2017-11-09 05:07:04,0
+304407,3,1,35,182,2017-11-09 09:44:53,0
+118753,3,1,25,280,2017-11-09 01:20:36,0
+143980,2,1,36,435,2017-11-07 00:32:53,0
+45843,26,1,40,266,2017-11-08 01:32:28,0
+4633,18,1,20,121,2017-11-06 18:00:27,0
+123585,9,1,8,445,2017-11-09 07:06:46,0
+23714,37,1,18,21,2017-11-09 05:00:47,0
+361897,18,1,19,107,2017-11-09 15:21:07,0
+59086,3,1,41,409,2017-11-09 10:23:33,0
+33810,12,1,23,265,2017-11-07 07:18:34,0
+26995,2,1,35,435,2017-11-06 16:53:18,0
+122677,1,1,17,134,2017-11-07 16:40:57,0
+73887,9,1,13,232,2017-11-08 14:58:38,0
+67734,12,1,19,265,2017-11-08 16:01:11,0
+73487,2,1,19,237,2017-11-09 01:43:57,0
+120532,15,1,13,265,2017-11-09 15:20:52,0
+30587,2,1,19,469,2017-11-08 12:24:11,0
+145604,8,1,13,145,2017-11-09 09:33:38,0
+1713,3,1,18,424,2017-11-08 06:56:36,0
+105160,9,1,13,334,2017-11-08 05:28:57,0
+105560,15,1,11,265,2017-11-08 07:18:36,0
+21963,23,1,6,153,2017-11-07 15:07:40,0
+114220,12,1,17,178,2017-11-08 09:37:13,0
+55849,18,1,13,134,2017-11-09 03:19:02,0
+97744,15,1,19,412,2017-11-08 10:27:19,0
+135329,9,1,20,334,2017-11-07 06:17:25,0
+196609,17,1,13,128,2017-11-09 15:12:00,0
+42861,12,1,8,19,2017-11-08 00:21:04,0
+13479,13,1,19,449,2017-11-09 12:45:56,0
+10316,18,1,19,121,2017-11-07 06:05:33,0
+66655,9,1,13,466,2017-11-09 08:50:54,0
+18676,1,1,1,135,2017-11-08 01:22:17,0
+20021,20,1,35,259,2017-11-07 14:43:31,0
+41399,15,1,13,315,2017-11-08 03:13:18,0
+19090,18,1,19,107,2017-11-06 21:33:28,0
+20901,3,1,4,205,2017-11-07 02:33:43,0
+37892,25,1,47,259,2017-11-09 14:54:02,0
+39782,3,1,17,402,2017-11-09 06:24:44,0
+202204,18,3032,607,107,2017-11-06 23:50:32,0
+281109,3,1,35,317,2017-11-08 06:58:36,0
+64198,12,1,16,265,2017-11-07 11:49:43,0
+41805,18,1,19,134,2017-11-07 04:07:50,0
+10301,15,1,19,379,2017-11-06 16:41:10,0
+91797,64,1,19,459,2017-11-07 14:30:39,0
+83660,15,1,13,245,2017-11-08 03:39:09,0
+20242,8,1,35,145,2017-11-09 07:02:44,0
+248858,2,1,53,237,2017-11-08 01:19:12,0
+26987,3,1,17,489,2017-11-08 16:40:30,0
+64381,9,1,47,215,2017-11-09 14:09:34,0
+6189,28,1,6,135,2017-11-07 23:44:43,0
+68519,3,1,13,280,2017-11-08 01:01:05,0
+5348,9,1,19,134,2017-11-07 17:39:18,0
+116239,14,1,8,489,2017-11-08 00:24:40,0
+88881,13,1,13,400,2017-11-08 06:18:15,0
+182270,14,1,19,379,2017-11-07 08:37:10,0
+152714,28,1,1,135,2017-11-09 03:54:47,0
+50375,9,1,19,445,2017-11-08 10:12:42,0
+113293,9,1,19,134,2017-11-07 17:29:16,0
+54895,25,1,6,259,2017-11-08 13:45:53,0
+34561,18,1,32,449,2017-11-09 09:42:33,0
+64593,13,1,13,477,2017-11-07 01:31:53,0
+117898,18,1,52,107,2017-11-09 14:12:05,0
+126961,2,1,23,477,2017-11-08 16:28:08,0
+78903,11,1,17,319,2017-11-09 01:59:04,0
+181849,18,1,30,107,2017-11-08 12:24:27,0
+40216,26,1,19,477,2017-11-09 01:06:07,0
+92352,3,1,13,205,2017-11-08 07:02:38,0
+4486,14,1,27,442,2017-11-08 03:58:44,0
+125222,64,1,19,459,2017-11-07 02:58:17,0
+74061,12,1,19,245,2017-11-07 15:56:35,0
+12903,14,1,28,134,2017-11-09 04:15:28,0
+197705,12,1,22,178,2017-11-06 17:59:56,0
+74497,2,1,26,237,2017-11-09 01:21:09,0
+5348,12,1,16,205,2017-11-07 12:29:20,0
+114220,3,1,13,137,2017-11-07 20:36:11,0
+44570,22,1,13,496,2017-11-07 01:06:14,0
+105855,2,1,10,364,2017-11-09 11:56:08,0
+22780,18,1,18,107,2017-11-08 06:43:24,0
+69394,15,1,19,140,2017-11-08 04:46:12,0
+97030,1,1,13,115,2017-11-09 05:57:21,0
+246044,15,1,13,245,2017-11-08 16:22:03,0
+108913,18,1,13,107,2017-11-08 01:46:15,0
+148947,2,1,19,236,2017-11-07 04:10:52,0
+44327,18,1,19,107,2017-11-09 06:56:58,0
+4205,18,1,13,107,2017-11-07 16:04:01,0
+25150,23,1,3,153,2017-11-07 04:22:44,0
+284968,14,1,13,379,2017-11-09 13:59:38,0
+90852,12,1,15,245,2017-11-09 05:10:58,0
+251954,24,1,23,105,2017-11-08 09:34:50,0
+123181,64,1,10,459,2017-11-08 13:25:26,0
+124016,3,1,19,115,2017-11-08 00:50:59,0
+116644,15,1,19,265,2017-11-09 13:10:56,0
+111102,2,1,25,477,2017-11-08 07:42:26,0
+61942,18,1,13,376,2017-11-08 10:13:33,0
+83206,18,1,13,121,2017-11-08 15:56:16,0
+102897,1,1,19,135,2017-11-08 04:45:16,0
+29660,3,1,13,424,2017-11-09 15:38:53,0
+179788,3,1,13,379,2017-11-07 03:26:16,0
+204574,3,1,3,280,2017-11-07 05:50:30,0
+91885,18,1,18,107,2017-11-09 09:32:31,0
+81698,3,1,19,280,2017-11-09 00:43:36,0
+81973,9,1,10,442,2017-11-07 14:40:29,0
+146001,12,2,16,145,2017-11-06 22:56:44,0
+107155,15,1,19,111,2017-11-08 12:45:57,0
+111517,23,1,55,153,2017-11-06 22:13:53,0
+53218,15,1,6,245,2017-11-07 04:31:05,0
+146180,12,1,9,178,2017-11-09 09:56:02,0
+67800,18,1,13,121,2017-11-07 14:06:57,0
+105569,2,1,13,205,2017-11-07 00:39:27,0
+182243,3,1,15,280,2017-11-09 05:00:50,0
+17411,15,1,6,138,2017-11-09 08:45:13,0
+77535,26,1,8,266,2017-11-08 00:45:55,0
+5729,7,1,25,101,2017-11-09 10:44:05,0
+38527,2,1,17,477,2017-11-09 06:24:01,0
+90855,9,1,17,489,2017-11-07 15:43:18,0
+105981,12,1,6,178,2017-11-07 08:16:42,0
+48212,12,2,9,178,2017-11-09 07:14:11,0
+79787,18,1,18,107,2017-11-06 17:17:06,0
+32265,2,1,37,219,2017-11-07 14:32:15,0
+105587,14,1,19,401,2017-11-07 12:13:35,0
+13291,18,1,19,439,2017-11-08 07:26:36,0
+7146,15,1,19,265,2017-11-09 07:15:41,0
+85208,18,1,18,107,2017-11-08 11:38:56,0
+71654,3,1,22,137,2017-11-09 08:02:41,0
+79751,26,1,10,121,2017-11-09 07:14:26,0
+2770,19,0,0,213,2017-11-08 10:00:53,0
+195038,9,1,18,442,2017-11-07 10:57:41,0
+48359,18,1,20,107,2017-11-07 08:07:08,0
+48240,3,1,19,30,2017-11-09 13:50:23,0
+93054,3,1,19,153,2017-11-07 22:09:19,0
+30077,3,2,49,211,2017-11-08 06:36:05,0
+58097,12,1,32,328,2017-11-07 07:44:46,0
+28336,2,1,19,452,2017-11-09 05:10:36,0
+85045,2,1,19,477,2017-11-07 00:55:32,0
+178105,18,1,13,121,2017-11-07 02:15:09,0
+43881,15,1,19,265,2017-11-09 01:33:48,0
+50867,25,1,13,259,2017-11-07 10:57:47,0
+111025,26,1,13,121,2017-11-07 14:40:18,0
+80770,13,1,13,477,2017-11-09 05:42:48,0
+209039,13,1,13,477,2017-11-08 15:31:02,0
+3239,1,1,13,134,2017-11-09 11:17:08,0
+139448,3,1,37,280,2017-11-07 09:11:35,0
+5348,7,1,43,101,2017-11-08 05:39:44,0
+104397,12,1,37,19,2017-11-07 10:43:53,0
+20915,28,1,14,135,2017-11-09 12:28:10,0
+95766,13,1,37,477,2017-11-07 12:53:48,0
+58288,12,1,6,140,2017-11-06 18:09:41,0
+287492,9,1,13,466,2017-11-08 12:35:12,0
+77318,15,1,13,153,2017-11-07 16:11:43,0
+123563,11,1,27,173,2017-11-08 01:14:40,0
+117753,1,1,25,153,2017-11-07 03:32:23,0
+122199,11,1,22,319,2017-11-06 23:23:54,0
+106665,3,1,8,409,2017-11-07 12:06:20,0
+180643,18,1,41,439,2017-11-09 04:57:25,0
+77901,15,1,6,379,2017-11-09 00:26:38,0
+22346,12,1,8,245,2017-11-09 14:49:51,0
+124325,2,1,19,477,2017-11-09 05:55:44,0
+76616,12,1,9,178,2017-11-07 13:42:18,0
+81812,13,1,18,477,2017-11-09 03:46:06,0
+37417,3,1,17,280,2017-11-08 07:51:44,0
+92735,9,2,13,134,2017-11-09 01:19:04,0
+37323,9,1,19,334,2017-11-09 06:36:23,0
+18439,15,1,19,386,2017-11-07 11:04:08,0
+108180,15,1,37,278,2017-11-08 04:44:56,0
+14617,12,1,13,245,2017-11-08 03:51:45,0
+44000,18,1,6,107,2017-11-09 01:32:18,0
+80676,11,1,11,122,2017-11-08 12:04:01,0
+105475,56,1,12,406,2017-11-09 06:53:56,0
+53978,3,1,6,489,2017-11-09 12:19:22,0
+44615,2,1,9,205,2017-11-08 12:14:17,0
+14223,2,1,41,122,2017-11-09 02:01:29,0
+152323,24,1,19,105,2017-11-07 14:05:23,0
+146789,9,1,13,134,2017-11-08 16:38:15,0
+115445,11,2,63,319,2017-11-08 05:04:18,0
+23260,9,1,34,127,2017-11-09 11:25:07,0
+245978,12,1,25,259,2017-11-09 15:00:26,0
+24586,9,1,6,334,2017-11-07 06:32:52,0
+43215,2,1,25,452,2017-11-08 12:53:04,0
+80428,3,1,13,280,2017-11-08 00:47:58,0
+47168,2,1,19,435,2017-11-08 01:40:03,0
+49677,29,1,19,343,2017-11-07 12:33:35,0
+27639,15,1,22,111,2017-11-09 11:56:39,0
+76710,3,1,19,371,2017-11-09 14:30:57,0
+47824,35,1,19,21,2017-11-09 03:58:40,1
+24943,3,1,22,480,2017-11-09 14:33:21,0
+183108,2,1,13,364,2017-11-09 02:41:02,0
+32539,9,1,19,215,2017-11-08 15:51:08,0
+69362,1,1,20,134,2017-11-07 00:08:41,0
+59145,3,1,25,173,2017-11-08 00:30:59,0
+111799,12,1,15,265,2017-11-09 11:04:31,0
+160476,3,1,13,442,2017-11-07 00:21:55,0
+171240,1,1,13,178,2017-11-07 10:38:15,0
+36150,3,1,13,452,2017-11-07 20:34:00,0
+27988,1,1,18,134,2017-11-07 00:51:22,0
+167814,3,1,9,466,2017-11-07 14:33:12,0
+1313,3,1,20,280,2017-11-08 09:01:48,0
+58637,2,1,19,237,2017-11-09 01:43:06,0
+75830,3,1,19,409,2017-11-07 23:48:43,0
+334024,11,1,25,487,2017-11-09 04:44:49,0
+84972,8,1,13,259,2017-11-07 00:57:15,0
+40314,18,1,13,121,2017-11-09 09:42:43,0
+15643,1,1,37,153,2017-11-08 03:59:42,0
+91574,2,1,18,236,2017-11-08 08:00:02,0
+44673,3,1,19,280,2017-11-08 04:40:30,0
+71149,9,1,1,466,2017-11-09 10:23:35,0
+72805,3,1,13,130,2017-11-07 04:59:23,0
+71789,2,1,23,236,2017-11-08 05:23:18,0
+125050,5,1,19,377,2017-11-09 09:16:44,0
+35696,3,1,37,280,2017-11-09 08:27:07,0
+6553,3,1,19,379,2017-11-08 06:50:25,0
+7441,3,1,42,130,2017-11-07 03:34:04,0
+204700,8,1,13,145,2017-11-07 10:01:50,0
+10265,3,1,13,424,2017-11-08 03:11:33,0
+37628,21,1,10,128,2017-11-07 05:31:29,0
+66898,21,1,19,232,2017-11-07 16:33:57,0
+64024,3,1,15,280,2017-11-09 04:12:31,0
+8451,2,1,15,219,2017-11-09 04:55:22,0
+41052,64,1,13,459,2017-11-07 04:10:24,0
+210945,15,1,6,386,2017-11-07 00:17:19,0
+33016,2,1,27,469,2017-11-07 07:16:50,0
+47251,2,1,19,477,2017-11-09 05:54:35,0
+15630,1,1,19,135,2017-11-07 17:24:23,0
+117269,8,1,9,145,2017-11-08 17:56:16,0
+29835,12,1,31,178,2017-11-07 01:32:20,0
+33777,3,1,12,173,2017-11-08 07:56:48,0
+96964,12,1,19,178,2017-11-09 05:21:27,0
+39175,3,1,22,205,2017-11-07 06:32:03,0
+98317,26,1,42,477,2017-11-09 13:40:28,0
+73299,18,1,34,107,2017-11-08 03:25:33,0
+280302,14,1,19,349,2017-11-08 01:18:58,0
+8019,12,1,25,140,2017-11-09 01:13:25,0
+67779,3,2,3,135,2017-11-09 08:09:07,0
+24985,2,1,13,205,2017-11-07 11:06:04,0
+153133,15,1,13,265,2017-11-07 08:17:52,0
+90743,3,1,15,452,2017-11-08 03:04:44,0
+325602,4,1,13,101,2017-11-09 12:09:24,0
+119349,8,1,9,145,2017-11-09 08:50:13,0
+114235,15,1,19,430,2017-11-09 02:19:51,0
+50165,2,1,22,236,2017-11-07 00:38:52,0
+2938,3,1,25,442,2017-11-08 00:57:48,0
+54612,15,1,13,153,2017-11-07 11:09:29,0
+63932,12,1,19,178,2017-11-07 13:20:00,0
+31823,14,1,16,439,2017-11-08 15:44:17,0
+62239,18,1,53,134,2017-11-08 15:50:29,0
+119349,7,1,47,101,2017-11-09 13:31:58,0
+95155,13,1,18,477,2017-11-08 09:12:58,0
+59233,12,1,13,265,2017-11-09 08:26:53,0
+179344,15,1,20,259,2017-11-07 15:44:12,0
+13639,15,1,20,245,2017-11-08 06:59:15,0
+54524,2,2,13,205,2017-11-06 23:07:33,0
+11272,7,1,19,101,2017-11-07 10:18:28,0
+31723,3,1,19,153,2017-11-07 03:49:22,0
+119304,28,1,15,135,2017-11-09 08:06:19,0
+27035,3,1,32,130,2017-11-07 12:42:01,0
+75885,2,1,9,237,2017-11-08 10:15:23,0
+41584,2,1,13,243,2017-11-09 00:01:53,0
+102677,3,1,19,379,2017-11-08 08:00:46,0
+73189,18,1,25,107,2017-11-08 08:58:57,0
+279182,12,1,25,409,2017-11-08 11:14:14,0
+4033,1,1,11,377,2017-11-08 03:21:42,0
+115681,7,1,13,101,2017-11-09 04:21:17,0
+232890,20,1,26,259,2017-11-08 02:44:03,0
+252629,18,1,13,107,2017-11-08 15:07:35,0
+78564,3,1,19,280,2017-11-09 01:29:30,0
+19403,7,1,19,101,2017-11-09 06:10:58,0
+86544,3,1,10,280,2017-11-09 06:17:46,0
+71591,15,1,19,130,2017-11-09 07:35:11,0
+56245,12,1,23,140,2017-11-07 06:20:12,0
+153553,9,1,28,442,2017-11-08 02:37:50,0
+96948,3,1,13,135,2017-11-06 22:33:15,0
+104791,9,1,13,466,2017-11-09 11:37:14,0
+5718,14,2,40,113,2017-11-09 14:28:08,0
+103068,18,1,19,107,2017-11-09 04:55:16,0
+181641,9,1,17,466,2017-11-08 20:03:25,0
+107739,5,1,25,377,2017-11-09 06:32:53,0
+14301,29,1,19,213,2017-11-08 01:15:38,0
+37774,9,1,8,215,2017-11-09 14:41:47,0
+5286,15,1,13,265,2017-11-06 22:31:40,0
+69995,18,1,10,107,2017-11-09 12:49:46,0
+77182,2,1,13,452,2017-11-09 02:50:40,0
+114795,2,1,22,236,2017-11-08 21:03:08,0
+257422,2,1,13,219,2017-11-08 15:16:12,0
+30228,3,1,36,280,2017-11-09 07:33:38,0
+58158,32,1,18,376,2017-11-07 09:12:20,0
+57407,15,1,6,480,2017-11-07 09:16:03,0
+5348,12,1,3,205,2017-11-09 05:29:22,0
+105247,3,1,10,489,2017-11-09 02:57:52,0
+181849,9,1,20,107,2017-11-09 04:49:43,0
+177070,3,1,13,280,2017-11-08 02:45:30,0
+10797,15,1,6,245,2017-11-08 14:05:59,0
+271198,18,1,13,107,2017-11-09 03:10:42,0
+42013,3,1,13,205,2017-11-07 11:23:03,0
+13104,11,1,19,481,2017-11-09 15:58:59,0
+218415,19,0,0,213,2017-11-08 10:59:35,1
+172223,2,1,13,236,2017-11-09 12:11:58,0
+108153,47,1,1,484,2017-11-09 11:52:31,0
+141043,1,1,41,153,2017-11-09 03:09:37,0
+69667,2,1,19,237,2017-11-09 04:58:06,0
+10558,25,1,22,259,2017-11-09 13:32:44,0
+31542,12,1,18,205,2017-11-08 20:32:27,0
+2770,3,2,20,137,2017-11-06 20:12:37,0
+104906,9,1,19,334,2017-11-08 23:04:14,0
+256260,12,1,42,265,2017-11-08 05:02:10,0
+89429,27,1,13,153,2017-11-08 10:25:00,0
+347595,72,1,27,101,2017-11-09 13:20:47,0
+55726,18,1,19,121,2017-11-07 06:07:54,0
+53929,2,1,19,205,2017-11-07 22:11:56,0
+27389,4,1,8,101,2017-11-08 02:16:11,0
+176102,3,1,19,115,2017-11-06 17:57:42,0
+40077,15,1,20,386,2017-11-07 13:51:07,0
+76966,14,1,17,401,2017-11-07 06:12:42,0
+83045,2,1,1,477,2017-11-08 04:50:30,0
+81287,21,1,19,128,2017-11-07 07:28:17,0
+7709,18,3543,748,107,2017-11-07 23:35:12,0
+103108,26,1,22,121,2017-11-06 23:44:00,0
+86926,3,1,13,466,2017-11-09 07:02:01,0
+42035,2,1,10,236,2017-11-09 02:35:56,0
+110667,14,1,9,401,2017-11-09 00:16:35,0
+42176,3,1,20,280,2017-11-09 05:45:13,0
+24157,2,1,19,212,2017-11-09 13:16:34,0
+70343,3,1,28,452,2017-11-09 01:28:33,0
+111702,13,1,19,477,2017-11-08 06:59:27,0
+115340,3,1,13,280,2017-11-09 06:03:25,0
+61293,18,1,13,134,2017-11-07 06:52:48,0
+99377,3,1,22,280,2017-11-07 05:44:32,0
+32367,3,1,13,280,2017-11-09 05:35:01,0
+19778,15,1,41,111,2017-11-07 09:08:59,0
+21126,12,1,19,140,2017-11-09 12:07:03,0
+100694,1,1,19,124,2017-11-09 07:12:29,0
+90891,3,1,9,466,2017-11-09 07:14:45,0
+182971,3,1,19,130,2017-11-06 18:17:55,0
+41666,15,1,19,245,2017-11-07 15:23:50,0
+39453,3,1,13,280,2017-11-09 03:48:31,0
+211936,12,1,25,265,2017-11-07 11:18:36,0
+156538,3,1,18,280,2017-11-08 02:01:22,0
+44673,2,1,19,435,2017-11-07 18:06:43,0
+58254,25,1,22,259,2017-11-06 23:51:04,0
+93810,3,1,25,442,2017-11-07 02:02:04,0
+2660,26,1,32,477,2017-11-08 15:09:35,0
+116499,26,1,13,266,2017-11-07 03:24:44,0
+70421,9,1,15,232,2017-11-09 03:47:43,0
+55903,1,1,22,134,2017-11-07 01:19:29,0
+8469,21,1,17,128,2017-11-08 09:33:21,0
+109676,12,1,22,245,2017-11-08 13:47:16,0
+119372,20,1,3,478,2017-11-09 08:26:41,0
+7172,3,1,22,280,2017-11-09 05:10:52,0
+50747,9,1,20,334,2017-11-09 11:14:42,0
+17899,11,1,12,325,2017-11-08 14:49:34,0
+88971,15,1,15,412,2017-11-08 12:42:46,0
+52766,12,1,19,178,2017-11-08 08:17:34,0
+73031,3,1,11,417,2017-11-07 15:11:46,0
+345694,19,281,76,213,2017-11-08 21:52:44,0
+13016,18,1,23,376,2017-11-09 09:34:39,0
+240210,2,1,10,205,2017-11-08 12:59:03,0
+166433,12,1,15,259,2017-11-09 10:00:32,0
+175837,12,1,19,245,2017-11-07 14:31:34,0
+137052,3,1,19,280,2017-11-08 10:12:04,0
+110071,18,1,15,107,2017-11-07 09:54:17,0
+115974,2,1,19,452,2017-11-09 09:03:33,0
+3488,12,1,19,178,2017-11-08 23:44:44,0
+108341,2,1,13,205,2017-11-08 03:45:33,0
+220072,2,1,18,237,2017-11-08 00:51:27,0
+152009,23,1,22,153,2017-11-07 09:52:58,0
+123952,64,1,22,459,2017-11-07 17:26:59,0
+39736,3,1,2,205,2017-11-08 03:23:46,0
+123080,18,1,32,107,2017-11-09 05:19:43,0
+126339,13,1,20,477,2017-11-08 10:28:22,0
+42298,12,1,19,481,2017-11-09 12:10:35,0
+59159,13,1,13,477,2017-11-07 09:16:17,0
+98944,2,1,32,237,2017-11-08 05:24:08,0
+26751,26,1,19,477,2017-11-09 07:03:03,0
+119531,15,2,15,245,2017-11-08 18:44:30,0
+69577,14,1,4,439,2017-11-07 11:28:08,0
+6627,3,1,22,280,2017-11-07 10:58:33,0
+86277,12,1,13,259,2017-11-06 18:02:21,0
+178822,2,1,19,219,2017-11-08 12:20:13,0
+17149,14,2,184,446,2017-11-09 14:09:36,0
+31385,23,1,15,153,2017-11-09 13:32:23,0
+315844,14,1,18,439,2017-11-08 20:09:49,0
+26905,15,1,37,245,2017-11-08 18:27:46,0
+183647,7,1,13,101,2017-11-08 16:02:28,0
+14063,12,1,13,409,2017-11-08 16:15:36,0
+128885,3,1,1,417,2017-11-08 00:47:00,0
+73516,12,1,13,326,2017-11-08 17:57:06,0
+81013,27,1,13,153,2017-11-08 03:44:14,0
+77048,3,1,10,280,2017-11-08 12:57:41,0
+260928,3,1,13,19,2017-11-08 21:35:25,0
+38634,9,1,17,334,2017-11-07 16:37:33,0
+7434,12,1,13,265,2017-11-09 04:54:07,0
+48170,12,2,18,259,2017-11-07 05:51:19,0
+66541,14,1,18,463,2017-11-07 05:04:48,0
+194983,21,1,19,128,2017-11-06 23:53:23,0
+212090,15,1,18,315,2017-11-08 13:17:05,0
+268835,3,1,18,135,2017-11-08 07:14:44,0
+25695,3,1,19,280,2017-11-07 06:52:44,0
+287070,9,1,16,334,2017-11-09 02:45:38,0
+199694,27,1,8,153,2017-11-06 16:56:32,0
+4511,3,1,13,371,2017-11-08 00:19:29,0
+56313,15,1,41,3,2017-11-09 09:42:04,0
+90948,12,1,8,265,2017-11-08 09:16:07,0
+178581,3,1,35,280,2017-11-08 15:29:05,0
+158713,2,1,18,212,2017-11-06 20:24:51,0
+55047,64,1,13,459,2017-11-09 09:53:23,0
+39045,12,1,26,265,2017-11-07 09:17:31,0
+32788,2,1,36,477,2017-11-09 09:28:35,0
+22517,6,1,20,125,2017-11-07 13:16:16,0
+353818,15,1,19,245,2017-11-08 18:31:56,0
+171832,18,1,53,134,2017-11-08 12:45:02,0
+92749,12,1,13,265,2017-11-08 23:28:33,0
+43855,15,1,19,245,2017-11-07 16:01:00,0
+1313,8,1,22,145,2017-11-09 11:09:46,0
+114802,19,0,29,213,2017-11-09 04:07:47,0
+145008,3,1,35,205,2017-11-09 08:46:40,0
+7595,15,1,41,265,2017-11-07 05:18:19,0
+128400,9,1,17,442,2017-11-07 16:23:56,0
+128610,1,1,8,17,2017-11-07 13:41:55,0
+77048,6,1,17,125,2017-11-08 10:05:11,0
+101501,8,1,13,145,2017-11-08 23:52:51,0
+40736,8,1,13,145,2017-11-06 23:20:33,0
+40849,12,1,19,140,2017-11-09 14:20:13,0
+32457,15,1,19,245,2017-11-07 01:49:38,0
+95766,1,1,7,134,2017-11-07 11:26:25,0
+75270,3,1,19,442,2017-11-09 12:09:49,0
+28652,15,1,17,245,2017-11-07 02:54:14,0
+290599,2,1,13,435,2017-11-09 06:08:12,0
+200707,14,1,17,463,2017-11-09 08:43:35,0
+114276,9,1,19,334,2017-11-07 16:22:29,0
+168961,21,1,9,128,2017-11-09 04:40:01,0
+87783,9,1,13,334,2017-11-08 09:16:32,0
+106200,8,1,13,145,2017-11-09 04:48:58,0
+168352,14,1,28,401,2017-11-09 09:11:14,0
+35650,3,1,13,280,2017-11-08 05:31:03,0
+139542,2,1,13,469,2017-11-06 23:09:14,0
+77107,12,1,22,178,2017-11-09 14:35:45,0
+23496,2,1,18,237,2017-11-07 22:59:25,0
+37417,11,1,19,319,2017-11-06 23:48:31,0
+112329,12,1,13,265,2017-11-08 14:39:09,0
+10572,12,1,18,340,2017-11-09 15:35:35,0
+108160,3,2,36,280,2017-11-08 07:20:37,0
+39349,21,1,17,128,2017-11-09 10:31:54,0
+65863,12,1,37,265,2017-11-07 05:15:29,0
+97103,12,1,13,245,2017-11-07 03:31:33,0
+39818,23,1,19,153,2017-11-09 14:13:05,0
+48004,20,1,16,259,2017-11-08 03:44:41,0
+7664,21,1,19,128,2017-11-07 01:12:02,0
+199564,3,2,11,442,2017-11-08 12:19:52,0
+173141,9,2,66,107,2017-11-09 12:01:22,0
+5348,15,1,13,140,2017-11-06 16:17:07,0
+172503,14,1,13,442,2017-11-07 12:31:22,0
+7146,3,1,41,205,2017-11-07 09:20:10,0
+98045,3,1,32,211,2017-11-08 08:50:39,0
+99856,44,3543,748,347,2017-11-08 13:50:05,0
+88781,15,1,13,278,2017-11-07 06:27:29,0
+47083,2,1,16,219,2017-11-08 23:52:28,0
+51967,12,1,19,178,2017-11-08 10:51:31,0
+145031,28,1,13,135,2017-11-07 14:07:45,0
+34432,12,1,10,19,2017-11-07 00:38:44,0
+77813,15,1,14,245,2017-11-08 08:09:27,0
+2189,94,1,13,361,2017-11-08 02:44:29,0
+40022,12,2,22,265,2017-11-07 03:04:12,0
+17477,3,1,28,205,2017-11-08 23:54:17,0
+114314,12,1,17,497,2017-11-07 04:35:22,0
+202016,12,1,12,105,2017-11-07 01:08:12,0
+52043,15,1,19,245,2017-11-08 14:27:09,0
+145838,3,1,8,280,2017-11-08 00:33:48,0
+99944,2,1,19,237,2017-11-09 01:13:48,0
+81629,18,1,17,107,2017-11-08 13:00:01,0
+58404,15,1,19,3,2017-11-06 19:46:53,0
+77553,14,1,19,480,2017-11-09 00:02:53,0
+194008,14,1,19,489,2017-11-06 23:10:47,0
+50696,14,1,17,480,2017-11-07 03:47:59,0
+38628,8,1,19,145,2017-11-09 09:06:27,0
+134034,23,1,19,153,2017-11-07 03:29:24,0
+73487,3,1,13,153,2017-11-09 05:16:09,0
+39209,2,1,13,212,2017-11-08 17:26:54,0
+12103,9,1,14,466,2017-11-09 09:04:13,0
+92749,12,1,13,259,2017-11-07 09:26:01,0
+164320,15,1,20,480,2017-11-07 07:14:41,0
+167166,3,1,19,135,2017-11-07 09:42:57,0
+77983,9,1,13,107,2017-11-08 22:49:31,0
+12091,11,1,8,481,2017-11-08 09:42:18,0
+120669,3,1,25,280,2017-11-09 01:19:32,0
+208937,18,1,19,134,2017-11-08 14:03:21,0
+350411,9,1,8,127,2017-11-09 15:37:59,0
+89427,8,1,8,145,2017-11-09 12:27:49,0
+108872,6,1,25,125,2017-11-09 09:17:29,0
+5314,12,1,13,265,2017-11-08 09:29:44,0
+69297,14,1,19,401,2017-11-09 13:07:04,0
+128120,21,1,1,128,2017-11-07 09:59:00,0
+87073,12,1,52,259,2017-11-07 03:37:35,0
+14116,23,1,18,153,2017-11-08 02:45:46,0
+41229,8,1,15,145,2017-11-07 14:16:25,0
+91641,15,1,19,130,2017-11-08 02:35:32,0
+118056,9,1,17,232,2017-11-07 05:42:42,0
+29346,109,0,38,347,2017-11-07 16:26:18,0
+100494,18,1,56,134,2017-11-09 01:54:02,0
+105292,3,2,79,402,2017-11-08 22:43:00,0
+70975,12,1,1,424,2017-11-07 00:23:53,0
+6814,9,1,37,466,2017-11-09 14:24:15,0
+100275,15,1,13,315,2017-11-09 15:28:23,0
+58961,15,1,12,430,2017-11-07 12:48:18,0
+162684,12,1,13,174,2017-11-07 00:47:32,0
+56370,12,1,19,178,2017-11-08 02:34:13,0
+97300,14,1,19,379,2017-11-09 00:06:21,0
+107569,23,1,18,153,2017-11-08 00:27:29,0
+51571,2,1,19,212,2017-11-09 03:28:06,0
+43827,3,1,1,211,2017-11-08 13:29:12,0
+81187,9,1,16,244,2017-11-08 21:57:51,0
+58982,3,1,13,280,2017-11-08 16:44:45,0
+71428,3,1,19,280,2017-11-08 05:35:25,0
+80539,3,1,17,115,2017-11-08 22:58:25,0
+32453,18,3543,748,107,2017-11-07 16:15:09,0
+10392,12,1,15,340,2017-11-09 08:34:30,0
+234802,11,1,19,481,2017-11-08 15:09:49,0
+71475,19,114,0,213,2017-11-07 04:39:50,0
+32487,15,1,19,3,2017-11-09 13:10:06,0
+48671,12,1,13,265,2017-11-09 15:34:17,0
+51464,2,1,8,122,2017-11-06 23:49:13,0
+82754,9,1,23,215,2017-11-06 21:08:59,0
+71488,3,1,12,135,2017-11-07 01:37:02,0
+87392,12,1,16,265,2017-11-09 09:28:11,0
+68381,18,1,23,439,2017-11-06 20:28:02,0
+79857,18,1,13,107,2017-11-06 17:12:10,0
+2334,18,3032,607,107,2017-11-07 09:35:42,0
+97073,7,1,18,101,2017-11-09 12:53:39,0
+106493,12,1,14,265,2017-11-09 11:03:33,0
+19054,6,1,13,125,2017-11-07 03:38:58,0
+54722,12,1,11,245,2017-11-07 20:39:34,0
+137780,12,1,13,409,2017-11-08 13:11:46,0
+126297,2,1,18,477,2017-11-07 15:10:32,0
+40815,2,1,18,469,2017-11-08 23:16:01,0
+101340,9,1,17,442,2017-11-09 13:00:35,0
+10973,23,1,47,153,2017-11-08 02:22:52,0
+8539,36,1,19,373,2017-11-07 15:22:11,0
+62688,25,1,19,259,2017-11-08 13:58:29,0
+13104,3,1,1,280,2017-11-09 03:45:55,0
+111251,14,1,37,467,2017-11-06 18:28:01,0
+20184,6,1,13,459,2017-11-09 15:28:31,0
+77257,2,1,18,477,2017-11-07 04:53:56,0
+252710,9,1,22,258,2017-11-08 00:06:38,0
+82843,12,2,15,178,2017-11-08 02:59:31,0
+5348,2,1,53,212,2017-11-07 15:22:35,0
+4754,7,1,18,101,2017-11-09 04:34:51,0
+69032,12,2,15,140,2017-11-09 11:17:23,0
+6641,18,1,18,107,2017-11-07 16:20:07,0
+53454,26,1,13,477,2017-11-09 14:21:13,0
+345228,14,2,19,118,2017-11-09 13:45:51,0
+226253,18,1,19,121,2017-11-08 05:39:09,0
+24932,14,1,19,160,2017-11-09 14:32:28,0
+49553,2,1,6,477,2017-11-08 10:25:54,0
+139835,9,1,19,334,2017-11-08 00:19:40,0
+2666,2,1,53,219,2017-11-09 04:12:57,0
+59969,12,1,19,328,2017-11-09 04:38:49,0
+79718,18,1,13,134,2017-11-07 23:55:41,0
+70491,2,1,13,477,2017-11-09 08:24:38,0
+77866,9,1,19,466,2017-11-09 13:52:00,0
+113454,1,1,43,124,2017-11-07 14:30:24,0
+176165,9,1,19,215,2017-11-09 01:41:43,0
+54910,20,1,12,478,2017-11-09 13:55:26,0
+364719,18,1,15,107,2017-11-09 12:17:44,0
+265592,64,1,13,459,2017-11-08 05:32:36,0
+90891,12,1,19,178,2017-11-08 08:56:16,0
+78955,3,1,19,317,2017-11-08 09:00:43,0
+271769,2,1,23,243,2017-11-09 11:52:25,0
+47132,24,1,42,105,2017-11-07 03:48:04,0
+114490,2,1,19,477,2017-11-07 04:28:50,0
+274915,19,154,50,282,2017-11-08 00:46:01,0
+8749,26,1,9,121,2017-11-07 10:11:42,0
+93587,15,1,9,430,2017-11-08 02:20:05,0
+18053,6,1,18,459,2017-11-09 12:09:17,0
+54841,9,1,34,489,2017-11-09 15:29:32,0
+42237,12,1,13,265,2017-11-07 03:03:01,0
+68996,9,1,6,489,2017-11-07 17:12:19,0
+72371,14,1,25,489,2017-11-07 03:22:58,0
+99150,29,1,6,213,2017-11-09 12:51:28,0
+73516,2,2,18,477,2017-11-08 16:29:38,0
+324,14,1,13,208,2017-11-08 01:16:51,0
+23599,12,1,19,140,2017-11-08 10:50:58,0
+196573,18,1,13,107,2017-11-08 05:41:13,0
+215649,13,1,11,477,2017-11-08 08:31:06,0
+118048,64,1,3,459,2017-11-08 10:00:48,0
+63624,9,1,13,215,2017-11-08 06:00:05,0
+105292,2,1,6,205,2017-11-07 12:39:38,0
+15652,13,1,37,477,2017-11-07 06:57:15,0
+73487,3,1,19,409,2017-11-07 12:17:47,0
+30404,12,1,10,328,2017-11-09 12:04:15,0
+196,15,1,11,245,2017-11-07 15:01:25,0
+117867,18,1,19,121,2017-11-07 20:21:47,0
+85419,3,1,13,442,2017-11-07 03:44:46,0
+75539,15,1,10,245,2017-11-06 16:19:12,0
+200466,11,1,17,487,2017-11-07 01:47:37,0
+295474,9,1,13,215,2017-11-09 03:27:25,0
+37485,3,1,13,442,2017-11-08 09:14:13,0
+39328,2,1,28,236,2017-11-09 03:29:12,0
+105811,14,1,13,480,2017-11-08 14:55:29,0
+8718,26,1,19,121,2017-11-07 09:10:09,0
+201182,3,1,13,211,2017-11-08 15:39:13,0
+14967,8,1,19,145,2017-11-08 00:22:40,0
+172187,9,1,13,442,2017-11-07 11:26:13,0
+112682,12,1,17,259,2017-11-09 15:14:47,0
+122207,12,1,19,245,2017-11-07 22:23:04,0
+31467,12,1,19,178,2017-11-09 05:55:09,0
+266205,2,1,19,236,2017-11-08 03:11:05,0
+96692,3,1,19,280,2017-11-08 10:15:12,0
+18332,9,2,10,466,2017-11-08 17:45:23,0
+209155,12,1,16,205,2017-11-06 17:17:59,0
+116001,7,1,10,101,2017-11-07 10:44:49,0
+91694,2,1,17,205,2017-11-07 00:03:04,0
+124195,14,1,13,489,2017-11-07 02:23:13,0
+2753,9,1,6,134,2017-11-08 00:37:05,0
+66240,18,1,19,107,2017-11-09 13:35:52,0
+61715,14,1,25,349,2017-11-07 07:31:34,0
+97500,18,1,17,107,2017-11-08 13:00:15,0
+5314,15,1,17,130,2017-11-08 12:43:35,0
+41565,18,1,8,107,2017-11-09 07:58:45,0
+41437,2,1,1,236,2017-11-09 04:15:13,0
+152918,21,1,27,128,2017-11-07 08:35:52,0
+84896,18,1,19,121,2017-11-09 14:47:32,0
+31387,18,1,16,107,2017-11-08 07:54:20,0
+124002,9,1,19,466,2017-11-06 23:48:51,0
+4724,12,1,13,265,2017-11-08 11:16:41,0
+88162,11,1,19,173,2017-11-08 06:36:54,0
+38922,9,1,13,145,2017-11-09 14:50:16,0
+77041,22,1,41,116,2017-11-08 00:25:01,0
+267477,2,1,8,435,2017-11-08 00:04:35,0
+69944,3,1,13,173,2017-11-09 00:10:49,0
+15343,14,1,13,463,2017-11-09 00:18:12,0
+5314,20,1,25,259,2017-11-09 04:31:01,0
+59091,9,1,13,134,2017-11-08 22:16:50,0
+152977,3,1,13,280,2017-11-09 04:49:35,0
+34562,26,1,13,121,2017-11-07 07:14:20,0
+73487,9,2,13,134,2017-11-07 01:32:49,0
+107364,12,1,13,178,2017-11-09 03:38:07,0
+88680,8,1,18,145,2017-11-08 00:37:15,0
+9781,3,1,13,409,2017-11-07 14:03:56,0
+42334,2,1,13,435,2017-11-07 04:46:28,0
+59397,2,1,41,435,2017-11-08 10:09:08,0
+40931,9,1,19,334,2017-11-09 03:14:37,0
+40530,3,1,13,280,2017-11-07 04:33:35,0
+99944,12,1,41,259,2017-11-09 02:06:17,0
+25097,1,2,18,134,2017-11-07 12:08:45,0
+6942,21,1,20,128,2017-11-09 12:11:11,0
+95173,3,1,16,280,2017-11-09 08:24:35,0
+90888,3,1,32,442,2017-11-08 23:30:27,0
+105519,9,1,13,442,2017-11-07 16:11:11,0
+84264,12,1,6,328,2017-11-09 08:06:12,0
+175706,3,1,13,280,2017-11-08 08:16:22,0
+123878,3,1,10,280,2017-11-08 13:55:09,0
+34894,18,3032,607,107,2017-11-07 12:59:15,0
+77866,9,1,15,127,2017-11-09 15:38:58,0
+48646,18,1,19,107,2017-11-09 10:34:37,0
+58705,9,1,3,127,2017-11-09 15:24:56,0
+65813,3,1,15,173,2017-11-08 23:58:47,0
+5912,15,1,19,245,2017-11-08 01:23:37,0
+108742,12,1,13,105,2017-11-09 01:03:42,0
+112496,2,1,22,219,2017-11-08 07:50:48,0
+92766,15,1,18,430,2017-11-07 07:32:23,0
+162296,15,1,8,245,2017-11-09 07:06:44,0
+148032,2,1,19,469,2017-11-08 08:54:28,0
+92447,62,1,20,21,2017-11-08 09:46:20,0
+191863,15,2,6,265,2017-11-08 11:59:03,0
+17262,2,1,22,236,2017-11-09 02:24:43,0
+23086,2,1,13,122,2017-11-09 11:05:10,0
+44595,3,1,19,424,2017-11-09 09:53:38,0
+4529,26,1,19,121,2017-11-07 09:28:14,0
+191084,15,1,23,386,2017-11-08 07:45:01,0
+116836,2,1,19,219,2017-11-08 00:52:39,0
+114235,2,1,9,477,2017-11-07 15:56:04,0
+34561,9,1,8,107,2017-11-09 14:49:37,0
+125222,25,1,41,259,2017-11-07 06:22:22,0
+97684,3,1,13,280,2017-11-08 15:00:49,0
+108919,12,1,19,245,2017-11-09 03:37:27,0
+95585,26,1,43,121,2017-11-07 15:48:38,0
+119393,21,1,13,128,2017-11-07 03:16:28,0
+151672,2,1,13,477,2017-11-08 08:23:36,0
+178323,3,1,19,280,2017-11-07 10:01:20,0
+21021,15,1,20,245,2017-11-07 08:58:38,0
+24865,12,1,18,259,2017-11-08 23:32:04,0
+8208,2,1,8,122,2017-11-08 08:28:58,0
+157529,12,1,19,178,2017-11-09 08:15:27,0
+21063,21,1,19,232,2017-11-09 13:01:17,0
+53960,2,1,3,205,2017-11-06 17:44:43,0
+121303,28,1,18,135,2017-11-07 05:52:48,0
+60314,2,1,22,205,2017-11-09 04:55:27,0
+149667,2,1,19,435,2017-11-08 02:34:27,0
+241520,26,1,19,121,2017-11-08 06:30:09,0
+91281,2,1,13,377,2017-11-07 23:57:57,0
+49407,15,1,19,140,2017-11-06 22:44:29,0
+45882,18,1,27,107,2017-11-08 09:48:07,0
+46923,25,1,13,259,2017-11-07 09:28:03,0
+80781,15,1,13,315,2017-11-07 13:23:01,0
+3994,13,1,13,477,2017-11-07 09:51:06,0
+139783,12,1,53,265,2017-11-08 08:51:21,0
+53408,6,1,19,459,2017-11-08 01:59:43,0
+95541,2,1,13,477,2017-11-06 17:05:55,0
+43793,18,1,13,107,2017-11-07 14:02:37,0
+44056,13,1,19,477,2017-11-09 04:02:20,0
+27132,12,1,13,245,2017-11-08 13:40:26,0
+87715,3,1,19,409,2017-11-08 14:47:32,0
+78136,12,1,31,340,2017-11-09 15:39:51,0
+73582,3,1,18,424,2017-11-08 22:57:14,0
+8718,18,1,8,107,2017-11-09 11:46:04,0
+209385,2,1,6,477,2017-11-09 02:04:41,0
+257714,12,1,15,178,2017-11-07 16:12:22,0
+18270,12,1,46,265,2017-11-09 14:40:35,0
+40488,14,1,16,463,2017-11-07 06:40:21,0
+70522,1,1,9,135,2017-11-07 00:36:54,0
+99579,2,1,13,452,2017-11-08 14:51:35,0
+255310,23,1,28,153,2017-11-09 03:42:23,0
+62803,19,0,29,213,2017-11-09 09:17:39,0
+13634,12,1,15,178,2017-11-09 11:01:04,0
+90485,12,1,19,205,2017-11-09 08:03:31,0
+95766,2,1,19,469,2017-11-08 13:34:43,0
+52094,8,1,22,140,2017-11-09 12:52:23,0
+4052,12,1,10,245,2017-11-09 05:07:49,0
+45374,9,1,36,127,2017-11-08 15:29:30,0
+46401,9,1,16,334,2017-11-08 15:24:50,0
+76460,14,1,20,134,2017-11-09 09:17:50,0
+5314,2,1,13,212,2017-11-09 10:13:00,0
+60442,18,1,18,134,2017-11-08 09:13:35,0
+95766,1,1,13,439,2017-11-09 07:01:05,0
+50296,12,1,13,178,2017-11-08 07:53:19,0
+123709,13,1,6,469,2017-11-09 06:46:54,0
+80437,15,1,19,3,2017-11-07 06:46:13,0
+84701,2,1,13,317,2017-11-08 10:06:22,0
+67628,18,1,19,107,2017-11-07 14:54:13,0
+100447,6,1,19,459,2017-11-07 08:18:12,0
+63080,2,1,6,435,2017-11-09 10:09:16,0
+81662,2,1,16,477,2017-11-08 16:43:20,0
+339662,5,1,43,113,2017-11-09 09:21:33,1
+23035,9,1,19,232,2017-11-09 08:59:15,0
+8378,18,1,13,121,2017-11-06 23:14:14,0
+43775,8,1,13,145,2017-11-07 01:13:58,0
+4414,9,1,14,253,2017-11-08 02:12:35,0
+100276,15,1,53,130,2017-11-07 09:26:26,0
+169001,15,1,27,315,2017-11-08 11:32:15,0
+106200,12,1,6,178,2017-11-09 03:20:49,0
+17845,18,1,22,107,2017-11-09 07:24:16,0
+122452,6,1,22,125,2017-11-09 06:41:16,0
+69873,9,1,1,466,2017-11-08 03:57:43,0
+90855,3,1,8,153,2017-11-09 04:54:20,0
+358710,3,1,47,280,2017-11-09 01:26:32,0
+66003,18,1,19,134,2017-11-07 05:19:28,0
+173751,3,1,15,205,2017-11-06 23:24:05,0
+67316,581,3032,607,347,2017-11-07 02:11:46,0
+123619,2,1,13,435,2017-11-07 09:30:12,0
+81571,3,1,10,130,2017-11-07 01:58:50,0
+38602,18,1,19,107,2017-11-07 09:42:40,0
+108172,18,1,6,107,2017-11-09 10:00:12,0
+82816,2,1,19,258,2017-11-09 04:58:27,0
+59064,14,1,22,439,2017-11-08 00:45:06,0
+27820,2,1,13,236,2017-11-09 01:29:50,0
+72164,9,1,16,215,2017-11-09 09:50:06,0
+116431,12,1,46,328,2017-11-07 08:30:09,0
+34680,3,1,19,205,2017-11-07 09:29:50,0
+5147,15,1,16,245,2017-11-08 04:55:19,0
+7377,22,1,19,496,2017-11-07 05:08:03,0
+328915,12,1,15,19,2017-11-09 01:05:48,0
+112812,12,1,18,328,2017-11-08 07:10:02,0
+9414,3,1,13,19,2017-11-06 17:26:55,0
+234055,15,1,13,386,2017-11-08 03:19:29,0
+181717,15,1,19,386,2017-11-07 06:07:10,0
+8157,26,1,9,266,2017-11-08 13:19:46,0
+67343,8,1,13,145,2017-11-06 16:42:42,0
+49431,6,1,8,459,2017-11-09 14:50:52,0
+28355,13,1,19,469,2017-11-07 14:47:04,0
+73767,14,1,37,401,2017-11-07 18:20:44,0
+67772,12,1,41,245,2017-11-08 10:19:56,0
+112012,18,1,19,121,2017-11-09 15:15:40,0
+37892,2,1,13,122,2017-11-07 07:27:03,0
+76022,9,1,37,215,2017-11-07 13:45:13,0
+287007,1,1,9,349,2017-11-08 01:30:58,0
+54786,6,1,19,125,2017-11-07 16:03:41,0
+62916,24,1,15,105,2017-11-07 01:11:20,0
+14792,2,1,6,435,2017-11-09 11:16:12,0
+225265,6,1,13,125,2017-11-08 09:17:00,0
+72002,18,1,19,107,2017-11-09 12:50:33,0
+59121,18,1,22,107,2017-11-08 23:58:27,0
+73487,12,1,19,326,2017-11-07 23:26:40,0
+168236,1,1,13,134,2017-11-09 00:04:13,0
+116581,2,1,19,317,2017-11-08 10:25:51,0
+89653,3,1,18,489,2017-11-08 00:22:09,0
+119149,1,1,1,134,2017-11-08 17:20:12,0
+226358,9,1,25,253,2017-11-08 01:22:38,0
+55852,9,2,17,334,2017-11-08 23:08:45,0
+8610,9,1,37,215,2017-11-06 16:56:00,0
+73347,12,1,22,328,2017-11-08 03:20:31,0
+87007,2,1,19,219,2017-11-07 06:19:30,0
+18332,2,1,13,237,2017-11-07 12:36:09,0
+105475,8,2,10,145,2017-11-09 02:56:32,0
+64741,12,1,25,328,2017-11-09 00:50:09,0
+80703,2,1,19,237,2017-11-07 02:28:11,0
+8924,3,1,14,480,2017-11-08 07:12:29,0
+36132,15,1,13,111,2017-11-09 07:46:18,0
+110595,14,1,19,439,2017-11-07 07:28:43,0
+10356,26,1,13,121,2017-11-09 07:08:49,0
+109361,15,1,53,430,2017-11-06 18:30:05,0
+58960,3,1,18,205,2017-11-08 17:25:00,0
+62937,3,1,16,280,2017-11-09 06:37:29,0
+51110,6,1,13,459,2017-11-09 09:12:56,0
+109149,32,1,19,376,2017-11-07 07:07:34,0
+48011,12,1,19,328,2017-11-09 05:50:16,0
+132262,14,1,52,442,2017-11-06 23:33:19,0
+174526,3,1,19,205,2017-11-07 15:10:53,0
+15819,1,1,19,439,2017-11-07 01:29:13,0
+73152,18,1,19,107,2017-11-08 04:59:12,0
+51179,12,1,13,265,2017-11-08 14:44:48,0
+80058,15,1,3,480,2017-11-09 02:43:40,0
+275888,5,1,8,377,2017-11-08 08:29:48,0
+249730,18,1,10,134,2017-11-08 10:04:40,0
+55649,3,2,6,280,2017-11-08 12:30:13,0
+94081,2,1,25,219,2017-11-09 06:37:34,0
+73487,12,1,19,245,2017-11-08 01:36:36,0
+209627,2,1,13,237,2017-11-08 13:14:15,0
+12087,3,1,19,452,2017-11-07 09:40:56,0
+111087,3,1,18,280,2017-11-08 09:14:54,0
+3850,3,1,13,409,2017-11-07 02:45:16,0
+37233,9,1,17,215,2017-11-07 16:02:08,0
+99855,3,1,20,280,2017-11-07 01:04:43,0
+38716,12,1,19,245,2017-11-06 18:36:37,0
+4324,3,1,6,173,2017-11-07 02:37:12,0
+30564,3,2,97,211,2017-11-06 22:51:45,0
+28200,12,1,18,497,2017-11-09 04:46:39,0
+115585,18,1,19,107,2017-11-09 07:01:36,0
+125459,6,1,13,459,2017-11-09 00:27:33,0
+7962,7,1,22,101,2017-11-09 10:45:24,0
+30564,3,1,23,211,2017-11-09 07:30:28,0
+102475,28,1,13,135,2017-11-09 14:02:07,0
+45929,12,1,19,245,2017-11-08 13:59:34,0
+48383,3,1,18,280,2017-11-08 14:03:41,0
+43827,15,1,10,245,2017-11-09 05:18:31,0
+328755,9,1,13,489,2017-11-09 05:13:36,0
+73954,2,1,8,469,2017-11-07 11:59:16,0
+241937,3,1,17,280,2017-11-08 05:53:49,0
+10434,9,1,19,107,2017-11-09 01:37:56,0
+191028,13,1,13,477,2017-11-09 02:30:36,0
+109161,26,1,32,266,2017-11-07 05:22:26,0
+78905,3,1,13,417,2017-11-07 05:48:12,0
+30164,13,1,16,469,2017-11-09 03:05:17,0
+112356,12,1,19,178,2017-11-09 10:23:34,0
+77612,12,1,11,259,2017-11-07 11:16:52,0
+247525,2,1,19,435,2017-11-08 03:00:36,0
+20398,15,1,14,245,2017-11-08 19:41:24,0
+198173,15,1,13,245,2017-11-07 08:57:22,0
+5314,18,1,19,121,2017-11-06 17:48:39,0
+67142,18,1,12,107,2017-11-09 01:38:37,0
+4405,15,1,19,430,2017-11-07 13:24:27,0
+108227,9,1,13,466,2017-11-08 04:59:15,0
+71335,55,1,13,453,2017-11-08 04:14:42,0
+46160,2,1,13,219,2017-11-09 13:51:43,0
+106210,18,1,19,107,2017-11-09 10:51:31,0
+83660,3,1,13,409,2017-11-07 13:55:04,0
+91232,15,1,35,379,2017-11-07 12:42:21,0
+50702,22,1,18,116,2017-11-08 13:44:56,0
+71710,21,1,13,128,2017-11-07 01:34:48,0
+27985,15,1,3,315,2017-11-07 15:33:30,0
+7230,22,1,25,116,2017-11-08 10:18:56,0
+48296,8,1,13,145,2017-11-07 23:26:59,0
+115641,14,1,19,489,2017-11-08 05:03:02,0
+42360,14,1,19,401,2017-11-08 11:03:51,0
+118534,3,1,13,442,2017-11-08 08:13:04,0
+257815,15,1,37,315,2017-11-09 01:08:12,0
+46680,9,1,13,466,2017-11-08 23:22:32,0
+64563,15,1,15,412,2017-11-07 08:48:28,0
+191846,2,1,13,452,2017-11-08 16:44:35,0
+106972,12,2,42,265,2017-11-08 09:18:33,0
+49462,3,1,10,280,2017-11-08 11:37:59,0
+87736,18,1,19,107,2017-11-09 11:21:59,0
+111215,2,1,19,237,2017-11-09 15:57:57,0
+53570,2,1,3,122,2017-11-06 23:51:43,0
+100971,14,1,16,349,2017-11-09 08:27:21,0
+81736,9,1,19,232,2017-11-08 16:41:07,0
+109020,11,1,13,325,2017-11-08 22:42:31,0
+114314,3,1,19,280,2017-11-07 12:01:04,0
+36565,12,1,16,409,2017-11-08 04:37:09,0
+101487,3,1,40,280,2017-11-09 02:17:39,0
+107173,2,1,13,377,2017-11-09 14:55:05,0
+27906,2,1,17,477,2017-11-07 00:08:43,0
+103147,2,1,19,236,2017-11-09 07:26:30,0
+126371,18,3543,748,107,2017-11-07 23:42:53,0
+120334,8,1,53,145,2017-11-07 11:18:18,0
+48240,3,1,19,409,2017-11-08 03:56:14,0
+114276,15,1,19,386,2017-11-08 04:40:33,0
+202528,2,1,19,237,2017-11-07 12:25:26,0
+123635,2,1,1,477,2017-11-08 11:11:36,0
+85150,17,1,13,280,2017-11-07 07:33:28,0
+155509,10,1,13,377,2017-11-07 16:05:01,0
+132492,24,1,10,178,2017-11-09 08:27:19,0
+162469,20,1,13,259,2017-11-09 15:48:49,0
+11492,14,1,19,379,2017-11-08 03:53:20,0
+116065,18,1,53,134,2017-11-08 09:09:54,0
+32323,21,1,8,128,2017-11-06 17:49:26,0
+65785,12,1,35,259,2017-11-08 14:15:21,0
+100203,28,1,19,135,2017-11-07 01:49:44,0
+247171,2,1,19,212,2017-11-08 06:18:30,0
+67658,17,1,22,280,2017-11-08 12:32:04,0
+115717,13,1,20,477,2017-11-09 10:44:18,0
+944,18,1,13,439,2017-11-08 22:21:29,0
+37320,3,1,13,280,2017-11-09 02:19:07,0
+81689,2,1,22,212,2017-11-08 09:35:04,0
+115837,20,1,13,259,2017-11-07 15:18:10,0
+14645,2,1,10,205,2017-11-08 22:09:41,0
+347391,2,1,53,477,2017-11-08 18:22:17,0
+205408,12,1,19,140,2017-11-09 09:00:23,0
+2334,3,1,19,489,2017-11-06 23:39:45,0
+241622,11,1,19,469,2017-11-08 10:35:55,0
+238025,3,1,73,280,2017-11-08 14:33:49,0
+121505,3,1,19,280,2017-11-08 11:20:14,0
+229183,9,1,3,127,2017-11-09 00:22:59,0
+95766,9,2,9,442,2017-11-07 12:26:24,0
+111685,18,1,19,134,2017-11-09 14:12:35,0
+108087,32,1,8,376,2017-11-08 15:25:04,0
+67409,18,1,19,107,2017-11-08 11:45:54,0
+17814,3,1,53,19,2017-11-08 07:05:25,0
+163326,2,1,17,237,2017-11-08 06:05:19,0
+464,3,1,13,280,2017-11-09 03:46:09,0
+69196,15,1,19,245,2017-11-07 14:50:34,0
+103097,18,1,19,107,2017-11-09 10:51:44,0
+131406,8,1,22,145,2017-11-09 05:03:01,0
+40631,14,1,23,442,2017-11-08 05:18:20,0
+30981,12,1,13,328,2017-11-09 01:54:01,0
+28183,12,1,17,328,2017-11-08 09:12:31,0
+97536,3,1,23,211,2017-11-08 12:38:31,0
+100868,15,1,19,412,2017-11-09 07:25:41,0
+86593,14,1,19,442,2017-11-07 19:39:44,0
+103190,3,1,9,371,2017-11-08 01:34:43,0
+269492,3,1,13,280,2017-11-09 06:07:31,0
+110985,15,1,19,140,2017-11-07 00:06:30,0
+10034,21,1,13,232,2017-11-07 16:02:06,0
+5348,9,1,22,442,2017-11-09 13:52:07,0
+351763,7,1,13,101,2017-11-09 06:09:52,0
+73487,2,1,174,469,2017-11-08 14:14:27,0
+95197,27,1,17,122,2017-11-08 07:20:47,0
+32000,9,1,13,127,2017-11-09 10:44:10,0
+80192,9,1,22,466,2017-11-09 10:33:59,0
+36150,2,2,10,205,2017-11-07 11:00:55,0
+3133,20,1,19,478,2017-11-07 05:20:14,0
+362938,2,1,22,477,2017-11-08 17:07:23,0
+20173,2,1,17,435,2017-11-06 23:40:13,0
+41736,26,1,13,266,2017-11-07 23:54:53,0
+24905,9,1,13,244,2017-11-07 03:52:35,0
+59059,6,1,32,125,2017-11-07 06:21:50,0
+58813,9,1,25,232,2017-11-09 04:21:57,0
+84301,9,1,19,450,2017-11-09 10:55:58,0
+95473,20,1,53,478,2017-11-07 15:54:38,0
+105125,2,1,13,435,2017-11-07 23:19:51,0
+56659,15,1,23,265,2017-11-08 04:01:27,0
+56918,3,1,13,173,2017-11-08 05:44:30,0
+81006,15,1,19,130,2017-11-08 04:30:48,0
+73487,9,1,19,215,2017-11-09 08:54:42,0
+80389,14,1,19,439,2017-11-09 10:43:40,0
+116425,14,1,13,113,2017-11-09 13:35:04,0
+19295,3,1,13,173,2017-11-07 03:11:43,0
+89164,8,2,13,145,2017-11-07 09:38:34,0
+88371,21,1,10,128,2017-11-08 04:22:27,0
+18061,18,1,748,107,2017-11-08 14:54:46,0
+88252,2,1,13,219,2017-11-07 07:04:03,0
+25074,15,1,31,153,2017-11-08 13:28:06,0
+86383,3,1,25,280,2017-11-07 01:54:43,0
+79075,9,1,17,134,2017-11-08 01:08:48,0
+100393,3,1,3,280,2017-11-07 04:25:30,0
+8975,8,1,3,145,2017-11-07 04:00:41,0
+149504,14,1,53,442,2017-11-07 13:24:17,0
+32643,15,1,13,379,2017-11-09 06:24:43,0
+15651,47,1,11,484,2017-11-09 13:19:58,0
+64960,23,1,19,153,2017-11-07 01:38:40,0
+238693,12,1,19,178,2017-11-08 02:49:53,0
+5449,12,1,18,245,2017-11-06 16:43:48,0
+105534,9,1,19,258,2017-11-08 14:51:16,0
+7597,17,1,6,280,2017-11-06 23:35:06,0
+173892,14,1,10,439,2017-11-07 12:14:38,0
+89782,12,1,23,259,2017-11-07 12:03:49,0
+52390,23,1,3,153,2017-11-06 23:08:46,0
+12450,9,1,18,466,2017-11-08 14:26:56,0
+108641,3,1,37,280,2017-11-08 11:31:52,0
+190943,17,1,20,280,2017-11-06 21:11:52,0
+124264,12,1,19,265,2017-11-09 11:34:26,0
+103805,9,1,10,334,2017-11-08 07:30:41,0
+70248,18,1,20,121,2017-11-06 18:02:38,0
+5574,18,1,10,107,2017-11-07 15:17:30,0
+221694,14,1,13,118,2017-11-09 09:59:08,0
+8506,15,1,15,480,2017-11-08 21:57:05,0
+5619,18,1,28,134,2017-11-08 05:34:01,0
+175442,2,1,19,469,2017-11-07 07:54:25,0
+144698,9,1,13,442,2017-11-08 00:40:50,0
+68453,11,1,37,481,2017-11-06 19:01:35,0
+108913,2,1,13,237,2017-11-08 18:41:50,0
+18985,6,1,17,459,2017-11-08 16:21:14,0
+65352,29,1,10,343,2017-11-08 09:43:19,0
+83388,13,1,22,400,2017-11-08 09:23:18,0
+19318,2,1,19,237,2017-11-07 04:11:00,0
+64815,21,1,13,232,2017-11-09 05:21:55,0
+2388,17,1,43,134,2017-11-07 09:40:38,0
+62879,20,2,18,259,2017-11-08 17:06:00,0
+925,21,1,26,128,2017-11-07 05:50:49,0
+53635,9,1,22,232,2017-11-08 05:55:13,0
+205134,21,2,19,128,2017-11-07 13:24:56,0
+46471,23,1,8,153,2017-11-07 01:01:05,0
+35221,18,1,1,107,2017-11-08 00:34:02,0
+104791,18,1,13,439,2017-11-07 00:17:06,0
+29223,2,1,19,477,2017-11-08 13:29:18,0
+1074,12,2,49,245,2017-11-07 15:20:20,0
+192756,15,1,13,245,2017-11-06 20:37:46,0
+33603,12,1,13,265,2017-11-09 13:27:29,0
+345248,18,1,19,107,2017-11-09 04:26:28,0
+84896,23,1,18,153,2017-11-07 20:11:01,0
+47171,2,1,25,435,2017-11-08 08:41:15,0
+156451,15,1,17,480,2017-11-07 06:44:55,0
+198025,2,1,19,122,2017-11-07 09:40:11,0
+3964,15,1,10,153,2017-11-08 13:48:37,0
+3964,12,1,22,481,2017-11-07 15:06:34,0
+340999,15,1,28,111,2017-11-09 02:36:14,0
+32788,3,1,13,280,2017-11-08 14:59:52,0
+263996,3,1,41,130,2017-11-07 23:57:02,0
+289845,12,1,13,409,2017-11-09 04:57:17,0
+46382,12,1,12,259,2017-11-07 02:09:20,0
+103567,3,1,19,130,2017-11-09 15:07:38,0
+318233,18,1,19,107,2017-11-09 07:05:59,0
+58028,3,1,31,115,2017-11-09 08:59:45,0
+34539,61,1,13,21,2017-11-07 09:39:39,0
+4760,2,1,23,236,2017-11-09 12:06:21,0
+106279,9,1,13,334,2017-11-09 14:22:15,0
+56516,12,1,19,178,2017-11-07 23:39:42,0
+79332,9,1,25,215,2017-11-07 12:05:47,0
+223989,3,1,13,280,2017-11-08 02:38:45,0
+118098,15,1,19,153,2017-11-08 01:11:14,0
+77761,13,1,19,469,2017-11-07 14:22:11,0
+65253,11,1,8,325,2017-11-09 12:05:40,0
+93021,2,1,19,477,2017-11-08 04:08:34,0
+54524,2,1,13,205,2017-11-08 03:23:20,0
+125062,15,1,8,245,2017-11-07 17:22:32,0
+109451,9,1,25,244,2017-11-07 21:03:43,0
+14737,9,1,19,466,2017-11-09 15:53:45,0
+47284,15,1,13,265,2017-11-08 03:42:32,0
+110710,3,1,53,480,2017-11-09 08:18:57,0
+25818,2,1,20,205,2017-11-08 14:14:38,0
+105269,12,1,9,265,2017-11-08 10:17:32,0
+78144,13,1,35,477,2017-11-07 15:20:44,0
+73516,8,1,13,145,2017-11-09 11:18:08,0
+45832,9,1,17,244,2017-11-07 01:34:43,0
+17289,12,1,8,245,2017-11-08 16:26:27,0
+105834,2,1,22,364,2017-11-08 07:20:19,0
+66587,15,1,13,278,2017-11-09 15:18:57,0
+114276,2,1,13,477,2017-11-07 23:48:46,0
+7819,18,1,31,317,2017-11-07 04:45:32,0
+112299,12,1,19,340,2017-11-09 13:04:33,0
+70552,2,1,15,205,2017-11-07 13:31:26,0
+97048,18,1,1,107,2017-11-08 08:46:23,0
+310364,14,1,3,467,2017-11-09 03:00:36,0
+135050,2,2,19,205,2017-11-07 10:40:56,0
+55920,12,1,18,178,2017-11-08 10:34:42,0
+90891,3,1,19,153,2017-11-08 19:07:36,0
+77048,9,1,18,489,2017-11-08 22:06:53,0
+58838,24,1,13,105,2017-11-08 18:46:01,0
+63411,3,1,22,442,2017-11-08 03:36:08,0
+30981,1,1,22,137,2017-11-07 09:29:57,0
+199458,3,1,19,409,2017-11-07 02:29:37,0
+24905,3,1,18,115,2017-11-07 00:28:54,0
+181022,3,1,6,130,2017-11-09 03:46:46,0
+152271,2,1,13,477,2017-11-09 09:26:06,0
+73734,18,1,19,134,2017-11-07 15:52:30,0
+37883,15,1,13,480,2017-11-07 23:29:03,0
+124608,2,1,13,243,2017-11-06 17:33:59,0
+102446,2,1,13,477,2017-11-08 05:53:32,0
+68568,3,1,19,480,2017-11-07 00:59:41,0
+18596,2,1,22,477,2017-11-08 16:59:00,0
+52596,12,1,13,340,2017-11-08 05:23:32,0
+283691,3,1,13,280,2017-11-09 03:54:28,0
+75595,2,1,17,237,2017-11-07 15:36:16,0
+73891,9,1,19,134,2017-11-08 00:21:42,0
+102280,21,1,17,128,2017-11-09 05:46:47,0
+5314,12,1,19,245,2017-11-09 04:56:00,0
+9308,13,1,13,477,2017-11-08 05:49:02,0
+59395,3,1,14,205,2017-11-07 11:42:31,0
+62075,169,3866,866,347,2017-11-09 09:49:40,0
+123839,2,1,11,469,2017-11-08 00:16:12,0
+140145,18,1,19,439,2017-11-09 12:17:10,0
+83257,9,1,25,215,2017-11-08 00:16:19,0
+27627,1,1,19,377,2017-11-09 10:55:41,0
+75595,15,1,16,245,2017-11-08 16:24:13,0
+99075,1,1,17,134,2017-11-06 18:03:25,0
+110330,3,1,20,280,2017-11-08 09:42:22,0
+48288,2,1,18,477,2017-11-06 17:14:00,0
+102280,3,2,11,135,2017-11-07 12:49:59,0
+123586,2,1,13,236,2017-11-09 15:51:28,0
+96891,12,1,41,340,2017-11-08 15:02:06,0
+308454,14,1,27,401,2017-11-09 04:56:30,0
+86767,12,1,19,409,2017-11-06 23:14:14,0
+194292,9,1,19,145,2017-11-08 10:03:53,0
+133598,8,1,12,140,2017-11-08 11:57:55,0
+31412,9,1,19,466,2017-11-09 03:42:01,0
+192891,12,1,19,328,2017-11-07 22:08:03,0
+226498,19,137,24,282,2017-11-08 11:36:57,1
+111690,3,1,53,424,2017-11-07 01:31:26,0
+81079,18,1,13,121,2017-11-07 15:56:06,0
+144757,3,1,22,205,2017-11-08 14:52:58,0
+220627,10,1,13,377,2017-11-07 16:22:55,0
+171351,18,1,13,134,2017-11-07 03:22:17,0
+119885,3,1,14,205,2017-11-09 03:41:46,0
+54472,9,1,20,134,2017-11-08 16:25:55,0
+88311,8,1,13,145,2017-11-09 03:07:05,0
+40216,10,1,6,317,2017-11-06 23:23:35,0
+270379,15,1,19,245,2017-11-07 17:34:14,0
+180419,8,1,13,259,2017-11-08 14:10:25,0
+101300,64,1,25,459,2017-11-09 10:55:12,0
+88992,2,1,17,477,2017-11-07 04:34:24,0
+27151,9,1,19,258,2017-11-09 06:04:51,0
+273910,12,2,10,178,2017-11-08 12:35:46,0
+171258,26,1,8,266,2017-11-07 00:12:02,0
+220858,12,1,47,245,2017-11-08 07:39:46,0
+44536,15,1,13,278,2017-11-09 15:11:21,0
+100276,2,1,8,469,2017-11-07 09:03:01,0
+147738,18,1,22,107,2017-11-09 11:00:55,0
+123994,2,1,19,236,2017-11-07 05:00:48,0
+90837,13,1,13,477,2017-11-07 09:28:48,0
+116718,2,1,17,237,2017-11-08 04:52:59,0
+35327,2,1,13,469,2017-11-07 13:41:45,0
+265483,23,1,13,153,2017-11-07 22:26:59,0
+76966,18,1,13,107,2017-11-09 00:08:46,0
+122982,9,1,8,466,2017-11-08 09:24:13,0
+5348,15,1,3,153,2017-11-09 10:10:45,0
+53454,15,1,13,379,2017-11-08 04:28:52,0
+26583,2,1,13,469,2017-11-08 16:52:38,0
+226429,1,1,19,125,2017-11-09 15:43:41,0
+25092,12,1,13,245,2017-11-08 15:57:16,0
+4463,3,1,22,489,2017-11-08 23:20:08,0
+151574,25,1,13,259,2017-11-07 03:01:27,0
+63712,9,1,30,466,2017-11-08 08:05:57,0
+105834,2,1,16,364,2017-11-09 02:54:15,0
+83616,21,1,26,128,2017-11-09 10:30:15,0
+205316,3,1,37,173,2017-11-09 08:16:49,0
+85188,24,1,19,105,2017-11-07 12:44:08,0
+25719,3,1,17,280,2017-11-08 03:15:49,0
+140941,17,1,13,280,2017-11-06 17:08:56,0
+32967,11,1,66,319,2017-11-09 03:38:12,0
+925,14,2,41,467,2017-11-08 15:09:40,0
+145592,6,1,22,459,2017-11-08 23:38:16,0
+71808,1,1,22,101,2017-11-09 13:13:44,0
+101214,2,1,17,205,2017-11-08 02:31:16,0
+167386,15,1,15,480,2017-11-09 07:31:34,0
+119304,21,1,47,128,2017-11-08 19:16:01,0
+54631,3,1,19,442,2017-11-08 10:23:29,0
+187282,11,1,36,487,2017-11-09 13:48:28,0
+77763,21,1,47,128,2017-11-08 04:46:54,0
+57459,1,1,20,115,2017-11-08 12:01:05,0
+50197,18,1,19,121,2017-11-07 14:21:51,0
+8503,2,1,19,435,2017-11-09 12:24:16,0
+17610,2,1,19,212,2017-11-07 16:29:05,0
+205285,9,1,43,466,2017-11-07 16:42:43,0
+109832,12,1,19,409,2017-11-09 04:13:43,0
+6705,18,1,15,107,2017-11-07 10:49:14,0
+31158,3,1,18,280,2017-11-07 06:03:22,0
+101096,17,1,13,280,2017-11-08 16:52:16,0
+64639,2,1,19,435,2017-11-07 00:40:08,0
+264067,2,1,13,237,2017-11-08 09:16:02,0
+58994,3,1,17,280,2017-11-09 04:46:42,0
+141518,3,1,19,137,2017-11-06 17:52:37,0
+56625,2,1,35,219,2017-11-08 02:04:48,0
+68371,12,1,13,205,2017-11-09 11:05:34,0
+100971,18,1,26,121,2017-11-06 20:21:26,0
+18161,15,1,18,153,2017-11-06 20:26:33,0
+15680,3,1,12,466,2017-11-06 18:56:49,0
+44494,12,1,13,105,2017-11-08 01:23:31,0
+198163,18,1,6,107,2017-11-06 21:46:11,0
+32252,18,1,13,439,2017-11-07 05:57:25,0
+38721,10,1,4,317,2017-11-08 01:13:01,0
+105834,2,1,16,364,2017-11-07 16:36:50,0
+92861,3,1,13,452,2017-11-07 20:27:29,0
+44595,29,1,18,213,2017-11-09 09:08:55,0
+5314,9,1,53,244,2017-11-09 15:27:37,0
+144897,9,1,19,258,2017-11-07 03:12:15,0
+69595,18,1,19,134,2017-11-07 10:24:23,0
+10434,9,1,13,134,2017-11-06 22:07:43,0
+99323,6,1,19,459,2017-11-09 01:33:00,0
+10544,15,1,19,130,2017-11-08 01:30:56,0
+6641,12,1,31,265,2017-11-07 11:16:07,0
+360070,9,1,13,244,2017-11-09 03:11:37,0
+63369,26,1,13,121,2017-11-08 16:54:13,0
+13483,8,1,30,145,2017-11-07 09:16:56,0
+68216,2,1,19,237,2017-11-07 00:21:00,0
+24437,27,2,13,153,2017-11-08 02:26:31,0
+66906,21,1,32,232,2017-11-09 12:22:07,0
+37892,23,1,6,153,2017-11-08 14:20:18,0
+121679,20,1,18,478,2017-11-09 04:58:31,0
+125396,18,1,8,317,2017-11-07 04:17:14,0
+767,3,1,13,280,2017-11-08 02:21:08,0
+90509,9,1,13,232,2017-11-09 03:51:30,0
+300768,9,1,14,134,2017-11-09 14:52:41,0
+85424,3,1,13,280,2017-11-07 03:31:50,0
+56019,3,1,37,452,2017-11-08 17:08:34,0
+5418,9,1,3,215,2017-11-07 10:46:40,0
+42117,3,1,13,205,2017-11-09 14:05:25,0
+31544,15,1,15,386,2017-11-09 15:14:08,0
+130834,15,1,22,130,2017-11-06 17:53:17,0
+176471,18,1,32,134,2017-11-07 02:45:33,0
+92766,9,2,58,445,2017-11-07 11:14:49,0
+102738,9,1,19,466,2017-11-09 06:03:33,0
+350988,1,1,19,134,2017-11-09 12:20:28,0
+292641,9,1,6,127,2017-11-09 12:39:49,0
+7617,3,1,19,442,2017-11-08 07:34:29,0
+85302,12,1,9,265,2017-11-08 09:31:43,0
+1456,8,1,13,145,2017-11-09 07:39:06,0
+26759,26,1,19,266,2017-11-09 00:31:01,0
+180643,15,1,22,153,2017-11-07 04:12:15,0
+141094,24,1,11,105,2017-11-07 22:39:55,0
+73333,2,1,19,435,2017-11-07 14:27:19,0
+59295,18,1,13,107,2017-11-06 23:01:09,0
+131298,12,1,19,265,2017-11-07 10:00:53,0
+99809,3,1,13,280,2017-11-07 02:55:43,0
+79827,9,1,1,232,2017-11-07 01:33:12,0
+47997,15,1,25,130,2017-11-07 14:58:32,0
+112139,12,2,49,245,2017-11-07 23:35:43,0
+121026,15,1,15,245,2017-11-08 02:34:48,0
+43778,23,1,40,153,2017-11-08 11:02:05,0
+348371,9,1,36,489,2017-11-09 04:30:11,0
+35973,9,1,28,244,2017-11-08 01:06:12,0
+128022,315,1,13,110,2017-11-07 02:21:37,0
+125581,11,1,8,173,2017-11-09 02:23:07,0
+37426,12,1,3,178,2017-11-08 08:00:17,0
+61230,3,1,10,424,2017-11-07 23:52:15,0
+64756,13,1,19,477,2017-11-07 06:03:49,0
+7131,21,1,9,128,2017-11-08 05:43:31,0
+69034,12,1,30,409,2017-11-09 03:43:36,0
+310927,3,1,49,173,2017-11-09 08:48:22,0
+70474,12,1,18,140,2017-11-08 06:14:07,0
+75453,11,1,19,319,2017-11-07 23:15:31,0
+75453,3,1,19,280,2017-11-09 01:31:27,0
+105290,2,1,13,237,2017-11-06 16:15:54,0
+7474,1,1,9,135,2017-11-09 05:01:57,0
+104502,15,1,18,265,2017-11-07 14:23:36,0
+43537,1,1,16,124,2017-11-07 09:30:40,0
+2786,13,1,13,477,2017-11-09 02:43:53,0
+10410,9,1,10,466,2017-11-09 04:10:24,0
+30463,15,1,19,245,2017-11-08 04:42:04,0
+60311,15,1,19,278,2017-11-09 08:20:52,0
+52600,9,1,13,134,2017-11-08 06:23:45,0
+90626,23,1,41,153,2017-11-07 03:01:49,0
+30587,1,1,18,153,2017-11-08 04:22:30,0
+89940,18,3032,607,107,2017-11-06 23:29:05,0
+279352,9,1,41,466,2017-11-08 14:37:45,0
+103065,15,1,46,278,2017-11-08 14:56:47,0
+13186,3,1,22,182,2017-11-09 12:45:53,0
+97013,9,1,19,334,2017-11-07 00:08:00,0
+95766,3,1,13,480,2017-11-07 05:47:44,0
+67241,27,1,19,122,2017-11-07 10:44:17,0
+172697,12,1,6,259,2017-11-07 00:07:14,0
+44494,3,1,20,153,2017-11-07 16:56:10,0
+35436,14,1,22,489,2017-11-08 08:51:02,0
+81776,12,1,22,242,2017-11-08 07:54:25,0
+53479,18,1,17,107,2017-11-07 15:20:17,0
+101499,13,1,35,477,2017-11-08 01:05:15,0
+158557,3,1,25,280,2017-11-09 03:24:53,0
+61718,26,1,19,477,2017-11-08 23:43:59,0
+81837,9,2,35,234,2017-11-08 15:57:24,0
+12874,15,1,35,245,2017-11-07 13:17:28,0
+178771,2,1,19,236,2017-11-09 07:49:31,0
+67445,12,1,13,340,2017-11-08 11:18:47,0
+101921,13,1,13,477,2017-11-07 11:54:29,0
+93486,9,1,19,258,2017-11-08 06:47:08,0
+89192,14,1,19,467,2017-11-09 00:32:46,0
+123994,12,1,19,265,2017-11-09 07:47:46,0
+64609,14,1,19,463,2017-11-08 02:35:53,0
+8292,12,1,13,326,2017-11-06 23:35:35,0
+73296,1,1,37,153,2017-11-07 06:36:06,0
+66437,12,1,19,245,2017-11-08 11:17:38,0
+89821,15,1,13,430,2017-11-08 14:44:22,0
+8438,3,1,3,280,2017-11-08 01:50:46,0
+205301,18,1,17,107,2017-11-08 04:47:37,0
+125339,21,1,13,128,2017-11-09 07:53:18,0
+90904,2,1,19,212,2017-11-09 00:07:03,0
+33888,14,1,47,379,2017-11-09 09:34:19,0
+27429,15,1,13,430,2017-11-07 09:19:38,0
+114722,12,1,22,245,2017-11-09 04:27:30,0
+141352,3,1,13,130,2017-11-07 06:23:38,0
+199552,20,1,1,259,2017-11-08 17:05:39,0
+114276,18,1,19,107,2017-11-08 08:40:37,0
+29101,1,1,13,24,2017-11-09 10:21:56,0
+48282,2,1,13,122,2017-11-07 14:24:47,0
+2770,12,1,17,178,2017-11-07 13:18:39,0
+57783,14,1,42,446,2017-11-09 12:37:32,0
+55632,28,1,2,135,2017-11-07 02:08:41,0
+144253,3,1,8,130,2017-11-09 15:17:10,0
+92766,9,1,37,234,2017-11-09 06:45:06,0
+28010,12,1,13,265,2017-11-07 06:52:43,0
+33247,3,1,19,424,2017-11-09 09:19:15,0
+64187,12,1,19,178,2017-11-07 03:21:21,0
+66176,11,1,19,122,2017-11-07 15:08:34,0
+92673,3,2,44,153,2017-11-07 14:59:56,0
+84774,12,1,3,328,2017-11-09 15:04:52,0
+159361,6,1,19,125,2017-11-07 14:05:14,0
+81514,21,2,18,128,2017-11-08 09:58:30,0
+81792,18,1,40,107,2017-11-08 07:28:22,0
+107274,24,2,19,105,2017-11-09 15:23:20,0
+66663,18,1,41,107,2017-11-08 16:38:44,0
+48240,18,1,13,379,2017-11-09 15:11:41,0
+47073,12,1,9,245,2017-11-06 16:15:10,0
+49602,20,1,25,259,2017-11-08 14:18:51,0
+1415,22,1,19,116,2017-11-09 14:01:06,0
+89757,2,1,19,205,2017-11-09 03:14:29,0
+184901,9,1,19,445,2017-11-08 09:56:42,0
+80300,14,1,19,349,2017-11-08 13:56:56,0
+49407,2,1,42,477,2017-11-07 15:00:57,0
+145003,3,1,10,280,2017-11-08 12:18:08,0
+41092,12,1,13,340,2017-11-08 17:49:49,0
+21276,12,1,13,265,2017-11-08 23:19:47,0
+116558,12,1,17,265,2017-11-08 03:44:53,0
+59470,21,1,10,128,2017-11-08 02:27:45,0
+7617,3,1,14,280,2017-11-08 04:04:30,0
+171808,15,1,6,386,2017-11-07 14:41:49,0
+5729,3,1,19,409,2017-11-06 22:12:29,0
+201920,64,1,19,459,2017-11-07 14:51:38,0
+114276,3,1,19,135,2017-11-08 11:58:28,0
+94874,22,1,13,116,2017-11-08 06:41:28,0
+7134,10,1,13,377,2017-11-09 10:21:50,0
+100701,3,1,19,280,2017-11-08 04:34:50,0
+33641,18,1,1,107,2017-11-09 06:00:10,0
+102025,11,1,20,137,2017-11-09 00:36:11,0
+14839,14,1,27,480,2017-11-08 15:09:01,0
+76554,9,1,19,334,2017-11-09 05:02:56,0
+180643,18,1,13,121,2017-11-07 10:36:37,0
+728,2,1,22,219,2017-11-07 21:01:35,0
+93054,6,1,9,125,2017-11-08 12:40:37,0
+134147,9,1,19,232,2017-11-07 07:40:42,0
+80605,3,1,31,137,2017-11-09 13:54:43,0
+32587,9,1,9,134,2017-11-08 01:08:41,0
+58357,12,1,18,219,2017-11-07 10:48:51,0
+41287,3,1,13,409,2017-11-06 18:48:56,0
+5348,15,1,37,278,2017-11-09 05:00:45,0
+100013,3,1,8,409,2017-11-08 06:52:10,0
+95763,3,2,9,137,2017-11-07 23:54:10,0
+88018,3,1,19,280,2017-11-08 11:03:15,0
+174803,2,1,13,469,2017-11-07 09:26:44,0
+155720,14,1,17,379,2017-11-06 23:26:42,0
+93021,15,1,3,245,2017-11-07 02:38:12,0
+73516,12,1,17,259,2017-11-09 14:33:13,0
+5348,2,2,32,122,2017-11-07 11:41:59,0
+5314,15,1,15,379,2017-11-08 11:54:34,0
+91311,107,1,18,171,2017-11-07 07:47:14,0
+4420,2,1,15,236,2017-11-08 06:01:27,0
+109743,3,1,25,379,2017-11-09 01:38:39,0
+103866,8,1,9,145,2017-11-09 07:54:09,0
+166001,9,1,19,334,2017-11-08 03:27:07,0
+123803,26,1,19,121,2017-11-09 09:31:29,0
+37255,15,1,13,245,2017-11-08 16:14:02,0
+53408,15,1,17,140,2017-11-09 11:41:17,0
+34304,14,1,16,463,2017-11-06 21:48:49,0
+268343,15,1,19,245,2017-11-08 02:32:29,0
+44242,3,1,19,280,2017-11-07 13:01:19,0
+4295,15,2,15,245,2017-11-08 14:27:14,0
+95509,26,1,13,121,2017-11-09 11:13:50,0
+35180,27,1,47,122,2017-11-08 06:05:02,0
+9287,12,1,13,178,2017-11-07 14:46:27,0
+115772,9,1,16,466,2017-11-08 03:01:22,0
+47168,9,1,17,215,2017-11-07 00:25:13,0
+114999,13,1,8,477,2017-11-09 05:10:39,0
+7521,14,1,19,480,2017-11-09 02:38:50,0
+841,15,1,25,480,2017-11-08 14:17:10,0
+66743,1,1,13,134,2017-11-06 20:24:08,0
+88528,18,1,19,107,2017-11-07 05:09:53,0
+80305,9,1,17,445,2017-11-08 01:01:30,0
+137052,9,1,13,442,2017-11-06 22:43:19,0
+109770,15,1,9,265,2017-11-07 01:44:05,0
+46077,3,1,13,205,2017-11-09 05:08:39,0
+96165,15,1,13,153,2017-11-09 14:47:43,0
+28434,3,1,10,280,2017-11-08 03:08:26,0
+80228,2,1,13,477,2017-11-06 23:16:10,0
+214767,23,1,13,153,2017-11-08 07:24:19,0
+251341,2,1,13,237,2017-11-09 02:04:20,0
+95388,8,1,19,145,2017-11-07 16:26:09,0
+41445,6,1,13,125,2017-11-07 07:02:48,0
+38866,2,1,17,477,2017-11-09 05:55:38,0
+72253,6,1,17,125,2017-11-08 01:03:06,0
+211410,2,1,18,477,2017-11-09 00:15:29,0
+114802,2,1,13,477,2017-11-08 22:25:06,0
+105485,18,1,19,107,2017-11-08 07:22:00,0
+60235,3,1,19,402,2017-11-06 22:10:12,0
+66175,2,1,34,219,2017-11-08 00:38:00,0
+102118,15,1,22,3,2017-11-08 10:42:25,0
+10759,15,1,19,315,2017-11-07 12:07:57,0
+12948,24,1,47,105,2017-11-08 15:53:02,0
+114276,3,1,19,489,2017-11-08 04:36:55,0
+34738,3,1,13,280,2017-11-07 05:42:29,0
+59384,1,1,19,153,2017-11-07 15:14:29,0
+76181,12,2,9,178,2017-11-09 06:01:03,0
+362641,14,1,10,467,2017-11-09 05:39:56,0
+105649,2,2,8,205,2017-11-08 13:47:23,0
+5740,2,1,32,237,2017-11-08 03:08:44,0
+45769,8,1,19,145,2017-11-09 08:13:16,0
+92721,64,1,41,459,2017-11-06 17:43:32,0
+17013,3,1,16,280,2017-11-08 01:56:28,0
+184092,13,1,607,400,2017-11-06 16:00:15,0
+20385,14,1,19,379,2017-11-09 11:36:18,0
+195931,28,1,22,135,2017-11-07 05:35:15,0
+12399,3,1,16,137,2017-11-07 23:26:33,0
+38861,9,1,3,334,2017-11-08 06:35:58,0
+1091,3,1,41,280,2017-11-08 15:37:31,0
+22677,3,1,17,409,2017-11-07 03:34:05,0
+92376,18,1,17,121,2017-11-09 15:41:42,0
+7377,3,1,3,280,2017-11-09 05:25:27,0
+142716,2,1,13,435,2017-11-07 12:25:06,0
+73516,3,2,27,153,2017-11-07 07:15:34,0
+83252,23,1,6,153,2017-11-07 09:01:26,0
+108180,18,1,27,121,2017-11-09 11:52:51,0
+88923,8,1,19,145,2017-11-08 23:41:21,0
+107969,64,1,23,459,2017-11-08 06:12:07,0
+53454,3,2,13,417,2017-11-09 13:15:34,0
+50395,2,1,13,477,2017-11-09 05:56:06,0
+54401,12,1,20,245,2017-11-06 16:58:51,0
+50197,15,1,13,3,2017-11-07 01:39:55,0
+74068,2,1,8,237,2017-11-08 10:51:02,0
+255171,18,1,1,107,2017-11-09 12:28:29,0
+65792,12,1,18,245,2017-11-07 09:09:24,0
+121616,9,1,19,334,2017-11-09 07:43:24,0
+79666,29,2,9,343,2017-11-09 03:10:08,0
+8208,2,1,19,237,2017-11-09 04:20:23,0
+5348,3,1,13,280,2017-11-08 13:59:50,0
+33443,15,1,19,278,2017-11-08 12:18:21,0
+54868,3,1,53,466,2017-11-08 00:26:35,0
+112418,15,1,13,245,2017-11-07 08:41:20,0
+21660,12,1,13,178,2017-11-07 09:00:46,0
+9660,2,1,47,237,2017-11-07 05:38:08,0
+92816,6,1,3,125,2017-11-09 07:19:41,0
+50869,20,2,42,259,2017-11-07 08:42:48,0
+121608,3,1,13,280,2017-11-08 00:52:31,0
+80305,3,1,41,115,2017-11-08 15:21:40,0
+30059,3,1,17,280,2017-11-09 04:29:38,0
+262075,20,1,13,478,2017-11-08 01:22:42,0
+93054,5,1,15,377,2017-11-08 23:43:56,0
+121278,15,1,13,265,2017-11-06 23:39:54,0
+5314,12,2,17,178,2017-11-07 11:25:25,0
+86291,3,1,19,371,2017-11-08 00:14:23,0
+125571,2,1,17,435,2017-11-08 00:09:20,0
+94068,21,1,19,128,2017-11-07 07:47:56,0
+79456,14,1,17,480,2017-11-09 15:02:15,0
+44327,3,1,19,371,2017-11-07 00:46:06,0
+29164,2,1,13,477,2017-11-08 07:53:48,0
+81263,11,1,17,219,2017-11-09 11:33:06,0
+118367,26,1,13,266,2017-11-07 12:30:30,0
+6410,13,1,19,469,2017-11-09 07:02:42,0
+283045,15,1,16,245,2017-11-08 06:12:50,0
+103773,3,1,1,466,2017-11-07 00:55:30,0
+1082,3,1,18,280,2017-11-08 15:26:08,0
+1235,14,1,15,480,2017-11-09 06:56:15,0
+77048,8,2,19,145,2017-11-07 14:46:02,0
+272361,14,1,8,401,2017-11-08 07:50:46,0
+76255,3,1,1,280,2017-11-09 07:14:20,0
+68352,3,1,8,424,2017-11-09 03:52:04,0
+138599,9,1,13,134,2017-11-08 16:05:12,0
+125456,12,1,18,328,2017-11-07 09:11:36,0
+106795,9,1,19,466,2017-11-08 22:22:42,0
+44595,14,2,25,118,2017-11-09 09:33:07,0
+835,2,1,19,477,2017-11-08 03:35:47,0
+89078,14,1,18,489,2017-11-09 01:42:34,0
+59882,2,1,22,122,2017-11-07 06:05:11,0
+110077,2,1,19,477,2017-11-06 23:37:44,0
+47534,3,1,17,280,2017-11-07 05:05:31,0
+17629,25,1,13,259,2017-11-09 02:20:31,0
+116812,2,1,16,237,2017-11-09 03:06:43,0
+49383,3,2,36,280,2017-11-08 15:24:53,0
+99613,23,1,13,153,2017-11-07 16:46:33,0
+14236,2,2,65,477,2017-11-08 05:47:38,0
+44229,2,1,22,219,2017-11-07 12:19:00,0
+94115,3,1,16,442,2017-11-09 01:12:18,0
+200066,14,1,19,401,2017-11-07 02:44:24,0
+197144,3,1,22,173,2017-11-08 00:06:10,0
+45344,2,1,32,435,2017-11-09 05:34:19,0
+98261,2,1,56,452,2017-11-08 05:26:10,0
+184884,3,1,19,409,2017-11-07 08:45:09,0
+91536,2,2,16,205,2017-11-08 00:24:58,0
+300283,15,1,8,386,2017-11-09 05:00:45,0
+182576,6,1,19,459,2017-11-07 09:31:50,0
+18641,12,1,25,140,2017-11-07 12:06:42,0
+27997,9,1,13,244,2017-11-09 04:04:56,0
+133103,25,1,41,259,2017-11-08 13:13:36,0
+43865,3,1,19,280,2017-11-08 07:20:03,0
+35456,2,1,17,477,2017-11-07 23:22:32,0
+105339,2,1,27,219,2017-11-09 05:17:49,0
+37207,18,1,19,107,2017-11-09 00:39:24,0
+65982,18,1,18,121,2017-11-06 17:27:22,0
+13756,3,1,20,489,2017-11-07 13:08:29,0
+4147,18,1,32,449,2017-11-08 02:31:55,0
+229747,6,1,11,459,2017-11-08 01:25:53,0
+99856,2,1,23,236,2017-11-09 12:55:34,0
+39780,14,1,40,401,2017-11-07 08:15:55,0
+16032,15,1,6,412,2017-11-09 04:32:49,0
+186803,18,1,13,439,2017-11-08 08:06:54,0
+7595,14,1,8,360,2017-11-08 01:18:46,0
+82039,18,1,8,107,2017-11-09 00:08:49,0
+84916,9,1,13,234,2017-11-08 07:46:38,0
+7884,2,1,22,258,2017-11-06 23:43:30,0
+129803,13,1,19,469,2017-11-07 07:31:21,0
+3964,3,1,1,280,2017-11-08 16:01:08,0
+34419,9,1,13,244,2017-11-07 23:00:51,0
+123994,3,1,35,280,2017-11-08 04:27:58,0
+137832,64,1,16,459,2017-11-08 04:19:25,0
+69020,2,1,19,469,2017-11-07 04:03:08,0
+119715,14,1,19,480,2017-11-06 17:14:47,0
+104985,9,1,18,489,2017-11-08 04:30:26,0
+111757,3,1,47,280,2017-11-08 16:00:13,0
+59955,18,1,11,134,2017-11-07 00:54:14,0
+100901,9,1,17,445,2017-11-08 10:37:48,0
+83451,26,1,14,121,2017-11-07 00:35:18,0
+13727,2,1,6,477,2017-11-08 23:03:41,0
+66519,12,1,6,481,2017-11-09 04:14:59,0
+40816,12,1,18,178,2017-11-07 01:21:58,0
+11594,15,1,17,245,2017-11-09 03:36:08,0
+59159,3,1,19,379,2017-11-09 06:51:37,0
+148472,12,1,8,105,2017-11-08 07:00:43,0
+81939,10,1,18,377,2017-11-08 15:00:19,0
+101467,3,1,19,280,2017-11-09 01:40:40,0
+45411,27,1,19,153,2017-11-09 06:36:46,0
+67908,3,1,13,280,2017-11-09 02:23:14,0
+69856,15,1,32,245,2017-11-07 23:12:31,0
+7895,21,1,3,128,2017-11-08 09:52:09,0
+336891,3,1,13,115,2017-11-09 13:31:10,0
+36129,13,1,13,477,2017-11-09 01:57:08,0
+81347,64,1,8,459,2017-11-08 07:06:38,0
+56298,9,1,17,244,2017-11-08 11:26:34,0
+225265,11,1,13,137,2017-11-08 15:19:12,0
+105889,12,1,17,145,2017-11-07 16:14:00,0
+77809,7,1,19,101,2017-11-09 03:53:30,0
+23907,26,1,20,121,2017-11-08 14:46:08,0
+38265,15,1,10,245,2017-11-09 13:26:24,0
+48915,6,1,13,125,2017-11-07 12:37:16,0
+25009,3,1,17,280,2017-11-07 07:43:58,0
+8469,13,1,10,477,2017-11-07 07:14:22,0
+31536,3,1,18,480,2017-11-07 02:34:21,0
+183426,2,1,17,452,2017-11-07 14:23:16,0
+3133,21,1,18,128,2017-11-08 09:12:48,0
+80398,12,1,31,265,2017-11-08 05:25:01,0
+31675,9,1,19,489,2017-11-09 08:35:20,0
+108881,1,1,15,17,2017-11-07 14:28:50,0
+15660,3,1,8,424,2017-11-08 00:53:47,0
+124936,3,1,3,280,2017-11-09 00:51:15,0
+107488,15,1,8,245,2017-11-08 04:54:26,0
+176565,18,1,13,134,2017-11-09 03:52:09,0
+93021,18,1,13,121,2017-11-09 04:50:21,0
+31737,3,1,13,417,2017-11-07 03:07:22,0
+39756,2,1,8,205,2017-11-06 17:27:54,0
+7510,27,1,17,153,2017-11-09 04:22:56,0
+89189,3,1,20,280,2017-11-08 08:54:18,0
+44494,12,2,20,326,2017-11-06 22:53:53,0
+10853,23,1,13,153,2017-11-07 11:12:37,0
+75393,2,1,19,435,2017-11-07 11:34:13,0
+98066,6,1,53,459,2017-11-08 14:19:55,0
+81876,3,1,40,424,2017-11-08 18:15:43,0
+5348,8,1,1,145,2017-11-08 09:05:29,0
+52722,3,1,37,173,2017-11-08 12:00:12,0
+190924,15,1,19,265,2017-11-08 23:03:04,0
+34284,27,1,42,153,2017-11-09 11:18:33,0
+185800,12,1,23,259,2017-11-08 01:12:46,0
+5314,18,1,19,107,2017-11-08 10:29:28,0
+5314,1,1,19,377,2017-11-09 12:17:17,0
+49602,9,1,25,466,2017-11-08 23:19:20,0
+32788,21,1,3,128,2017-11-08 01:48:12,0
+42143,21,1,10,128,2017-11-07 08:51:43,0
+103032,2,1,19,237,2017-11-08 04:21:44,0
+90612,28,1,13,135,2017-11-07 15:41:59,0
+87970,12,1,18,259,2017-11-07 01:31:22,0
+37438,11,1,47,122,2017-11-08 05:30:10,0
+43827,18,1,13,107,2017-11-09 13:09:18,0
+216981,3,1,17,280,2017-11-08 14:11:12,0
+17149,19,0,24,213,2017-11-09 04:08:29,0
+84900,12,1,18,178,2017-11-07 23:16:18,0
+184454,12,1,18,497,2017-11-09 03:38:53,0
+2511,15,1,18,245,2017-11-07 15:20:03,0
+5348,28,1,19,135,2017-11-07 12:10:05,0
+73487,15,1,19,245,2017-11-07 06:48:04,0
+210483,3,1,13,280,2017-11-07 01:19:02,0
+148276,45,1,47,465,2017-11-07 06:03:21,1
+61741,6,1,13,125,2017-11-08 06:29:19,0
+148650,2,1,19,237,2017-11-08 05:59:07,0
+96078,3,1,13,205,2017-11-06 22:01:05,0
+69930,2,1,13,122,2017-11-07 16:29:42,0
+39338,8,1,13,145,2017-11-09 09:52:53,0
+86188,15,1,19,379,2017-11-08 04:28:34,0
+5257,6,1,13,459,2017-11-09 07:44:44,0
+101698,15,1,13,386,2017-11-08 17:36:34,0
+37763,18,1,3,107,2017-11-09 15:14:32,0
+354707,3,1,6,280,2017-11-09 07:24:30,0
+49462,1,2,20,134,2017-11-09 02:37:29,0
+135873,15,1,13,315,2017-11-09 08:02:55,0
+91232,12,2,13,178,2017-11-08 12:55:35,0
+8768,15,1,19,111,2017-11-09 03:51:36,0
+66100,21,2,32,128,2017-11-06 17:50:41,0
+45327,2,1,19,205,2017-11-07 17:47:49,0
+95111,2,1,2,435,2017-11-08 14:45:54,0
+34636,28,1,18,135,2017-11-07 05:42:03,0
+121505,2,1,13,237,2017-11-08 03:59:27,0
+26807,9,1,17,232,2017-11-07 08:26:25,0
+35520,21,1,17,128,2017-11-09 12:12:51,0
+33503,12,1,13,265,2017-11-08 02:04:18,0
+95766,25,1,13,259,2017-11-09 12:45:04,0
+81329,2,1,13,477,2017-11-08 06:21:59,0
+7884,18,1,19,107,2017-11-07 07:32:31,0
+7645,6,1,10,459,2017-11-08 12:57:33,0
+44150,12,1,19,259,2017-11-09 08:17:56,0
+4775,9,1,19,489,2017-11-08 14:55:21,0
+1456,9,1,13,445,2017-11-08 03:43:08,0
+121187,13,1,13,469,2017-11-08 11:27:36,0
+81030,8,1,3,145,2017-11-09 03:50:29,0
+195673,19,0,50,343,2017-11-06 16:23:02,1
+94013,13,1,19,477,2017-11-08 05:04:51,0
+138561,3,2,9,280,2017-11-08 08:56:36,0
+192796,3,1,13,480,2017-11-07 11:26:56,0
+199488,18,1,13,121,2017-11-07 10:11:10,0
+274381,94,1,17,361,2017-11-08 02:24:31,0
+239520,3,1,8,280,2017-11-08 15:31:02,0
+80405,26,1,15,121,2017-11-08 02:00:48,0
+105292,2,1,8,205,2017-11-07 20:49:17,0
+2095,12,1,35,328,2017-11-08 14:04:12,0
+91574,1,1,11,125,2017-11-07 21:57:53,0
+56346,12,1,13,245,2017-11-06 19:05:07,0
+75595,2,1,19,205,2017-11-07 16:19:03,0
+33868,9,1,30,466,2017-11-09 12:41:33,0
+240578,12,1,3,178,2017-11-07 23:58:31,0
+124825,9,1,13,466,2017-11-09 05:08:27,0
+91712,14,1,22,401,2017-11-08 02:55:30,0
+205511,1,1,19,153,2017-11-07 08:07:16,0
+283648,3,1,20,280,2017-11-08 00:34:07,0
+37385,9,1,25,134,2017-11-06 20:20:03,0
+265115,3,1,19,280,2017-11-08 13:47:12,0
+103250,3,1,37,442,2017-11-07 01:58:05,0
+246825,12,1,19,328,2017-11-09 04:02:38,0
+31732,20,2,19,259,2017-11-07 18:51:13,0
+7360,8,1,3,145,2017-11-07 23:21:01,0
+108831,3,1,13,280,2017-11-08 11:25:59,0
+73839,15,1,32,153,2017-11-07 12:39:49,0
+77257,15,1,19,111,2017-11-08 02:43:41,0
+48282,12,1,19,265,2017-11-08 10:04:57,0
+25761,2,1,19,205,2017-11-07 18:41:45,0
+59678,12,1,13,265,2017-11-08 11:09:58,0
+84596,6,1,19,125,2017-11-07 11:04:04,0
+50526,12,1,22,178,2017-11-08 08:21:33,0
+196389,2,1,25,477,2017-11-09 07:23:50,0
+202183,3,1,4,130,2017-11-07 23:42:59,0
+42215,14,1,19,349,2017-11-09 04:32:41,0
+67037,2,1,15,237,2017-11-06 20:24:36,0
+144604,12,2,13,265,2017-11-07 01:15:07,0
+46783,1,1,18,153,2017-11-08 06:55:55,0
+81001,18,1,28,121,2017-11-09 13:07:07,0
+189301,12,1,37,328,2017-11-09 13:19:57,0
+52043,9,2,43,234,2017-11-07 09:21:01,0
+6908,14,1,18,442,2017-11-09 09:05:36,0
+97684,15,2,9,315,2017-11-09 07:35:58,0
+193812,15,1,22,245,2017-11-08 16:45:06,0
+108858,3,1,13,280,2017-11-07 06:03:12,0
+101957,26,1,35,266,2017-11-07 06:59:38,0
+111517,25,1,18,259,2017-11-07 06:17:28,0
+116098,18,1,17,107,2017-11-07 03:04:04,0
+151044,7,1,18,101,2017-11-07 11:22:33,0
+113723,14,1,9,134,2017-11-07 07:37:38,0
+279107,3,1,19,280,2017-11-08 11:28:48,0
+104945,6,1,19,459,2017-11-08 05:33:41,0
+197265,2,1,4,219,2017-11-09 11:39:50,0
+352328,1,1,19,13,2017-11-09 11:52:49,0
+14782,2,1,13,205,2017-11-07 15:00:32,0
+86349,18,1,25,121,2017-11-07 13:07:35,0
+42360,3,1,20,205,2017-11-07 09:31:24,0
+99071,3,1,13,280,2017-11-07 03:08:08,0
+115557,2,1,19,205,2017-11-07 03:51:21,0
+197554,14,1,19,134,2017-11-08 23:19:53,0
+105649,2,1,19,205,2017-11-09 08:21:32,0
+121654,32,1,16,376,2017-11-07 09:16:32,0
+229176,64,1,41,459,2017-11-08 14:55:43,0
+48094,12,1,15,265,2017-11-07 03:20:06,0
+68499,18,1,19,121,2017-11-08 10:27:46,0
+141270,3,1,53,130,2017-11-07 08:38:24,0
+59059,11,1,37,173,2017-11-09 01:02:46,0
+109452,3,1,18,280,2017-11-07 01:14:17,0
+49938,21,2,42,128,2017-11-08 14:41:59,0
+148546,14,1,13,463,2017-11-07 11:43:22,0
+136561,3,1,8,280,2017-11-08 20:11:14,0
+96102,12,1,4,265,2017-11-07 11:41:23,0
+44555,2,1,19,219,2017-11-08 01:48:07,0
+121228,1,1,3,134,2017-11-08 01:54:35,0
+57938,18,1,12,134,2017-11-07 06:44:47,0
+3178,18,1,19,376,2017-11-07 04:06:08,0
+37506,2,1,41,237,2017-11-08 02:45:17,0
+100785,9,1,15,232,2017-11-08 01:37:54,0
+110574,9,1,18,234,2017-11-09 12:39:38,0
+114276,12,1,17,178,2017-11-09 10:42:43,0
+121028,20,1,17,259,2017-11-09 13:39:15,0
+3472,15,1,19,130,2017-11-09 14:19:28,0
+163535,3,1,8,442,2017-11-07 07:08:42,0
+168944,9,1,18,466,2017-11-09 11:42:22,0
+136327,9,1,19,232,2017-11-07 02:25:09,0
+95111,18,1,13,107,2017-11-07 13:33:29,0
+14256,2,1,8,219,2017-11-08 12:25:12,0
+5348,14,1,52,480,2017-11-08 10:48:09,0
+60752,1,1,19,134,2017-11-09 10:46:39,0
+123839,15,1,8,430,2017-11-09 05:12:30,0
+56411,3,1,16,280,2017-11-09 07:18:22,0
+20556,13,1,41,477,2017-11-07 14:49:54,0
+92712,12,1,19,340,2017-11-09 13:24:02,0
+93460,3,1,13,205,2017-11-08 06:25:17,0
+201182,2,1,12,219,2017-11-07 02:46:14,0
+108794,18,3032,607,107,2017-11-07 08:23:40,0
+206140,1,1,19,125,2017-11-08 22:16:38,0
+3605,18,1,9,439,2017-11-08 15:52:56,0
+131725,3,1,13,280,2017-11-08 23:44:04,0
+212004,12,1,13,259,2017-11-07 08:39:09,0
+89306,15,1,8,245,2017-11-09 01:35:17,0
+64367,2,1,13,219,2017-11-07 02:33:38,0
+36132,18,1,18,107,2017-11-07 11:09:36,0
+85928,12,1,19,178,2017-11-08 08:45:58,0
+93861,3,1,10,280,2017-11-08 14:02:46,0
+75628,15,1,13,140,2017-11-08 12:22:57,0
+8995,15,1,25,315,2017-11-07 07:33:25,0
+59997,14,1,2,401,2017-11-06 23:39:09,0
+107937,3,1,19,480,2017-11-09 03:38:07,0
+198832,12,1,16,178,2017-11-06 23:25:06,0
+23098,2,1,19,469,2017-11-07 05:54:11,0
+51496,2,1,19,435,2017-11-07 00:39:35,0
+281258,11,1,3,487,2017-11-08 11:01:32,0
+50969,13,1,8,477,2017-11-07 10:25:29,0
+98293,12,2,65,497,2017-11-08 13:10:29,0
+1074,15,1,19,430,2017-11-07 15:13:03,0
+111191,3,1,8,280,2017-11-07 03:02:27,0
+107897,35,1,13,274,2017-11-07 04:47:22,1
+87879,21,1,6,128,2017-11-08 23:02:19,0
+152339,64,1,31,459,2017-11-07 06:39:29,0
+362038,15,1,13,412,2017-11-09 00:43:32,0
+102062,2,1,49,236,2017-11-08 11:53:09,0
+80163,15,1,12,480,2017-11-08 23:16:38,0
+364625,45,1,18,5,2017-11-09 10:01:40,0
+100806,12,1,6,259,2017-11-06 19:55:21,0
+26384,2,1,19,364,2017-11-07 12:42:46,0
+5348,20,2,79,259,2017-11-07 07:12:07,0
+60698,12,1,30,140,2017-11-07 09:41:43,0
+52024,12,2,20,326,2017-11-08 15:07:20,0
+40767,13,1,17,469,2017-11-09 03:35:48,0
+25128,9,1,19,107,2017-11-09 10:18:57,0
+128548,18,1,6,107,2017-11-08 14:45:40,0
+158417,1,1,20,178,2017-11-07 03:32:21,0
+137052,15,1,17,245,2017-11-09 05:21:25,0
+108324,12,1,19,259,2017-11-09 13:48:29,0
+55326,3,1,10,280,2017-11-08 05:03:20,0
+220540,23,1,19,153,2017-11-08 13:58:02,0
+106559,18,1,22,121,2017-11-08 10:19:44,0
+95820,9,1,13,466,2017-11-08 04:49:05,0
+25097,8,2,17,145,2017-11-09 13:18:58,0
+182971,1,1,3,150,2017-11-07 02:25:31,0
+74257,24,2,10,105,2017-11-07 07:49:55,0
+26888,15,1,13,245,2017-11-07 14:31:07,0
+198081,12,1,19,178,2017-11-07 02:35:18,0
+2600,9,1,6,466,2017-11-08 17:11:08,0
+34057,12,1,23,259,2017-11-08 10:17:46,0
+6326,19,0,29,343,2017-11-09 10:56:51,0
+35793,3,1,8,442,2017-11-09 03:35:20,0
+160812,9,1,19,127,2017-11-09 11:11:18,0
+255,13,1,96,477,2017-11-08 17:06:14,0
+90408,17,1,13,280,2017-11-08 23:43:24,0
+11448,18,1,20,121,2017-11-09 04:26:13,0
+64516,9,1,19,442,2017-11-08 00:19:43,0
+13479,11,1,13,360,2017-11-09 09:26:09,0
+125222,21,1,19,232,2017-11-09 12:00:08,0
+121127,15,1,19,386,2017-11-09 09:47:42,0
+154162,14,1,2,442,2017-11-07 08:22:03,0
+32591,24,1,19,178,2017-11-07 11:39:28,0
+45745,21,1,19,128,2017-11-08 04:27:56,0
+11272,23,1,13,153,2017-11-09 03:52:27,0
+10813,11,1,10,319,2017-11-08 04:01:00,0
+6262,20,1,19,478,2017-11-07 02:39:19,0
+110354,27,1,13,153,2017-11-07 15:54:22,0
+100275,15,1,18,245,2017-11-08 15:05:10,0
+18101,3,1,17,280,2017-11-07 10:00:07,0
+50131,3,1,53,137,2017-11-09 14:58:44,0
+109703,18,1,13,439,2017-11-09 14:47:05,0
+45609,3,1,1,280,2017-11-08 07:01:25,0
+298226,9,1,15,232,2017-11-09 09:30:58,0
+109839,18,1,6,107,2017-11-07 08:23:54,0
+45386,3,1,19,280,2017-11-08 04:06:36,0
+119278,12,1,18,265,2017-11-09 11:50:16,0
+165072,64,1,13,459,2017-11-07 03:50:35,0
+66013,6,1,8,125,2017-11-07 09:29:48,0
+62072,3,1,8,280,2017-11-08 15:55:45,0
+85150,17,2,32,280,2017-11-08 06:49:58,0
+58723,18,1,19,121,2017-11-06 17:42:28,0
+44377,14,1,6,489,2017-11-09 06:01:45,0
+77772,3,1,19,130,2017-11-07 23:29:08,0
+73487,3,1,17,153,2017-11-08 12:02:22,0
+53964,18,1,20,121,2017-11-08 05:27:26,0
+69899,2,1,19,219,2017-11-08 12:14:51,0
+63149,3,1,10,280,2017-11-08 11:10:12,0
+125100,12,1,19,19,2017-11-09 06:21:16,0
+128864,2,1,10,219,2017-11-06 20:12:52,0
+84168,12,1,19,178,2017-11-08 07:47:21,0
+207893,2,1,13,435,2017-11-07 09:47:24,0
+74810,12,1,19,265,2017-11-09 09:22:56,0
+5348,11,1,19,173,2017-11-08 02:42:50,0
+103648,18,1,19,134,2017-11-07 04:19:01,0
+100168,1,1,32,153,2017-11-07 07:59:18,0
+14374,18,1,13,107,2017-11-08 18:17:08,0
+115440,18,1,4,107,2017-11-07 17:17:27,0
+93836,26,1,19,121,2017-11-07 05:44:29,0
+81167,15,1,17,245,2017-11-08 16:16:44,0
+42326,18,1,18,107,2017-11-08 13:06:12,0
+199830,3,1,16,280,2017-11-08 07:14:44,0
+12313,8,1,13,145,2017-11-08 22:25:45,0
+277962,3,1,13,280,2017-11-08 02:00:00,0
+72322,12,1,13,245,2017-11-07 04:52:05,0
+61718,9,1,19,134,2017-11-09 01:44:23,0
+151547,12,2,13,265,2017-11-09 11:09:23,0
+31304,3,1,18,280,2017-11-08 04:04:00,0
+76811,2,1,13,243,2017-11-08 10:55:38,0
+216999,14,1,19,416,2017-11-08 00:56:07,0
+48820,15,1,26,245,2017-11-09 02:07:39,0
+52780,3,1,18,280,2017-11-07 06:13:41,0
+8469,9,1,19,134,2017-11-07 01:00:16,0
+195729,2,1,13,452,2017-11-09 06:53:51,0
+50906,12,1,3,178,2017-11-07 06:32:21,0
+88585,1,1,18,439,2017-11-07 00:35:47,0
+77099,2,1,13,435,2017-11-09 11:37:25,0
+39698,24,1,8,105,2017-11-07 15:08:12,0
+159355,14,1,17,123,2017-11-07 09:57:26,0
+235,12,1,19,140,2017-11-08 05:06:56,0
+3994,22,2,13,496,2017-11-07 11:33:55,0
+7664,64,1,19,459,2017-11-08 02:56:13,0
+44663,15,1,14,245,2017-11-07 03:39:10,0
+102206,15,2,13,111,2017-11-09 07:27:50,0
+88923,14,1,25,489,2017-11-09 10:10:40,0
+59142,2,1,17,237,2017-11-08 06:52:18,0
+72279,1,1,13,134,2017-11-07 23:48:09,0
+75007,1,1,13,137,2017-11-07 21:53:07,0
+6222,3,1,32,379,2017-11-09 10:55:01,0
+200949,3,1,6,424,2017-11-07 06:43:47,1
+120936,12,2,22,145,2017-11-09 15:59:32,0
+21802,3,1,13,205,2017-11-07 07:49:09,0
+223956,2,1,13,212,2017-11-08 05:36:57,0
+93263,13,2,20,477,2017-11-09 13:47:56,0
+66254,11,1,15,319,2017-11-07 05:07:08,0
+69975,3,1,19,489,2017-11-07 13:16:22,0
+114235,15,1,19,140,2017-11-06 17:15:34,0
+102942,3,2,17,280,2017-11-08 08:37:07,0
+345462,18,1,31,134,2017-11-09 03:16:22,0
+120597,3,1,19,115,2017-11-08 04:43:15,0
+83616,18,1,17,439,2017-11-08 05:20:32,0
+132992,12,1,13,265,2017-11-07 01:36:57,0
+48296,19,0,0,213,2017-11-09 08:29:55,0
+97716,3,1,13,137,2017-11-06 17:07:14,0
+10445,12,1,6,245,2017-11-08 03:34:11,0
+9916,18,1,19,439,2017-11-07 12:48:33,0
+30310,23,1,31,153,2017-11-09 08:11:37,0
+36395,1,1,14,17,2017-11-09 08:07:31,0
+81556,20,1,18,478,2017-11-07 19:40:20,0
+285450,24,1,13,105,2017-11-08 02:09:55,0
+103036,64,1,13,459,2017-11-07 11:48:23,0
+87604,3,1,25,280,2017-11-08 13:34:31,0
+50337,3,1,19,379,2017-11-09 14:03:47,0
+115681,26,1,14,266,2017-11-09 06:26:23,0
+43855,18,1,53,107,2017-11-07 19:06:12,0
+27038,12,1,19,328,2017-11-08 11:33:38,0
+36349,7,1,13,101,2017-11-07 10:37:16,0
+110167,15,1,13,386,2017-11-07 08:18:36,0
+7861,3,1,19,115,2017-11-07 13:17:17,0
+59595,18,3543,748,107,2017-11-07 23:36:28,0
+87073,12,1,15,265,2017-11-07 10:08:36,0
+76749,3,1,13,280,2017-11-07 04:23:18,0
+203553,3,1,13,137,2017-11-06 22:19:08,0
+58705,12,1,15,140,2017-11-06 16:57:22,0
+89680,12,1,15,328,2017-11-07 14:23:52,0
+53960,2,1,19,205,2017-11-08 03:52:37,0
+19272,18,1,26,107,2017-11-08 10:20:49,0
+358826,14,1,17,113,2017-11-09 08:06:43,0
+120415,2,1,1,237,2017-11-07 02:09:59,0
+180146,2,1,47,377,2017-11-07 00:33:21,0
+103831,15,1,19,265,2017-11-08 02:09:33,0
+71532,12,1,19,265,2017-11-07 00:55:04,0
+3749,6,1,10,459,2017-11-08 05:41:40,0
+123876,3,1,17,135,2017-11-08 02:57:18,0
+18312,9,1,19,466,2017-11-08 10:07:35,0
+6313,12,2,32,265,2017-11-08 18:40:29,0
+75574,56,1,18,406,2017-11-08 02:34:03,0
+85631,12,1,19,328,2017-11-07 09:39:50,0
+5348,7,1,9,101,2017-11-07 11:45:27,0
+184437,9,1,13,134,2017-11-06 16:33:39,0
+30587,64,1,23,459,2017-11-07 15:13:28,0
+3505,9,1,36,120,2017-11-08 02:12:59,0
+154111,1,1,19,153,2017-11-08 12:08:18,0
+39268,12,1,19,178,2017-11-08 14:06:41,0
+109676,2,1,37,435,2017-11-08 13:24:18,0
+75329,3,1,13,480,2017-11-07 10:37:26,0
+15899,45,1,17,213,2017-11-09 08:06:35,0
+177384,15,1,19,245,2017-11-06 16:18:54,0
+23871,64,1,18,459,2017-11-08 04:22:24,0
+133850,23,1,19,153,2017-11-08 01:53:12,0
+192290,32,1,19,376,2017-11-08 11:18:11,0
+87879,15,1,13,111,2017-11-08 13:51:11,0
+53715,15,1,16,245,2017-11-07 15:57:03,0
+26379,21,1,19,232,2017-11-09 04:10:00,0
+69595,11,1,13,173,2017-11-07 02:32:54,0
+148349,8,1,13,145,2017-11-08 07:11:24,0
+42904,15,1,19,379,2017-11-07 00:20:37,0
+154133,1,1,15,377,2017-11-07 06:53:12,0
+109147,2,1,22,435,2017-11-07 15:54:59,0
+58538,2,1,19,236,2017-11-08 02:56:16,0
+149910,14,1,19,463,2017-11-07 14:43:39,0
+194193,2,1,12,237,2017-11-07 04:55:55,0
+152257,2,1,9,219,2017-11-07 02:28:54,0
+30880,2,1,9,477,2017-11-08 06:42:55,0
+16955,21,1,6,128,2017-11-07 11:03:42,0
+39328,150,1,22,110,2017-11-08 06:48:21,0
+203736,15,1,19,140,2017-11-09 12:03:55,0
+110386,2,1,19,469,2017-11-07 00:02:23,0
+114314,3,1,1,211,2017-11-08 02:53:40,0
+47902,3,1,13,173,2017-11-09 06:51:10,0
+92872,9,1,13,232,2017-11-09 14:46:32,0
+44361,11,1,19,173,2017-11-07 04:14:35,0
+98581,6,1,13,459,2017-11-08 04:09:07,0
+40259,13,1,19,477,2017-11-09 15:42:11,0
+144421,3,1,22,280,2017-11-08 04:12:54,0
+18821,14,1,13,439,2017-11-09 11:13:28,0
+189032,13,1,16,477,2017-11-09 04:21:25,0
+66356,3,1,13,211,2017-11-08 16:32:00,0
+17149,8,1,19,145,2017-11-09 14:35:07,0
+26908,3,1,6,280,2017-11-06 16:25:44,0
+94093,12,1,53,265,2017-11-07 04:00:57,0
+118307,8,1,19,145,2017-11-08 03:50:40,0
+143129,12,1,19,265,2017-11-07 00:49:36,0
+115634,2,1,15,205,2017-11-07 09:57:48,0
+105560,1,1,19,452,2017-11-07 03:12:19,0
+142483,62,1,19,21,2017-11-08 07:23:57,0
+201256,9,1,3,442,2017-11-07 03:49:45,0
+117373,2,1,19,219,2017-11-09 08:07:42,0
+265228,2,1,1,477,2017-11-08 12:47:01,0
+77050,12,1,13,245,2017-11-07 09:52:05,0
+164253,35,1,19,21,2017-11-08 09:49:59,1
+125842,11,1,18,487,2017-11-08 03:59:07,0
+139555,18,1,19,121,2017-11-09 07:13:13,0
+57519,3,1,8,480,2017-11-09 07:49:41,0
+5812,3,1,17,466,2017-11-06 23:29:10,0
+29695,3,1,27,280,2017-11-08 10:26:16,0
+193978,18,1,13,107,2017-11-09 01:19:20,0
+137716,21,1,19,128,2017-11-07 15:35:08,0
+3339,3,1,9,137,2017-11-09 07:10:12,0
+49006,3,1,8,280,2017-11-09 00:27:05,0
+190731,2,1,10,219,2017-11-07 15:36:50,0
+28011,9,1,19,466,2017-11-08 15:03:22,0
+105560,19,0,24,213,2017-11-08 13:18:00,0
+46482,20,2,19,259,2017-11-07 18:00:17,0
+111330,27,1,13,122,2017-11-08 23:36:26,0
+74062,9,1,19,445,2017-11-09 12:55:30,0
+80228,9,1,6,334,2017-11-07 12:09:39,0
+65028,14,1,19,379,2017-11-08 22:42:44,0
+125222,8,1,9,145,2017-11-08 00:55:30,0
+97067,14,1,13,467,2017-11-09 06:17:28,0
+117559,18,1,18,107,2017-11-07 00:01:40,0
+89113,15,1,25,130,2017-11-09 07:00:26,0
+120202,18,1,35,107,2017-11-07 08:22:22,0
+95509,1,1,9,153,2017-11-08 09:45:57,0
+66232,23,1,25,153,2017-11-07 11:42:02,0
+23260,15,1,37,386,2017-11-09 13:02:25,0
+61715,9,1,13,334,2017-11-09 15:37:16,0
+204885,10,1,28,317,2017-11-07 04:57:32,0
+106493,7,1,15,101,2017-11-09 06:34:41,0
+73516,28,1,13,135,2017-11-08 04:17:09,0
+4214,18,1,13,107,2017-11-08 08:35:08,0
+83429,9,1,17,442,2017-11-09 09:49:52,0
+50430,21,1,19,128,2017-11-08 00:06:20,0
+5314,8,2,9,145,2017-11-09 04:28:40,0
+100212,12,1,13,212,2017-11-08 00:42:53,0
+212126,15,1,3,412,2017-11-09 05:38:11,0
+50411,14,1,40,463,2017-11-08 15:51:12,0
+74777,28,1,19,135,2017-11-07 14:30:19,0
+3994,3,1,53,173,2017-11-09 15:13:00,0
+119222,1,1,22,125,2017-11-08 01:44:18,0
+25553,9,1,22,215,2017-11-06 16:02:34,0
+118440,2,1,13,477,2017-11-07 18:21:34,0
+5314,9,1,13,442,2017-11-07 10:05:18,0
+21990,315,1,19,110,2017-11-07 07:30:18,0
+42773,2,1,13,377,2017-11-07 15:33:49,0
+47235,9,1,22,107,2017-11-09 12:28:27,0
+26108,12,2,19,259,2017-11-09 07:00:13,0
+68822,3,1,19,115,2017-11-08 02:31:48,0
+20266,14,1,13,446,2017-11-09 14:23:49,0
+9362,12,1,19,497,2017-11-09 13:12:07,0
+37434,2,1,1,122,2017-11-07 22:57:06,0
+94971,2,1,22,435,2017-11-09 00:31:10,0
+182248,1,1,13,134,2017-11-07 23:40:33,0
+125736,24,1,19,105,2017-11-09 08:33:11,0
+96801,12,1,13,424,2017-11-07 02:58:51,0
+8506,11,1,10,325,2017-11-08 08:03:30,0
+67776,18,1,18,439,2017-11-07 04:33:08,0
+111219,12,1,19,328,2017-11-09 11:24:01,0
+32457,18,1,22,107,2017-11-09 02:02:39,0
+78353,12,1,13,265,2017-11-09 01:01:19,0
+85138,1,1,19,377,2017-11-08 06:40:17,0
+142361,12,1,13,328,2017-11-07 06:12:03,0
+121557,3,1,1,137,2017-11-08 09:26:44,0
+45121,12,1,19,328,2017-11-09 05:20:59,0
+53436,18,1,13,107,2017-11-08 02:47:16,0
+105414,12,1,13,205,2017-11-09 05:31:35,0
+40077,3,2,8,115,2017-11-09 01:26:59,0
+5250,9,1,19,334,2017-11-09 10:44:24,0
+18445,18,1,18,107,2017-11-08 17:37:03,0
+101250,1,2,36,153,2017-11-07 05:07:44,0
+15643,14,1,17,463,2017-11-07 03:11:31,0
+95820,18,1,19,107,2017-11-07 13:15:16,0
+23183,15,1,19,111,2017-11-08 02:57:09,0
+75203,12,1,17,424,2017-11-07 14:17:55,0
+177728,3,1,12,480,2017-11-09 00:28:35,0
+201182,12,1,10,328,2017-11-08 18:31:24,0
+9832,3,1,19,442,2017-11-06 22:27:59,0
+44494,12,2,19,326,2017-11-08 05:56:39,0
+40639,26,1,10,121,2017-11-08 16:06:07,0
+280992,21,2,79,128,2017-11-09 11:47:11,0
+75799,12,1,19,259,2017-11-07 11:02:47,0
+22037,7,1,19,101,2017-11-07 08:59:26,0
+125222,12,1,10,409,2017-11-07 14:34:56,0
+48646,24,1,17,105,2017-11-07 14:29:34,0
+15474,9,1,27,334,2017-11-08 12:05:46,0
+23723,15,1,13,265,2017-11-07 06:04:58,0
+3102,13,1,13,477,2017-11-08 00:14:43,0
+10760,7,1,48,101,2017-11-07 10:41:55,0
+105447,9,1,13,234,2017-11-06 19:40:52,0
+123350,15,1,19,245,2017-11-07 04:41:54,0
+72900,2,1,19,212,2017-11-07 12:17:44,0
+64014,15,1,12,153,2017-11-07 08:23:59,0
+107954,2,1,19,477,2017-11-08 15:27:51,0
+83616,12,1,3,178,2017-11-09 07:01:11,0
+77399,9,1,23,232,2017-11-08 12:30:41,0
+344865,3,2,13,280,2017-11-09 01:51:56,0
+57528,3,1,15,137,2017-11-09 13:21:12,0
+6351,18,1,13,107,2017-11-07 09:31:04,0
+108202,9,1,866,232,2017-11-09 10:01:31,0
+308779,23,1,19,153,2017-11-09 12:57:38,0
+345196,2,1,6,477,2017-11-09 14:45:37,0
+94214,15,1,25,245,2017-11-09 01:07:46,0
+77582,3,1,30,489,2017-11-09 00:34:39,0
+195925,3,2,9,115,2017-11-07 12:51:10,0
+83883,3,1,28,280,2017-11-09 07:11:26,0
+218264,3,1,13,280,2017-11-08 03:11:20,0
+63246,2,1,18,122,2017-11-08 01:01:00,0
+3432,14,1,19,442,2017-11-09 03:33:09,0
+264331,3,1,19,409,2017-11-09 10:35:37,0
+142197,18,1,17,107,2017-11-07 01:39:35,0
+108913,15,1,13,278,2017-11-07 14:37:37,0
+145963,18,1,17,107,2017-11-07 19:25:12,0
+36213,15,1,18,3,2017-11-07 10:25:12,0
+29315,3,1,19,280,2017-11-08 11:30:47,0
+73724,3,1,18,173,2017-11-08 11:32:35,0
+69925,3,1,17,424,2017-11-07 10:53:02,0
+11083,3,1,13,173,2017-11-07 17:59:44,0
+121352,14,1,20,134,2017-11-09 06:33:57,0
+154541,3,1,13,489,2017-11-09 12:55:39,0
+204509,15,1,19,111,2017-11-09 06:26:25,0
+59540,3,1,19,280,2017-11-08 10:29:59,0
+60473,9,1,10,134,2017-11-09 10:19:29,0
+34772,9,1,9,334,2017-11-06 18:00:45,0
+96015,11,1,13,122,2017-11-07 10:15:59,0
+99150,8,1,13,145,2017-11-08 05:15:15,0
+126129,18,1,30,439,2017-11-07 04:33:11,0
+1365,9,1,8,134,2017-11-07 02:36:48,0
+50657,3,1,8,211,2017-11-08 23:38:02,0
+6360,12,1,28,326,2017-11-07 15:07:37,0
+121472,14,1,28,401,2017-11-08 20:52:05,0
+168010,1,1,17,134,2017-11-08 06:31:15,0
+137151,9,1,20,466,2017-11-08 09:44:36,0
+63305,9,1,19,215,2017-11-09 15:03:03,0
+65913,3,1,19,280,2017-11-09 04:01:50,0
+138561,12,1,13,245,2017-11-08 14:08:49,0
+92583,18,1,10,439,2017-11-07 12:10:16,0
+87879,1,1,13,135,2017-11-07 14:48:03,0
+32490,3,1,15,442,2017-11-09 08:43:05,0
+26990,3,1,17,205,2017-11-07 11:47:55,0
+100895,9,1,19,215,2017-11-07 03:10:44,0
+56402,14,1,13,439,2017-11-07 13:43:47,0
+37417,9,1,10,442,2017-11-07 15:47:20,0
+313518,28,1,19,135,2017-11-09 10:58:29,0
+58472,12,2,19,245,2017-11-09 04:39:16,0
+64615,11,1,19,487,2017-11-07 01:06:27,0
+74687,8,1,13,145,2017-11-06 22:46:07,0
+110327,2,1,19,477,2017-11-07 05:08:03,0
+138561,9,1,19,134,2017-11-07 14:30:19,0
+183060,2,1,9,364,2017-11-09 15:34:59,0
+196165,11,1,19,481,2017-11-07 09:24:02,0
+42035,47,1,19,484,2017-11-09 13:12:20,0
+199976,3,1,13,452,2017-11-09 00:20:28,0
+52881,9,1,19,258,2017-11-09 04:35:56,0
+88856,3,1,13,280,2017-11-07 01:36:40,0
+224571,12,1,3,328,2017-11-09 13:55:48,0
+33503,26,1,19,477,2017-11-08 21:09:06,0
+11286,26,1,13,266,2017-11-07 04:07:52,0
+93155,3,1,10,280,2017-11-07 04:55:20,0
+94245,23,1,41,153,2017-11-09 02:24:22,0
+188121,32,1,19,376,2017-11-08 14:00:42,0
+12788,3,1,19,280,2017-11-08 06:35:21,0
+118190,15,1,70,245,2017-11-08 14:54:25,0
+154542,6,1,14,459,2017-11-08 18:00:19,0
+84612,2,1,17,237,2017-11-09 08:12:08,0
+70770,23,1,17,153,2017-11-08 05:19:41,0
+102025,9,1,10,466,2017-11-07 08:12:01,0
+4102,9,1,3,334,2017-11-09 11:33:26,0
+7808,14,1,3,439,2017-11-08 19:41:34,0
+72166,12,1,20,259,2017-11-08 05:10:31,0
+41112,2,1,13,219,2017-11-08 02:58:16,0
+118229,23,1,58,153,2017-11-09 06:43:24,0
+5448,1,1,13,178,2017-11-07 10:43:02,0
+55003,18,1,1,134,2017-11-07 16:01:33,0
+169912,2,1,13,122,2017-11-07 11:00:49,0
+5348,2,1,13,236,2017-11-09 03:59:26,0
+77277,20,1,13,478,2017-11-07 14:14:04,0
+73516,15,1,35,315,2017-11-07 10:21:40,0
+1334,14,1,13,442,2017-11-07 04:05:19,0
+96177,26,1,19,266,2017-11-07 06:45:35,0
+22266,12,1,16,265,2017-11-07 08:37:22,0
+207753,3,1,8,280,2017-11-08 15:06:06,0
+101941,11,2,35,137,2017-11-08 12:56:04,0
+77572,15,1,15,140,2017-11-07 20:40:51,0
+119473,15,1,11,245,2017-11-07 13:11:05,0
+124146,2,1,18,435,2017-11-07 17:20:30,0
+66596,18,1,13,121,2017-11-07 10:09:57,0
+81983,3,1,19,280,2017-11-07 05:20:03,0
+63979,26,1,19,477,2017-11-09 07:17:36,0
+247965,3,1,13,19,2017-11-08 14:56:04,0
+88217,17,1,15,280,2017-11-07 12:27:55,0
+182500,64,1,15,459,2017-11-07 09:06:59,0
+117383,3,1,3,280,2017-11-08 15:38:53,0
+59395,64,1,17,459,2017-11-08 08:24:57,0
+343992,12,1,41,265,2017-11-09 10:08:01,0
+5314,27,1,27,153,2017-11-09 11:15:38,0
+38719,15,1,31,245,2017-11-07 04:11:13,0
+67712,18,1,13,107,2017-11-08 08:14:14,0
+201182,25,1,19,259,2017-11-07 16:45:26,0
+191163,18,1,22,107,2017-11-07 04:20:10,0
+185010,12,1,37,265,2017-11-07 14:29:46,0
+22804,2,1,17,469,2017-11-07 13:22:28,0
+121909,15,1,19,315,2017-11-07 12:08:11,0
+48240,2,1,19,237,2017-11-09 14:54:11,0
+98505,15,1,35,245,2017-11-08 16:36:58,0
+137968,9,1,13,334,2017-11-09 06:56:36,0
+39802,15,2,25,3,2017-11-09 11:10:16,0
+71810,18,1,19,439,2017-11-07 09:48:26,0
+26218,13,1,13,469,2017-11-07 10:20:12,0
+84543,1,1,22,134,2017-11-09 00:16:38,0
+96592,2,1,32,122,2017-11-09 01:59:31,0
+185936,21,1,19,128,2017-11-07 04:09:06,0
+4100,3,1,17,137,2017-11-07 09:54:07,0
+3994,1,1,19,134,2017-11-08 06:55:24,0
+98102,9,1,35,442,2017-11-09 09:09:07,0
+41786,15,1,25,265,2017-11-08 18:46:36,0
+104395,1,1,22,115,2017-11-08 00:01:24,0
+46701,2,1,20,219,2017-11-07 16:03:38,0
+210083,14,2,9,123,2017-11-07 10:39:08,0
+66918,15,1,19,245,2017-11-08 16:05:10,0
+216027,6,1,19,459,2017-11-08 07:53:44,0
+5853,13,1,53,477,2017-11-08 01:34:05,0
+28010,3,1,13,442,2017-11-09 01:30:06,0
+201084,9,1,19,334,2017-11-08 03:25:58,0
+344509,12,1,14,178,2017-11-08 16:07:15,0
+291006,3,1,32,19,2017-11-08 22:49:24,0
+102962,3,1,20,409,2017-11-08 07:07:33,0
+26934,3,1,13,19,2017-11-08 07:24:12,0
+78672,25,1,13,259,2017-11-09 13:38:43,0
+51887,1,1,15,134,2017-11-07 16:14:49,0
+118512,2,1,13,237,2017-11-07 12:01:53,0
+48679,21,1,19,128,2017-11-09 06:56:53,0
+191961,18,1,19,134,2017-11-08 23:03:10,0
+124459,15,1,13,480,2017-11-08 02:03:30,0
+108481,8,1,15,145,2017-11-06 16:04:54,0
+98912,9,1,37,244,2017-11-09 03:06:56,0
+4680,14,1,19,379,2017-11-08 06:56:02,0
+783,11,1,6,319,2017-11-09 10:31:45,0
+200168,12,2,19,145,2017-11-06 18:10:56,0
+95181,18,1,47,107,2017-11-07 08:36:45,0
+71128,18,1,14,107,2017-11-08 16:21:25,0
+81980,8,1,27,145,2017-11-09 05:07:09,0
+359632,2,1,6,477,2017-11-08 19:25:36,0
+77361,23,1,20,153,2017-11-08 04:58:45,0
+340563,3,1,13,205,2017-11-09 08:58:38,0
+102394,18,1,10,121,2017-11-06 23:32:12,0
+89458,7,1,19,101,2017-11-09 07:19:38,0
+29252,2,1,18,219,2017-11-07 05:39:53,0
+86926,11,1,13,173,2017-11-09 07:34:33,0
+7938,1,2,19,134,2017-11-08 11:59:35,0
+95766,18,1,13,107,2017-11-07 07:19:33,0
+26184,9,1,19,258,2017-11-09 02:00:12,0
+100654,12,1,17,409,2017-11-08 15:26:27,0
+232000,1,1,19,125,2017-11-08 11:28:26,0
+49602,8,1,13,140,2017-11-08 11:28:03,0
+53085,18,1,18,107,2017-11-07 14:17:43,0
+892,18,1,15,107,2017-11-08 13:18:12,0
+21046,14,1,32,401,2017-11-07 22:28:57,0
+179199,2,1,47,469,2017-11-08 06:35:53,0
+97712,3,1,13,280,2017-11-07 07:21:05,0
+6262,13,1,16,449,2017-11-08 02:12:15,0
+27229,15,1,13,430,2017-11-06 23:38:53,0
+360112,3,1,6,135,2017-11-09 03:12:47,0
+130996,2,1,13,477,2017-11-09 15:00:36,0
+105560,12,1,53,242,2017-11-09 01:39:33,0
+202693,24,1,10,105,2017-11-09 15:08:22,0
+143708,10,1,32,317,2017-11-08 03:12:20,0
+44256,13,1,17,477,2017-11-07 09:11:32,0
+27038,10,1,15,377,2017-11-08 04:08:21,0
+11221,12,1,10,265,2017-11-08 06:45:05,0
+43793,1,1,12,17,2017-11-08 14:52:48,0
+149129,12,2,13,178,2017-11-07 12:31:25,0
+73174,14,1,13,439,2017-11-09 05:06:23,0
+13126,3,1,13,480,2017-11-08 17:09:58,0
+25152,18,1,19,107,2017-11-09 15:45:13,0
+211594,3,1,41,424,2017-11-08 09:47:38,0
+45745,13,1,19,477,2017-11-07 14:00:03,0
+106511,1,1,19,153,2017-11-08 13:00:19,0
+98261,3,1,19,280,2017-11-09 04:57:14,0
+6681,15,1,13,3,2017-11-08 10:47:34,0
+77277,2,1,16,219,2017-11-09 01:52:42,0
+82039,3,1,41,115,2017-11-07 13:17:36,0
+4930,3,1,11,442,2017-11-07 14:08:39,0
+89405,13,1,20,469,2017-11-08 11:46:59,0
+72460,13,1,2,477,2017-11-06 17:06:26,0
+29226,9,1,19,134,2017-11-06 16:18:06,0
+38959,72,1,18,101,2017-11-08 11:24:52,1
+7318,12,1,9,259,2017-11-09 10:25:47,0
+85050,3,1,13,379,2017-11-07 11:08:34,0
+53795,12,1,6,245,2017-11-08 15:14:58,0
+44670,18,1,19,376,2017-11-09 09:23:26,0
+16281,3,1,13,137,2017-11-06 17:07:02,0
+34366,14,1,19,401,2017-11-07 11:04:14,0
+145747,10,1,10,317,2017-11-08 15:26:17,0
+35799,12,2,9,145,2017-11-07 17:11:44,0
+308950,14,1,8,113,2017-11-09 12:58:55,0
+54657,2,1,13,477,2017-11-07 04:51:31,0
+139344,2,1,19,435,2017-11-07 06:53:32,0
+75750,12,1,19,265,2017-11-09 09:11:38,0
+107164,12,1,19,424,2017-11-07 11:50:54,0
+43622,18,1,49,134,2017-11-09 06:50:00,0
+125423,3,1,19,280,2017-11-09 03:42:51,0
+346524,2,1,2,477,2017-11-09 12:10:06,0
+76944,64,1,37,459,2017-11-07 11:12:24,0
+162959,3,1,17,442,2017-11-07 09:49:58,0
+245237,12,1,13,245,2017-11-07 16:53:05,0
+216496,12,1,22,178,2017-11-08 07:19:01,0
+246966,26,1,19,477,2017-11-08 08:35:08,0
+20015,14,1,28,489,2017-11-08 10:34:08,0
+113630,1,1,19,452,2017-11-07 05:04:24,0
+8259,2,1,19,237,2017-11-09 06:39:43,0
+154542,3,1,13,280,2017-11-07 06:47:55,0
+98424,6,1,19,125,2017-11-07 20:47:32,0
+6516,11,1,8,469,2017-11-09 12:17:27,0
+27174,23,1,13,153,2017-11-08 06:19:01,0
+139753,12,1,19,265,2017-11-07 05:00:34,0
+5348,18,1,12,107,2017-11-09 15:33:15,0
+26995,12,2,19,178,2017-11-07 02:29:57,0
+76953,15,1,19,153,2017-11-07 04:25:27,0
+17899,72,1,13,101,2017-11-09 09:10:42,0
+86779,15,1,55,111,2017-11-08 14:32:56,0
+121302,2,1,19,477,2017-11-09 08:03:04,0
+121091,12,1,22,245,2017-11-08 06:28:22,0
+185956,13,1,19,477,2017-11-07 10:40:54,0
+145549,3,1,19,19,2017-11-08 01:11:01,0
+41447,18,1,34,376,2017-11-07 05:42:50,0
+109340,1,1,6,153,2017-11-09 15:41:46,0
+74550,9,1,14,244,2017-11-09 09:08:02,0
+147957,109,0,59,347,2017-11-07 08:01:11,0
+109880,18,1,37,134,2017-11-09 00:41:20,0
+51628,3,1,8,280,2017-11-09 05:48:40,0
+85964,9,1,35,215,2017-11-07 02:19:38,0
+18788,2,1,3,236,2017-11-08 02:20:20,0
+271586,2,1,23,237,2017-11-08 06:17:32,0
+44445,3,1,6,280,2017-11-09 01:38:46,0
+5314,3,1,18,424,2017-11-08 05:31:49,0
+71285,2,1,25,435,2017-11-09 06:15:01,0
+137933,15,1,19,153,2017-11-09 07:08:39,0
+97067,9,1,13,232,2017-11-09 12:51:26,0
+46449,12,2,19,140,2017-11-09 12:04:28,0
+15530,32,1,20,376,2017-11-08 12:37:00,0
+58741,26,1,19,121,2017-11-07 06:57:07,0
+117050,15,1,25,3,2017-11-06 23:39:10,0
+278480,18,1,6,107,2017-11-08 01:48:05,0
+92927,12,1,13,265,2017-11-08 09:01:41,0
+68328,14,1,47,467,2017-11-09 15:27:17,0
+104468,25,1,22,259,2017-11-07 00:30:06,0
+115947,18,1,22,439,2017-11-07 07:01:01,0
+330714,3,1,14,280,2017-11-09 15:08:58,0
+119531,15,2,19,140,2017-11-09 01:00:18,0
+37078,3,1,18,137,2017-11-09 07:11:20,0
+100065,3,1,35,280,2017-11-08 02:19:27,0
+64510,3,1,14,280,2017-11-08 02:31:33,0
+76987,2,1,19,237,2017-11-07 04:04:12,0
+179384,12,1,37,140,2017-11-06 23:35:19,0
+117414,12,1,19,219,2017-11-07 05:11:19,0
+45781,9,1,22,127,2017-11-09 11:57:59,0
+16426,64,1,19,459,2017-11-07 05:41:46,0
+22283,12,1,13,178,2017-11-06 23:58:26,0
+81503,12,1,19,340,2017-11-08 05:02:33,0
+5200,3,1,13,280,2017-11-08 07:30:54,0
+20996,1,1,37,134,2017-11-09 14:14:45,0
+67838,23,1,8,153,2017-11-09 00:22:22,0
+58585,3,1,25,280,2017-11-07 01:06:19,0
+100157,12,1,53,205,2017-11-08 00:57:38,0
+72539,18,1,13,107,2017-11-07 08:49:02,0
+53193,9,1,19,334,2017-11-07 23:53:17,0
+81480,3,1,18,280,2017-11-09 04:44:50,0
+81736,15,1,19,245,2017-11-07 15:47:38,0
+71076,15,1,14,379,2017-11-09 00:32:40,0
+150215,12,1,19,265,2017-11-08 05:35:41,0
+81514,15,1,41,245,2017-11-08 11:20:41,0
+99140,9,1,17,134,2017-11-09 13:29:07,0
+100393,2,1,17,205,2017-11-08 02:16:39,0
+123993,3,1,1,280,2017-11-09 03:19:35,0
+29385,11,1,19,325,2017-11-07 05:58:28,0
+70834,64,1,10,459,2017-11-06 17:30:34,0
+124198,12,1,13,140,2017-11-08 02:39:42,0
+133136,26,1,37,477,2017-11-08 23:52:15,0
+191416,3,1,15,280,2017-11-08 11:23:52,0
+239406,64,1,22,459,2017-11-08 01:51:08,0
+111517,1,1,13,153,2017-11-07 00:13:03,0
+6908,3,1,4,280,2017-11-08 01:26:01,0
+22990,26,1,19,266,2017-11-08 06:04:01,0
+222132,12,1,13,265,2017-11-08 10:26:37,0
+202200,3,1,39,211,2017-11-09 09:09:59,0
+62175,3,1,17,280,2017-11-08 04:01:49,0
+125260,3,1,13,211,2017-11-08 07:44:48,0
+39479,3,1,28,280,2017-11-09 06:42:41,0
+98531,12,1,9,265,2017-11-07 08:44:59,0
+115152,14,1,10,439,2017-11-09 15:00:54,0
+71267,27,1,16,153,2017-11-09 01:56:11,0
+32791,3,1,10,19,2017-11-07 05:35:45,0
+75899,3,1,13,280,2017-11-07 04:16:05,0
+38872,3,1,3,379,2017-11-07 12:16:33,0
+249497,12,1,13,178,2017-11-08 15:50:39,0
+76811,2,1,2,469,2017-11-08 00:23:33,0
+43793,18,1,34,107,2017-11-06 23:49:25,0
+118315,26,1,13,121,2017-11-08 16:48:09,0
+7210,14,1,13,463,2017-11-07 23:22:30,0
+92721,14,1,13,478,2017-11-08 09:53:40,0
+108560,18,1,41,107,2017-11-08 07:33:28,0
+31597,94,1,19,361,2017-11-09 13:47:06,0
+82816,15,1,10,480,2017-11-09 11:39:53,0
+115663,1,1,13,134,2017-11-06 23:19:32,0
+91611,3,1,13,466,2017-11-09 14:40:13,0
+67621,19,18,0,213,2017-11-07 09:39:51,0
+20970,9,1,13,134,2017-11-07 02:43:28,0
+88281,3,1,9,424,2017-11-07 20:36:11,0
+55690,2,1,19,364,2017-11-07 14:05:56,0
+3219,3,1,22,115,2017-11-07 09:15:17,0
+62770,2,1,19,435,2017-11-07 05:14:04,0
+25553,12,2,18,140,2017-11-08 06:55:16,0
+63812,18,1,17,134,2017-11-09 06:48:55,0
+147957,14,1,19,349,2017-11-07 05:36:27,0
+90953,12,1,9,259,2017-11-07 14:37:07,0
+100393,3,1,13,280,2017-11-08 10:38:15,0
+156058,24,1,13,105,2017-11-07 00:43:34,0
+28247,14,1,13,401,2017-11-08 08:41:28,0
+67443,8,1,13,145,2017-11-07 04:58:41,0
+44327,64,1,20,459,2017-11-08 13:15:08,0
+25097,3,1,19,402,2017-11-07 05:31:17,0
+177921,26,1,8,121,2017-11-08 23:44:19,0
+95294,3,1,19,442,2017-11-08 09:38:20,0
+130484,9,1,16,134,2017-11-07 00:24:22,0
+106524,1,1,3,349,2017-11-07 03:16:10,0
+13487,12,1,13,242,2017-11-08 00:18:00,0
+108658,3,1,13,417,2017-11-08 04:31:39,0
+27879,12,1,19,265,2017-11-07 09:09:25,0
+194802,14,1,8,379,2017-11-07 14:34:26,0
+126693,2,1,22,477,2017-11-06 19:12:11,0
+96832,3,1,3,409,2017-11-09 04:41:31,0
+14737,3,2,19,452,2017-11-08 01:21:16,0
+18165,23,1,13,153,2017-11-07 01:19:37,0
+137443,6,1,20,459,2017-11-06 17:48:21,0
+6913,3,1,20,280,2017-11-07 04:41:43,0
+33412,21,1,22,128,2017-11-09 05:43:53,0
+5314,28,1,27,135,2017-11-09 14:06:32,0
+35815,3,1,13,280,2017-11-09 04:23:21,0
+81076,12,1,18,265,2017-11-09 14:10:50,0
+157743,2,1,19,452,2017-11-09 11:47:39,0
+28010,15,1,19,315,2017-11-09 15:09:54,0
+18839,9,1,19,134,2017-11-07 16:24:34,0
+118756,3,1,13,280,2017-11-07 05:01:55,0
+71643,3,1,15,489,2017-11-07 08:24:40,0
+110039,9,1,13,442,2017-11-07 02:05:18,0
+78966,12,1,19,205,2017-11-08 01:38:02,0
+37249,18,1,13,107,2017-11-09 12:23:49,0
+121108,25,1,10,259,2017-11-07 09:16:41,0
+5348,18,1,8,379,2017-11-09 12:57:08,0
+273533,12,1,19,265,2017-11-08 02:45:45,0
+29950,36,1,13,110,2017-11-09 02:59:48,0
+119369,24,2,18,105,2017-11-09 14:47:46,0
+200296,9,1,30,232,2017-11-07 08:15:27,0
+55840,2,1,19,364,2017-11-07 01:28:19,0
+278711,3,1,10,442,2017-11-08 06:16:02,0
+55705,13,1,10,469,2017-11-09 10:23:39,0
+37515,15,1,19,245,2017-11-07 14:06:11,0
+103358,3,1,19,480,2017-11-08 07:48:35,0
+27607,14,1,10,349,2017-11-08 05:46:02,0
+209663,14,1,13,480,2017-11-08 08:04:14,0
+84870,36,1,13,110,2017-11-08 11:13:58,0
+141879,15,1,18,386,2017-11-07 14:35:05,0
+53964,15,1,13,412,2017-11-08 09:23:06,0
+34143,18,1,13,107,2017-11-09 12:18:29,0
+158591,2,1,18,377,2017-11-09 04:27:44,0
+92890,20,1,19,478,2017-11-08 03:12:39,0
+247799,3,1,13,280,2017-11-09 02:29:32,0
+306019,2,1,17,212,2017-11-09 12:26:08,0
+5348,18,1,1,107,2017-11-07 13:30:27,0
+195965,15,1,13,111,2017-11-08 09:52:25,0
+246887,9,1,13,442,2017-11-08 06:00:36,0
+273598,20,1,19,259,2017-11-08 11:31:47,0
+125928,13,1,13,477,2017-11-08 13:21:32,0
+18049,9,1,19,334,2017-11-08 05:53:03,0
+159853,1,1,37,17,2017-11-07 09:26:59,0
+63267,2,1,19,317,2017-11-08 10:01:51,0
+333710,2,1,27,212,2017-11-08 20:41:12,0
+92648,12,1,13,245,2017-11-07 14:53:06,0
+30151,3,1,19,409,2017-11-09 04:24:50,0
+93003,3,1,41,211,2017-11-08 17:08:10,0
+18483,3,1,19,280,2017-11-09 02:08:10,0
+44327,3,1,19,280,2017-11-08 15:36:24,0
+117481,18,1,19,107,2017-11-09 04:23:19,0
+54125,2,1,13,236,2017-11-09 12:16:55,0
+2805,2,1,10,237,2017-11-08 05:05:54,0
+28082,64,1,48,459,2017-11-08 13:34:22,0
+89800,9,1,18,466,2017-11-08 16:02:33,0
+145896,12,1,49,178,2017-11-08 11:57:36,0
+326339,13,1,13,477,2017-11-09 09:25:42,0
+74999,12,1,40,424,2017-11-09 03:57:36,0
+154636,12,1,20,178,2017-11-07 08:23:35,0
+49996,15,1,19,265,2017-11-07 08:55:49,0
+31119,15,1,12,265,2017-11-07 12:28:37,0
+103536,8,1,19,145,2017-11-07 06:53:17,0
+47306,12,1,17,124,2017-11-09 03:28:14,0
+48212,2,1,19,237,2017-11-08 12:25:56,0
+53454,18,1,19,107,2017-11-07 14:58:09,0
+114276,3,1,15,115,2017-11-08 12:59:03,0
+282570,2,1,18,237,2017-11-08 04:14:47,0
+105475,18,1,20,121,2017-11-09 09:06:21,0
+60010,2,1,13,477,2017-11-07 05:47:14,0
+152362,3,1,19,280,2017-11-07 07:21:19,0
+10571,12,1,11,328,2017-11-08 14:05:06,0
+101407,64,1,23,459,2017-11-08 08:29:47,0
+95766,14,1,25,467,2017-11-09 13:30:45,0
+163707,9,2,19,258,2017-11-07 13:16:59,0
+129584,18,1,1,376,2017-11-08 11:55:51,0
+124897,2,1,19,205,2017-11-07 02:56:05,0
+109679,18,1,19,134,2017-11-08 21:31:22,0
+103495,3,1,19,280,2017-11-07 14:03:00,0
+27639,18,1,15,107,2017-11-09 02:18:49,0
+88964,12,1,18,259,2017-11-07 09:58:15,0
+178822,64,1,19,459,2017-11-08 07:36:45,0
+96367,9,1,15,134,2017-11-08 00:54:10,0
+60298,15,1,23,245,2017-11-06 16:16:52,0
+50136,15,1,19,3,2017-11-08 18:28:18,0
+320565,14,1,13,113,2017-11-09 10:23:53,0
+188929,10,1,9,377,2017-11-09 00:42:41,0
+89529,10,1,13,377,2017-11-08 08:11:33,0
+23522,11,1,20,173,2017-11-08 10:02:38,0
+66507,10,1,7,377,2017-11-09 13:53:48,0
+112302,94,1,13,361,2017-11-09 14:37:09,0
+124157,3,1,25,280,2017-11-07 02:57:15,0
+116947,2,1,8,122,2017-11-08 23:42:00,0
+154925,18,1,15,121,2017-11-07 09:24:57,0
+73516,8,2,35,145,2017-11-07 11:43:08,0
+5348,151,0,0,347,2017-11-07 20:19:21,0
+2076,15,1,13,130,2017-11-06 17:02:06,0
+109743,2,1,41,205,2017-11-08 04:25:34,0
+126767,12,1,17,328,2017-11-08 04:09:00,0
+168318,15,1,22,245,2017-11-07 12:43:03,0
+109504,12,1,47,497,2017-11-06 17:27:51,0
+360082,3,1,13,280,2017-11-09 04:06:38,0
+48170,20,1,19,259,2017-11-07 09:23:42,0
+99226,11,1,19,219,2017-11-07 02:19:59,0
+101096,14,1,10,442,2017-11-08 22:16:00,0
+28986,3,1,36,371,2017-11-08 00:21:24,0
+24582,14,1,10,489,2017-11-08 06:57:39,0
+75634,12,1,18,178,2017-11-09 03:20:45,0
+125081,9,2,9,107,2017-11-09 14:53:59,0
+19064,64,1,13,459,2017-11-07 11:37:28,0
+67763,3,1,22,137,2017-11-09 04:43:31,0
+41256,26,1,13,121,2017-11-09 08:22:25,0
+128210,15,1,31,153,2017-11-07 10:27:39,0
+40995,23,1,13,153,2017-11-07 02:03:19,0
+38379,18,1,13,376,2017-11-08 05:37:08,0
+1395,3,1,19,489,2017-11-09 03:26:07,0
+153937,14,1,20,401,2017-11-08 03:12:33,0
+13634,18,1,18,121,2017-11-09 04:19:26,0
+25588,2,1,19,205,2017-11-09 01:10:46,0
+198559,8,1,25,145,2017-11-09 11:29:03,0
+99754,3,1,13,173,2017-11-09 13:33:59,0
+63597,3,1,22,280,2017-11-07 05:30:00,0
+159889,15,1,20,315,2017-11-07 01:08:48,0
+227947,15,1,23,412,2017-11-08 00:16:44,0
+236755,3,1,12,137,2017-11-08 06:17:56,0
+53889,12,1,13,259,2017-11-08 03:44:31,0
+29271,18,1,32,134,2017-11-08 07:54:41,0
+25753,3,1,13,280,2017-11-09 07:27:39,0
+107155,56,1,13,406,2017-11-09 13:42:04,0
+33835,2,1,19,452,2017-11-07 01:20:07,0
+298312,19,0,29,347,2017-11-09 08:53:17,1
+123813,3,1,13,280,2017-11-09 01:10:05,0
+116235,18,1,25,107,2017-11-08 15:52:41,0
+108341,21,2,49,232,2017-11-09 08:46:49,0
+137764,9,1,25,334,2017-11-07 01:12:09,0
+239848,3,1,19,280,2017-11-08 14:29:04,0
+192967,2,2,63,364,2017-11-07 16:02:19,0
+207807,26,1,19,266,2017-11-07 06:01:52,0
+88723,25,2,9,259,2017-11-08 02:42:06,0
+88637,2,1,13,237,2017-11-09 00:11:58,0
+24795,9,1,14,334,2017-11-07 05:25:49,0
+135374,14,1,18,401,2017-11-08 01:56:11,0
+83662,12,1,19,409,2017-11-08 00:48:21,0
+45745,9,1,19,127,2017-11-09 01:01:45,0
+49431,12,1,18,265,2017-11-09 04:08:17,0
+19869,3,1,13,115,2017-11-07 14:05:26,0
+18682,36,1,17,110,2017-11-07 14:56:24,0
+107907,14,1,13,401,2017-11-09 09:30:44,0
+74924,11,1,19,487,2017-11-08 23:13:58,0
+151603,10,1,37,317,2017-11-07 03:36:20,0
+1694,18,1,35,107,2017-11-09 14:10:15,0
+111025,14,1,9,480,2017-11-08 13:36:05,0
+141432,2,1,19,219,2017-11-07 08:25:51,0
+65631,3,1,10,205,2017-11-07 15:07:52,0
+248946,11,1,19,469,2017-11-08 12:36:38,0
+196529,2,1,19,469,2017-11-08 06:42:33,0
+90891,15,1,13,245,2017-11-08 04:47:25,0
+90948,12,1,18,245,2017-11-08 13:25:52,0
+71380,2,1,19,452,2017-11-07 16:47:44,0
+127743,125,0,29,110,2017-11-09 05:18:55,0
+54009,18,1,8,121,2017-11-08 05:10:35,0
+125222,3,1,13,280,2017-11-09 00:51:49,0
+160456,3,1,36,280,2017-11-08 12:20:24,0
+29972,15,1,11,386,2017-11-09 13:22:25,0
+198164,17,1,20,280,2017-11-08 23:16:45,0
+102762,12,1,19,277,2017-11-08 00:26:46,0
+50433,14,1,8,134,2017-11-09 01:14:13,0
+126685,12,1,13,328,2017-11-07 03:59:30,0
+80163,3,1,19,442,2017-11-07 15:15:11,0
+78531,3,1,41,280,2017-11-07 06:08:30,0
+53454,2,2,8,122,2017-11-08 12:16:54,0
+40995,12,1,6,497,2017-11-08 11:19:36,0
+3197,15,1,13,245,2017-11-06 22:27:55,0
+223623,9,1,19,466,2017-11-09 12:07:54,0
+115690,2,1,19,122,2017-11-07 01:16:00,0
+55024,14,1,13,349,2017-11-08 13:11:11,0
+80689,3,1,19,280,2017-11-07 00:50:57,0
+222432,15,2,65,245,2017-11-08 21:38:49,0
+49293,2,1,37,469,2017-11-07 01:55:54,0
+93715,9,1,19,466,2017-11-07 10:49:19,0
+19264,9,1,19,466,2017-11-08 12:32:32,0
+100971,12,1,19,409,2017-11-07 19:45:26,0
+81922,3,1,20,280,2017-11-08 12:20:35,0
+85150,12,2,27,259,2017-11-07 10:53:27,0
+48518,18,3543,748,107,2017-11-07 23:28:36,0
+72607,9,1,41,442,2017-11-09 13:46:36,0
+124045,12,1,20,259,2017-11-08 10:45:26,0
+81501,9,1,25,244,2017-11-07 11:18:48,0
+34380,13,1,13,477,2017-11-08 23:52:21,0
+115445,9,1,19,489,2017-11-07 16:36:00,0
+72361,3,1,13,480,2017-11-09 12:54:08,0
+8595,12,1,18,145,2017-11-09 15:25:08,0
+189032,13,1,17,477,2017-11-08 11:44:19,0
+83958,18,1,6,107,2017-11-08 23:15:04,0
+125222,3,2,27,480,2017-11-09 01:21:16,0
+48383,18,1,53,317,2017-11-07 03:42:33,0
+93021,9,1,19,232,2017-11-09 15:49:18,0
+120385,2,1,13,401,2017-11-08 05:01:49,0
+37167,12,1,53,265,2017-11-07 11:27:52,0
+49914,3,1,19,280,2017-11-09 00:07:56,0
+163572,3,1,19,280,2017-11-07 08:00:44,0
+68384,3,1,14,280,2017-11-09 06:11:56,0
+73516,12,2,19,326,2017-11-08 09:33:48,0
+12562,2,1,8,219,2017-11-08 10:47:20,0
+73908,94,1,18,361,2017-11-08 02:44:30,0
+195980,2,1,3,364,2017-11-07 14:54:50,0
+79827,15,1,13,245,2017-11-07 05:15:13,0
+25028,25,1,13,259,2017-11-08 08:56:59,0
+44067,1,1,13,134,2017-11-07 14:08:05,0
+61313,2,1,13,236,2017-11-08 03:23:02,0
+125485,14,1,36,371,2017-11-09 15:56:10,0
+114736,3,1,13,480,2017-11-07 03:05:21,0
+44229,22,1,13,496,2017-11-07 06:18:13,0
+83306,2,1,35,477,2017-11-08 06:13:47,0
+133331,3,1,13,480,2017-11-08 23:43:13,0
+124520,2,1,13,219,2017-11-07 14:41:10,0
+705,2,1,13,469,2017-11-07 12:35:32,0
+316811,3,1,19,280,2017-11-09 12:39:28,0
+41261,9,2,42,466,2017-11-08 03:51:05,0
+99862,9,1,9,215,2017-11-09 04:11:47,0
+45992,17,1,14,128,2017-11-09 14:22:37,0
+130760,3,1,14,115,2017-11-07 15:56:45,0
+9057,12,1,13,265,2017-11-07 19:06:17,0
+98472,2,1,19,122,2017-11-08 10:24:50,0
+38142,12,1,19,328,2017-11-06 23:00:37,0
+4652,3,1,13,280,2017-11-07 04:16:16,0
+13756,27,1,19,153,2017-11-06 22:55:18,0
+257183,2,1,10,219,2017-11-08 13:50:39,0
+152714,13,1,8,477,2017-11-07 05:04:03,0
+55722,3,2,13,30,2017-11-09 11:04:46,0
+316642,18,1,13,107,2017-11-08 16:11:07,0
+43289,32,1,19,376,2017-11-09 05:21:57,0
+47490,18,1,19,107,2017-11-07 01:55:15,0
+5587,12,1,19,481,2017-11-08 06:32:45,0
+86376,32,1,79,376,2017-11-09 11:52:56,0
+84896,28,1,22,135,2017-11-08 01:27:09,0
+9460,12,1,25,409,2017-11-07 09:06:34,0
+6375,8,1,19,145,2017-11-07 06:46:49,0
+334967,2,1,13,219,2017-11-09 13:46:15,0
+5348,6,1,13,125,2017-11-07 02:07:09,0
+4412,2,1,19,364,2017-11-08 09:29:43,0
+92100,18,1,19,107,2017-11-08 15:47:14,0
+361704,3,1,13,480,2017-11-09 02:59:02,0
+25119,14,1,19,134,2017-11-08 01:23:36,0
+43793,3,1,13,452,2017-11-07 01:49:43,0
+67581,2,1,8,435,2017-11-09 09:59:46,0
+126168,14,1,19,113,2017-11-08 02:42:41,0
+79787,19,0,29,213,2017-11-09 11:48:30,0
+75644,24,1,13,105,2017-11-08 21:14:15,0
+59869,9,1,10,445,2017-11-07 03:09:51,0
+73516,3,2,16,153,2017-11-08 06:03:15,0
+41853,3,1,17,442,2017-11-07 07:27:30,0
+193406,12,1,19,259,2017-11-07 03:10:16,0
+112715,12,1,32,328,2017-11-09 15:33:45,0
+30587,3,2,19,280,2017-11-08 06:10:21,0
+197705,21,1,19,232,2017-11-08 09:58:50,0
+86420,3,1,22,280,2017-11-07 02:14:26,0
+239385,15,1,19,140,2017-11-08 01:01:36,0
+84774,15,1,32,245,2017-11-08 14:50:11,0
+141849,15,1,25,245,2017-11-08 17:00:53,0
+157020,14,1,13,480,2017-11-07 03:24:28,0
+140124,22,1,10,116,2017-11-09 10:37:26,0
+19873,3,1,13,173,2017-11-09 00:18:47,0
+112302,3,1,19,205,2017-11-09 14:08:27,0
+127360,12,1,53,340,2017-11-08 17:21:22,0
+163713,12,1,14,245,2017-11-06 19:44:56,0
+7120,14,1,13,401,2017-11-09 10:27:03,0
+60501,61,1,13,21,2017-11-07 07:48:27,0
+200436,12,1,23,259,2017-11-08 04:42:34,0
+95766,15,1,9,138,2017-11-09 14:00:29,0
+156272,14,1,19,349,2017-11-07 00:02:02,0
+93542,3,1,3,280,2017-11-09 13:42:56,0
+136421,2,2,13,236,2017-11-07 00:32:27,0
+92735,36,1,13,110,2017-11-08 11:46:41,0
+86474,9,1,19,232,2017-11-07 06:52:29,0
+45863,3,1,13,280,2017-11-08 13:43:12,0
+57060,9,2,37,215,2017-11-09 07:33:23,0
+118146,5,1,2,377,2017-11-08 13:31:28,0
+17149,12,1,58,328,2017-11-07 13:46:06,0
+33919,2,1,13,469,2017-11-07 11:50:01,0
+100602,26,1,19,121,2017-11-08 02:27:23,0
+31401,24,1,16,178,2017-11-07 10:55:31,0
+189816,9,1,19,445,2017-11-08 09:31:12,0
+47071,64,1,10,459,2017-11-08 00:00:17,0
+48502,14,1,53,379,2017-11-07 22:51:49,0
+79787,2,1,36,377,2017-11-06 23:11:41,0
+48062,14,1,17,379,2017-11-07 18:15:43,0
+73516,12,2,13,245,2017-11-06 16:05:10,0
+123948,3,1,8,409,2017-11-07 14:52:45,0
+63030,3,1,19,280,2017-11-08 01:29:16,0
+48683,18,1,19,134,2017-11-08 05:51:17,0
+53769,12,1,28,259,2017-11-06 16:06:34,0
+33934,15,1,37,265,2017-11-08 13:52:25,0
+122736,15,1,37,265,2017-11-07 03:36:43,0
+122593,11,1,13,319,2017-11-07 13:37:09,0
+4019,12,1,13,265,2017-11-08 17:33:34,0
+95570,12,1,17,265,2017-11-09 00:42:13,0
+11133,3,1,6,173,2017-11-09 06:54:39,0
+43834,3,1,8,280,2017-11-08 00:34:46,0
+105433,2,2,32,205,2017-11-09 12:59:15,0
+100176,9,1,58,232,2017-11-07 18:13:58,0
+100275,25,1,3,259,2017-11-06 16:24:22,0
+50028,9,1,17,445,2017-11-07 05:39:30,0
+71117,1,1,13,115,2017-11-08 08:48:22,0
+37262,3,1,37,466,2017-11-09 07:46:02,0
+53651,22,1,14,116,2017-11-09 02:31:59,0
+333092,3,1,17,130,2017-11-09 01:12:13,0
+94117,12,1,41,105,2017-11-09 04:40:37,0
+109734,26,1,41,121,2017-11-07 04:51:03,0
+2208,3,1,19,280,2017-11-08 13:32:29,0
+158559,12,1,10,265,2017-11-08 08:13:32,0
+234660,15,1,13,265,2017-11-08 09:09:24,0
+53960,2,1,43,205,2017-11-08 23:01:59,0
+50131,11,1,17,137,2017-11-09 05:11:21,0
+80488,12,1,15,265,2017-11-09 07:20:52,0
+48240,3,1,10,19,2017-11-06 23:45:34,0
+5954,3,1,30,211,2017-11-08 11:50:09,0
+228706,12,1,13,265,2017-11-08 07:12:18,0
+80219,7,1,18,101,2017-11-09 02:35:53,0
+77399,18,1,19,107,2017-11-08 11:31:34,0
+34985,3,1,19,115,2017-11-08 11:07:19,0
+18942,14,1,8,379,2017-11-09 05:08:47,0
+5574,26,1,3,121,2017-11-06 16:09:50,0
+39421,9,1,17,215,2017-11-06 16:08:41,0
+5314,12,1,13,265,2017-11-07 12:58:38,0
+15114,15,1,53,245,2017-11-07 18:31:46,0
+8848,2,1,17,243,2017-11-07 21:55:07,0
+67585,12,1,19,409,2017-11-09 02:44:35,0
+172960,15,1,19,265,2017-11-07 00:23:01,0
+4052,14,1,16,134,2017-11-08 10:07:12,0
+100393,18,1,30,107,2017-11-07 11:10:23,0
+26643,3,2,19,115,2017-11-08 11:33:35,0
+229712,15,1,22,111,2017-11-09 15:55:59,0
+69665,9,1,19,466,2017-11-09 05:23:38,0
+40001,11,1,13,219,2017-11-07 09:21:35,0
+116740,21,1,13,128,2017-11-07 06:43:20,0
+23086,2,1,19,469,2017-11-07 04:28:00,0
+73610,12,1,18,497,2017-11-07 11:39:19,0
+69449,12,1,13,245,2017-11-08 05:38:13,0
+7088,9,1,16,445,2017-11-08 06:02:24,0
+5348,2,1,9,477,2017-11-07 15:41:25,0
+3743,9,1,18,107,2017-11-09 12:06:54,0
+30151,3,1,40,417,2017-11-08 09:28:57,0
+301960,12,1,13,259,2017-11-09 01:23:41,0
+178873,1,1,6,125,2017-11-07 14:12:21,0
+21660,9,1,13,134,2017-11-08 01:19:54,0
+105061,15,1,19,130,2017-11-08 01:33:10,0
+244689,5,1,22,377,2017-11-08 11:33:13,0
+77346,3,1,10,280,2017-11-09 04:29:36,0
+37255,15,1,12,140,2017-11-07 15:49:19,0
+36728,27,1,19,122,2017-11-07 09:53:42,0
+78787,2,1,18,219,2017-11-09 03:48:58,0
+222475,12,1,15,497,2017-11-08 08:10:56,0
+88018,18,1,14,439,2017-11-09 00:28:05,0
+162378,1,1,3,153,2017-11-09 05:22:11,0
+39220,3,1,13,280,2017-11-07 00:48:03,0
+113350,8,1,17,145,2017-11-09 10:27:41,0
+195042,3,1,6,442,2017-11-07 07:47:24,0
+107932,15,1,10,245,2017-11-07 01:37:52,0
+81121,12,1,18,265,2017-11-07 05:40:40,0
+4126,8,1,23,145,2017-11-09 06:42:00,0
+183284,12,1,13,265,2017-11-07 00:41:31,0
+27482,18,1,22,107,2017-11-09 00:25:26,0
+193346,3,1,13,205,2017-11-09 12:37:45,0
+48212,64,1,13,459,2017-11-09 04:14:51,0
+114655,2,1,22,237,2017-11-07 03:07:42,0
+84477,23,1,10,153,2017-11-07 05:36:15,0
+203803,18,1,18,121,2017-11-07 15:26:30,0
+5785,14,1,22,360,2017-11-07 00:14:23,0
+73954,12,1,15,265,2017-11-08 08:39:03,0
+35538,47,1,49,484,2017-11-09 11:38:27,0
+114235,14,1,27,118,2017-11-08 02:32:47,0
+84774,2,1,19,237,2017-11-08 12:17:20,0
+152714,6,1,10,125,2017-11-08 12:46:48,0
+55846,2,1,18,237,2017-11-08 02:14:35,0
+13104,3,1,19,480,2017-11-08 23:41:37,0
+69260,12,1,13,409,2017-11-08 15:56:08,0
+86479,3,1,13,480,2017-11-08 01:12:07,0
+66824,3,1,19,137,2017-11-09 10:35:26,0
+78672,9,1,37,253,2017-11-08 02:17:02,0
+155197,14,1,19,489,2017-11-06 17:09:24,0
+9868,3,1,13,480,2017-11-08 02:47:45,0
+38265,3,1,13,211,2017-11-08 23:39:31,0
+119349,14,1,8,356,2017-11-09 12:03:59,0
+41526,6,1,15,459,2017-11-08 07:02:11,0
+8297,12,1,13,265,2017-11-09 11:32:39,0
+295203,1,1,13,115,2017-11-08 23:49:40,0
+63483,12,1,25,19,2017-11-09 06:53:25,0
+308464,8,1,6,145,2017-11-09 04:03:20,0
+20362,7,1,27,101,2017-11-09 10:41:10,0
+247279,9,2,65,234,2017-11-07 22:53:16,0
+16155,27,1,13,122,2017-11-09 04:23:15,0
+60355,12,1,23,259,2017-11-09 04:21:55,0
+63981,3,1,13,280,2017-11-08 07:48:18,0
+37375,3,1,13,280,2017-11-08 07:18:22,0
+32290,19,0,50,347,2017-11-07 23:49:42,0
+23589,14,1,19,489,2017-11-09 00:31:44,0
+114220,8,1,19,145,2017-11-07 14:35:54,0
+15572,12,1,1,265,2017-11-07 16:35:45,0
+13634,18,1,20,134,2017-11-07 17:30:51,0
+36213,2,2,12,205,2017-11-09 11:27:46,0
+93155,2,1,13,435,2017-11-07 10:37:21,0
+19507,12,1,13,245,2017-11-07 10:16:07,0
+85644,2,1,37,219,2017-11-07 09:59:11,0
+357753,2,1,15,435,2017-11-08 22:25:54,0
+35007,64,1,12,459,2017-11-07 07:13:31,0
+20081,2,1,19,401,2017-11-07 16:09:15,0
+28420,9,1,19,134,2017-11-07 16:32:36,0
+53929,8,1,15,145,2017-11-07 01:42:21,0
+41082,9,2,10,334,2017-11-07 12:40:13,0
+5306,18,1,22,134,2017-11-09 00:02:57,0
+69449,15,1,18,412,2017-11-08 04:51:39,0
+95766,2,1,6,477,2017-11-07 13:42:41,0
+93065,12,1,47,140,2017-11-07 14:06:17,0
+49006,9,1,19,489,2017-11-09 05:12:31,0
+32663,18,1,15,107,2017-11-08 00:45:34,0
+95766,14,1,22,379,2017-11-09 05:56:53,0
+84900,12,1,19,481,2017-11-09 14:16:52,0
+32115,12,1,17,265,2017-11-07 18:46:47,0
+27607,14,1,9,401,2017-11-06 17:04:20,0
+25679,2,1,19,205,2017-11-09 14:09:13,0
+64268,9,1,19,258,2017-11-08 08:45:33,0
+168650,3,1,19,489,2017-11-08 06:48:57,0
+117033,3,1,22,211,2017-11-07 01:28:18,0
+17231,2,1,19,469,2017-11-07 10:53:22,0
+105932,2,2,13,364,2017-11-06 17:08:28,0
+64194,15,1,14,140,2017-11-07 01:27:22,0
+192879,15,1,19,265,2017-11-07 04:50:49,0
+53664,3,1,15,489,2017-11-08 07:59:02,0
+98424,14,1,17,463,2017-11-08 06:09:55,0
+121994,3,1,58,115,2017-11-09 11:21:01,0
+34980,26,1,19,477,2017-11-08 14:47:04,0
+14809,9,1,32,466,2017-11-09 14:17:27,0
+113326,20,1,19,478,2017-11-09 11:47:50,0
+173152,46,0,24,347,2017-11-07 08:06:00,0
+178873,2,1,15,205,2017-11-08 11:37:19,0
+113276,3,1,19,280,2017-11-07 13:52:32,0
+175316,9,1,41,234,2017-11-08 17:43:06,0
+108942,18,1,19,107,2017-11-06 18:31:27,0
+34450,14,1,32,442,2017-11-07 15:00:37,0
+2572,13,1,28,477,2017-11-08 00:10:35,0
+14737,3,1,19,280,2017-11-08 00:38:01,0
+195400,14,1,19,134,2017-11-07 01:40:32,0
+87556,14,1,13,401,2017-11-07 23:49:51,0
+55891,3,1,18,480,2017-11-07 05:45:27,0
+5178,18,1,19,134,2017-11-09 06:02:31,0
+121339,26,1,17,477,2017-11-09 12:04:20,0
+67606,3,1,14,153,2017-11-08 02:38:08,0
+5314,27,1,19,153,2017-11-07 14:27:11,0
+16188,19,0,21,213,2017-11-09 15:04:12,1
+105612,3,1,19,280,2017-11-09 03:01:42,0
+82843,9,1,22,134,2017-11-08 23:32:05,0
+129882,3,1,19,280,2017-11-08 13:18:42,0
+86767,28,1,18,135,2017-11-07 13:26:33,0
+152056,12,1,13,205,2017-11-07 04:59:43,0
+125460,23,1,20,479,2017-11-06 22:48:57,0
+28501,26,1,13,121,2017-11-09 10:45:02,0
+95063,3,1,35,280,2017-11-09 03:39:43,0
+54841,12,1,20,277,2017-11-08 00:48:38,0
+18466,3,1,13,137,2017-11-07 02:51:45,0
+53670,3,1,17,280,2017-11-09 06:43:28,0
+10456,2,1,17,236,2017-11-07 04:04:37,0
+75634,12,1,19,245,2017-11-08 02:52:35,0
+10527,15,1,19,245,2017-11-09 05:22:39,0
+111025,18,1,19,107,2017-11-08 13:14:16,0
+18246,12,1,19,178,2017-11-08 05:59:04,0
+135364,9,1,13,489,2017-11-07 08:52:54,0
+62036,3,1,13,409,2017-11-08 01:11:38,0
+38300,2,1,22,237,2017-11-08 04:55:16,0
+33060,23,1,12,153,2017-11-07 14:01:42,0
+79049,2,1,15,205,2017-11-09 12:49:48,0
+36052,12,1,17,145,2017-11-07 05:41:00,0
+127986,1,1,6,452,2017-11-08 02:14:07,0
+33110,3,1,2,280,2017-11-09 01:48:14,0
+45213,14,1,57,224,2017-11-08 10:53:33,0
+72951,12,1,14,245,2017-11-08 13:01:39,0
+48569,12,1,13,409,2017-11-09 01:31:33,0
+100245,3,1,42,205,2017-11-07 09:13:24,0
+124938,2,1,13,477,2017-11-07 03:50:48,0
+13597,15,1,37,379,2017-11-07 10:40:30,0
+4052,13,1,19,477,2017-11-07 14:02:33,0
+114276,12,1,19,328,2017-11-07 14:50:04,0
+21065,3,1,22,205,2017-11-06 23:20:36,0
+120709,14,1,18,463,2017-11-07 05:12:54,0
+110296,12,1,13,140,2017-11-07 09:23:32,0
+288989,1,1,13,115,2017-11-08 18:12:01,0
+10074,3,1,13,280,2017-11-07 06:46:58,0
+29107,1,1,40,134,2017-11-08 13:36:23,0
+23616,18,1,1,107,2017-11-07 00:37:07,0
+28652,15,1,25,386,2017-11-07 09:06:17,0
+4330,3,1,19,452,2017-11-09 01:13:18,0
+105560,2,2,19,243,2017-11-07 09:44:55,0
+68776,12,1,27,245,2017-11-08 07:28:12,0
+62109,3,1,13,280,2017-11-08 01:03:40,0
+64393,18,1,4,439,2017-11-08 23:32:30,0
+363653,3,1,6,182,2017-11-09 10:07:41,0
+26814,266,3866,866,347,2017-11-09 11:48:15,0
+107527,18,1,22,121,2017-11-08 13:51:16,0
+17077,3,1,19,137,2017-11-07 22:29:28,0
+92668,11,1,15,137,2017-11-09 06:22:50,0
+30614,9,2,13,334,2017-11-08 09:11:56,0
+115748,13,1,10,477,2017-11-07 05:46:51,0
+30720,12,1,19,259,2017-11-06 16:52:52,0
+46755,12,1,17,265,2017-11-09 04:37:12,0
+109009,2,1,42,435,2017-11-09 04:30:02,0
+26361,27,1,13,122,2017-11-08 23:55:36,0
+18649,2,1,17,435,2017-11-07 04:37:57,0
+85489,12,1,19,178,2017-11-07 12:03:39,0
+31054,12,1,14,245,2017-11-09 04:33:35,0
+5147,1,1,41,135,2017-11-07 08:10:34,0
+103527,21,1,13,128,2017-11-09 06:40:38,0
+43692,2,1,19,435,2017-11-07 01:12:11,0
+103411,3,2,19,137,2017-11-09 15:42:35,0
+50897,3,1,19,280,2017-11-08 13:19:08,0
+151929,3,1,19,115,2017-11-09 07:54:18,0
+76488,26,1,3,121,2017-11-08 22:21:10,0
+100485,12,1,13,178,2017-11-07 10:22:32,0
+51299,2,1,25,237,2017-11-07 04:11:41,0
+59295,2,1,10,219,2017-11-08 06:33:13,0
+99897,12,1,13,19,2017-11-09 01:20:10,0
+69710,15,1,19,130,2017-11-08 06:07:34,0
+156240,24,1,16,105,2017-11-07 13:23:32,0
+206406,15,1,19,245,2017-11-07 15:53:11,0
+360276,2,1,19,237,2017-11-09 01:53:52,0
+79555,18,1,20,107,2017-11-09 14:31:04,0
+129477,21,1,13,128,2017-11-07 09:14:59,0
+100275,8,1,19,145,2017-11-06 16:02:05,0
+133331,3,1,19,489,2017-11-06 16:02:18,0
+118756,3,1,19,19,2017-11-09 07:00:29,0
+206220,6,1,15,459,2017-11-07 14:57:51,0
+89981,15,1,20,245,2017-11-07 11:59:14,0
+25695,3,1,20,280,2017-11-07 03:42:29,0
+126042,17,1,8,280,2017-11-07 04:19:59,0
+48395,3,1,16,280,2017-11-08 09:20:04,0
+74847,18,1,13,107,2017-11-08 13:06:32,0
+281929,3,1,19,280,2017-11-08 00:20:22,0
+59043,12,1,19,481,2017-11-09 07:03:22,0
+18848,15,1,18,153,2017-11-08 17:13:06,0
+50924,9,1,19,232,2017-11-08 17:19:05,0
+14615,15,1,13,245,2017-11-07 03:01:06,0
+103301,11,1,13,487,2017-11-09 05:56:10,0
+76855,12,1,19,205,2017-11-07 14:01:44,0
+17329,12,1,18,265,2017-11-06 22:36:12,0
+110168,25,1,3,259,2017-11-07 07:17:49,0
+82816,14,1,16,463,2017-11-07 14:35:31,0
+92506,15,1,22,245,2017-11-08 03:53:13,0
+188093,3,1,13,280,2017-11-07 15:36:27,0
+191922,2,1,19,122,2017-11-06 17:24:15,0
+77266,9,1,19,107,2017-11-09 06:31:27,0
+15565,12,1,20,245,2017-11-08 00:04:36,0
+50164,15,1,18,412,2017-11-07 13:34:36,0
+42103,12,1,22,245,2017-11-08 00:07:02,0
+5178,2,1,6,237,2017-11-07 04:28:47,0
+78950,12,1,19,259,2017-11-06 22:11:34,0
+84460,14,1,19,480,2017-11-08 06:55:17,0
+78124,19,0,29,213,2017-11-07 04:05:27,0
+73516,12,2,42,326,2017-11-06 18:39:10,0
+31785,11,1,13,487,2017-11-08 05:22:14,0
+32511,6,1,19,125,2017-11-07 12:14:53,0
+95837,14,1,19,134,2017-11-07 11:37:24,0
+111830,9,1,25,134,2017-11-09 15:56:56,0
+52052,9,1,19,466,2017-11-07 15:06:14,0
+92544,12,1,19,205,2017-11-08 09:02:15,0
+16984,25,1,11,259,2017-11-07 12:11:08,0
+18439,18,1,25,107,2017-11-07 09:52:57,0
+195974,12,1,19,212,2017-11-08 07:25:03,0
+85901,26,1,17,266,2017-11-08 10:24:57,0
+87073,3,1,15,130,2017-11-08 01:04:01,0
+124166,12,1,19,140,2017-11-08 01:34:06,0
+9177,3,1,22,442,2017-11-09 12:16:38,0
+41172,15,1,19,3,2017-11-08 11:26:25,0
+52278,9,1,22,215,2017-11-08 05:59:14,0
+57849,8,2,28,259,2017-11-07 10:41:51,0
+14112,11,1,13,487,2017-11-09 07:04:25,0
+1909,15,1,13,245,2017-11-07 08:12:31,0
+216665,12,1,13,245,2017-11-08 10:18:51,0
+88080,14,1,18,134,2017-11-09 07:05:23,0
+41392,3,1,22,480,2017-11-08 14:28:42,0
+52710,15,1,13,245,2017-11-07 22:24:26,0
+56257,2,1,2,477,2017-11-08 08:36:25,0
+81138,2,1,40,477,2017-11-09 07:12:41,0
+87000,15,1,35,245,2017-11-07 16:25:31,0
+33694,12,1,6,245,2017-11-07 12:51:31,0
+100502,15,1,17,265,2017-11-07 08:30:29,0
+223223,3,1,17,280,2017-11-08 13:47:02,0
+25616,18,1,32,107,2017-11-08 12:54:25,0
+238986,18,1,13,121,2017-11-09 10:56:56,0
+48282,3,1,19,30,2017-11-09 13:39:28,0
+64815,15,1,13,386,2017-11-08 13:52:11,0
+81598,27,1,31,153,2017-11-08 08:12:29,0
+65793,14,1,19,118,2017-11-09 03:43:02,0
+77943,12,1,18,205,2017-11-07 05:32:35,0
+85065,2,1,9,377,2017-11-09 14:33:16,0
+45868,12,1,22,178,2017-11-07 03:45:12,0
+78616,3,1,19,371,2017-11-09 07:37:03,0
+172923,3,1,18,280,2017-11-08 16:07:24,0
+101863,3,1,19,280,2017-11-09 03:08:46,0
+5328,21,1,17,128,2017-11-06 16:08:13,0
+2976,12,1,18,242,2017-11-08 06:46:05,0
+100182,11,1,19,137,2017-11-09 09:47:30,0
+73238,64,1,13,459,2017-11-08 15:21:19,0
+189635,2,1,13,236,2017-11-07 04:36:47,0
+28731,14,2,10,439,2017-11-08 11:48:23,0
+55853,3,1,32,280,2017-11-08 06:05:43,0
+53454,3,1,13,280,2017-11-08 14:08:04,0
+168248,2,1,13,237,2017-11-09 14:20:31,0
+11282,2,1,19,377,2017-11-09 12:25:48,0
+113543,14,1,17,379,2017-11-09 13:00:15,0
+119289,3,1,19,280,2017-11-09 10:32:09,0
+117078,3,1,22,280,2017-11-07 02:24:49,0
+119734,18,1,47,107,2017-11-08 09:33:37,0
+71111,3,1,10,115,2017-11-08 00:14:33,0
+50284,12,1,17,178,2017-11-07 13:20:20,0
+105475,12,1,6,265,2017-11-08 16:11:40,0
+165741,9,1,19,232,2017-11-07 00:41:04,0
+68891,14,1,3,134,2017-11-08 02:55:31,0
+170158,14,1,22,134,2017-11-08 16:19:03,0
+17447,15,1,19,278,2017-11-08 11:14:03,0
+7072,12,2,65,178,2017-11-07 14:29:20,0
+206411,64,1,19,459,2017-11-07 01:56:12,0
+346590,25,2,17,259,2017-11-09 13:55:52,0
+45992,12,1,19,481,2017-11-08 01:08:56,0
+81685,4,1,19,101,2017-11-09 15:20:42,0
+45473,28,1,20,135,2017-11-08 05:56:33,0
+92636,15,1,19,315,2017-11-07 10:00:20,0
+81398,15,1,27,245,2017-11-08 13:59:16,0
+48939,26,1,19,121,2017-11-08 07:40:14,0
+102038,8,1,13,145,2017-11-09 09:35:54,0
+125008,1,1,18,153,2017-11-09 14:00:20,0
+24905,6,1,19,125,2017-11-09 14:55:04,0
+113326,12,1,9,265,2017-11-07 09:42:06,0
+156240,12,1,19,145,2017-11-07 05:17:57,0
+323175,9,2,19,215,2017-11-09 14:12:22,0
+71458,18,1,19,121,2017-11-08 17:02:12,0
+34387,14,1,6,467,2017-11-08 16:09:12,0
+58672,12,1,17,105,2017-11-07 09:33:53,0
+111573,3,1,19,280,2017-11-08 14:10:56,0
+102970,18,1,2,107,2017-11-08 12:23:04,0
+40654,3,1,13,371,2017-11-07 01:46:23,0
+89430,15,1,19,153,2017-11-08 01:50:18,0
+46573,23,1,13,153,2017-11-09 15:10:59,0
+73487,2,1,19,435,2017-11-09 04:14:29,0
+93067,18,3032,607,107,2017-11-07 09:58:44,0
+31616,18,1,4,376,2017-11-07 15:42:34,0
+48057,15,1,25,245,2017-11-08 15:31:51,0
+124166,15,1,6,245,2017-11-07 04:11:04,0
+171073,3,1,3,417,2017-11-07 00:49:50,0
+206243,12,1,6,481,2017-11-07 23:32:33,0
+99809,18,1,16,121,2017-11-09 07:52:59,0
+49462,2,1,25,236,2017-11-09 05:07:36,0
+163593,64,1,19,459,2017-11-07 13:53:40,0
+68333,3,1,6,442,2017-11-09 14:57:36,0
+105215,13,1,27,400,2017-11-07 05:00:49,0
+102918,14,1,18,134,2017-11-09 08:59:42,0
+142067,12,1,13,481,2017-11-08 10:41:42,0
+204260,12,1,6,178,2017-11-09 14:30:17,0
+51740,9,1,13,334,2017-11-07 06:14:59,0
+78446,15,1,19,245,2017-11-07 16:57:52,0
+60854,15,1,19,245,2017-11-08 14:35:37,0
+48282,2,1,3,435,2017-11-08 04:12:57,0
+209897,3,1,34,115,2017-11-08 08:13:36,0
+149120,12,1,13,328,2017-11-07 09:41:51,0
+81453,15,1,6,315,2017-11-07 07:03:46,0
+173091,9,1,19,134,2017-11-09 01:28:25,0
+8210,2,2,22,205,2017-11-07 15:09:18,0
+165408,2,1,3,122,2017-11-07 10:42:56,0
+237884,13,1,9,477,2017-11-08 15:50:52,0
+139435,13,1,13,477,2017-11-07 01:35:48,0
+163607,19,0,24,347,2017-11-09 03:20:52,0
+213532,2,1,8,469,2017-11-08 04:35:10,0
+45522,19,0,0,213,2017-11-08 00:15:22,0
+54587,12,1,13,178,2017-11-09 13:17:43,0
+68965,26,1,20,121,2017-11-08 01:00:22,0
+13338,3,1,37,280,2017-11-08 03:45:37,0
+125050,3,1,40,280,2017-11-07 08:10:56,0
+4977,13,1,44,469,2017-11-09 09:18:24,0
+78289,2,1,19,236,2017-11-09 01:01:49,0
+77295,13,1,17,469,2017-11-09 15:13:37,0
+64322,12,1,28,245,2017-11-06 16:53:46,0
+71575,18,1,32,439,2017-11-09 09:38:47,0
+14074,12,1,3,259,2017-11-06 16:21:57,0
+78500,12,1,13,178,2017-11-09 00:41:59,0
+154155,12,1,13,265,2017-11-07 12:43:04,0
+48656,6,1,43,459,2017-11-07 15:23:24,0
+37935,21,1,20,232,2017-11-07 17:03:08,0
+79352,2,1,18,212,2017-11-07 12:41:52,0
+17321,2,1,13,477,2017-11-07 06:12:12,0
+84725,15,1,13,265,2017-11-08 09:52:00,0
+63189,14,1,13,442,2017-11-08 03:52:30,0
+238590,2,1,19,122,2017-11-08 15:21:16,0
+100275,15,1,13,245,2017-11-06 17:52:26,0
+239820,11,1,13,219,2017-11-09 07:02:08,0
+973,3,1,17,280,2017-11-09 07:10:30,0
+179907,12,1,19,265,2017-11-07 08:03:30,0
+9624,1,1,19,153,2017-11-07 04:18:14,0
+121269,9,1,3,215,2017-11-09 11:00:44,0
+53715,12,1,13,140,2017-11-09 15:18:18,0
+41147,12,1,13,259,2017-11-09 03:25:22,0
+139605,24,2,10,105,2017-11-07 05:06:28,0
+105560,12,1,6,265,2017-11-07 15:04:18,0
+103337,2,1,13,237,2017-11-09 11:02:00,0
+112088,3,1,7,211,2017-11-09 05:38:54,0
+120112,12,1,19,265,2017-11-07 04:10:23,0
+48919,15,1,17,386,2017-11-09 02:55:59,0
+67197,9,1,17,466,2017-11-09 11:35:16,0
+39007,9,1,17,127,2017-11-08 18:35:18,0
+151192,17,1,17,280,2017-11-07 00:25:26,0
+160793,14,1,53,480,2017-11-07 03:28:17,0
+112765,2,1,58,469,2017-11-09 05:34:48,0
+110002,12,1,10,265,2017-11-07 15:58:47,0
+57803,9,1,19,244,2017-11-08 08:17:25,0
+85077,3,1,37,280,2017-11-08 09:09:20,0
+106257,3,1,34,280,2017-11-07 09:50:58,0
+61136,21,1,10,232,2017-11-09 08:56:31,0
+144614,21,1,25,128,2017-11-07 08:18:30,0
+94386,18,1,13,121,2017-11-09 01:05:48,0
+99024,2,1,13,219,2017-11-06 20:05:36,0
+77318,12,1,19,178,2017-11-09 13:33:43,0
+80714,20,1,17,259,2017-11-07 05:38:48,0
+199673,3,1,19,280,2017-11-08 08:33:30,0
+53632,12,1,19,19,2017-11-09 03:02:54,0
+65469,18,1,22,134,2017-11-09 14:32:50,0
+68371,2,1,13,212,2017-11-09 11:16:05,0
+28011,11,1,15,325,2017-11-08 05:28:29,0
+73487,2,1,17,212,2017-11-07 09:46:08,0
+223201,9,1,13,442,2017-11-08 12:18:39,0
+50512,2,1,25,477,2017-11-08 08:16:21,0
+176671,2,1,8,452,2017-11-07 10:38:19,0
+61664,2,1,22,122,2017-11-07 14:14:42,0
+27086,3,1,19,115,2017-11-09 06:46:19,0
+97541,2,1,27,435,2017-11-07 21:28:05,0
+4442,12,1,19,265,2017-11-08 15:30:34,0
+143936,3,1,13,137,2017-11-07 06:52:29,0
+88777,18,1,13,121,2017-11-08 13:29:06,0
+88696,64,1,19,459,2017-11-07 19:30:05,0
+134086,2,1,19,435,2017-11-06 23:41:33,0
+22338,2,1,1,237,2017-11-07 06:02:06,0
+115708,12,1,26,424,2017-11-06 16:53:29,0
+84445,14,1,19,379,2017-11-08 00:19:00,0
+180624,14,1,13,480,2017-11-07 02:28:01,0
+36061,14,1,47,439,2017-11-07 10:43:22,0
+139451,2,1,19,212,2017-11-07 09:41:09,0
+147153,15,1,19,3,2017-11-07 00:01:52,0
+10299,15,1,25,412,2017-11-09 00:34:40,0
+106485,23,1,18,479,2017-11-09 08:59:58,0
+71071,7,1,6,101,2017-11-09 09:15:33,0
+352951,6,1,25,459,2017-11-09 15:12:55,0
+42948,18,1,37,107,2017-11-08 11:42:22,0
+198793,3,1,19,173,2017-11-07 14:20:30,0
+29346,7,1,19,101,2017-11-09 05:40:12,0
+106598,8,1,36,140,2017-11-07 22:43:21,0
+68568,4,1,13,101,2017-11-08 02:12:30,0
+62064,6,1,13,125,2017-11-06 16:21:11,0
+95766,18,1,13,107,2017-11-08 13:47:52,0
+42153,11,1,17,319,2017-11-07 05:07:17,0
+201182,9,2,13,215,2017-11-07 11:16:10,0
+19142,2,2,13,122,2017-11-08 16:14:49,0
+43692,2,1,8,435,2017-11-08 11:08:12,0
+85625,18,1,19,107,2017-11-07 14:45:15,0
+96038,12,1,12,328,2017-11-08 00:32:10,0
+26375,47,1,19,484,2017-11-09 11:32:09,0
+122231,14,1,18,349,2017-11-07 03:07:03,0
+123239,18,1,19,439,2017-11-07 11:55:51,0
+101074,3,2,13,280,2017-11-08 11:13:30,0
+322344,3,1,19,280,2017-11-09 07:10:24,0
+324,18,1,19,439,2017-11-07 01:40:49,0
+44673,12,1,15,205,2017-11-07 05:10:23,0
+13142,3,1,25,280,2017-11-07 04:29:08,0
+283838,12,1,19,140,2017-11-09 13:06:10,0
+45609,1,1,17,17,2017-11-07 02:31:58,0
+104922,2,1,25,219,2017-11-06 23:24:09,0
+53786,9,1,19,134,2017-11-07 07:47:53,0
+5147,9,1,13,466,2017-11-08 12:54:33,0
+2153,12,1,19,265,2017-11-07 09:10:12,0
+119870,15,1,9,386,2017-11-07 11:09:35,0
+105540,15,1,41,412,2017-11-09 08:39:30,0
+119204,15,1,20,245,2017-11-09 04:41:13,0
+46568,3,1,13,379,2017-11-07 04:59:58,0
+49649,2,1,26,212,2017-11-09 09:18:50,0
+14955,26,1,19,121,2017-11-08 10:45:56,0
+9840,12,1,30,328,2017-11-08 11:19:28,0
+280785,1,1,36,17,2017-11-09 00:34:15,0
+226759,13,1,32,477,2017-11-08 14:25:54,0
+7153,9,1,13,107,2017-11-09 14:15:17,0
+137135,8,1,19,145,2017-11-09 05:41:51,0
+51627,23,1,49,153,2017-11-08 22:34:41,0
+100152,14,1,13,480,2017-11-09 12:04:22,0
+75588,8,1,19,145,2017-11-08 00:04:26,0
+89198,12,1,17,178,2017-11-09 02:43:12,0
+53960,2,2,9,205,2017-11-08 08:37:01,0
+71809,2,1,19,122,2017-11-08 09:37:06,0
+24795,13,3543,748,469,2017-11-07 16:28:51,0
+315264,1,1,71,24,2017-11-09 13:26:01,0
+171401,9,1,14,466,2017-11-08 13:20:10,0
+124136,12,2,9,178,2017-11-09 05:28:39,0
+38577,32,1,9,376,2017-11-08 12:32:19,0
+20904,3,1,18,280,2017-11-08 13:02:59,0
+93021,12,1,19,340,2017-11-09 06:47:26,0
+77184,12,1,12,178,2017-11-09 06:33:02,0
+29646,9,1,8,334,2017-11-07 05:28:21,0
+38242,9,1,18,244,2017-11-09 15:03:17,0
+30443,15,1,13,245,2017-11-07 11:21:31,0
+197864,3,1,13,280,2017-11-07 03:06:36,0
+119920,2,1,16,237,2017-11-07 15:54:00,0
+29315,3,1,16,280,2017-11-08 14:16:14,0
+138927,64,1,31,459,2017-11-07 02:50:03,0
+160058,15,1,23,245,2017-11-08 11:12:57,0
+14759,6,1,17,459,2017-11-07 11:52:03,0
+83098,3,1,9,280,2017-11-08 14:37:41,0
+27948,29,1,13,213,2017-11-07 09:35:43,1
+197965,6,1,19,110,2017-11-07 01:33:17,0
+105587,18,1,607,107,2017-11-07 09:31:01,0
+76900,11,1,15,137,2017-11-09 13:03:14,0
+28140,15,1,22,480,2017-11-08 15:06:19,0
+48054,2,1,17,219,2017-11-09 13:42:36,0
+109743,3,1,19,280,2017-11-07 06:59:32,0
+63762,12,1,8,424,2017-11-08 07:18:34,0
+30587,3,1,13,480,2017-11-08 13:27:55,0
+87337,23,1,19,153,2017-11-08 09:17:21,0
+251233,3,1,13,280,2017-11-08 00:15:14,0
+42190,33,3032,607,347,2017-11-07 15:09:09,0
+116472,2,1,12,364,2017-11-09 12:57:18,0
+105727,2,1,14,243,2017-11-07 07:46:56,0
+110880,12,1,13,245,2017-11-07 17:29:44,0
+127401,15,1,8,278,2017-11-07 09:46:33,0
+219430,18,1,8,121,2017-11-08 04:58:38,0
+59572,10,1,13,113,2017-11-07 02:28:42,0
+125222,12,1,8,105,2017-11-09 09:47:14,0
+96708,3,1,19,280,2017-11-09 00:44:59,0
+112775,9,1,22,127,2017-11-09 01:08:09,0
+29804,12,1,19,140,2017-11-09 08:30:29,0
+61667,2,1,19,477,2017-11-07 22:27:16,0
+111324,15,1,13,386,2017-11-09 03:05:19,0
+73238,18,1,17,107,2017-11-08 11:20:13,0
+44503,8,1,13,145,2017-11-07 23:29:53,0
+62117,15,1,19,111,2017-11-08 18:09:46,0
+15367,15,1,19,245,2017-11-08 04:04:53,0
+106040,12,1,18,245,2017-11-07 12:15:35,0
+95820,170,3543,748,347,2017-11-08 10:27:19,0
+116557,2,1,20,219,2017-11-09 08:57:11,0
+124136,18,1,19,107,2017-11-06 16:32:59,0
+123100,29,1,18,456,2017-11-08 14:58:43,0
+60752,1,1,19,134,2017-11-08 01:51:35,0
+36938,15,1,19,245,2017-11-08 06:16:34,0
+86521,3,1,41,409,2017-11-07 02:26:59,0
+198768,12,1,19,178,2017-11-07 00:16:35,0
+86860,18,1,19,439,2017-11-07 10:59:29,0
+113358,3,1,10,173,2017-11-07 09:58:49,0
+15616,25,1,35,259,2017-11-08 08:36:15,0
+8401,25,1,9,259,2017-11-08 22:47:09,0
+169293,3,1,6,280,2017-11-07 13:12:57,0
+186588,3,1,12,424,2017-11-06 23:43:46,0
+54376,3,1,19,417,2017-11-07 09:13:04,0
+37774,9,2,9,234,2017-11-06 16:29:40,0
+18927,3,1,47,137,2017-11-08 03:56:30,0
+179753,9,1,13,466,2017-11-08 02:14:55,0
+122431,9,1,13,244,2017-11-07 08:42:14,0
+283194,26,1,37,121,2017-11-09 07:20:05,0
+9182,21,1,15,232,2017-11-08 04:20:01,0
+96298,12,1,19,328,2017-11-08 04:40:31,0
+12711,15,1,8,315,2017-11-07 07:52:20,0
+131891,3,1,13,280,2017-11-08 05:31:30,0
+77233,3,1,8,280,2017-11-07 05:01:23,0
+6871,26,1,19,266,2017-11-06 20:52:00,0
+39756,2,2,19,205,2017-11-09 08:13:53,0
+266393,15,1,19,245,2017-11-07 22:38:25,0
+14895,19,0,21,213,2017-11-09 13:56:36,0
+187146,12,1,20,245,2017-11-07 13:45:41,0
+103023,14,1,13,489,2017-11-08 02:26:41,0
+105339,12,1,19,178,2017-11-09 03:41:43,0
+125141,3,1,14,280,2017-11-07 05:53:22,0
+129385,3,1,13,280,2017-11-07 01:12:07,0
+25588,2,1,25,205,2017-11-09 05:38:11,0
+148208,14,1,43,123,2017-11-07 08:23:13,0
+117463,9,1,13,334,2017-11-07 13:33:20,0
+83614,14,1,17,489,2017-11-09 10:45:18,0
+167836,14,1,19,463,2017-11-08 07:08:13,0
+60348,2,1,19,205,2017-11-09 01:39:54,0
+53964,94,1,9,361,2017-11-09 12:32:24,0
+67249,18,1,19,107,2017-11-07 02:09:00,0
+50924,7,1,13,101,2017-11-09 05:23:23,0
+40400,3,1,14,280,2017-11-07 07:14:51,0
+116987,15,1,8,245,2017-11-09 06:17:57,0
+36339,11,1,19,173,2017-11-09 07:42:05,0
+1117,15,1,25,315,2017-11-08 12:31:09,0
+350627,3,1,19,371,2017-11-09 15:29:39,0
+40631,15,1,19,245,2017-11-07 13:40:33,0
+122820,12,1,19,245,2017-11-09 01:40:52,0
+83595,18,1,13,439,2017-11-08 15:17:07,0
+63062,3,1,10,280,2017-11-08 18:51:11,0
+146600,24,1,13,105,2017-11-09 04:00:49,0
+91360,12,1,15,178,2017-11-07 09:10:27,0
+273335,18,1,22,107,2017-11-07 16:10:26,0
+325395,9,1,19,234,2017-11-09 12:57:43,0
+84410,3,1,13,130,2017-11-08 08:02:16,0
+86767,2,1,3,469,2017-11-08 14:57:50,0
+317816,15,1,19,3,2017-11-09 08:21:07,0
+44772,17,1,13,280,2017-11-08 07:01:11,0
+110703,9,1,20,334,2017-11-07 01:42:34,0
+79066,2,2,9,205,2017-11-09 06:14:19,0
+3363,2,1,6,477,2017-11-08 09:01:40,0
+204164,64,1,13,459,2017-11-07 04:12:14,0
+212034,3,1,13,19,2017-11-08 09:40:35,0
+81895,9,1,13,232,2017-11-09 09:55:03,0
+50087,3,1,18,280,2017-11-08 09:53:04,0
+72357,12,1,27,409,2017-11-07 19:09:24,0
+56298,12,1,19,245,2017-11-07 13:08:15,0
+45745,2,1,17,477,2017-11-08 15:58:48,0
+116956,15,1,17,245,2017-11-07 02:05:33,0
+114276,3,1,22,205,2017-11-07 10:38:10,0
+277327,9,1,13,334,2017-11-08 04:06:06,0
+197225,3,1,13,280,2017-11-08 16:00:51,0
+75683,3,1,19,280,2017-11-09 03:34:24,0
+275230,1,1,13,134,2017-11-08 06:15:49,0
+17149,12,2,9,259,2017-11-08 14:06:50,0
+53454,3,1,1,135,2017-11-07 16:31:59,0
+32560,9,1,22,107,2017-11-09 00:55:36,0
+189570,18,1,10,107,2017-11-07 06:31:13,0
+122747,6,1,19,459,2017-11-06 18:44:35,0
+168248,9,1,11,127,2017-11-09 12:09:42,0
+227490,3,1,12,488,2017-11-08 02:49:39,0
+315661,23,1,19,153,2017-11-09 05:16:16,0
+48212,18,1,13,121,2017-11-09 12:38:54,0
+6256,6,1,13,459,2017-11-07 08:41:59,0
+116827,14,1,13,463,2017-11-08 01:15:16,0
+81013,9,1,15,232,2017-11-09 13:06:57,0
+78966,1,1,15,134,2017-11-09 14:25:30,0
+51992,9,1,9,442,2017-11-09 04:05:43,0
+84876,13,1,13,477,2017-11-09 07:50:17,0
+944,12,1,19,481,2017-11-07 23:17:38,0
+111638,58,1,18,120,2017-11-09 11:49:41,0
+76822,1,1,6,153,2017-11-07 11:01:32,0
+5314,2,1,19,477,2017-11-07 17:00:13,0
+8968,15,1,19,130,2017-11-09 01:32:15,0
+36213,2,1,22,205,2017-11-07 04:44:00,0
+88202,3,1,13,379,2017-11-07 03:25:56,0
+25097,12,1,17,259,2017-11-07 14:33:24,0
+351137,3,1,1,417,2017-11-09 11:58:14,0
+163178,18,1,19,121,2017-11-07 01:10:36,0
+110124,15,1,13,430,2017-11-07 08:52:38,0
+16375,2,1,19,435,2017-11-07 13:44:47,0
+210644,3,1,13,442,2017-11-06 23:04:00,0
+125397,8,1,10,145,2017-11-07 00:43:54,0
+139206,15,1,19,245,2017-11-07 13:53:47,0
+54203,12,1,20,245,2017-11-07 10:29:05,0
+122744,7,1,25,101,2017-11-07 08:34:46,0
+14721,6,1,12,459,2017-11-08 06:59:57,0
+52401,46,0,38,347,2017-11-08 03:15:27,0
+39436,14,1,58,463,2017-11-09 11:36:59,0
+12711,3,1,19,442,2017-11-08 00:14:15,0
+12524,2,1,6,205,2017-11-08 07:28:21,0
+18363,3,2,11,137,2017-11-08 15:28:25,0
+64756,12,1,13,265,2017-11-08 04:54:37,0
+37513,64,1,23,459,2017-11-07 12:34:17,0
+123837,9,1,13,442,2017-11-09 12:28:24,0
+77107,3,1,19,280,2017-11-08 07:23:59,0
+180105,12,1,46,259,2017-11-08 23:44:28,0
+199542,15,1,4,153,2017-11-08 06:22:40,0
+89861,18,1,13,107,2017-11-08 23:30:12,0
+60752,12,1,19,265,2017-11-08 09:29:00,0
+211976,14,1,19,442,2017-11-09 02:15:13,0
+114083,12,1,19,265,2017-11-09 15:32:43,0
+91611,12,1,13,245,2017-11-06 23:49:16,0
+94020,3,1,17,137,2017-11-07 06:59:13,0
+300014,2,1,19,243,2017-11-09 08:43:55,0
+262689,3,1,19,280,2017-11-08 16:54:32,0
+161649,19,0,29,213,2017-11-06 22:00:10,1
+15899,3,1,12,280,2017-11-07 02:03:59,0
+66378,15,1,19,130,2017-11-07 16:35:49,0
+101268,3,1,19,489,2017-11-06 23:35:41,0
+162814,12,1,44,19,2017-11-09 05:45:58,0
+55410,2,1,17,258,2017-11-07 13:07:35,0
+118563,26,1,13,266,2017-11-06 16:07:20,0
+265879,15,1,13,265,2017-11-08 13:02:09,0
+112464,3,1,36,280,2017-11-09 01:12:03,0
+24905,15,1,53,245,2017-11-08 15:31:37,0
+13739,3,1,8,280,2017-11-07 05:51:08,0
+37883,2,1,19,237,2017-11-07 23:39:02,0
+17137,7,1,1,101,2017-11-09 06:42:17,0
+122949,2,1,13,236,2017-11-07 01:15:46,0
+83429,14,1,17,134,2017-11-08 05:37:50,0
+105971,18,1,25,376,2017-11-07 09:40:28,0
+48615,12,1,19,265,2017-11-08 14:52:10,0
+17946,12,1,13,245,2017-11-08 11:54:32,0
+111025,9,1,25,215,2017-11-07 05:19:15,0
+228111,14,1,17,439,2017-11-08 01:54:29,0
+189693,3,1,19,489,2017-11-08 01:00:33,0
+10410,18,2,49,107,2017-11-08 01:18:45,0
+58786,2,1,13,122,2017-11-09 11:04:48,0
+5348,19,0,29,347,2017-11-07 08:59:09,0
+109119,14,1,13,349,2017-11-08 02:17:08,0
+163204,2,1,23,236,2017-11-09 02:14:45,0
+53856,3,1,13,424,2017-11-09 15:07:24,0
+123994,26,1,22,477,2017-11-09 15:51:12,0
+153871,3,1,22,379,2017-11-07 10:31:56,0
+13483,9,1,8,244,2017-11-07 14:52:46,0
+15760,6,1,47,125,2017-11-07 04:52:08,0
+112608,9,1,15,127,2017-11-08 13:47:05,0
+92823,24,1,13,105,2017-11-07 01:47:33,0
+137052,23,1,18,153,2017-11-07 03:38:20,0
+7991,11,1,19,219,2017-11-07 11:16:37,0
+75574,9,1,35,490,2017-11-09 05:12:08,0
+247802,9,1,13,445,2017-11-08 03:23:46,0
+100311,2,1,12,401,2017-11-08 10:32:21,0
+96523,9,1,19,107,2017-11-09 11:48:12,0
+237391,3,1,22,280,2017-11-08 02:31:01,0
+80466,24,1,13,105,2017-11-08 02:03:25,0
+33843,26,1,15,121,2017-11-09 02:45:11,0
+9419,20,1,20,259,2017-11-07 14:49:41,0
+118123,12,1,17,265,2017-11-09 15:46:33,0
+54520,9,1,36,466,2017-11-07 09:12:57,0
+1204,3,1,19,280,2017-11-09 06:52:18,0
+114276,14,1,12,349,2017-11-07 13:03:23,0
+70656,1,2,9,125,2017-11-08 13:04:26,0
+24781,9,1,13,442,2017-11-07 01:38:52,0
+125679,12,1,19,140,2017-11-09 12:00:09,0
+116896,3,1,30,173,2017-11-06 16:50:40,0
+30865,12,1,3,265,2017-11-09 04:59:45,0
+48587,25,2,5,259,2017-11-09 10:25:24,0
+39428,3,1,19,409,2017-11-08 22:56:20,0
+147609,6,1,19,459,2017-11-08 07:22:41,0
+55685,13,1,13,477,2017-11-09 04:32:22,0
+45327,18,1,10,107,2017-11-08 10:11:46,0
+110795,9,1,18,334,2017-11-07 13:58:13,0
+80193,3,1,19,280,2017-11-08 13:36:34,0
+262704,7,2,35,101,2017-11-08 11:55:54,0
+4324,2,1,19,205,2017-11-08 09:16:22,0
+175643,8,1,18,145,2017-11-08 09:36:24,0
+66644,2,1,6,469,2017-11-09 15:52:03,0
+94496,15,1,31,111,2017-11-07 09:44:05,0
+9964,9,1,19,466,2017-11-08 13:07:21,0
+299504,14,1,18,349,2017-11-09 00:34:02,0
+27507,15,1,18,278,2017-11-09 15:39:56,0
+211537,8,1,13,145,2017-11-06 23:06:27,0
+73648,18,1,32,107,2017-11-08 13:28:23,0
+92735,1,1,15,349,2017-11-09 12:46:27,0
+59692,12,1,13,259,2017-11-09 08:52:57,0
+107091,2,1,13,477,2017-11-07 13:01:36,0
+16970,13,1,13,400,2017-11-08 03:01:05,0
+118400,25,1,97,259,2017-11-09 11:02:18,0
+2941,3,1,13,211,2017-11-08 16:10:51,0
+12058,12,1,13,328,2017-11-07 21:21:53,0
+36213,2,2,17,205,2017-11-07 05:48:21,0
+119369,18,1,19,121,2017-11-09 13:03:50,0
+130760,3,1,9,442,2017-11-07 07:45:41,0
+48615,14,1,20,379,2017-11-09 07:57:32,0
+18869,9,1,13,258,2017-11-08 14:39:42,0
+34387,47,1,6,484,2017-11-09 11:41:57,0
+104868,9,1,19,442,2017-11-09 07:47:30,0
+37836,3,1,19,115,2017-11-09 05:41:44,0
+221358,3,1,13,280,2017-11-08 01:01:49,0
+35038,9,1,3,134,2017-11-06 23:07:02,0
+38722,12,1,13,265,2017-11-08 15:14:26,0
+123885,3,1,13,452,2017-11-09 10:43:27,0
+14025,24,2,13,178,2017-11-09 08:57:36,0
+197144,2,1,19,435,2017-11-08 13:50:16,0
+43686,11,1,20,219,2017-11-09 13:47:46,0
+50197,18,1,19,121,2017-11-07 18:17:44,0
+40067,1,2,22,125,2017-11-09 01:09:55,0
+48142,12,1,16,178,2017-11-06 16:36:20,0
+89946,3,1,13,489,2017-11-08 04:35:52,0
+60884,28,1,15,135,2017-11-08 08:33:00,0
+82971,2,1,19,237,2017-11-09 01:25:33,0
+125307,21,1,16,128,2017-11-08 03:42:19,0
+90509,12,1,17,326,2017-11-07 16:28:55,0
+318934,13,1,19,469,2017-11-09 13:01:02,0
+81859,20,1,22,478,2017-11-07 07:13:03,0
+150129,2,1,17,452,2017-11-08 06:23:16,0
+237196,2,1,41,477,2017-11-09 07:23:28,0
+58280,15,1,1,430,2017-11-07 03:54:31,0
+116664,1,1,19,137,2017-11-07 23:52:11,0
+53836,2,1,11,219,2017-11-07 04:35:48,0
+22394,14,1,3,379,2017-11-08 10:23:39,0
+95329,2,1,41,452,2017-11-09 09:09:24,0
+137878,15,1,13,245,2017-11-09 04:47:48,0
+3071,1,1,35,178,2017-11-07 05:58:56,0
+26995,26,1,27,121,2017-11-08 11:53:14,0
+78950,15,1,18,140,2017-11-07 05:23:20,0
+149748,25,1,26,259,2017-11-08 10:32:18,0
+13669,2,1,13,212,2017-11-06 23:05:54,0
+119369,13,1,19,477,2017-11-09 05:03:44,0
+32262,9,1,40,234,2017-11-09 15:14:14,0
+162109,3,1,19,19,2017-11-07 21:51:25,0
+95329,3,1,19,280,2017-11-07 02:44:43,0
+166784,2,1,19,212,2017-11-07 04:01:33,0
+4405,15,1,13,245,2017-11-07 16:25:02,0
+158793,3,1,13,280,2017-11-07 02:37:39,0
+214686,15,1,15,245,2017-11-08 13:49:19,0
+118562,9,1,19,107,2017-11-09 02:00:46,0
+11090,2,1,41,122,2017-11-08 23:32:35,0
+38653,21,1,19,128,2017-11-07 02:28:26,0
+151717,18,1,3,134,2017-11-09 12:54:08,0
+166006,28,1,28,135,2017-11-06 20:18:28,0
+137052,12,1,15,178,2017-11-08 12:23:43,0
+4446,3,1,13,115,2017-11-09 11:55:03,0
+69509,12,1,13,145,2017-11-07 06:30:33,0
+204671,12,1,19,212,2017-11-09 06:18:24,0
+210449,9,1,19,134,2017-11-07 00:32:50,0
+13073,9,1,22,244,2017-11-07 11:15:51,0
+141623,3,1,19,280,2017-11-07 04:50:18,0
+280614,18,1,18,107,2017-11-09 10:09:14,0
+58680,12,1,8,328,2017-11-07 03:49:33,0
+44595,3,1,37,280,2017-11-09 13:51:45,0
+391,12,1,28,245,2017-11-08 14:32:09,0
+99075,13,1,13,469,2017-11-08 14:32:52,0
+39338,12,1,19,245,2017-11-07 15:26:05,0
+125671,18,1,19,439,2017-11-09 03:27:05,0
+210975,64,2,13,459,2017-11-07 04:22:23,0
+144604,3,1,13,409,2017-11-07 06:31:52,0
+58789,14,1,13,349,2017-11-08 01:31:06,0
+34472,12,1,20,259,2017-11-08 08:34:46,0
+79981,12,1,39,259,2017-11-09 05:09:22,0
+80147,12,1,19,178,2017-11-09 01:57:01,0
+359350,15,1,13,315,2017-11-09 09:30:46,0
+80368,26,1,13,266,2017-11-07 15:47:17,0
+115650,3,1,19,280,2017-11-08 08:55:38,0
+154076,8,1,22,145,2017-11-08 05:34:04,0
+63137,9,1,13,489,2017-11-07 14:12:51,0
+77037,21,1,13,128,2017-11-07 01:52:20,0
+124219,12,1,3,328,2017-11-08 15:09:57,0
+81922,26,1,19,121,2017-11-09 07:11:58,0
+119870,2,1,1,477,2017-11-08 09:54:02,0
+123974,13,1,13,469,2017-11-07 06:00:56,0
+65785,14,1,12,401,2017-11-08 04:52:44,0
+9972,26,1,13,121,2017-11-08 00:31:55,0
+116281,3,1,47,480,2017-11-07 06:05:40,0
+65785,37,1,3,21,2017-11-08 03:33:01,0
+99897,12,1,19,245,2017-11-08 16:12:07,0
+50702,14,1,11,480,2017-11-07 16:18:09,0
+120694,15,1,13,140,2017-11-08 02:11:16,0
+31724,3,1,13,173,2017-11-07 05:08:29,0
+109995,12,1,19,265,2017-11-07 06:11:48,0
+120729,3,1,13,205,2017-11-08 05:03:26,0
+60381,3,1,22,280,2017-11-09 03:56:19,0
+262492,1,1,19,137,2017-11-08 06:04:24,0
+181911,14,1,19,439,2017-11-07 05:04:14,0
+58570,21,1,22,128,2017-11-08 02:12:25,0
+95766,12,2,37,265,2017-11-08 11:25:10,0
+84728,15,1,13,245,2017-11-07 16:01:32,0
+105587,2,1,19,205,2017-11-07 17:39:14,0
+53929,2,1,19,205,2017-11-07 14:59:31,0
+53625,2,1,19,435,2017-11-09 11:40:33,0
+200755,12,1,19,178,2017-11-09 10:56:38,0
+72028,2,1,19,477,2017-11-08 11:46:48,0
+5178,12,1,31,259,2017-11-08 14:36:10,0
+107229,12,1,19,178,2017-11-09 08:26:55,0
+41313,12,1,18,265,2017-11-08 11:47:51,0
+59837,12,1,19,178,2017-11-07 09:07:46,0
+103737,3,1,19,452,2017-11-08 09:20:06,0
+41383,9,1,13,445,2017-11-08 07:57:46,0
+81463,12,1,14,205,2017-11-09 11:46:57,0
+319862,3,1,13,489,2017-11-09 13:52:27,0
+26208,2,1,19,477,2017-11-07 03:04:31,0
+84896,2,1,3,477,2017-11-07 10:12:24,0
+167031,14,1,19,463,2017-11-09 04:08:08,0
+57230,3,1,19,280,2017-11-08 10:30:13,0
+37801,11,1,6,481,2017-11-06 19:28:15,0
+20996,8,1,25,145,2017-11-09 11:57:52,0
+49383,2,1,20,477,2017-11-07 15:08:09,0
+183462,12,1,9,122,2017-11-08 03:29:00,0
+19069,3,1,19,19,2017-11-09 07:07:41,0
+86767,12,1,19,178,2017-11-07 08:39:58,0
+41437,3,1,6,173,2017-11-07 10:12:30,0
+89652,15,1,18,245,2017-11-06 17:58:25,0
+14764,12,1,17,178,2017-11-08 03:11:40,0
+115445,9,1,25,107,2017-11-09 14:45:54,0
+258379,3,1,13,379,2017-11-08 11:02:38,0
+34854,11,1,13,481,2017-11-07 23:19:44,0
+113721,23,1,19,153,2017-11-08 08:55:30,0
+61191,12,1,32,105,2017-11-09 13:02:00,0
+99226,18,1,13,107,2017-11-09 12:51:58,0
+48008,2,1,1,435,2017-11-08 04:24:11,0
+46566,3,1,19,280,2017-11-08 03:21:17,0
+90949,26,1,13,477,2017-11-08 08:38:04,0
+360318,12,1,18,265,2017-11-08 21:47:18,0
+42190,18,1,13,134,2017-11-08 12:01:27,0
+90408,9,1,56,489,2017-11-07 22:58:23,0
+45745,8,1,5,145,2017-11-08 05:08:36,0
+125736,1,1,3,153,2017-11-07 01:21:42,0
+85625,15,1,37,111,2017-11-08 09:35:44,0
+95111,17,1,6,134,2017-11-08 05:03:07,0
+100929,64,1,19,459,2017-11-08 02:29:22,0
+206780,2,1,18,452,2017-11-07 03:46:20,0
+51992,2,1,2,477,2017-11-07 10:44:38,0
+113236,2,1,25,401,2017-11-08 07:37:43,0
+125141,3,1,19,280,2017-11-07 06:23:06,0
+29502,15,1,9,278,2017-11-07 03:45:35,0
+90874,2,1,23,122,2017-11-07 16:32:20,0
+49553,18,1,19,134,2017-11-07 14:25:46,0
+140358,14,1,11,123,2017-11-07 02:18:36,0
+13403,12,1,19,105,2017-11-09 11:18:30,0
+125672,8,1,18,145,2017-11-07 05:30:43,0
+36150,2,1,15,205,2017-11-09 14:02:35,0
+153416,9,1,18,134,2017-11-06 16:06:14,0
+3178,18,1,18,121,2017-11-08 18:06:43,0
+125984,21,1,22,128,2017-11-08 15:52:46,0
+63986,9,1,20,442,2017-11-09 14:22:04,0
+118238,12,1,19,328,2017-11-07 01:20:54,0
+121087,2,1,19,469,2017-11-09 01:53:58,0
+25095,13,1,15,477,2017-11-08 03:57:50,0
+58203,3,1,19,379,2017-11-08 14:29:49,0
+101929,15,1,53,265,2017-11-08 01:50:40,0
+64620,25,1,27,259,2017-11-08 11:02:19,0
+20309,3,1,19,280,2017-11-09 14:57:02,0
+38211,13,1,13,400,2017-11-08 04:31:48,0
+70432,12,1,28,328,2017-11-07 00:02:12,0
+149608,24,1,17,178,2017-11-06 16:54:50,0
+26995,12,2,19,245,2017-11-08 20:16:15,0
+18667,8,1,8,145,2017-11-07 07:24:13,0
+166638,9,1,17,466,2017-11-09 05:45:46,0
+43180,3,1,19,280,2017-11-08 10:33:30,0
+86947,2,1,20,212,2017-11-07 06:07:01,0
+78950,1,1,13,349,2017-11-07 05:58:26,0
+96940,12,1,13,259,2017-11-07 01:12:12,0
+3268,1,1,19,153,2017-11-08 11:42:30,0
+3896,12,1,14,328,2017-11-09 12:28:02,0
+46366,12,1,13,265,2017-11-08 00:09:42,0
+57676,1,1,41,137,2017-11-08 14:30:12,0
+8645,13,1,6,477,2017-11-07 10:17:01,0
+53929,20,1,17,259,2017-11-08 22:54:08,0
+43793,15,1,13,245,2017-11-08 10:45:22,0
+95125,29,1,17,343,2017-11-08 01:30:01,0
+835,28,1,18,135,2017-11-09 14:12:18,0
+95766,18,1,6,439,2017-11-09 05:27:38,0
+114220,12,1,19,124,2017-11-09 07:15:14,0
+100180,12,1,1,178,2017-11-06 16:18:49,0
+523,2,1,19,122,2017-11-09 12:33:53,0
+55742,9,1,17,134,2017-11-06 17:57:43,0
+19248,9,2,9,215,2017-11-08 14:00:31,0
+34587,17,1,19,356,2017-11-07 23:37:02,0
+106045,12,1,19,265,2017-11-08 05:26:31,0
+73270,18,1,19,376,2017-11-07 10:29:05,0
+86474,12,1,8,178,2017-11-08 17:00:08,0
+55424,12,1,15,265,2017-11-09 12:14:09,0
+33503,3,1,19,280,2017-11-07 10:03:12,0
+262619,9,1,18,244,2017-11-08 02:03:23,0
+105603,26,1,22,266,2017-11-09 05:06:04,0
+201182,3,2,19,379,2017-11-08 13:07:42,0
+56719,15,1,19,140,2017-11-08 08:38:04,0
+40337,21,1,19,128,2017-11-09 03:28:56,0
+105649,9,1,31,442,2017-11-07 08:48:16,0
+306414,9,1,19,258,2017-11-09 09:27:39,0
+2189,3,1,13,442,2017-11-08 15:59:13,0
+112243,3,1,22,280,2017-11-08 08:45:50,0
+110529,3,1,13,280,2017-11-08 13:32:39,0
+106293,21,1,19,128,2017-11-07 23:13:43,0
+197976,12,1,19,245,2017-11-09 04:40:08,0
+132431,18,1,22,121,2017-11-09 10:17:07,0
+95967,12,1,13,265,2017-11-08 07:35:38,0
+204809,14,1,8,480,2017-11-09 08:43:54,0
+78124,2,1,19,219,2017-11-09 13:31:04,0
+50087,15,1,9,245,2017-11-09 05:55:49,0
+187273,23,1,13,153,2017-11-09 09:38:36,0
+47118,7,1,15,101,2017-11-07 10:42:52,0
+147144,12,1,19,178,2017-11-08 12:16:32,0
+16244,3,1,27,173,2017-11-08 05:53:01,0
+31358,3,1,19,280,2017-11-08 20:30:24,0
+18703,2,2,8,205,2017-11-07 07:39:23,0
+5325,12,1,13,178,2017-11-09 08:36:44,0
+3266,12,1,13,328,2017-11-09 05:39:54,0
+73487,9,2,11,134,2017-11-08 02:44:41,0
+117272,15,1,18,245,2017-11-06 16:28:23,0
+8827,15,1,19,245,2017-11-07 06:44:53,0
+47146,2,1,13,243,2017-11-09 04:21:05,0
+102543,3,1,13,466,2017-11-09 10:36:31,0
+152360,3,1,18,280,2017-11-08 11:14:07,0
+49600,45,2,10,411,2017-11-09 09:40:50,0
+100375,18,1,17,376,2017-11-07 03:39:02,0
+178866,15,1,8,245,2017-11-07 05:49:15,0
+98344,12,1,3,409,2017-11-09 01:46:22,0
+67322,2,1,18,219,2017-11-09 01:15:09,0
+40342,2,1,13,258,2017-11-07 00:28:28,0
+193700,1,1,13,118,2017-11-07 00:29:31,0
+148126,13,1,18,477,2017-11-07 07:32:30,0
+14094,64,1,70,459,2017-11-07 15:22:25,0
+9886,2,1,13,212,2017-11-08 15:12:58,0
+4653,12,1,19,245,2017-11-08 15:19:13,0
+6123,13,1,18,477,2017-11-07 16:30:20,0
+32457,21,1,35,232,2017-11-08 17:14:49,0
+4429,3,1,32,442,2017-11-08 03:23:40,0
+319009,13,1,19,477,2017-11-09 03:40:35,0
+83449,3,1,19,480,2017-11-08 13:51:04,0
+58529,3,1,8,115,2017-11-08 15:28:34,0
+137780,21,1,17,128,2017-11-08 21:24:58,0
+113958,12,1,13,178,2017-11-08 01:15:09,0
+100182,3,1,13,452,2017-11-09 07:50:21,0
+48646,29,1,15,343,2017-11-07 02:37:41,0
+180506,15,1,19,130,2017-11-08 04:52:44,0
+101074,3,1,37,173,2017-11-08 04:07:03,0
+4486,15,1,31,245,2017-11-08 12:56:13,0
+103019,20,1,22,259,2017-11-08 16:10:47,0
+251823,3,1,19,402,2017-11-08 08:40:25,0
+123759,18,1,27,121,2017-11-06 17:46:33,0
+42041,18,1,13,121,2017-11-07 15:01:35,0
+51544,3,1,19,280,2017-11-08 07:27:51,0
+106535,3,1,18,280,2017-11-08 03:09:34,0
+42240,9,1,19,489,2017-11-08 11:56:58,0
+242106,14,1,13,463,2017-11-08 07:15:20,0
+26703,2,1,13,469,2017-11-09 04:15:05,0
+55213,2,1,17,452,2017-11-09 00:01:54,0
+165072,3,1,6,424,2017-11-08 03:42:07,0
+189412,12,1,11,140,2017-11-08 05:24:15,0
+43057,3,1,19,280,2017-11-08 13:41:50,0
+18703,21,1,19,128,2017-11-09 08:52:34,0
+30565,18,1,25,121,2017-11-09 07:23:04,0
+80560,9,2,9,442,2017-11-09 10:11:29,0
+73516,15,1,13,480,2017-11-09 14:21:26,0
+60945,26,1,26,266,2017-11-09 07:31:21,0
+16453,6,1,23,459,2017-11-08 04:15:56,0
+39209,15,1,19,245,2017-11-07 23:55:31,0
+207893,15,1,17,386,2017-11-08 18:05:49,0
+100629,3,1,19,280,2017-11-09 15:24:55,0
+50737,3,1,19,379,2017-11-07 03:55:00,0
+93027,2,1,22,212,2017-11-07 07:56:54,0
+17836,3,1,13,19,2017-11-07 01:32:01,0
+48282,23,1,13,30,2017-11-09 11:22:51,0
+119369,11,1,12,219,2017-11-08 12:07:26,0
+119531,9,2,17,145,2017-11-09 08:14:56,0
+12184,3,1,18,489,2017-11-09 11:58:19,0
+198822,18,1,14,134,2017-11-07 08:35:30,0
+33607,2,1,19,237,2017-11-09 04:53:47,0
+69332,3,1,1,280,2017-11-07 08:47:25,0
+67037,2,1,17,237,2017-11-07 08:29:11,0
+58535,1,1,19,150,2017-11-07 06:25:31,0
+35616,64,1,10,459,2017-11-07 06:50:22,0
+100324,2,1,13,219,2017-11-07 11:43:43,0
+42384,25,1,42,259,2017-11-08 11:32:03,0
+85107,3,1,11,489,2017-11-07 10:20:39,0
+69070,3,1,19,442,2017-11-07 04:45:34,0
+198958,2,1,3,477,2017-11-07 09:13:31,0
+36383,3,1,22,280,2017-11-08 04:10:33,0
+103199,13,1,22,477,2017-11-08 08:51:11,0
+110112,3,1,8,480,2017-11-09 11:39:09,0
+17149,3,1,8,417,2017-11-09 07:01:18,0
+89458,15,1,19,386,2017-11-09 12:18:41,0
+84972,14,1,12,134,2017-11-08 02:04:09,0
+55024,21,1,20,128,2017-11-07 06:53:06,0
+111145,12,1,18,259,2017-11-07 05:15:15,0
+35221,8,1,10,145,2017-11-07 04:07:22,0
+53770,3,1,32,280,2017-11-08 12:39:42,0
+75539,6,1,13,459,2017-11-09 12:38:12,0
+66258,3,1,19,280,2017-11-07 13:12:02,0
+1881,8,1,19,259,2017-11-07 11:26:01,0
+72936,15,1,13,245,2017-11-07 01:38:45,0
+188739,2,1,13,377,2017-11-07 03:41:33,0
+172469,15,1,16,245,2017-11-08 15:19:58,0
+119317,3,1,17,280,2017-11-08 14:54:49,0
+92645,21,1,41,128,2017-11-06 23:31:30,0
+120163,8,1,19,145,2017-11-07 14:26:19,0
+106680,3,1,15,280,2017-11-08 01:51:20,0
+115119,47,1,14,484,2017-11-09 11:03:45,0
+39517,12,1,13,245,2017-11-08 12:50:31,0
+207511,1,1,10,134,2017-11-09 12:43:55,0
+116831,3,1,15,30,2017-11-09 12:43:19,0
+26814,15,1,11,245,2017-11-08 13:37:19,0
+92618,3,1,37,280,2017-11-09 05:30:33,0
+28780,9,1,28,127,2017-11-09 12:22:30,0
+268241,35,1,19,274,2017-11-08 04:57:33,0
+87696,9,1,19,334,2017-11-09 00:59:55,0
+49602,18,1,17,107,2017-11-07 12:50:15,0
+39020,12,1,10,245,2017-11-09 05:10:18,0
+108170,8,1,17,145,2017-11-08 18:42:15,0
+117018,9,1,17,134,2017-11-09 04:40:49,0
+86452,13,1,19,477,2017-11-08 23:29:35,0
+69411,3,1,18,442,2017-11-07 12:03:21,0
+69577,15,1,13,278,2017-11-09 11:48:19,0
+42404,12,1,26,409,2017-11-09 04:02:10,0
+79857,15,1,19,118,2017-11-09 09:43:50,0
+53960,2,1,19,205,2017-11-07 07:53:04,0
+42245,12,1,15,265,2017-11-08 03:24:20,0
+199172,12,1,19,245,2017-11-07 12:12:25,0
+55763,14,1,27,134,2017-11-07 04:59:11,0
+69136,3,1,14,280,2017-11-09 05:05:26,0
+18772,3,1,37,173,2017-11-08 04:54:23,0
+94174,1,1,19,134,2017-11-07 01:25:23,0
+178416,9,1,23,215,2017-11-06 16:03:12,0
+102141,9,1,22,466,2017-11-07 04:05:32,0
+39493,3,1,10,280,2017-11-07 06:31:49,0
+75595,12,1,19,145,2017-11-07 02:59:36,0
+80995,21,1,19,232,2017-11-07 11:45:22,0
+56731,15,1,27,386,2017-11-08 00:03:46,0
+4784,9,1,6,232,2017-11-08 14:06:10,0
+110309,29,1,15,343,2017-11-08 01:37:19,0
+70280,15,1,13,3,2017-11-07 12:59:51,0
+204304,9,1,25,466,2017-11-09 15:00:49,0
+102990,25,1,17,259,2017-11-07 05:54:35,0
+74447,12,1,13,178,2017-11-08 10:03:30,0
+40929,12,1,13,265,2017-11-08 05:53:09,0
+26726,12,1,13,259,2017-11-09 12:35:26,0
+32305,15,1,15,153,2017-11-08 10:36:22,0
+68303,12,1,19,245,2017-11-08 23:45:44,0
+41818,3,1,19,137,2017-11-07 06:45:41,0
+34840,12,1,19,105,2017-11-09 10:13:28,0
+48212,12,1,20,140,2017-11-09 14:15:41,0
+49138,32,1,19,376,2017-11-08 11:29:40,0
+174062,3,1,10,130,2017-11-07 08:16:56,0
+123947,21,1,34,232,2017-11-08 01:47:54,0
+105654,18,1,26,107,2017-11-09 00:27:15,0
+68382,12,1,13,242,2017-11-07 01:32:10,0
+12129,12,1,19,178,2017-11-07 15:22:40,0
+76989,12,1,32,140,2017-11-09 14:03:31,0
+195844,18,1,19,439,2017-11-07 06:31:19,0
+105587,18,1,14,107,2017-11-09 15:20:48,0
+105475,3,1,17,115,2017-11-06 16:19:58,0
+232636,29,1,13,213,2017-11-08 08:48:20,0
+57757,15,1,10,245,2017-11-07 04:22:55,0
+239741,9,1,6,134,2017-11-07 22:29:28,0
+53660,18,1,13,121,2017-11-09 00:18:09,0
+75808,27,1,19,122,2017-11-08 14:04:41,0
+91574,2,1,18,205,2017-11-07 02:43:37,0
+75813,18,1,13,134,2017-11-09 12:58:59,0
+69173,12,1,19,178,2017-11-07 04:38:05,0
+145205,2,1,3,122,2017-11-07 04:38:22,0
+150786,9,1,17,215,2017-11-06 23:42:19,0
+3835,12,1,15,178,2017-11-07 02:50:19,0
+44725,3,1,25,280,2017-11-09 03:04:38,0
+45416,2,2,13,205,2017-11-08 15:47:08,0
+142083,14,1,13,134,2017-11-07 08:11:41,0
+5348,15,2,16,140,2017-11-08 09:20:16,0
+75431,13,1,18,477,2017-11-08 06:13:28,0
+148508,7,1,13,101,2017-11-07 10:11:11,0
+96547,15,1,19,480,2017-11-08 05:37:42,0
+114314,2,1,48,435,2017-11-07 20:09:26,0
+11051,12,1,19,135,2017-11-09 15:15:36,0
+120757,2,1,3,237,2017-11-07 03:22:33,0
+142062,14,1,6,379,2017-11-07 10:10:19,0
+33908,12,1,14,328,2017-11-07 03:23:18,0
+265588,14,1,13,379,2017-11-09 07:16:35,0
+45745,3,1,13,480,2017-11-08 02:43:46,0
+36934,9,1,6,334,2017-11-08 09:53:12,0
+303828,94,1,35,361,2017-11-09 15:29:19,0
+266572,1,1,19,134,2017-11-08 06:56:39,0
+165919,14,1,13,401,2017-11-07 03:30:23,0
+28597,3,1,19,280,2017-11-09 01:39:57,0
+129467,15,1,10,140,2017-11-08 03:57:13,0
+111639,2,1,3,205,2017-11-07 14:06:15,0
+90991,9,1,10,244,2017-11-09 14:46:44,0
+38326,3,1,13,280,2017-11-08 11:06:58,0
+127559,12,1,11,140,2017-11-06 23:14:12,0
+18446,2,1,10,205,2017-11-08 09:06:13,0
+214149,15,1,15,391,2017-11-08 10:07:43,0
+126222,2,1,19,205,2017-11-07 06:09:34,0
+156284,9,1,20,234,2017-11-09 15:03:50,0
+345635,2,1,36,435,2017-11-09 02:03:45,0
+184315,3,1,19,317,2017-11-08 08:12:39,0
+60650,3,1,18,480,2017-11-07 09:02:47,0
+27879,1,1,25,153,2017-11-07 04:40:48,0
+103555,3,1,18,409,2017-11-09 02:24:37,0
+53479,18,1,27,107,2017-11-07 13:34:39,0
+53960,2,1,13,205,2017-11-07 04:46:35,0
+15225,15,1,11,245,2017-11-07 10:35:36,0
+23450,1,2,30,134,2017-11-06 23:19:49,0
+266592,10,1,47,113,2017-11-08 10:39:17,1
+2076,21,1,20,232,2017-11-09 11:52:15,0
+167563,9,1,41,466,2017-11-09 10:09:01,0
+203604,18,1,19,107,2017-11-08 05:55:08,0
+121216,3,1,13,452,2017-11-07 11:43:19,0
+37234,1,1,13,134,2017-11-08 08:03:14,0
+115804,17,1,9,280,2017-11-09 03:18:07,0
+85894,21,1,19,232,2017-11-09 05:08:59,0
+105039,27,1,19,153,2017-11-09 03:17:19,0
+10195,13,1,8,477,2017-11-08 07:54:36,0
+85188,5,1,14,377,2017-11-07 07:20:39,0
+109800,11,1,13,173,2017-11-07 07:12:36,0
+41030,3,1,17,115,2017-11-07 10:52:25,0
+269985,19,40,24,213,2017-11-08 15:17:04,0
+47251,3,1,13,280,2017-11-07 08:41:56,0
+75858,8,2,9,145,2017-11-08 10:38:46,0
+85006,23,1,13,153,2017-11-08 19:57:33,0
+96971,28,1,19,135,2017-11-07 07:37:17,0
+115130,2,1,7,435,2017-11-07 13:26:36,0
+264729,14,1,37,379,2017-11-08 10:51:51,0
+3241,9,1,25,466,2017-11-09 08:42:39,0
+94758,12,1,1,265,2017-11-08 10:39:47,0
+52424,9,1,22,215,2017-11-07 16:39:47,0
+200107,15,1,8,430,2017-11-07 14:36:54,0
+75634,4,1,47,101,2017-11-08 02:18:48,0
+48240,3,2,9,480,2017-11-09 13:34:45,0
+34520,19,0,76,213,2017-11-08 02:59:36,0
+167025,27,1,37,122,2017-11-07 05:47:37,0
+114891,3,1,18,379,2017-11-09 06:20:14,0
+78526,9,1,28,215,2017-11-07 00:12:56,0
+125050,3,1,46,280,2017-11-07 03:58:34,0
+91031,14,1,8,480,2017-11-09 15:50:25,0
+75825,29,1,53,343,2017-11-09 06:23:05,0
+83453,14,1,8,463,2017-11-08 10:51:05,0
+85512,12,1,19,497,2017-11-07 12:21:11,0
+118475,3,1,19,211,2017-11-09 00:16:27,0
+7862,14,1,13,463,2017-11-07 16:06:53,0
+5348,12,1,13,265,2017-11-09 12:34:02,0
+108560,12,1,13,328,2017-11-09 07:32:42,0
+109674,15,1,37,245,2017-11-08 15:16:23,0
+51808,64,1,13,459,2017-11-07 13:15:40,0
+73487,14,1,47,379,2017-11-08 23:11:25,0
+97716,14,1,17,401,2017-11-08 12:18:31,0
+62916,6,1,37,459,2017-11-08 02:38:38,0
+253401,9,1,41,466,2017-11-08 15:10:49,0
+99927,12,1,19,178,2017-11-07 01:13:34,0
+13886,9,1,3,215,2017-11-07 01:51:11,0
+32788,3,1,32,424,2017-11-09 03:18:55,0
+114276,9,2,9,334,2017-11-08 03:02:33,0
+93780,23,1,22,153,2017-11-08 17:03:39,0
+105339,1,1,20,135,2017-11-07 11:39:32,0
+80163,1,1,19,452,2017-11-06 18:52:15,0
+8408,12,1,13,259,2017-11-07 00:04:00,0
+275683,9,1,20,489,2017-11-08 15:36:21,0
+280918,2,2,49,205,2017-11-09 11:53:31,0
+105069,11,1,13,219,2017-11-09 07:15:37,0
+107212,9,1,13,442,2017-11-09 03:12:55,0
+42139,18,1,13,107,2017-11-09 08:34:01,0
+100543,14,1,18,463,2017-11-08 04:42:31,0
+60271,3,1,19,173,2017-11-08 14:07:52,0
+3189,18,1,13,134,2017-11-08 07:57:13,0
+107802,3,1,6,442,2017-11-08 06:15:45,0
+73487,3,1,70,130,2017-11-09 13:52:12,0
+21894,12,1,19,178,2017-11-07 16:38:34,0
+8681,36,1,14,110,2017-11-08 08:25:42,0
+112302,2,1,41,469,2017-11-09 07:33:21,0
+158559,1,1,7,134,2017-11-07 05:40:59,0
+4393,8,1,22,259,2017-11-07 08:32:13,0
+121339,3,1,22,480,2017-11-09 01:25:05,0
+100393,2,1,25,237,2017-11-08 03:09:30,0
+5147,2,1,6,477,2017-11-08 15:53:56,0
+88281,18,1,3,121,2017-11-08 07:44:34,0
+58962,2,1,37,219,2017-11-07 05:36:17,0
+73011,8,2,13,145,2017-11-09 10:13:40,0
+2805,12,1,19,135,2017-11-08 15:05:34,0
+59391,15,1,13,412,2017-11-07 07:58:25,0
+65177,2,1,14,435,2017-11-07 02:54:47,0
+56411,12,1,13,105,2017-11-09 02:03:21,0
+155357,21,1,19,128,2017-11-07 01:17:15,0
+96983,15,1,14,386,2017-11-07 07:56:35,0
+37565,12,1,22,259,2017-11-08 15:35:28,0
+108942,3,1,37,211,2017-11-07 02:49:03,0
+42164,12,1,19,178,2017-11-09 11:03:51,0
+89232,12,1,13,259,2017-11-09 02:58:41,0
+46797,3,1,19,205,2017-11-08 04:51:28,0
+329443,3,1,9,280,2017-11-09 02:22:06,0
+83795,17,1,13,280,2017-11-08 07:35:29,0
+19836,9,1,20,232,2017-11-09 04:18:23,0
+88304,14,1,13,442,2017-11-09 15:37:42,0
+106078,9,1,22,489,2017-11-07 12:11:49,0
+95718,3,1,13,280,2017-11-08 00:27:41,0
+98178,14,1,19,439,2017-11-09 04:02:35,0
+105363,3,1,14,417,2017-11-09 10:37:27,0
+67192,18,1,15,134,2017-11-09 03:49:45,0
+100519,8,1,3,145,2017-11-07 20:53:52,0
+96057,26,1,17,477,2017-11-08 10:46:49,0
+115615,3,1,41,280,2017-11-09 00:06:56,0
+2027,2,1,14,469,2017-11-08 11:49:03,0
+2253,9,1,16,466,2017-11-09 15:10:01,0
+38407,3,1,53,424,2017-11-08 05:47:28,0
+4295,3,1,26,280,2017-11-07 04:08:20,0
+84896,3,1,19,280,2017-11-07 08:24:07,0
+38219,15,1,17,278,2017-11-07 07:58:44,0
+171738,15,1,13,265,2017-11-07 03:19:45,0
+62963,23,1,13,153,2017-11-09 15:31:52,0
+34284,12,1,41,340,2017-11-08 15:46:11,0
+37717,17,1,32,280,2017-11-08 09:04:57,0
+259962,3,1,3,280,2017-11-08 01:58:10,0
+20861,18,1,20,121,2017-11-08 10:24:19,0
+17204,15,1,13,430,2017-11-08 19:16:37,0
+5178,18,1,13,107,2017-11-08 14:30:05,0
+110727,15,1,18,245,2017-11-07 06:57:44,0
+154943,2,1,17,122,2017-11-07 02:56:39,0
+75393,13,1,11,477,2017-11-08 04:37:51,0
+45287,18,1,41,107,2017-11-09 04:43:40,0
+41232,6,1,22,459,2017-11-09 15:21:27,0
+100275,12,1,13,328,2017-11-07 17:24:19,0
+17426,32,1,31,376,2017-11-08 15:28:43,0
+42424,22,1,41,116,2017-11-08 23:26:53,0
+73313,23,1,13,153,2017-11-07 23:42:32,0
+128829,11,1,37,481,2017-11-07 23:09:04,0
+38876,12,1,9,178,2017-11-07 10:04:31,0
+106862,8,1,25,145,2017-11-07 07:58:34,0
+25058,3,1,10,280,2017-11-09 04:24:32,0
+80703,18,1,3,439,2017-11-07 11:57:31,0
+53964,3,1,25,153,2017-11-09 07:43:11,0
+119901,9,1,17,466,2017-11-08 10:36:41,0
+20309,3,1,40,280,2017-11-09 15:00:59,0
+18108,15,1,20,3,2017-11-08 03:51:28,0
+32623,18,1,17,121,2017-11-07 11:02:47,0
+57519,12,1,10,245,2017-11-08 15:23:55,0
+91047,3,1,19,280,2017-11-07 02:59:44,0
+42139,3,1,36,173,2017-11-09 13:08:33,0
+48170,6,1,27,459,2017-11-09 11:20:44,0
+50657,26,1,53,477,2017-11-09 04:49:38,0
+68550,9,2,36,442,2017-11-08 12:31:17,0
+20411,12,2,13,178,2017-11-09 11:04:18,0
+117094,18,1,32,107,2017-11-07 11:15:49,0
+47313,25,2,9,259,2017-11-09 13:03:19,0
+14903,1,1,19,135,2017-11-08 22:45:06,0
+167134,2,1,11,258,2017-11-08 02:11:21,0
+93642,26,1,18,477,2017-11-09 10:07:02,0
+151603,14,1,19,416,2017-11-09 06:40:53,0
+31428,9,1,22,489,2017-11-08 08:57:47,0
+190273,10,1,12,317,2017-11-07 07:14:58,0
+99938,13,1,25,477,2017-11-08 09:18:19,0
+32392,3,1,6,280,2017-11-09 03:17:01,0
+11042,18,1,19,121,2017-11-07 09:52:14,0
+11073,3,1,6,452,2017-11-09 15:33:10,0
+133349,3,1,17,424,2017-11-08 23:46:51,0
+105475,2,1,32,469,2017-11-09 00:30:57,0
+137397,2,2,37,364,2017-11-09 12:13:17,0
+208842,18,1,13,107,2017-11-09 10:11:25,0
+74068,13,1,18,400,2017-11-08 15:30:24,0
+45299,2,2,37,205,2017-11-09 02:16:56,0
+72921,6,1,17,459,2017-11-07 23:06:43,0
+170404,14,1,37,489,2017-11-06 23:54:27,0
+113862,6,1,19,459,2017-11-09 06:32:46,0
+114314,9,2,19,466,2017-11-08 05:01:13,0
+114795,15,1,8,245,2017-11-09 04:24:25,0
+41691,2,1,6,477,2017-11-07 05:18:31,0
+14661,1,1,35,135,2017-11-07 08:51:52,0
+32471,3,1,10,280,2017-11-07 04:40:25,0
+73367,21,1,13,128,2017-11-09 11:41:22,0
+59925,18,1,18,107,2017-11-07 00:11:42,0
+47313,9,2,37,466,2017-11-08 15:14:10,0
+66015,9,1,13,215,2017-11-07 01:04:58,0
+47999,12,1,20,178,2017-11-09 05:42:25,0
+145896,18,1,28,121,2017-11-07 17:15:18,0
+140594,8,1,13,145,2017-11-07 00:00:10,0
+202993,25,1,22,259,2017-11-07 23:14:18,0
+658,5,1,20,377,2017-11-09 14:44:28,0
+47273,3,1,8,280,2017-11-09 04:22:34,0
+76945,12,1,13,328,2017-11-08 12:16:54,0
+97773,18,1,10,107,2017-11-08 14:32:24,0
+102919,9,1,13,232,2017-11-09 11:36:37,0
+193419,14,1,10,489,2017-11-08 12:26:40,0
+11797,18,1,12,107,2017-11-08 08:50:26,0
+93739,26,1,49,121,2017-11-09 14:45:14,0
+81363,12,1,19,265,2017-11-09 14:08:12,0
+127748,12,1,19,245,2017-11-07 06:41:23,0
+81606,3,1,19,115,2017-11-08 14:28:05,0
+20805,58,3866,866,347,2017-11-09 11:12:31,0
+74422,64,1,10,459,2017-11-06 16:14:18,0
+226336,3,1,53,137,2017-11-08 09:41:10,0
+60381,1,1,13,134,2017-11-07 23:31:34,0
+7304,14,1,3,134,2017-11-09 09:10:20,0
+37515,19,0,24,347,2017-11-09 09:36:01,0
+21042,9,1,19,334,2017-11-07 10:25:17,0
+39782,13,1,8,477,2017-11-08 01:55:36,0
+22978,18,3032,607,107,2017-11-07 06:27:20,0
+114276,13,1,35,469,2017-11-07 11:38:29,0
+8580,27,1,19,153,2017-11-08 04:05:54,0
+1462,9,1,19,107,2017-11-09 00:40:40,0
+70233,6,1,37,459,2017-11-08 08:37:32,0
+92852,12,1,19,19,2017-11-08 20:24:58,0
+65362,21,1,17,128,2017-11-08 23:38:14,0
+90691,12,1,49,340,2017-11-08 11:13:17,0
+44744,3,1,19,115,2017-11-06 17:18:47,0
+100475,3,1,13,452,2017-11-08 23:56:51,0
+112806,20,2,19,259,2017-11-08 17:07:13,0
+39026,5,1,19,377,2017-11-07 02:54:02,0
+265108,3,1,19,280,2017-11-09 00:30:53,0
+80908,17,1,17,280,2017-11-07 14:39:16,0
+91061,14,1,47,489,2017-11-09 03:47:15,0
+73329,14,1,16,480,2017-11-08 09:28:06,0
+10572,12,1,19,265,2017-11-08 12:40:17,0
+55364,3,1,19,211,2017-11-08 08:48:04,0
+36213,2,2,9,205,2017-11-09 15:58:07,0
+200609,13,1,19,477,2017-11-07 02:53:51,0
+163353,12,1,18,245,2017-11-07 07:01:53,0
+125260,15,1,22,265,2017-11-09 04:24:08,0
+161985,1,1,41,134,2017-11-06 23:59:54,0
+34784,18,1,11,107,2017-11-09 05:56:49,0
+73487,3,1,22,280,2017-11-08 13:38:14,0
+161986,23,1,19,153,2017-11-07 02:37:37,0
+208568,2,1,20,477,2017-11-08 15:16:42,0
+152545,12,1,37,122,2017-11-07 23:44:21,0
+57493,15,1,19,153,2017-11-08 03:12:32,0
+189040,6,1,19,125,2017-11-08 07:58:03,0
+78526,22,1,25,496,2017-11-07 01:37:27,0
+14116,8,1,22,145,2017-11-09 02:28:19,0
+81571,6,1,6,459,2017-11-08 00:03:35,0
+1815,1,1,13,134,2017-11-06 23:28:15,0
+58237,12,1,17,178,2017-11-08 01:08:42,0
+29915,2,1,19,477,2017-11-07 02:49:12,0
+15148,3,1,13,280,2017-11-08 03:31:44,0
+125730,12,1,19,497,2017-11-09 13:26:53,0
+117898,12,1,13,277,2017-11-08 00:35:21,0
+68079,2,1,13,477,2017-11-08 16:24:49,0
+14516,2,1,14,205,2017-11-09 15:41:09,0
+137007,15,1,48,245,2017-11-07 03:28:07,0
+4825,12,1,10,328,2017-11-07 01:53:38,0
+86383,1,1,17,134,2017-11-07 08:42:39,0
+105720,9,1,19,107,2017-11-09 00:43:06,0
+13104,64,2,37,459,2017-11-06 23:32:12,0
+27056,12,1,13,178,2017-11-08 03:43:44,0
+31675,3,1,19,489,2017-11-08 11:13:24,0
+105475,9,2,26,442,2017-11-07 18:57:21,0
+249725,9,1,23,215,2017-11-08 07:26:28,0
+50330,18,1,13,107,2017-11-08 09:19:15,0
+5348,18,1,32,107,2017-11-09 13:56:41,0
+129385,12,1,19,265,2017-11-07 06:22:10,0
+72000,12,1,19,140,2017-11-07 00:24:20,0
+212579,7,1,32,101,2017-11-07 14:17:20,0
+73954,18,1,19,107,2017-11-07 00:51:46,0
+38095,2,1,17,477,2017-11-07 05:35:53,0
+108535,13,1,8,477,2017-11-09 13:55:16,0
+71805,3,1,13,280,2017-11-08 13:32:06,0
+195916,15,1,22,153,2017-11-09 10:22:34,0
+106898,20,1,11,478,2017-11-07 11:47:08,0
+80369,3,1,19,135,2017-11-08 09:28:29,0
+167667,3,1,19,205,2017-11-08 13:36:49,0
+99897,9,1,13,107,2017-11-09 10:45:29,0
+169297,18,1,15,439,2017-11-07 04:52:02,0
+138309,12,1,13,178,2017-11-07 05:21:06,0
+12479,2,1,19,205,2017-11-09 04:06:50,0
+19161,3,1,3,409,2017-11-08 11:58:04,0
+85154,12,1,17,259,2017-11-07 05:37:03,0
+19868,9,1,19,466,2017-11-09 00:52:00,0
+175442,14,1,19,208,2017-11-07 00:51:31,0
+73487,24,2,20,105,2017-11-08 15:32:43,0
+31184,3,1,13,424,2017-11-09 01:58:50,0
+105323,3,1,13,280,2017-11-08 10:57:53,0
+71449,14,1,13,379,2017-11-09 10:58:23,0
+46637,1,1,19,134,2017-11-06 23:00:27,0
+14868,1,1,25,135,2017-11-08 13:47:08,0
+203706,2,1,13,452,2017-11-07 03:20:07,0
+20878,18,1,12,107,2017-11-09 00:32:01,0
+311671,15,1,20,265,2017-11-09 06:30:51,0
+193346,3,1,4,205,2017-11-08 01:35:12,0
+83090,3,1,13,130,2017-11-07 07:04:37,0
+163662,58,1,19,120,2017-11-08 02:32:12,0
+73516,12,1,16,326,2017-11-09 12:39:03,0
+20134,9,1,22,489,2017-11-08 13:24:38,0
+5313,12,1,9,265,2017-11-07 00:10:11,0
+146698,8,1,13,145,2017-11-06 23:49:59,0
+50482,64,1,19,459,2017-11-08 12:18:52,0
+4503,2,1,19,237,2017-11-07 00:55:46,0
+34768,12,1,19,245,2017-11-08 00:14:52,0
+68247,14,1,37,371,2017-11-07 00:46:45,0
+201801,12,1,17,497,2017-11-08 04:24:34,0
+10392,20,1,22,478,2017-11-09 08:41:23,0
+124446,18,2,97,121,2017-11-08 13:24:59,0
+280287,2,1,19,469,2017-11-09 08:10:02,0
+176732,12,1,19,265,2017-11-07 04:43:26,0
+48072,9,1,25,442,2017-11-07 00:27:42,0
+8356,12,1,19,245,2017-11-07 23:39:39,0
+118648,2,1,19,236,2017-11-09 12:31:39,0
+37972,18,1,13,134,2017-11-07 15:36:52,0
+31590,11,1,18,122,2017-11-08 15:53:12,0
+26995,12,1,17,340,2017-11-08 07:04:58,0
+76921,11,1,19,173,2017-11-07 09:14:08,0
+106824,18,1,18,439,2017-11-09 15:51:55,0
+109723,1,1,73,134,2017-11-06 22:01:52,0
+123994,20,1,20,478,2017-11-07 01:45:24,0
+118930,12,1,18,178,2017-11-08 13:37:11,0
+178404,2,1,8,237,2017-11-07 03:34:02,0
+59868,18,1,28,107,2017-11-08 10:28:23,0
+177466,3,1,23,424,2017-11-08 08:47:25,0
+43668,11,1,19,319,2017-11-09 08:37:14,0
+12062,2,1,19,219,2017-11-07 06:49:27,0
+80432,12,1,13,205,2017-11-06 23:20:29,0
+75844,2,1,17,435,2017-11-07 06:29:44,0
+122880,3,1,13,205,2017-11-09 01:08:35,0
+81138,18,3032,607,107,2017-11-07 02:37:43,0
+76919,14,1,32,379,2017-11-08 08:36:04,0
+30795,18,1,13,107,2017-11-09 11:16:57,0
+16499,15,1,22,386,2017-11-09 11:08:19,0
+24266,12,1,18,178,2017-11-08 02:38:38,0
+178873,15,1,19,265,2017-11-08 08:38:17,0
+190177,20,1,13,259,2017-11-07 16:44:21,0
+149726,2,1,25,212,2017-11-08 15:15:09,0
+59214,14,1,19,442,2017-11-09 08:53:39,0
+67772,3,1,35,153,2017-11-08 14:58:55,0
+109156,17,1,22,280,2017-11-08 08:55:55,0
diff --git a/extensions/airflow-provider-openmldb/openmldb_provider/example_dags/xgboost_train_sample.py b/extensions/airflow-provider-openmldb/openmldb_provider/example_dags/xgboost_train_sample.py
new file mode 100644
index 00000000000..2bf61e98900
--- /dev/null
+++ b/extensions/airflow-provider-openmldb/openmldb_provider/example_dags/xgboost_train_sample.py
@@ -0,0 +1,75 @@
+# Copyright 2021 4Paradigm
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import glob
+import os
+
+import pandas as pd
+from sklearn.metrics import accuracy_score
+from sklearn.metrics import classification_report
+from sklearn.model_selection import train_test_split
+from xgboost.sklearn import XGBClassifier
+
+
+def read_dataset(train_feature_path):
+ if train_feature_path.startswith("/"):
+ # local file
+ if '*' in train_feature_path:
+ return pd.concat(map(pd.read_csv, glob.glob(os.path.join('', train_feature_path))))
+ else:
+ return pd.read_csv(train_feature_path)
+ else:
+ raise Exception("remote files are unsupported")
+
+
+# assume the label column is named 'is_attributed'
+def prepare_dataset(train_df, seed, test_size):
+ # drop the label column
+ X_data = train_df.drop('is_attributed', axis=1)
+ y = train_df.is_attributed
+
+ # Split the dataset into train and Test
+ return train_test_split(
+ X_data, y, test_size=test_size, random_state=seed
+ )
+
+
+def xgboost_train(X_train, X_test, y_train, y_test, model_path):
+ print('Training by xgb')
+ # default is binary:logistic
+ train_model = XGBClassifier(use_label_encoder=False).fit(X_train, y_train)
+ pred = train_model.predict(X_test)
+ print('Classification report:\n', classification_report(y_test, pred))
+ acc = accuracy_score(y_test, pred) * 100
+ print(f'Accuracy score: {acc}')
+
+ print('Save model to ', model_path)
+ train_model.save_model(model_path)
+ return acc
+
+
+# only CSV is supported now
+def train(train_feature_path, model_path, seed=7, test_size=0.25):
+ train_df = read_dataset(train_feature_path)
+ X_train, X_test, y_train, y_test = prepare_dataset(train_df, seed, test_size)
+ return xgboost_train(X_train, X_test, y_train, y_test, model_path)
+
+
+def train_task(*op_args, **op_kwargs):
+ return train(op_args[0], op_args[1])
+
+
+if __name__ == '__main__':
+ print(glob.glob(os.path.join('', '/tmp/feature_data/*.csv')))
+ train('/tmp/feature_data/*.csv', '/tmp/model.json')
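(Not part of the patch.) The train_task wrapper above only forwards its first two positional op_args to train(), which makes it easy to drive from Airflow's stock PythonOperator. A minimal, hypothetical sketch of that wiring, reusing the /tmp paths from the __main__ block and assuming the module is importable from the DAGs folder:

    from datetime import datetime

    from airflow import DAG
    from airflow.operators.python import PythonOperator

    from xgboost_train_sample import train_task  # assumes the module sits next to the DAG file

    with DAG(
        dag_id="xgboost_train_sample",  # hypothetical DAG id
        start_date=datetime(2022, 1, 1),
        schedule_interval=None,
        catchup=False,
    ) as dag:
        # under the hood this calls train('/tmp/feature_data/*.csv', '/tmp/model.json')
        PythonOperator(
            task_id="train",
            python_callable=train_task,
            op_args=["/tmp/feature_data/*.csv", "/tmp/model.json"],
        )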
diff --git a/extensions/airflow-provider-openmldb/openmldb_provider/hooks/__init__.py b/extensions/airflow-provider-openmldb/openmldb_provider/hooks/__init__.py
new file mode 100644
index 00000000000..835f9218b72
--- /dev/null
+++ b/extensions/airflow-provider-openmldb/openmldb_provider/hooks/__init__.py
@@ -0,0 +1,13 @@
+# Copyright 2021 4Paradigm
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
diff --git a/extensions/airflow-provider-openmldb/openmldb_provider/hooks/openmldb_hook.py b/extensions/airflow-provider-openmldb/openmldb_provider/hooks/openmldb_hook.py
new file mode 100644
index 00000000000..f371ec1995b
--- /dev/null
+++ b/extensions/airflow-provider-openmldb/openmldb_provider/hooks/openmldb_hook.py
@@ -0,0 +1,256 @@
+# Copyright 2021 4Paradigm
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from typing import Any, Callable, Dict, Optional, Union
+
+import requests
+import tenacity
+from requests.auth import HTTPBasicAuth
+from requests_toolbelt.adapters.socket_options import TCPKeepAliveAdapter
+
+from airflow.exceptions import AirflowException
+from airflow.hooks.base import BaseHook
+
+
+class OpenMLDBHook(BaseHook):
+ """
+ The Hook that interacts with an OpenMLDB API Server endpoint (HTTP) via the Python requests library.
+
+ :param method: the API method to be called
+ :type method: str
+ :param openmldb_conn_id: connection that has the base API url, e.g. https://www.google.com/,
+ and optional authentication credentials. Default headers can also be specified in
+ the Extra field in JSON format.
+ :type openmldb_conn_id: str
+ :param auth_type: The auth type for the service
+ :type auth_type: AuthBase of python requests lib
+ """
+
+ conn_name_attr = 'openmldb_conn_id'
+ default_conn_name = 'openmldb_default'
+ # use the http conn type; otherwise we would have to add custom connection forms
+ conn_type = 'http'
+ hook_name = 'OpenMLDB'
+
+ def __init__(
+ self,
+ method: str = 'POST',
+ openmldb_conn_id: str = default_conn_name,
+ auth_type: Any = HTTPBasicAuth,
+ tcp_keep_alive: bool = True,
+ tcp_keep_alive_idle: int = 120,
+ tcp_keep_alive_count: int = 20,
+ tcp_keep_alive_interval: int = 30,
+ ) -> None:
+ super().__init__()
+ self.http_conn_id = openmldb_conn_id
+ self.method = method.upper()
+ self.base_url: str = ""
+ self._retry_obj: Callable[..., Any]
+ self.auth_type: Any = auth_type
+ self.tcp_keep_alive = tcp_keep_alive
+ self.keep_alive_idle = tcp_keep_alive_idle
+ self.keep_alive_count = tcp_keep_alive_count
+ self.keep_alive_interval = tcp_keep_alive_interval
+
+ # headers may be passed through directly or in the "extra" field in the connection
+ # definition
+ def get_conn(self, headers: Optional[Dict[Any, Any]] = None) -> requests.Session:
+ """
+ Returns http session for use with requests
+
+ :param headers: additional headers to be passed through as a dictionary
+ """
+ session = requests.Session()
+
+ if self.http_conn_id:
+ conn = self.get_connection(self.http_conn_id)
+
+ if conn.host and "://" in conn.host:
+ self.base_url = conn.host
+ else:
+ # schema defaults to HTTP
+ schema = conn.schema if conn.schema else "http"
+ host = conn.host if conn.host else ""
+ self.base_url = schema + "://" + host
+
+ if conn.port:
+ self.base_url = self.base_url + ":" + str(conn.port)
+ if conn.login:
+ session.auth = self.auth_type(conn.login, conn.password)
+ if conn.extra:
+ try:
+ session.headers.update(conn.extra_dejson)
+ except TypeError:
+ self.log.warning('Connection to %s has invalid extra field.', conn.host)
+ if headers:
+ session.headers.update(headers)
+
+ return session
+
+ def run(
+ self,
+ endpoint: Optional[str] = None,
+ data: Optional[Union[Dict[str, Any], str]] = None,
+ headers: Optional[Dict[str, Any]] = None,
+ extra_options: Optional[Dict[str, Any]] = None,
+ **request_kwargs: Any,
+ ) -> Any:
+ r"""
+ Performs the request
+
+ :param endpoint: the endpoint to be called, e.g. resource/v1/query?
+ :param data: payload to be uploaded or request parameters
+ :param headers: additional headers to be passed through as a dictionary
+ :param extra_options: additional options to be used when executing the request,
+ e.g. {'check_response': False} to avoid raising exceptions on non-2XX
+ or 3XX status codes
+ :param request_kwargs: Additional kwargs to pass when creating a request.
+ For example, ``run(json=obj)`` is passed as ``requests.Request(json=obj)``
+ """
+ extra_options = extra_options or {}
+
+ session = self.get_conn(headers)
+
+ url = self.url_from_endpoint(endpoint)
+
+ if self.tcp_keep_alive:
+ keep_alive_adapter = TCPKeepAliveAdapter(
+ idle=self.keep_alive_idle, count=self.keep_alive_count, interval=self.keep_alive_interval
+ )
+ session.mount(url, keep_alive_adapter)
+ if self.method == 'GET':
+ # GET uses params
+ req = requests.Request(self.method, url, params=data, headers=headers, **request_kwargs)
+ elif self.method == 'HEAD':
+ # HEAD doesn't use params
+ req = requests.Request(self.method, url, headers=headers, **request_kwargs)
+ else:
+ # Others use data
+ req = requests.Request(self.method, url, data=data, headers=headers, **request_kwargs)
+
+ prepped_request = session.prepare_request(req)
+ self.log.info("Sending '%s' to url: %s", self.method, url)
+ return self.run_and_check(session, prepped_request, extra_options)
+
+ def check_response(self, response: requests.Response) -> None:
+ """
+ Checks the status code and raises an AirflowException on non-2XX or 3XX
+ status codes
+
+ :param response: A requests response object
+ """
+ try:
+ response.raise_for_status()
+ except requests.exceptions.HTTPError:
+ self.log.error("HTTP error: %s", response.reason)
+ self.log.error(response.text)
+ raise AirflowException(str(response.status_code) + ":" + response.reason)
+
+ def run_and_check(
+ self,
+ session: requests.Session,
+ prepped_request: requests.PreparedRequest,
+ extra_options: Dict[Any, Any],
+ ) -> Any:
+ """
+ Grabs extra options like timeout and actually runs the request,
+ checking for the result
+
+ :param session: the session to be used to execute the request
+ :param prepped_request: the prepared request generated in run()
+ :param extra_options: additional options to be used when executing the request,
+ e.g. ``{'check_response': False}`` to avoid raising exceptions on non-2XX
+ or 3XX status codes
+ """
+ extra_options = extra_options or {}
+
+ settings = session.merge_environment_settings(
+ prepped_request.url,
+ proxies=extra_options.get("proxies", {}),
+ stream=extra_options.get("stream", False),
+ verify=extra_options.get("verify"),
+ cert=extra_options.get("cert"),
+ )
+
+ # Send the request.
+ send_kwargs: Dict[str, Any] = {
+ "timeout": extra_options.get("timeout"),
+ "allow_redirects": extra_options.get("allow_redirects", True),
+ }
+ send_kwargs.update(settings)
+
+ try:
+ response = session.send(prepped_request, **send_kwargs)
+
+ if extra_options.get('check_response', True):
+ self.check_response(response)
+ return response
+
+ except requests.exceptions.ConnectionError as ex:
+ self.log.warning('%s Tenacity will retry to execute the operation', ex)
+ raise ex
+
+ def run_with_advanced_retry(self, _retry_args: Dict[Any, Any], *args: Any, **kwargs: Any) -> Any:
+ """
+ Runs Hook.run() with a Tenacity decorator attached to it. This is useful for
+ connectors which might be disturbed by intermittent issues and should not
+ instantly fail.
+
+ :param _retry_args: Arguments which define the retry behaviour.
+ See Tenacity documentation at https://github.com/jd/tenacity
+
+
+ .. code-block:: python
+
+ hook = OpenMLDBHook(openmldb_conn_id="my_conn", method="GET")
+ retry_args = dict(
+ wait=tenacity.wait_exponential(),
+ stop=tenacity.stop_after_attempt(10),
+ retry=tenacity.retry_if_exception_type(Exception),
+ )
+ hook.run_with_advanced_retry(endpoint="v1/test", _retry_args=retry_args)
+
+ """
+ self._retry_obj = tenacity.Retrying(**_retry_args)
+
+ return self._retry_obj(self.run, *args, **kwargs)
+
+ def url_from_endpoint(self, endpoint: Optional[str]) -> str:
+ """Combine base url with endpoint"""
+ if self.base_url and not self.base_url.endswith('/') and endpoint and not endpoint.startswith('/'):
+ return self.base_url + '/' + endpoint
+ return (self.base_url or '') + (endpoint or '')
+
+ def test_connection(self):
+ """Test HTTP Connection"""
+ try:
+ self.run()
+ return True, 'Connection successfully tested'
+ except Exception as e:
+ return False, str(e)
+
+ def submit_job(self, db: str, mode: str, sql: str):
+ """
+ Submits a job to an OpenMLDB API server.
+
+ :param db: Required. The database in OpenMLDB. For DDL, the database may be non-existent.
+ :param mode: Required. The execute mode: offsync, offasync, or online. For DDL, any mode works.
+ :param sql: Required. The SQL of the OpenMLDB job.
+ """
+ return self.run(
+ endpoint=f"dbs/{db}",
+ json={"mode": mode, "sql": sql},
+ headers={"accept": "application/json"},
+ )
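(Not part of the patch.) For orientation, a minimal sketch of calling the hook directly. It assumes an Airflow connection named openmldb_default that points at an OpenMLDB API server (e.g. host http://127.0.0.1, port 9080) and a hypothetical database demo_db:

    from openmldb_provider.hooks.openmldb_hook import OpenMLDBHook

    hook = OpenMLDBHook(openmldb_conn_id="openmldb_default")

    # submit_job POSTs {"mode": ..., "sql": ...} to <base_url>/dbs/<db>
    response = hook.submit_job(db="demo_db", mode="offsync", sql="SELECT 1")
    print(response.json())  # the API server returns {"code": 0, "msg": "ok"} on success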
diff --git a/extensions/airflow-provider-openmldb/openmldb_provider/operators/__init__.py b/extensions/airflow-provider-openmldb/openmldb_provider/operators/__init__.py
new file mode 100644
index 00000000000..835f9218b72
--- /dev/null
+++ b/extensions/airflow-provider-openmldb/openmldb_provider/operators/__init__.py
@@ -0,0 +1,13 @@
+# Copyright 2021 4Paradigm
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
diff --git a/extensions/airflow-provider-openmldb/openmldb_provider/operators/openmldb_operator.py b/extensions/airflow-provider-openmldb/openmldb_provider/operators/openmldb_operator.py
new file mode 100644
index 00000000000..163d93b1c40
--- /dev/null
+++ b/extensions/airflow-provider-openmldb/openmldb_provider/operators/openmldb_operator.py
@@ -0,0 +1,160 @@
+# Copyright 2021 4Paradigm
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""This module contains OpenMLDB operators."""
+from enum import Enum
+from typing import TYPE_CHECKING
+
+from airflow import AirflowException
+from airflow.models import BaseOperator
+from airflow.utils.operator_helpers import determine_kwargs
+
+from openmldb_provider.hooks.openmldb_hook import OpenMLDBHook
+
+if TYPE_CHECKING:
+ from airflow.utils.context import Context
+
+
+class Mode(Enum):
+ """Available options for OpenMLDB execute mode"""
+
+ OFFSYNC = 'offsync'
+ OFFASYNC = 'offasync'
+ ONLINE = 'online'
+
+
+class OpenMLDBSQLOperator(BaseOperator):
+ """
+ This operator runs any SQL on OpenMLDB
+
+ :param db: The database you want to use
+ :param mode: The execute mode: offsync, offasync, or online.
+ :param sql: The SQL you want to run
+ :param openmldb_conn_id: The Airflow connection used for OpenMLDB.
+ :keyword disable_response_check: If true, skip the response check
+ :keyword response_check: custom response check function. If not set, the operator checks whether the response code equals 0.
+ """
+
+ def __init__(
+ self,
+ db: str,
+ mode: Mode,
+ sql: str,
+ openmldb_conn_id: str = 'openmldb_default',
+ **kwargs,
+ ) -> None:
+ if kwargs.pop("disable_response_check", False):
+ self.response_check = None
+ else:
+ self.response_check = kwargs.pop("response_check", lambda response: response.json()["code"] == 0)
+ super().__init__(**kwargs)
+ self.openmldb_conn_id = openmldb_conn_id
+ self.db = db
+ self.mode = mode
+ self.sql = sql
+
+ def execute(self, context: 'Context'):
+ openmldb_hook = OpenMLDBHook(openmldb_conn_id=self.openmldb_conn_id)
+ response = openmldb_hook.submit_job(db=self.db, mode=self.mode.value, sql=self.sql)
+
+ if self.response_check:
+ kwargs = determine_kwargs(self.response_check, [response], context)
+ if not self.response_check(response, **kwargs):
+ raise AirflowException(
+ f"Response check returned False. Resp: {response.text}"
+ )
+ return response.text
+
+
+class OpenMLDBLoadDataOperator(OpenMLDBSQLOperator):
+ """
+ This operator loads data into OpenMLDB
+
+ :param db: The database you want to use
+ :param mode: The execute mode
+ :param table: The table you want to load data into
+ :param file: The path of the data to load, local or HDFS
+ :param openmldb_conn_id: The Airflow connection used for OpenMLDB.
+ :keyword options: LOAD DATA options
+ """
+
+ def __init__(
+ self,
+ db: str,
+ mode: Mode,
+ table: str,
+ file: str,
+ openmldb_conn_id: str = 'openmldb_default',
+ **kwargs,
+ ) -> None:
+ load_data_options = kwargs.pop('options', None)
+ sql = f"LOAD DATA INFILE '{file}' INTO TABLE {table}"
+ if load_data_options:
+ sql += f" OPTIONS({load_data_options})"
+ super().__init__(db=db, mode=mode, sql=sql, **kwargs)
+ self.openmldb_conn_id = openmldb_conn_id
+
+
+class OpenMLDBSelectIntoOperator(OpenMLDBSQLOperator):
+ """
+ This operator extracts features from OpenMLDB and saves them
+
+ :param db: The database you want to use
+ :param mode: The execute mode
+ :param sql: The select SQL used to extract features
+ :param file: The path where the features are saved, local or HDFS
+ :param openmldb_conn_id: The Airflow connection used for OpenMLDB.
+ :keyword options: SELECT INTO options
+ """
+
+ def __init__(
+ self,
+ db: str,
+ mode: Mode,
+ sql: str,
+ file: str,
+ openmldb_conn_id: str = 'openmldb_default',
+ **kwargs,
+ ) -> None:
+ select_out_options = kwargs.pop('options', None)
+ sql = f"{sql} INTO OUTFILE '{file}'"
+ if select_out_options:
+ sql += f" OPTIONS({select_out_options})"
+ super().__init__(db=db, mode=mode, sql=sql, **kwargs)
+ self.openmldb_conn_id = openmldb_conn_id
+
+
+class OpenMLDBDeployOperator(OpenMLDBSQLOperator):
+ """
+ This operator deploys SQL on OpenMLDB
+
+ :param db: The database you want to use
+ :param deploy_name: The deployment name
+ :param sql: The SQL you want to deploy
+ :param openmldb_conn_id: The Airflow connection used for OpenMLDB.
+ """
+
+ def __init__(
+ self,
+ db: str,
+ deploy_name: str,
+ sql: str,
+ openmldb_conn_id: str = 'openmldb_default',
+ **kwargs,
+ ) -> None:
+ super().__init__(
+ db=db, mode=Mode.ONLINE, sql=f"DEPLOY {deploy_name} {sql}", **kwargs # for DEPLOY, any mode works
+ )
+ self.openmldb_conn_id = openmldb_conn_id
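(Not part of the patch.) A minimal sketch of chaining these operators into a small feature pipeline; the database, table, file paths, and deployment name are hypothetical, and the connection falls back to the default openmldb_default:

    from datetime import datetime

    from airflow import DAG
    from openmldb_provider.operators.openmldb_operator import (
        Mode,
        OpenMLDBDeployOperator,
        OpenMLDBLoadDataOperator,
        OpenMLDBSelectIntoOperator,
    )

    with DAG(dag_id="openmldb_feature_pipeline", start_date=datetime(2022, 1, 1),
             schedule_interval=None, catchup=False) as dag:
        load_data = OpenMLDBLoadDataOperator(
            task_id="load-data", db="demo_db", mode=Mode.OFFSYNC,
            table="train_sample", file="/tmp/train_sample.csv",
        )
        feature_extract = OpenMLDBSelectIntoOperator(
            task_id="feature-extract", db="demo_db", mode=Mode.OFFSYNC,
            sql="SELECT * FROM train_sample", file="/tmp/feature_data",
        )
        deploy = OpenMLDBDeployOperator(
            task_id="deploy", db="demo_db", deploy_name="demo",
            sql="SELECT * FROM train_sample",
        )
        load_data >> feature_extract >> deploy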
diff --git a/extensions/airflow-provider-openmldb/requirements.txt b/extensions/airflow-provider-openmldb/requirements.txt
new file mode 100644
index 00000000000..0b113fb32cf
--- /dev/null
+++ b/extensions/airflow-provider-openmldb/requirements.txt
@@ -0,0 +1,8 @@
+setuptools>=62.3.2
+requests>=2.27.1
+# pandas>=1.4.2
+# sklearn>=0.0
+# scikit-learn>=1.1.1
+# xgboost>=1.4.2
+tenacity>=8.0.1
+apache-airflow>=2.0
\ No newline at end of file
diff --git a/extensions/airflow-provider-openmldb/setup.py b/extensions/airflow-provider-openmldb/setup.py
new file mode 100644
index 00000000000..a7b6e06f8ed
--- /dev/null
+++ b/extensions/airflow-provider-openmldb/setup.py
@@ -0,0 +1,46 @@
+# Copyright 2021 4Paradigm
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""Setup.py for the OpenMLDB Airflow provider package. Built from datadog provider package for now."""
+
+from setuptools import setup
+
+with open("README.md", "r") as fh:
+ long_description = fh.read()
+
+"""Perform the package airflow-provider-openmldb setup."""
+setup(
+ name='airflow-provider-openmldb',
+ version="0.0.1",
+ description='An OpenMLDB provider package built by 4Paradigm.',
+ long_description=long_description,
+ long_description_content_type='text/markdown',
+ entry_points={
+ "apache_airflow_provider": [
+ "provider_info=openmldb_provider.__init__:get_provider_info"
+ ]
+ },
+ license="copyright 4paradigm.com",
+ packages=['openmldb_provider', 'openmldb_provider.hooks',
+ 'openmldb_provider.operators'],
+ install_requires=['apache-airflow>=2.0'],
+ setup_requires=['setuptools', 'wheel'],
+ author='Huang Wei',
+ author_email='huangwei@apache.org',
+ url='https://github.com/4paradigm/OpenMLDB',
+ classifiers=[
+ "Framework :: Apache Airflow",
+ "Framework :: Apache Airflow :: Provider",
+ ],
+ python_requires='~=3.7',
+)
diff --git a/extensions/airflow-provider-openmldb/tests/__init__.py b/extensions/airflow-provider-openmldb/tests/__init__.py
new file mode 100644
index 00000000000..835f9218b72
--- /dev/null
+++ b/extensions/airflow-provider-openmldb/tests/__init__.py
@@ -0,0 +1,13 @@
+# Copyright 2021 4Paradigm
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
diff --git a/extensions/airflow-provider-openmldb/tests/hooks/__init__.py b/extensions/airflow-provider-openmldb/tests/hooks/__init__.py
new file mode 100644
index 00000000000..835f9218b72
--- /dev/null
+++ b/extensions/airflow-provider-openmldb/tests/hooks/__init__.py
@@ -0,0 +1,13 @@
+# Copyright 2021 4Paradigm
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
diff --git a/extensions/airflow-provider-openmldb/tests/hooks/test_openmldb_api_hook.py b/extensions/airflow-provider-openmldb/tests/hooks/test_openmldb_api_hook.py
new file mode 100644
index 00000000000..2dcd9f447f1
--- /dev/null
+++ b/extensions/airflow-provider-openmldb/tests/hooks/test_openmldb_api_hook.py
@@ -0,0 +1,166 @@
+# Copyright 2021 4Paradigm
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""
+Unittest module to test Hooks.
+
+Requires the unittest, pytest, and requests-mock Python libraries.
+
+Run test:
+
+    python3 -m unittest tests.hooks.test_openmldb_api_hook.TestOpenMLDBHook
+
+"""
+import json
+import logging
+import unittest
+from unittest import mock, skip
+
+import requests_mock
+from airflow.models import Connection
+from airflow.utils import db
+
+# Import Hook
+from openmldb_provider.hooks.openmldb_hook import OpenMLDBHook
+
+log = logging.getLogger(__name__)
+
+
+class TestOpenMLDBHook(unittest.TestCase):
+ openmldb_conn_id = 'openmldb_conn_id_test'
+ test_db_endpoint = 'http://127.0.0.1:9080/dbs/test_db'
+
+ _mock_job_status_success_response_body = {'code': 0, 'msg': 'ok'}
+
+ def setUp(self):
+ db.merge_conn(
+ Connection(
+ conn_id='openmldb_conn_id_test', conn_type='openmldb', host='http://127.0.0.1', port=9080
+ )
+ )
+ self.hook = OpenMLDBHook(openmldb_conn_id=self.openmldb_conn_id)
+
+ @requests_mock.mock()
+ def test_submit_offsync_job(self, m):
+ m.post(self.test_db_endpoint, status_code=200, json=self._mock_job_status_success_response_body)
+ resp = self.hook.submit_job('test_db', 'offsync', 'select * from t1')
+ assert resp.status_code == 200
+ assert resp.json() == self._mock_job_status_success_response_body
+
+
+@skip("integration test: requires a running OpenMLDB API server")
+# Mock the `conn_sample` Airflow connection
+@mock.patch.dict('os.environ', AIRFLOW_CONN_CONN_SAMPLE='http://https%3A%2F%2Fwww.httpbin.org%2F')
+@mock.patch.dict('os.environ',
+ AIRFLOW_CONN_OPENMLDB_DEFAULT='http://http%3A%2F%2F127.0.0.1%3A9080%2Fdbs%2Fairflow_test')
+class TestOpenMLDBAPIHook(unittest.TestCase):
+ """
+ Test OpenMLDB API Hook.
+ """
+
+ @requests_mock.mock()
+ def test_post(self, m):
+ # Mock endpoint
+ m.post('https://www.httpbin.org/', json={'data': 'mocked response'})
+
+ # Instantiate hook
+ hook = OpenMLDBHook(
+ openmldb_conn_id='conn_sample',
+ method='post'
+ )
+
+ # Sample Hook's run method executes an API call
+ response = hook.run()
+
+ # Retrieve response payload
+ payload = response.json()
+
+ # Assert success status code
+ assert response.status_code == 200
+
+ # Assert the API call returns expected mocked payload
+ assert payload['data'] == 'mocked response'
+
+ @requests_mock.mock()
+ def test_get(self, m):
+ # Mock endpoint
+ m.get('https://www.httpbin.org/', json={'data': 'mocked response'})
+
+ # Instantiate hook
+ hook = OpenMLDBHook(
+ openmldb_conn_id='conn_sample',
+ method='get'
+ )
+
+ # Sample Hook's run method executes an API call
+ response = hook.run()
+
+ # Retrieve response payload
+ payload = response.json()
+
+ # Assert success status code
+ assert response.status_code == 200
+
+ # Assert the API call returns expected mocked payload
+ assert payload['data'] == 'mocked response'
+
+ def test_query_api_server_without_data(self):
+ hook = OpenMLDBHook()
+ # no data
+ response = hook.run()
+ res = json.loads(response.text)
+ assert res == {'code': -1, 'msg': 'Json parse failed'}
+
+ def test_query_api_server_with_sql(self):
+ hook = OpenMLDBHook()
+ response = hook.run(data='{"sql":"select 1", "mode":"offsync"}')
+ res = json.loads(response.text)
+ assert res == {'code': 0, 'msg': 'ok'}
+
+ def test_query_api_server_without_mode(self):
+ hook = OpenMLDBHook()
+ response = hook.run(data='{"sql":"select 1"}')
+ res = json.loads(response.text)
+ assert res['code'] == -1
+ assert res['msg'].startswith('Json parse failed')
+
+ def test_query_api_server(self):
+ hook = OpenMLDBHook()
+        # We can send DDL via POST too, though it's not recommended for users.
+        # We only do it here for tests; the mode doesn't affect DDL statements.
+ response = hook.run(data='{"sql": "create database if not exists airflow_test", "mode": "online"}',
+ headers={"content-type": "application/json"})
+ res = json.loads(response.text)
+ assert res == {'code': 0, 'msg': 'ok'}
+
+ response = hook.run(data='{"sql":"create table if not exists airflow_table(c1 int)", "mode":"online"}',
+ headers={"content-type": "application/json"})
+ res = json.loads(response.text)
+ assert res == {'code': 0, 'msg': 'ok'}
+
+ # an offline sync query
+ response = hook.run(data='{"sql":"select * from airflow_table", "mode":"offsync"}',
+ headers={"content-type": "application/json"})
+ res = json.loads(response.text)
+ assert res == {'code': 0, 'msg': 'ok'}
+
+        # an online query (always synchronous)
+ response = hook.run(data='{"sql":"select * from airflow_table", "mode":"online"}',
+ headers={"content-type": "application/json"})
+ res = json.loads(response.text)
+ assert res == {'code': 0, 'msg': 'ok'}
+
+
+if __name__ == '__main__':
+ unittest.main()
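
Outside of the test suite, the hook can also be used directly. Below is a minimal sketch, not part of this patch; `example_db` and the SQL are placeholders, and it assumes the default `openmldb_default` connection targets a running API server.

# Minimal sketch of direct hook usage (not part of this diff).
from openmldb_provider.hooks.openmldb_hook import OpenMLDBHook

hook = OpenMLDBHook()  # or OpenMLDBHook(openmldb_conn_id="my_conn")

# submit_job() builds the request body from db/mode/sql ...
resp = hook.submit_job("example_db", "offsync", "SELECT * FROM t1")
print(resp.status_code, resp.json())

# ... while run() posts a raw JSON payload to the API server.
resp = hook.run(
    data='{"sql": "SELECT 1", "mode": "offsync"}',
    headers={"content-type": "application/json"},
)
print(resp.json())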
diff --git a/extensions/airflow-provider-openmldb/tests/operators/__init__.py b/extensions/airflow-provider-openmldb/tests/operators/__init__.py
new file mode 100644
index 00000000000..835f9218b72
--- /dev/null
+++ b/extensions/airflow-provider-openmldb/tests/operators/__init__.py
@@ -0,0 +1,13 @@
+# Copyright 2021 4Paradigm
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
diff --git a/extensions/airflow-provider-openmldb/tests/operators/test_openmldb_operator.py b/extensions/airflow-provider-openmldb/tests/operators/test_openmldb_operator.py
new file mode 100644
index 00000000000..f09d9c8d354
--- /dev/null
+++ b/extensions/airflow-provider-openmldb/tests/operators/test_openmldb_operator.py
@@ -0,0 +1,234 @@
+# Copyright 2021 4Paradigm
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""
+Unittest module to test Operators.
+
+Requires the unittest, pytest, and requests-mock Python libraries.
+
+Run test:
+
+    python3 -m pytest tests/operators/test_openmldb_operator.py
+
+"""
+
+import logging
+import unittest
+from unittest import mock, skip
+
+import pytest
+import requests
+
+from openmldb_provider.hooks.openmldb_hook import OpenMLDBHook
+from openmldb_provider.operators.openmldb_operator import (OpenMLDBSQLOperator, Mode, OpenMLDBDeployOperator,
+ OpenMLDBSelectIntoOperator, OpenMLDBLoadDataOperator)
+
+log = logging.getLogger(__name__)
+
+MOCK_TASK_ID = "test-openmldb-operator"
+MOCK_DB = "mock_db"
+MOCK_TABLE = "mock_table"
+MOCK_FILE = "mock_file_name"
+MOCK_OPENMLDB_CONN_ID = "mock_openmldb_conn"
+
+
+@mock.patch.dict('os.environ', AIRFLOW_CONN_MOCK_OPENMLDB_CONN='http://http%3A%2F%2F1.2.3.4%3A9080%2F')
+class TestOpenMLDBLoadDataOperator:
+ @mock.patch.object(OpenMLDBHook, "submit_job")
+ def test_execute(self, mock_submit_job):
+ operator = OpenMLDBLoadDataOperator(
+ task_id=MOCK_TASK_ID,
+ openmldb_conn_id=MOCK_OPENMLDB_CONN_ID,
+ db=MOCK_DB,
+ mode=Mode.OFFSYNC,
+ table=MOCK_TABLE,
+ file=MOCK_FILE,
+ disable_response_check=True,
+ )
+ operator.execute({})
+
+ mock_submit_job.assert_called_once_with(
+ db=MOCK_DB,
+ mode=Mode.OFFSYNC.value,
+ sql=f"LOAD DATA INFILE '{MOCK_FILE}' INTO " f"TABLE {MOCK_TABLE}",
+ )
+
+ @mock.patch.object(OpenMLDBHook, "submit_job")
+ def test_execute_with_options(self, mock_submit_job):
+ response = requests.Response()
+ response.status_code = 200
+ response._content = b'{"code": 0, "msg": "ok"}'
+ mock_submit_job.return_value = response
+
+ options = "mode='overwrite'"
+ operator = OpenMLDBLoadDataOperator(
+ task_id=MOCK_TASK_ID,
+ openmldb_conn_id=MOCK_OPENMLDB_CONN_ID,
+ db=MOCK_DB,
+ mode=Mode.OFFSYNC,
+ table=MOCK_TABLE,
+ file=MOCK_FILE,
+ options=options,
+ )
+ operator.execute({})
+ mock_submit_job.assert_called_once_with(
+ db=MOCK_DB,
+ mode=Mode.OFFSYNC.value,
+ sql=f"LOAD DATA INFILE '{MOCK_FILE}' INTO " f"TABLE {MOCK_TABLE} OPTIONS" f"({options})",
+ )
+
+
+@mock.patch.dict('os.environ', AIRFLOW_CONN_MOCK_OPENMLDB_CONN='http://http%3A%2F%2F1.2.3.4%3A9080%2F')
+class TestOpenMLDBSelectOutOperator:
+ @mock.patch.object(OpenMLDBHook, "submit_job")
+ def test_execute(self, mock_submit_job):
+ fe_sql = (
+ "SELECT id, ts, sum(c1) over w1 FROM t1 WINDOW w1 as "
+ "(PARTITION BY id ORDER BY ts BETWEEN 20s PRECEDING AND CURRENT ROW)"
+ )
+ operator = OpenMLDBSelectIntoOperator(
+ task_id=MOCK_TASK_ID,
+ openmldb_conn_id=MOCK_OPENMLDB_CONN_ID,
+ db=MOCK_DB,
+ mode=Mode.OFFSYNC,
+ sql=fe_sql,
+ file=MOCK_FILE,
+ disable_response_check=True,
+ )
+ operator.execute({})
+
+ mock_submit_job.assert_called_once_with(
+ db=MOCK_DB, mode=Mode.OFFSYNC.value, sql=f"{fe_sql} INTO OUTFILE '{MOCK_FILE}'"
+ )
+
+ @mock.patch.object(OpenMLDBHook, "submit_job")
+ def test_execute_with_options(self, mock_submit_job):
+ response = requests.Response()
+ response.status_code = 200
+ response._content = b'{"code": 0, "msg": "ok"}'
+ mock_submit_job.return_value = response
+
+ fe_sql = (
+ "SELECT id, ts, sum(c1) over w1 FROM t1 WINDOW w1 as "
+ "(PARTITION BY id ORDER BY ts BETWEEN 20s PRECEDING AND CURRENT ROW)"
+ )
+ options = "mode='errorifexists', delimiter='-'"
+ operator = OpenMLDBSelectIntoOperator(
+ task_id=MOCK_TASK_ID,
+ openmldb_conn_id=MOCK_OPENMLDB_CONN_ID,
+ db=MOCK_DB,
+ mode=Mode.OFFSYNC,
+ sql=fe_sql,
+ file=MOCK_FILE,
+ options=options,
+ disable_response_check=True,
+ )
+ operator.execute({})
+
+ mock_submit_job.assert_called_once_with(
+ db=MOCK_DB,
+ mode=Mode.OFFSYNC.value,
+ sql=f"{fe_sql} INTO OUTFILE '{MOCK_FILE}' OPTIONS({options})",
+ )
+
+
+@mock.patch.dict('os.environ', AIRFLOW_CONN_MOCK_OPENMLDB_CONN='http://http%3A%2F%2F1.2.3.4%3A9080%2F')
+class TestOpenMLDBDeployOperator:
+ @mock.patch.object(OpenMLDBHook, "submit_job")
+ def test_execute(self, mock_submit_job):
+ fe_sql = (
+ "SELECT id, ts, sum(c1) over w1 FROM t1 WINDOW w1 as "
+ "(PARTITION BY id ORDER BY ts BETWEEN 20s PRECEDING AND CURRENT ROW)"
+ )
+ deploy_name = "demo"
+ operator = OpenMLDBDeployOperator(
+ task_id=MOCK_TASK_ID,
+ openmldb_conn_id=MOCK_OPENMLDB_CONN_ID,
+ db=MOCK_DB,
+ deploy_name=deploy_name,
+ sql=fe_sql,
+ disable_response_check=True,
+ )
+ operator.execute({})
+
+ mock_submit_job.assert_called_once_with(
+ db=MOCK_DB, mode=Mode.ONLINE.value, sql=f"DEPLOY {deploy_name} {fe_sql}"
+ )
+
+
+@mock.patch.dict('os.environ', AIRFLOW_CONN_MOCK_OPENMLDB_CONN='http://http%3A%2F%2F1.2.3.4%3A9080%2F')
+class TestOpenMLDBSQLOperator:
+ @mock.patch.object(OpenMLDBHook, "submit_job")
+ @pytest.mark.parametrize(
+ "sql, mode",
+ [
+ ("create database if not exists test_db", Mode.OFFSYNC),
+ ("SHOW JOBS", Mode.ONLINE),
+ ("SELECT 1", Mode.OFFSYNC),
+ ("SELECT 1", Mode.ONLINE),
+ ],
+ )
+ def test_execute(self, mock_submit_job, sql, mode):
+ operator = OpenMLDBSQLOperator(
+ task_id=MOCK_TASK_ID,
+ openmldb_conn_id=MOCK_OPENMLDB_CONN_ID,
+ db=MOCK_DB,
+ mode=mode,
+ sql=sql,
+ disable_response_check=True,
+ )
+ operator.execute({})
+
+ mock_submit_job.assert_called_once_with(db=MOCK_DB, mode=mode.value, sql=sql)
+
+
+@skip("integration test: requires a running OpenMLDB API server")
+@mock.patch.dict('os.environ', AIRFLOW_CONN_OPENMLDB_DEFAULT='http://http%3A%2F%2F127.0.0.1%3A9080%2F')
+class TestOpenMLDBOperatorIT(unittest.TestCase):
+ """
+ Test OpenMLDB Operator.
+ """
+
+ def test_operator_with_empty_sql(self):
+ operator = OpenMLDBSQLOperator(
+ task_id='run_operator', db='foo', mode=Mode.ONLINE,
+ sql='', response_check=lambda response: (response.json()['code'] == 2000) and (
+ 'sql trees is null or empty' in response.json()['msg']))
+ operator.execute({})
+
+ def test_operator_with_sql(self):
+ test_db = "airflow_test_db"
+ test_table = "airflow_test_table"
+
+ OpenMLDBSQLOperator(task_id='setup-database', db=test_db,
+ mode=Mode.OFFSYNC,
+ sql=f'create database if not exists {test_db}').execute({})
+ OpenMLDBSQLOperator(task_id='setup-table', db=test_db,
+ mode=Mode.OFFSYNC,
+ sql=f'create table if not exists {test_table}(c1 int)').execute({})
+ # TODO(hw): response doesn't have the result data now, so just do an offline query here.
+ # But you can check status.
+ OpenMLDBSQLOperator(task_id='feature-extraction-offline', db=test_db,
+ mode=Mode.OFFSYNC,
+ sql=f'select * from {test_table}', ).execute({})
+ # do an online query
+ OpenMLDBSQLOperator(task_id='feature-extraction-online', db=test_db,
+ mode=Mode.ONLINE,
+ sql=f'select * from {test_table}').execute({})
+
+ OpenMLDBSQLOperator(task_id='feature-extraction-online-bad', db=test_db,
+ mode=Mode.ONLINE,
+ sql='select * from not_exist_table',
+ response_check=lambda response: (response.json()['code'] == -1) and (
+ "not exists" in response.json()['msg'])).execute({})
diff --git a/go/conn.go b/go/conn.go
new file mode 100644
index 00000000000..13550a54151
--- /dev/null
+++ b/go/conn.go
@@ -0,0 +1,290 @@
+package openmldb
+
+import (
+ "bytes"
+ "context"
+ interfaces "database/sql/driver"
+ "encoding/json"
+ "errors"
+ "fmt"
+ "io"
+ "net/http"
+ "strings"
+)
+
+var (
+ _ interfaces.Conn = (*conn)(nil)
+
+ // All Conn implementations should implement the following interfaces:
+ // Pinger, SessionResetter, and Validator.
+
+ _ interfaces.Pinger = (*conn)(nil)
+ _ interfaces.SessionResetter = (*conn)(nil)
+ _ interfaces.Validator = (*conn)(nil)
+
+ // If named parameters or context are supported, the driver's Conn should implement:
+ // ExecerContext, QueryerContext, ConnPrepareContext, and ConnBeginTx.
+
+ _ interfaces.ExecerContext = (*conn)(nil)
+ _ interfaces.QueryerContext = (*conn)(nil)
+
+ _ interfaces.Rows = (*respDataRows)(nil)
+)
+
+type queryMode string
+
+func (m queryMode) String() string {
+ switch m {
+ case ModeOffsync:
+ return "offsync"
+ case ModeOffasync:
+ return "offasync"
+ case ModeOnline:
+ return "online"
+ default:
+ return "unknown"
+ }
+}
+
+const (
+ ModeOffsync queryMode = "offsync"
+ ModeOffasync queryMode = "offasync"
+ ModeOnline queryMode = "online"
+)
+
+var allQueryMode = map[string]queryMode{
+ "offsync": ModeOffsync,
+ "offasync": ModeOffasync,
+ "online": ModeOnline,
+}
+
+type conn struct {
+ host string // host or host:port
+ db string // database name
+ mode queryMode
+ closed bool
+}
+
+type queryResp struct {
+ Code int `json:"code"`
+ Msg string `json:"msg"`
+ Data *respData `json:"data,omitempty"`
+}
+
+type respData struct {
+ Schema []string `json:"schema"`
+ Data [][]interfaces.Value `json:"data"`
+}
+
+type respDataRows struct {
+ respData
+ i int
+}
+
+// Columns returns the names of the columns. The number of
+// columns of the result is inferred from the length of the
+// slice. If a particular column name isn't known, an empty
+// string should be returned for that entry.
+func (r respDataRows) Columns() []string {
+ return make([]string, len(r.Schema))
+}
+
+// Close closes the rows iterator.
+func (r *respDataRows) Close() error {
+ r.i = len(r.Data)
+ return nil
+}
+
+// Next is called to populate the next row of data into
+// the provided slice. The provided slice will be the same
+// size as the Columns() are wide.
+//
+// Next should return io.EOF when there are no more rows.
+//
+// The dest should not be written to outside of Next. Care
+// should be taken when closing Rows not to modify
+// a buffer held in dest.
+func (r *respDataRows) Next(dest []interfaces.Value) error {
+ if r.i >= len(r.Data) {
+ return io.EOF
+ }
+
+ copy(dest, r.Data[r.i])
+ r.i++
+ return nil
+}
+
+type queryReq struct {
+ Mode string `json:"mode"`
+ SQL string `json:"sql"`
+ Input *queryInput `json:"input,omitempty"`
+}
+
+type queryInput struct {
+ Schema []string `json:"schema"`
+ Data []interfaces.Value `json:"data"`
+}
+
+func parseReqToJson(mode, sql string, input ...interfaces.Value) ([]byte, error) {
+ req := queryReq{
+ Mode: mode,
+ SQL: sql,
+ }
+
+ if len(input) > 0 {
+ schema := make([]string, len(input))
+ for i, v := range input {
+ switch v.(type) {
+ case bool:
+ schema[i] = "bool"
+ case int16:
+ schema[i] = "int16"
+ case int32:
+ schema[i] = "int32"
+ case int64:
+ schema[i] = "int64"
+ case float32:
+ schema[i] = "float"
+ case float64:
+ schema[i] = "double"
+ case string:
+ schema[i] = "string"
+ default:
+ return nil, fmt.Errorf("unknown type at index %d", i)
+ }
+ }
+ req.Input = &queryInput{
+ Schema: schema,
+ Data: input,
+ }
+ }
+
+ return json.Marshal(req)
+}
+
+func parseRespFromJson(respBody io.Reader) (*queryResp, error) {
+ var r queryResp
+ if err := json.NewDecoder(respBody).Decode(&r); err != nil {
+ return nil, err
+ }
+
+ if r.Data != nil {
+ for _, row := range r.Data.Data {
+ for i, col := range row {
+ switch strings.ToLower(r.Data.Schema[i]) {
+ case "bool":
+ row[i] = col.(bool)
+ case "int16":
+ row[i] = int16(col.(float64))
+ case "int32":
+ row[i] = int32(col.(float64))
+ case "int64":
+ row[i] = int64(col.(float64))
+ case "float":
+ row[i] = float32(col.(float64))
+ case "double":
+ row[i] = float64(col.(float64))
+ case "string":
+ row[i] = col.(string)
+ default:
+ return nil, fmt.Errorf("unknown type %s at index %d", r.Data.Schema[i], i)
+ }
+ }
+ }
+ }
+
+ return &r, nil
+}
+
+func (c *conn) query(ctx context.Context, sql string, parameters ...interfaces.Value) (rows interfaces.Rows, err error) {
+ if c.closed {
+ return nil, interfaces.ErrBadConn
+ }
+
+ reqBody, err := parseReqToJson(string(c.mode), sql, parameters...)
+ if err != nil {
+ return nil, err
+ }
+
+ req, err := http.NewRequestWithContext(
+ ctx,
+ "POST",
+ fmt.Sprintf("http://%s/dbs/%s", c.host, c.db),
+ bytes.NewBuffer(reqBody),
+ )
+ if err != nil {
+ return nil, err
+ }
+
+ resp, err := http.DefaultClient.Do(req)
+ if err != nil {
+ return nil, err
+	}
+	defer resp.Body.Close()
+
+ if r, err := parseRespFromJson(resp.Body); err != nil {
+ return nil, err
+ } else if r.Code != 0 {
+ return nil, fmt.Errorf("conn error: %s", r.Msg)
+ } else if r.Data != nil {
+ return &respDataRows{*r.Data, 0}, nil
+ }
+
+ return nil, nil
+}
+
+// Prepare implements driver.Conn.
+func (c *conn) Prepare(query string) (interfaces.Stmt, error) {
+ return nil, errors.New("Prepare is not implemented, use QueryContext instead")
+}
+
+// Close implements driver.Conn.
+func (c *conn) Close() error {
+ c.closed = true
+ return nil
+}
+
+// Begin implements driver.Conn.
+func (c *conn) Begin() (interfaces.Tx, error) {
+ return nil, errors.New("begin not implemented")
+}
+
+// Ping implements driver.Pinger.
+func (c *conn) Ping(ctx context.Context) error {
+ _, err := c.query(ctx, "SELECT 1")
+ return err
+}
+
+// ResetSession implements driver.SessionResetter.
+//
+// Before a connection is reused for another query, ResetSession is called.
+func (c *conn) ResetSession(ctx context.Context) error {
+ return nil
+}
+
+// IsValid implements driver.Validator.
+//
+// Before a connection is returned to the connection pool after use, IsValid is called.
+func (c *conn) IsValid() bool {
+ return !c.closed
+}
+
+// ExecContext implements driver.ExecerContext.
+func (c *conn) ExecContext(ctx context.Context, query string, args []interfaces.NamedValue) (interfaces.Result, error) {
+ parameters := make([]interfaces.Value, len(args))
+ for i, arg := range args {
+ parameters[i] = arg.Value
+ }
+ if _, err := c.query(ctx, query, parameters...); err != nil {
+ return nil, err
+ }
+ return interfaces.ResultNoRows, nil
+}
+
+// QueryContext implements driver.QueryerContext.
+func (c *conn) QueryContext(ctx context.Context, query string, args []interfaces.NamedValue) (interfaces.Rows, error) {
+ parameters := make([]interfaces.Value, len(args))
+ for i, arg := range args {
+ parameters[i] = arg.Value
+ }
+ return c.query(ctx, query, parameters...)
+}
diff --git a/go/conn_test.go b/go/conn_test.go
new file mode 100644
index 00000000000..b2508820857
--- /dev/null
+++ b/go/conn_test.go
@@ -0,0 +1,109 @@
+package openmldb
+
+import (
+ interfaces "database/sql/driver"
+ "strings"
+ "testing"
+
+ "github.com/stretchr/testify/assert"
+)
+
+func TestParseReqToJson(t *testing.T) {
+ for _, tc := range []struct {
+ mode string
+ sql string
+ input []interfaces.Value
+ expect string
+ }{
+ {
+ "offsync",
+ "SELECT 1;",
+ nil,
+ `{
+ "mode": "offsync",
+ "sql": "SELECT 1;"
+ }`,
+ },
+ {
+ "offsync",
+ "SELECT c1, c2 FROM demo WHERE c1 = ? AND c2 = ?;",
+ []interfaces.Value{int32(1), "bb"},
+ `{
+ "mode": "offsync",
+ "sql": "SELECT c1, c2 FROM demo WHERE c1 = ? AND c2 = ?;",
+ "input": {
+ "schema": ["int32", "string"],
+ "data": [1, "bb"]
+ }
+ }`,
+ },
+ } {
+ actual, err := parseReqToJson(tc.mode, tc.sql, tc.input...)
+ assert.NoError(t, err)
+ assert.JSONEq(t, tc.expect, string(actual))
+ }
+}
+
+func TestParseRespFromJson(t *testing.T) {
+ for _, tc := range []struct {
+ resp string
+ expect queryResp
+ }{
+ {
+ `{
+ "code": 0,
+ "msg": "ok"
+ }`,
+ queryResp{
+ Code: 0,
+ Msg: "ok",
+ Data: nil,
+ },
+ },
+ {
+ `{
+ "code": 0,
+ "msg": "ok",
+ "data": {
+ "schema": ["Int32", "String"],
+ "data": [[1, "bb"], [2, "bb"]]
+ }
+ }`,
+ queryResp{
+ Code: 0,
+ Msg: "ok",
+ Data: &respData{
+ Schema: []string{"Int32", "String"},
+ Data: [][]interfaces.Value{
+ {int32(1), "bb"},
+ {int32(2), "bb"},
+ },
+ },
+ },
+ },
+ {
+ `{
+ "code": 0,
+ "msg": "ok",
+ "data": {
+ "schema": ["Bool", "Int16", "Int32", "Int64", "Float", "Double", "String"],
+ "data": [[true, 1, 1, 1, 1, 1, "bb"]]
+ }
+ }`,
+ queryResp{
+ Code: 0,
+ Msg: "ok",
+ Data: &respData{
+ Schema: []string{"Bool", "Int16", "Int32", "Int64", "Float", "Double", "String"},
+ Data: [][]interfaces.Value{
+ {true, int16(1), int32(1), int64(1), float32(1), float64(1), "bb"},
+ },
+ },
+ },
+ },
+ } {
+ actual, err := parseRespFromJson(strings.NewReader(tc.resp))
+ assert.NoError(t, err)
+ assert.Equal(t, &tc.expect, actual)
+ }
+}
diff --git a/go/driver.go b/go/driver.go
new file mode 100644
index 00000000000..8cf205b92f3
--- /dev/null
+++ b/go/driver.go
@@ -0,0 +1,92 @@
+package openmldb
+
+import (
+ "context"
+ "database/sql"
+ interfaces "database/sql/driver"
+ "fmt"
+ "net/url"
+ "strings"
+)
+
+func init() {
+ sql.Register("openmldb", &driver{})
+}
+
+var (
+ _ interfaces.Driver = (*driver)(nil)
+ _ interfaces.DriverContext = (*driver)(nil)
+
+ _ interfaces.Connector = (*connecter)(nil)
+)
+
+type driver struct{}
+
+func parseDsn(dsn string) (host string, db string, mode queryMode, err error) {
+ u, err := url.Parse(dsn)
+ if err != nil {
+		return "", "", "", fmt.Errorf("invalid URL: %w", err)
+ }
+
+ if u.Scheme != "openmldb" && u.Scheme != "" {
+		return "", "", "", fmt.Errorf("invalid URL: unknown scheme '%s'", u.Scheme)
+ }
+
+ p := strings.Split(strings.TrimLeft(u.Path, "/"), "/")
+
+ mode = ModeOffsync
+ if u.Query().Has("mode") {
+ m := u.Query().Get("mode")
+ if _, ok := allQueryMode[m]; !ok {
+ return "", "", "", fmt.Errorf("invalid mode: %s", m)
+ }
+ mode = allQueryMode[m]
+ }
+
+	if len(p) == 0 || p[0] == "" {
+ return "", "", "", fmt.Errorf("invalid URL: DB name not found")
+ }
+
+ return u.Host, p[0], mode, nil
+}
+
+// Open implements driver.Driver.
+func (driver) Open(name string) (interfaces.Conn, error) {
+ // name should be the URL of the api server, e.g. openmldb://localhost:6543/db
+ host, db, mode, err := parseDsn(name)
+ if err != nil {
+ return nil, err
+ }
+
+ return &conn{host: host, db: db, mode: mode, closed: false}, nil
+}
+
+type connecter struct {
+ host string
+ db string
+ mode queryMode
+}
+
+// Connect implements driver.Connector.
+func (c connecter) Connect(ctx context.Context) (interfaces.Conn, error) {
+ conn := &conn{host: c.host, db: c.db, mode: c.mode, closed: false}
+ if err := conn.Ping(ctx); err != nil {
+ return nil, err
+ }
+ return conn, nil
+}
+
+// Driver implements driver.Connector.
+func (connecter) Driver() interfaces.Driver {
+ return &driver{}
+}
+
+// OpenConnector implements driver.DriverContext.
+func (driver) OpenConnector(name string) (interfaces.Connector, error) {
+ host, db, mode, err := parseDsn(name)
+ if err != nil {
+ return nil, err
+ }
+
+ return &connecter{host, db, mode}, nil
+}
diff --git a/go/driver_test.go b/go/driver_test.go
new file mode 100644
index 00000000000..00cca24a7c8
--- /dev/null
+++ b/go/driver_test.go
@@ -0,0 +1,33 @@
+package openmldb
+
+import (
+ "errors"
+ "testing"
+
+ "github.com/stretchr/testify/assert"
+)
+
+func Test_parseDsn(t *testing.T) {
+ for _, tc := range []struct {
+ dsn string
+ host string
+ db string
+ mode queryMode
+ err error
+ }{
+ {"openmldb://127.0.0.1:8080/test_db", "127.0.0.1:8080", "test_db", ModeOffsync, nil},
+ {"openmldb://127.0.0.1:8080/test_db?mode=online", "127.0.0.1:8080", "test_db", ModeOnline, nil},
+ {"openmldb://127.0.0.1:8080/test_db?mode=offasync", "127.0.0.1:8080", "test_db", ModeOffasync, nil},
+ {"openmldb://127.0.0.1:8080/test_db?mode=unknown", "127.0.0.1:8080", "test_db", "", errors.New("")},
+ } {
+ host, db, mode, err := parseDsn(tc.dsn)
+ if tc.err == nil {
+ assert.NoError(t, err)
+ assert.Equal(t, host, tc.host)
+ assert.Equal(t, db, tc.db)
+ assert.Equal(t, mode, tc.mode)
+ } else {
+ assert.Error(t, err)
+ }
+ }
+}
diff --git a/go/go.mod b/go/go.mod
new file mode 100644
index 00000000000..90e3c9e8d43
--- /dev/null
+++ b/go/go.mod
@@ -0,0 +1,11 @@
+module github.com/4paradigm/OpenMLDB/go
+
+go 1.18
+
+require github.com/stretchr/testify v1.8.0
+
+require (
+ github.com/davecgh/go-spew v1.1.1 // indirect
+ github.com/pmezard/go-difflib v1.0.0 // indirect
+ gopkg.in/yaml.v3 v3.0.1 // indirect
+)
diff --git a/go/go.sum b/go/go.sum
new file mode 100644
index 00000000000..b410979a437
--- /dev/null
+++ b/go/go.sum
@@ -0,0 +1,14 @@
+github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
+github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
+github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
+github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
+github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
+github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
+github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw=
+github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
+github.com/stretchr/testify v1.8.0 h1:pSgiaMZlXftHpm5L7V1+rVB+AZJydKsMxsQBIJw4PKk=
+github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
+gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
+gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
+gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
+gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
diff --git a/go/go_sdk_test.go b/go/go_sdk_test.go
new file mode 100644
index 00000000000..b98e1858a62
--- /dev/null
+++ b/go/go_sdk_test.go
@@ -0,0 +1,91 @@
+package openmldb_test
+
+import (
+ "context"
+ "database/sql"
+ "fmt"
+ "os"
+ "testing"
+
+ // register openmldb driver
+ _ "github.com/4paradigm/OpenMLDB/go"
+ "github.com/stretchr/testify/assert"
+)
+
+var (
+ OPENMLDB_APISERVER_HOST = os.Getenv("OPENMLDB_APISERVER_HOST")
+ OPENMLDB_APISERVER_PORT = os.Getenv("OPENMLDB_APISERVER_PORT")
+)
+
+func Test_driver(t *testing.T) {
+ db, err := sql.Open("openmldb", fmt.Sprintf("openmldb://%s:%s/test_db", OPENMLDB_APISERVER_HOST, OPENMLDB_APISERVER_PORT))
+ if err != nil {
+ t.Errorf("fail to open connect: %s", err)
+ }
+
+ defer func() {
+ if err := db.Close(); err != nil {
+ t.Errorf("fail to close connection: %s", err)
+ }
+ }()
+
+ ctx := context.Background()
+ assert.NoError(t, db.PingContext(ctx), "fail to ping connect")
+
+ {
+ createTableStmt := "CREATE TABLE demo(c1 int, c2 string);"
+ _, err := db.ExecContext(ctx, createTableStmt)
+ assert.NoError(t, err, "fail to exec %s", createTableStmt)
+ }
+ {
+ insertValueStmt := `INSERT INTO demo VALUES (1, "bb"), (2, "bb");`
+ _, err := db.ExecContext(ctx, insertValueStmt)
+ assert.NoError(t, err, "fail to exec %s", insertValueStmt)
+ }
+
+ t.Run("query", func(t *testing.T) {
+ queryStmt := `SELECT c1, c2 FROM demo`
+ rows, err := db.QueryContext(ctx, queryStmt)
+ assert.NoError(t, err, "fail to query %s", queryStmt)
+
+ var demo struct {
+ c1 int32
+ c2 string
+ }
+ {
+ assert.True(t, rows.Next())
+ assert.NoError(t, rows.Scan(&demo.c1, &demo.c2))
+ assert.Equal(t, struct {
+ c1 int32
+ c2 string
+ }{1, "bb"}, demo)
+ }
+ {
+ assert.True(t, rows.Next())
+ assert.NoError(t, rows.Scan(&demo.c1, &demo.c2))
+ assert.Equal(t, struct {
+ c1 int32
+ c2 string
+ }{2, "bb"}, demo)
+ }
+ })
+
+ t.Run("query with parameter", func(t *testing.T) {
+ parameterQueryStmt := `SELECT c1, c2 FROM demo WHERE c2 = ? AND c1 = ?;`
+ rows, err := db.QueryContext(ctx, parameterQueryStmt, "bb", 1)
+ assert.NoError(t, err, "fail to query %s", parameterQueryStmt)
+
+ var demo struct {
+ c1 int32
+ c2 string
+ }
+ {
+ assert.True(t, rows.Next())
+ assert.NoError(t, rows.Scan(&demo.c1, &demo.c2))
+ assert.Equal(t, struct {
+ c1 int32
+ c2 string
+ }{1, "bb"}, demo)
+ }
+ })
+}
diff --git a/hybridse/.gitignore b/hybridse/.gitignore
index 366f6e645a9..029c7da8f2f 100644
--- a/hybridse/.gitignore
+++ b/hybridse/.gitignore
@@ -18,6 +18,9 @@ src/hyhridse_version.h
# ignore docgen
tools/documentation/udf_doxygen/html
tools/documentation/udf_doxygen/udfs
+tools/documentation/udf_doxygen/udf
+tools/documentation/udf_doxygen/udfgen
+tools/documentation/udf_doxygen/xml
style.xml
hybridse_version.h
intermediate_cicd_artifact_.tar.gz
@@ -35,6 +38,3 @@ tools/documentation/java_api/doxybook2_home
tools/documentation/java_api/html
tools/documentation/java_api/xml
tools/documentation/java_api/java
-tools/documentation/udf_doxygen/udf
-tools/documentation/udf_doxygen/doxybook2_home
-tools/documentation/udf_doxygen/xml
diff --git a/hybridse/examples/toydb/src/CMakeLists.txt b/hybridse/examples/toydb/src/CMakeLists.txt
index 8d9cc633c04..2cd7f4ed355 100644
--- a/hybridse/examples/toydb/src/CMakeLists.txt
+++ b/hybridse/examples/toydb/src/CMakeLists.txt
@@ -70,7 +70,7 @@ if (TESTING_ENABLE AND EXAMPLES_TESTING_ENABLE)
--gtest_output=xml:${CMAKE_CURRENT_BINARY_DIR}/${TEST_TARGET_DIR}/${TEST_TARGET_NAME}.xml)
target_link_libraries(${TEST_TARGET_NAME}
toydb_lib toydb_sdk hybridse_flags sqlite3
- ${GTEST_LIBRARIES} benchmark ${yaml_libs} ${BRPC_LIBS} ${OS_LIBS} ${g_libs})
+ ${GTEST_LIBRARIES} benchmark ${yaml_libs} ${OS_LIBS} ${g_libs})
if (TESTING_ENABLE_STRIP)
strip_exe(${TEST_TARGET_NAME})
endif()
diff --git a/hybridse/examples/toydb/src/sdk/tablet_sdk.cc b/hybridse/examples/toydb/src/sdk/tablet_sdk.cc
index 30d9cce2d94..8110c973dee 100644
--- a/hybridse/examples/toydb/src/sdk/tablet_sdk.cc
+++ b/hybridse/examples/toydb/src/sdk/tablet_sdk.cc
@@ -297,7 +297,7 @@ void TabletSdkImpl::BuildInsertRequest(const std::string& db,
request->set_db(db);
std::unordered_set<std::string> column_set;
- for (size_t i = 0; i < schema.columns().size(); i++) {
+ for (int i = 0; i < schema.columns().size(); i++) {
column_set.insert(schema.columns(i).name());
}
std::map column_value_map;
diff --git a/hybridse/examples/toydb/src/tablet/tablet_catalog.h b/hybridse/examples/toydb/src/tablet/tablet_catalog.h
index 3ea97d325b6..08be85f2568 100644
--- a/hybridse/examples/toydb/src/tablet/tablet_catalog.h
+++ b/hybridse/examples/toydb/src/tablet/tablet_catalog.h
@@ -221,21 +221,18 @@ class TabletCatalog : public vm::Catalog {
bool AddTable(std::shared_ptr table);
- std::shared_ptr GetDatabase(const std::string& db);
+ std::shared_ptr GetDatabase(const std::string& db) override;
+
+ std::shared_ptr GetTable(const std::string& db, const std::string& table_name) override;
- std::shared_ptr GetTable(const std::string& db,
- const std::string& table_name);
bool IndexSupport() override;
- std::vector GetAggrTables(
- const std::string& base_db,
- const std::string& base_table,
- const std::string& aggr_func,
- const std::string& aggr_col,
- const std::string& partition_cols,
- const std::string& order_col) override {
- vm::AggrTableInfo info = {"aggr_" + base_table, "aggr_db", base_db, base_table,
- aggr_func, aggr_col, partition_cols, order_col, "1000"};
+    std::vector<vm::AggrTableInfo> GetAggrTables(const std::string& base_db, const std::string& base_table,
+ const std::string& aggr_func, const std::string& aggr_col,
+ const std::string& partition_cols, const std::string& order_col,
+ const std::string& filter_col) override {
+ vm::AggrTableInfo info = {"aggr_" + base_table, "aggr_db", base_db, base_table, aggr_func, aggr_col,
+ partition_cols, order_col, "1000", filter_col};
return {info};
}
diff --git a/hybridse/include/case/sql_case.h b/hybridse/include/case/sql_case.h
index 2bfefbb476c..cec6f6d6330 100644
--- a/hybridse/include/case/sql_case.h
+++ b/hybridse/include/case/sql_case.h
@@ -16,14 +16,18 @@
#ifndef HYBRIDSE_INCLUDE_CASE_SQL_CASE_H_
#define HYBRIDSE_INCLUDE_CASE_SQL_CASE_H_
-#include
-#include
-#include
+
#include
#include
#include
+
+#include "absl/status/statusor.h"
#include "codec/fe_row_codec.h"
#include "proto/fe_type.pb.h"
+#include "vm/catalog.h"
+#include "yaml-cpp/node/node.h"
+#include "yaml-cpp/yaml.h"
+
namespace hybridse {
namespace sqlcase {
class SqlCase {
@@ -193,14 +197,10 @@ class SqlCase {
static std::string GenRand(const std::string& prefix) {
return prefix + std::to_string(rand() % 10000000 + 1); // NOLINT
}
- bool BuildCreateSpSqlFromInput(int32_t input_idx,
- const std::string& select_sql,
- const std::set& common_idx,
- std::string* create_sp_sql);
- bool BuildCreateSpSqlFromSchema(const type::TableDef& table,
- const std::string& select_sql,
- const std::set& common_idx,
- std::string* create_sql);
+ absl::StatusOr BuildCreateSpSqlFromInput(int32_t input_idx, absl::string_view sql,
+ const std::set& common_idx);
+ absl::StatusOr BuildCreateSpSqlFromSchema(const type::TableDef& table, absl::string_view select_sql,
+ const std::set& common_idx);
friend std::ostream& operator<<(std::ostream& output, const SqlCase& thiz);
static bool IS_PERF() {
diff --git a/hybridse/include/codec/fe_row_codec.h b/hybridse/include/codec/fe_row_codec.h
index 81757a21430..1e0e5b1badc 100644
--- a/hybridse/include/codec/fe_row_codec.h
+++ b/hybridse/include/codec/fe_row_codec.h
@@ -229,19 +229,19 @@ class RowFormat {
class MultiSlicesRowFormat : public RowFormat {
public:
explicit MultiSlicesRowFormat(const Schema* schema) {
- slice_formats_.emplace_back(SliceFormat(schema));
- }
-
- ~MultiSlicesRowFormat() {
- slice_formats_.clear();
+ slice_formats_.emplace_back(schema);
}
explicit MultiSlicesRowFormat(const std::vector& schemas) {
for (auto schema : schemas) {
- slice_formats_.emplace_back(SliceFormat(schema));
+ slice_formats_.emplace_back(schema);
}
}
+ ~MultiSlicesRowFormat() override {
+ slice_formats_.clear();
+ }
+
bool GetStringColumnInfo(size_t schema_idx, size_t idx, StringColInfo* res) const override {
return slice_formats_[schema_idx].GetStringColumnInfo(idx, res);
}
@@ -265,13 +265,6 @@ class SingleSliceRowFormat : public RowFormat {
offsets_.emplace_back(0);
}
- ~SingleSliceRowFormat() {
- offsets_.clear();
- if (slice_format_) {
- delete slice_format_;
- }
- }
-
explicit SingleSliceRowFormat(const std::vector& schemas) {
int offset = 0;
for (auto schema : schemas) {
@@ -284,6 +277,13 @@ class SingleSliceRowFormat : public RowFormat {
slice_format_ = new SliceFormat(&merged_schema_);
}
+ ~SingleSliceRowFormat() override {
+ offsets_.clear();
+ if (slice_format_) {
+ delete slice_format_;
+ }
+ }
+
bool GetStringColumnInfo(size_t schema_idx, size_t idx, StringColInfo* res) const override {
return slice_format_->GetStringColumnInfo(offsets_[schema_idx] + idx, res);
}
diff --git a/hybridse/include/node/node_enum.h b/hybridse/include/node/node_enum.h
index 9b43c0ca4c0..b00933c5a79 100644
--- a/hybridse/include/node/node_enum.h
+++ b/hybridse/include/node/node_enum.h
@@ -177,8 +177,8 @@ enum FnOperator {
kFnOpAdd, // "+"
kFnOpMinus, // "-"
kFnOpMulti, // "*"
- kFnOpDiv, // "/"
- kFnOpFDiv, // "div", float division
+ kFnOpDiv, // "DIV", integer division
+ kFnOpFDiv, // "/", float division
kFnOpMod, // "%"
kFnOpAnd, // "AND", logical
kFnOpOr, // "OR" , logical
@@ -193,7 +193,8 @@ enum FnOperator {
kFnOpDot, // "."
kFnOpAt, // "[]"
kFnOpLike, // "LIKE"
- kFnOpILike, // "ILIKE"
+ kFnOpILike, // "ILIKE"
+ kFnOpRLike, // "RLIKE"
kFnOpIn, // "IN"
kFnOpBracket, // "()"
kFnOpIsNull, // "is_null"
diff --git a/hybridse/include/node/node_manager.h b/hybridse/include/node/node_manager.h
index e16282dffaf..d9f7a7a89bb 100644
--- a/hybridse/include/node/node_manager.h
+++ b/hybridse/include/node/node_manager.h
@@ -261,8 +261,8 @@ class NodeManager {
DeployPlanNode *MakeDeployPlanNode(const std::string &name, const SqlNode *stmt, const std::string &stmt_str,
const std::shared_ptr options, bool if_not_exist);
- // create a delete job node
- DeleteNode* MakeDeleteNode(DeleteTarget target, std::string_view job_id);
+ DeleteNode* MakeDeleteNode(DeleteTarget target, std::string_view job_id,
+ const std::string& db_name, const std::string& table, node::ExprNode* where_expr);
DeletePlanNode* MakeDeletePlanNode(const DeleteNode* node);
LoadDataNode *MakeLoadDataNode(const std::string &file_name, const std::string &db, const std::string &table,
@@ -384,7 +384,7 @@ class NodeManager {
SqlNode *MakePartitionNumNode(int num);
- SqlNode *MakeDistributionsNode(SqlNodeList *distribution_list);
+ SqlNode *MakeDistributionsNode(const NodePointVector& distribution_list);
SqlNode *MakeCreateProcedureNode(const std::string &sp_name,
SqlNodeList *input_parameter_list,
diff --git a/hybridse/include/node/plan_node.h b/hybridse/include/node/plan_node.h
index 94f2942d8c4..d63797e5c33 100644
--- a/hybridse/include/node/plan_node.h
+++ b/hybridse/include/node/plan_node.h
@@ -476,8 +476,10 @@ class CmdPlanNode : public LeafPlanNode {
class DeletePlanNode : public LeafPlanNode {
public:
- DeletePlanNode(DeleteTarget target, std::string job_id)
- : LeafPlanNode(kPlanTypeDelete), target_(target), job_id_(job_id) {}
+ DeletePlanNode(DeleteTarget target, std::string job_id,
+ const std::string& db_name, const std::string& table_name, const node::ExprNode* expression)
+ : LeafPlanNode(kPlanTypeDelete), target_(target), job_id_(job_id),
+ db_name_(db_name), table_name_(table_name), condition_(expression) {}
~DeletePlanNode() {}
bool Equals(const PlanNode* that) const override;
@@ -485,10 +487,16 @@ class DeletePlanNode : public LeafPlanNode {
const DeleteTarget GetTarget() const { return target_; }
const std::string& GetJobId() const { return job_id_; }
+ const std::string& GetDatabase() const { return db_name_; }
+ const std::string& GetTableName() const { return table_name_; }
+ const ExprNode* GetCondition() const { return condition_; }
private:
const DeleteTarget target_;
const std::string job_id_;
+ const std::string db_name_;
+ const std::string table_name_;
+ const ExprNode *condition_;
};
class DeployPlanNode : public LeafPlanNode {
diff --git a/hybridse/include/node/sql_node.h b/hybridse/include/node/sql_node.h
index 101e3a3991d..13ca86f40a7 100644
--- a/hybridse/include/node/sql_node.h
+++ b/hybridse/include/node/sql_node.h
@@ -24,6 +24,7 @@
#include
#include
+#include "absl/status/statusor.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/string_view.h"
#include "boost/algorithm/string.hpp"
@@ -125,6 +126,8 @@ inline const std::string ExprOpTypeName(const FnOperator &op) {
return "LIKE";
case kFnOpILike:
return "ILIKE";
+ case kFnOpRLike:
+ return "RLIKE";
case kFnOpIn:
return "IN";
case kFnOpBracket:
@@ -562,6 +565,9 @@ class ExprNode : public SqlNode {
static Status LikeTypeAccept(node::NodeManager* nm, const TypeNode* lhs, const TypeNode* rhs,
const TypeNode** output);
+ static Status RlikeTypeAccept(node::NodeManager* nm, const TypeNode* lhs, const TypeNode* rhs,
+ const TypeNode** output);
+
private:
const TypeNode *output_type_ = nullptr;
bool nullable_ = true;
@@ -1068,12 +1074,41 @@ class ConstNode : public ExprNode {
return std::to_string(val_.vdouble);
case kVarchar:
return std::string(val_.vstr);
+ case kBool:
+ return val_.vint == 1 ? "true" : "false";
default: {
return "";
}
}
}
+    // Including 'udf/literal_traits.h' for Nullable would lead to a recursive include,
+    // so std::optional is used to carry the nullable info instead.
+    template <typename T>
+    absl::StatusOr<std::optional<T>> GetAs() const {
+        if (IsNull()) {
+            return std::nullopt;
+        }
+
+        if constexpr (std::is_same_v<T, bool>) {
+            return GetBool();
+        } else if constexpr (std::is_same_v<T, int16_t>) {
+            return GetAsInt16();
+        } else if constexpr (std::is_same_v<T, int32_t>) {
+            return GetAsInt32();
+        } else if constexpr (std::is_same_v<T, int64_t>) {
+            return GetAsInt64();
+        } else if constexpr (std::is_same_v<T, float>) {
+            return GetAsFloat();
+        } else if constexpr (std::is_same_v<T, double>) {
+            return GetAsDouble();
+        } else if constexpr (std::is_same_v<T, std::string>) {
+            return GetAsString();
+        } else {
+ return absl::InvalidArgumentError("can't cast as T");
+ }
+ }
+
Status InferAttr(ExprAnalysisContext *ctx) override;
static ConstNode *CastFrom(ExprNode *node);
@@ -1618,7 +1653,7 @@ class ColumnRefNode : public ExprNode {
void SetRelationName(const std::string &relation_name) { relation_name_ = relation_name; }
- std::string GetColumnName() const { return column_name_; }
+ const std::string &GetColumnName() const { return column_name_; }
void SetColumnName(const std::string &column_name) { column_name_ = column_name; }
@@ -2029,31 +2064,40 @@ class CmdNode : public SqlNode {
};
enum class DeleteTarget {
- JOB
+ JOB = 1,
+ TABLE = 2,
};
std::string DeleteTargetString(DeleteTarget target);
class DeleteNode : public SqlNode {
public:
- explicit DeleteNode(DeleteTarget t, std::string job_id)
- : SqlNode(kDeleteStmt, 0, 0), target_(t), job_id_(job_id) {}
- ~DeleteNode() {}
+ DeleteNode(DeleteTarget t, std::string job_id,
+ const std::string& db_name, const std::string& table_name, const node::ExprNode* where_expr)
+ : SqlNode(kDeleteStmt, 0, 0), target_(t), job_id_(job_id),
+ db_name_(db_name), table_name_(table_name), condition_(where_expr) {}
+ ~DeleteNode() = default;
void Print(std::ostream &output, const std::string &org_tab) const override;
std::string GetTargetString() const;
const DeleteTarget GetTarget() const { return target_; }
const std::string& GetJobId() const { return job_id_; }
+ const std::string& GetTableName() const { return table_name_; }
+ const std::string& GetDbName() const { return db_name_; }
+ const ExprNode* GetCondition() const { return condition_; }
private:
const DeleteTarget target_;
const std::string job_id_;
+ const std::string db_name_;
+ const std::string table_name_;
+ const ExprNode *condition_;
};
class SelectIntoNode : public SqlNode {
public:
- explicit SelectIntoNode(const QueryNode *query, const std::string &query_str, const std::string &out,
- const std::shared_ptr&& options, const std::shared_ptr&& op2)
+ SelectIntoNode(const QueryNode *query, const std::string &query_str, const std::string &out,
+ const std::shared_ptr&& options, const std::shared_ptr&& op2)
: SqlNode(kSelectIntoStmt, 0, 0),
query_(query),
query_str_(query_str),
@@ -2697,17 +2741,17 @@ class PartitionNumNode : public SqlNode {
class DistributionsNode : public SqlNode {
public:
- explicit DistributionsNode(SqlNodeList *distribution_list)
+ explicit DistributionsNode(const NodePointVector& distribution_list)
: SqlNode(kDistributions, 0, 0), distribution_list_(distribution_list) {}
~DistributionsNode() {}
- const SqlNodeList *GetDistributionList() const { return distribution_list_; }
+ const NodePointVector& GetDistributionList() const { return distribution_list_; }
void Print(std::ostream &output, const std::string &org_tab) const;
private:
- SqlNodeList *distribution_list_;
+ NodePointVector distribution_list_;
};
class CreateSpStmt : public SqlNode {
diff --git a/hybridse/include/sdk/base.h b/hybridse/include/sdk/base.h
index 4e816766f51..a6d9e0f180c 100644
--- a/hybridse/include/sdk/base.h
+++ b/hybridse/include/sdk/base.h
@@ -18,10 +18,15 @@
#define HYBRIDSE_INCLUDE_SDK_BASE_H_
#include
+
#include
+#include
#include
-#include
#include
+#include
+
+#include "absl/strings/string_view.h"
+#include "sdk/base_schema.h"
namespace hybridse {
namespace sdk {
@@ -30,66 +35,13 @@ struct Status {
Status() : code(0), msg("ok") {}
Status(int status_code, const std::string& msg_str)
: code(status_code), msg(msg_str) {}
+ Status(int status_code, absl::string_view msg_str, absl::string_view trace)
+ : code(status_code), msg(msg_str), trace(trace) {}
bool IsOK() const { return code == 0; }
+
int code;
- std::string trace;
std::string msg;
-};
-
-enum DataType {
- kTypeBool = 0,
- kTypeInt16,
- kTypeInt32,
- kTypeInt64,
- kTypeFloat,
- kTypeDouble,
- kTypeString,
- kTypeDate,
- kTypeTimestamp,
- kTypeUnknow
-};
-
-inline const std::string DataTypeName(const DataType& type) {
- switch (type) {
- case kTypeBool:
- return "bool";
- case kTypeInt16:
- return "int16";
- case kTypeInt32:
- return "int32";
- case kTypeInt64:
- return "int64";
- case kTypeFloat:
- return "float";
- case kTypeDouble:
- return "double";
- case kTypeString:
- return "string";
- case kTypeTimestamp:
- return "timestamp";
- case kTypeDate:
- return "date";
- default:
- return "unknownType";
- }
-}
-
-class Schema {
- public:
- Schema() : empty() {}
- virtual ~Schema() {}
- virtual int32_t GetColumnCnt() const { return 0; }
- virtual const std::string& GetColumnName(uint32_t index) const {
- return empty;
- }
- virtual const DataType GetColumnType(uint32_t index) const {
- return kTypeUnknow;
- }
- virtual const bool IsColumnNotNull(uint32_t index) const { return false; }
- virtual const bool IsConstant(uint32_t index) const { return false; }
-
- private:
- std::string empty;
+ std::string trace;
};
class Table {
diff --git a/hybridse/include/sdk/base_schema.h b/hybridse/include/sdk/base_schema.h
new file mode 100644
index 00000000000..b9315156d5e
--- /dev/null
+++ b/hybridse/include/sdk/base_schema.h
@@ -0,0 +1,80 @@
+/*
+ * Copyright 2021 4Paradigm
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef HYBRIDSE_INCLUDE_SDK_BASE_SCHEMA_H_
+#define HYBRIDSE_INCLUDE_SDK_BASE_SCHEMA_H_
+
+#include
+
+#include
+
+namespace hybridse {
+namespace sdk {
+
+enum DataType {
+ kTypeBool = 0,
+ kTypeInt16,
+ kTypeInt32,
+ kTypeInt64,
+ kTypeFloat,
+ kTypeDouble,
+ kTypeString,
+ kTypeDate,
+ kTypeTimestamp,
+ kTypeUnknow
+};
+
+inline const std::string DataTypeName(const DataType& type) {
+ switch (type) {
+ case kTypeBool:
+ return "bool";
+ case kTypeInt16:
+ return "int16";
+ case kTypeInt32:
+ return "int32";
+ case kTypeInt64:
+ return "int64";
+ case kTypeFloat:
+ return "float";
+ case kTypeDouble:
+ return "double";
+ case kTypeString:
+ return "string";
+ case kTypeTimestamp:
+ return "timestamp";
+ case kTypeDate:
+ return "date";
+ default:
+ return "unknownType";
+ }
+}
+
+class Schema {
+ public:
+ Schema() : empty() {}
+ virtual ~Schema() {}
+ virtual int32_t GetColumnCnt() const { return 0; }
+ virtual const std::string& GetColumnName(uint32_t index) const { return empty; }
+ virtual const DataType GetColumnType(uint32_t index) const { return kTypeUnknow; }
+ virtual const bool IsColumnNotNull(uint32_t index) const { return false; }
+ virtual const bool IsConstant(uint32_t index) const { return false; }
+
+ private:
+ std::string empty;
+};
+} // namespace sdk
+} // namespace hybridse
+#endif // HYBRIDSE_INCLUDE_SDK_BASE_SCHEMA_H_
diff --git a/hybridse/include/sdk/result_set.h b/hybridse/include/sdk/result_set.h
index 7551ac6b1d4..c36d4bb2f0d 100644
--- a/hybridse/include/sdk/result_set.h
+++ b/hybridse/include/sdk/result_set.h
@@ -21,7 +21,7 @@
#include
-#include "sdk/base.h"
+#include "sdk/base_schema.h"
namespace hybridse {
namespace sdk {
@@ -48,7 +48,7 @@ class ResultSet {
return val;
}
- const bool GetAsString(uint32_t idx, std::string& val) { // NOLINT
+ virtual const bool GetAsString(uint32_t idx, std::string& val) { // NOLINT
if (nullptr == GetSchema()) {
return false;
}
diff --git a/hybridse/include/vm/catalog.h b/hybridse/include/vm/catalog.h
index 7980fdbd5f0..30e68316606 100644
--- a/hybridse/include/vm/catalog.h
+++ b/hybridse/include/vm/catalog.h
@@ -471,6 +471,7 @@ struct AggrTableInfo {
std::string partition_cols;
std::string order_by_col;
std::string bucket_size;
+ std::string filter_col;
bool operator==(const AggrTableInfo& rhs) const {
return aggr_table == rhs.aggr_table &&
@@ -481,7 +482,8 @@ struct AggrTableInfo {
aggr_col == rhs.aggr_col &&
partition_cols == rhs.partition_cols &&
order_by_col == rhs.order_by_col &&
- bucket_size == rhs.bucket_size;
+ bucket_size == rhs.bucket_size &&
+ filter_col == rhs.filter_col;
}
};
@@ -514,13 +516,10 @@ class Catalog {
return nullptr;
}
- virtual std::vector GetAggrTables(
- const std::string& base_db,
- const std::string& base_table,
- const std::string& aggr_func,
- const std::string& aggr_col,
- const std::string& partition_cols,
- const std::string& order_col) {
+    virtual std::vector<AggrTableInfo> GetAggrTables(const std::string& base_db, const std::string& base_table,
+ const std::string& aggr_func, const std::string& aggr_col,
+ const std::string& partition_cols, const std::string& order_col,
+ const std::string& filter_col) {
+        return std::vector<AggrTableInfo>();
}
};
diff --git a/hybridse/include/vm/mem_catalog.h b/hybridse/include/vm/mem_catalog.h
index b393ed861ec..ecffedb03c7 100644
--- a/hybridse/include/vm/mem_catalog.h
+++ b/hybridse/include/vm/mem_catalog.h
@@ -21,6 +21,7 @@
#include
#include