Merge branch 'main' into yufan/dashboard-frontend

fuyufjh authored Feb 26, 2024
2 parents 8d96ac2 + d50b4cb commit ae7d7ba
Showing 253 changed files with 4,597 additions and 1,820 deletions.
2 changes: 2 additions & 0 deletions Cargo.lock

Some generated files are not rendered by default.

4 changes: 2 additions & 2 deletions Makefile.toml
@@ -757,10 +757,10 @@ tmux list-windows -t risedev -F "#{window_name} #{pane_id}" \
if [[ -n $(tmux list-windows -t risedev | grep kafka) ]];
then
echo "kill kafka"
kill_kafka
kill_kafka || true
echo "kill zookeeper"
kill_zookeeper
kill_zookeeper || true
# Kill their tmux sessions
tmux list-windows -t risedev -F "#{pane_id}" | xargs -I {} tmux send-keys -t {} C-c C-d
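
Why the "|| true" guards: the kill helpers exit non-zero when there is nothing to kill, and under fail-fast shell semantics that would abort the rest of the cleanup. A minimal bash sketch of the failure mode, assuming "set -e" and a hypothetical kill_kafka helper:

    set -e                              # fail-fast: any non-zero exit aborts the script
    kill_kafka() { pkill -f kafka; }    # hypothetical helper; pkill exits non-zero if nothing matched
    kill_kafka || true                  # "|| true" swallows the failure so cleanup continues
    echo "cleanup continues"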
2 changes: 1 addition & 1 deletion README.md
@@ -72,7 +72,7 @@ Don’t have Docker? Learn how to install RisingWave on Mac, Ubuntu, and other e

## Production deployments

For **single-node deployment**, please refer to [Docker Compose](https://docs.risingwave.com/docs/current/risingwave-trial/?method=docker-compose).
For **single-node deployment**, please refer to [Docker Compose](https://docs.risingwave.com/docs/current/risingwave-docker-compose/).

For **distributed deployment**, please refer to [Kubernetes with Helm](https://docs.risingwave.com/docs/current/risingwave-k8s-helm/) or [Kubernetes with Operator](https://docs.risingwave.com/docs/current/risingwave-kubernetes/).

28 changes: 18 additions & 10 deletions backwards-compat-tests/scripts/utils.sh
@@ -103,19 +103,21 @@ insert_json_kafka() {
local JSON=$1
echo "$JSON" | "$KAFKA_PATH"/bin/kafka-console-producer.sh \
--topic backwards_compat_test_kafka_source \
--bootstrap-server localhost:29092
--bootstrap-server localhost:29092 \
--property "parse.key=true" \
--property "key.separator=,"
}

seed_json_kafka() {
insert_json_kafka '{"timestamp": "2023-07-28 07:11:00", "user_id": 1, "page_id": 1, "action": "gtrgretrg"}'
insert_json_kafka '{"timestamp": "2023-07-28 07:11:00", "user_id": 2, "page_id": 1, "action": "fsdfgerrg"}'
insert_json_kafka '{"timestamp": "2023-07-28 07:11:00", "user_id": 3, "page_id": 1, "action": "sdfergtth"}'
insert_json_kafka '{"timestamp": "2023-07-28 06:54:00", "user_id": 4, "page_id": 2, "action": "erwerhghj"}'
insert_json_kafka '{"timestamp": "2023-07-28 06:54:00", "user_id": 5, "page_id": 2, "action": "kiku7ikkk"}'
insert_json_kafka '{"timestamp": "2023-07-28 06:54:00", "user_id": 6, "page_id": 3, "action": "6786745ge"}'
insert_json_kafka '{"timestamp": "2023-07-28 06:54:00", "user_id": 7, "page_id": 3, "action": "fgbgfnyyy"}'
insert_json_kafka '{"timestamp": "2023-07-28 06:54:00", "user_id": 8, "page_id": 4, "action": "werwerwwe"}'
insert_json_kafka '{"timestamp": "2023-07-28 06:54:00", "user_id": 9, "page_id": 4, "action": "yjtyjtyyy"}'
insert_json_kafka '{"user_id": 1},{"timestamp": "2023-07-28 07:11:00", "user_id": 1, "page_id": 1, "action": "gtrgretrg"}'
insert_json_kafka '{"user_id": 2},{"timestamp": "2023-07-28 07:11:00", "user_id": 2, "page_id": 1, "action": "fsdfgerrg"}'
insert_json_kafka '{"user_id": 3},{"timestamp": "2023-07-28 07:11:00", "user_id": 3, "page_id": 1, "action": "sdfergtth"}'
insert_json_kafka '{"user_id": 4},{"timestamp": "2023-07-28 06:54:00", "user_id": 4, "page_id": 2, "action": "erwerhghj"}'
insert_json_kafka '{"user_id": 5},{"timestamp": "2023-07-28 06:54:00", "user_id": 5, "page_id": 2, "action": "kiku7ikkk"}'
insert_json_kafka '{"user_id": 6},{"timestamp": "2023-07-28 06:54:00", "user_id": 6, "page_id": 3, "action": "6786745ge"}'
insert_json_kafka '{"user_id": 7},{"timestamp": "2023-07-28 06:54:00", "user_id": 7, "page_id": 3, "action": "fgbgfnyyy"}'
insert_json_kafka '{"user_id": 8},{"timestamp": "2023-07-28 06:54:00", "user_id": 8, "page_id": 4, "action": "werwerwwe"}'
insert_json_kafka '{"user_id": 9},{"timestamp": "2023-07-28 06:54:00", "user_id": 9, "page_id": 4, "action": "yjtyjtyyy"}'
}
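
# With parse.key=true and key.separator=",", kafka-console-producer splits each
# input line at the first separator: the part before it becomes the message key,
# the remainder the value. The first seed line above therefore produces
#   key:   {"user_id": 1}
#   value: {"timestamp": "2023-07-28 07:11:00", "user_id": 1, "page_id": 1, "action": "gtrgretrg"}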

# https://stackoverflow.com/a/4024263
@@ -225,6 +227,12 @@ seed_old_cluster() {
create_kafka_topic
seed_json_kafka
sqllogictest -d dev -h localhost -p 4566 "$TEST_DIR/kafka/seed.slt"
# Use the deprecated upsert syntax for versions at most 1.5.4
if version_le "$OLD_VERSION" "1.5.4" ; then
sqllogictest -d dev -h localhost -p 4566 "$TEST_DIR/kafka/upsert/deprecate_upsert.slt"
else
sqllogictest -d dev -h localhost -p 4566 "$TEST_DIR/kafka/upsert/include_key_as.slt"
fi

echo "--- KAFKA TEST: wait 5s for kafka to process data"
sleep 5
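
The version_le helper used above is defined elsewhere in utils.sh and is not part of this hunk. Following the Stack Overflow answer linked in the script, a sort -V based sketch of what such a helper typically looks like (an assumption, not necessarily the exact implementation):

    version_le() {
        # True (exit 0) when $1 is less than or equal to $2 in version order.
        [ "$(printf '%s\n' "$1" "$2" | sort -V | head -n1)" = "$1" ]
    }
    version_le "1.5.4" "1.5.4" && echo "use deprecated upsert syntax"   # 1.5.4 <= 1.5.4, so this prints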
16 changes: 16 additions & 0 deletions backwards-compat-tests/slt/kafka/upsert/deprecate_upsert.slt
@@ -0,0 +1,16 @@
statement ok
CREATE TABLE IF NOT EXISTS kafka_table
(
action varchar,
user_id integer,
obj_id integer,
name varchar,
page_id integer,
age integer
)
WITH (
connector='kafka',
topic='backwards_compat_test_kafka_source',
properties.bootstrap.server='localhost:29092',
scan.startup.mode='earliest',
) FORMAT UPSERT ENCODE JSON;
18 changes: 18 additions & 0 deletions backwards-compat-tests/slt/kafka/upsert/include_key_as.slt
@@ -0,0 +1,18 @@
statement ok
CREATE TABLE IF NOT EXISTS kafka_table
(
action varchar,
user_id integer,
obj_id integer,
name varchar,
page_id integer,
age integer,
primary key (_rw_key)
)
INCLUDE key as _rw_key
WITH (
connector='kafka',
topic='backwards_compat_test_kafka_source',
properties.bootstrap.server='localhost:29092',
scan.startup.mode='earliest',
) FORMAT UPSERT ENCODE JSON;
13 changes: 13 additions & 0 deletions backwards-compat-tests/slt/kafka/validate_restart.slt
@@ -50,3 +50,16 @@ werwerwwe 8 NULL NULL 4 NULL
yjtyjtyyy 9 NULL NULL 4 NULL
yjtyjtyyy 9 NULL NULL 4 NULL

# kafka_table should apply the upserts and overwrite the existing records
query I rowsort
SELECT action, user_id, obj_id, name, page_id, age, _rw_key FROM kafka_table;
----
6786745ge 6 NULL NULL 3 NULL \x7b22757365725f6964223a20367d
erwerhghj 4 NULL NULL 2 NULL \x7b22757365725f6964223a20347d
fgbgfnyyy 7 NULL NULL 3 NULL \x7b22757365725f6964223a20377d
fsdfgerrg 2 NULL NULL 1 NULL \x7b22757365725f6964223a20327d
gtrgretrg 1 NULL NULL 1 NULL \x7b22757365725f6964223a20317d
kiku7ikkk 5 NULL NULL 2 NULL \x7b22757365725f6964223a20357d
sdfergtth 3 NULL NULL 1 NULL \x7b22757365725f6964223a20337d
werwerwwe 8 NULL NULL 4 NULL \x7b22757365725f6964223a20387d
yjtyjtyyy 9 NULL NULL 4 NULL \x7b22757365725f6964223a20397d
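
The _rw_key column carries the raw Kafka message-key bytes, which sqllogictest renders hex-encoded. Decoding any of the values recovers the JSON key written by seed_json_kafka, for example:

    echo '7b22757365725f6964223a20367d' | xxd -r -p   # prints {"user_id": 6}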
123 changes: 116 additions & 7 deletions ci/docker-compose.yml
@@ -88,10 +88,27 @@ services:
- message_queue
- elasticsearch
- clickhouse-server
- pulsar
- redis-server
- pulsar-server
- cassandra-server
- starrocks-fe-server
- starrocks-be-server
volumes:
- ..:/risingwave

sink-doris-env:
image: public.ecr.aws/x5u3w5h6/rw-build-env:v20231109
depends_on:
- doris-fe-server
- doris-be-server
volumes:
- ..:/risingwave
command: >
sh -c "sudo sysctl -w vm.max_map_count=2000000"
networks:
mynetwork:
ipv4_address: 172.121.0.4

rw-build-env:
image: public.ecr.aws/x5u3w5h6/rw-build-env:v20240213
volumes:
@@ -159,10 +176,96 @@ services:
expose:
- 9009

# Temporary workaround for json schema registry test since redpanda only supports
# protobuf/avro schema registry. Should be removed after the support.
# Related tracking issue:
# https://github.com/redpanda-data/redpanda/issues/1878
redis-server:
container_name: redis-server
image: 'redis:latest'
expose:
- 6379
ports:
- 6378:6379
healthcheck:
test: ["CMD", "redis-cli", "ping"]
interval: 5s
timeout: 30s
retries: 50

doris-fe-server:
platform: linux/amd64
image: apache/doris:2.0.0_alpha-fe-x86_64
hostname: doris-fe-server
command: >
sh -c "sudo sysctl -w vm.max_map_count=2000000"
environment:
- FE_SERVERS=fe1:172.121.0.2:9010
- FE_ID=1
ports:
- "8030:8030"
- "9030:9030"
networks:
mynetwork:
ipv4_address: 172.121.0.2

doris-be-server:
platform: linux/amd64
image: apache/doris:2.0.0_alpha-be-x86_64
hostname: doris-be-server
command: >
sh -c "sudo sysctl -w vm.max_map_count=2000000"
environment:
- FE_SERVERS=fe1:172.121.0.2:9010
- BE_ADDR=172.121.0.3:9050
depends_on:
- doris-fe-server
ports:
- "9050:9050"
networks:
mynetwork:
ipv4_address: 172.121.0.3

cassandra-server:
container_name: cassandra-server
image: cassandra:4.0
ports:
- 9042:9042
environment:
- CASSANDRA_CLUSTER_NAME=cloudinfra

starrocks-fe-server:
container_name: starrocks-fe-server
image: starrocks/fe-ubuntu:3.1.7
hostname: starrocks-fe-server
command:
/opt/starrocks/fe/bin/start_fe.sh
ports:
- 28030:8030
- 29020:9020
- 29030:9030
healthcheck:
test: ["CMD", "curl", "-f", "http://localhost:9030"]
interval: 5s
timeout: 5s
retries: 30

starrocks-be-server:
image: starrocks/be-ubuntu:3.1.7
command:
- /bin/bash
- -c
- |
sleep 15s; mysql --connect-timeout 2 -h starrocks-fe-server -P9030 -uroot -e "alter system add backend \"starrocks-be-server:9050\";"
/opt/starrocks/be/bin/start_be.sh
ports:
- 28040:8040
- 29050:9050
hostname: starrocks-be-server
container_name: starrocks-be-server
depends_on:
- starrocks-fe-server

# # Temporary workaround for json schema registry test since redpanda only supports
# # protobuf/avro schema registry. Should be removed after the support.
# # Related tracking issue:
# # https://github.com/redpanda-data/redpanda/issues/1878
zookeeper:
container_name: zookeeper
image: confluentinc/cp-zookeeper:latest
@@ -201,8 +304,8 @@ services:
KAFKA_ADVERTISED_LISTENERS: PLAINTEXT://kafka:9093,PLAINTEXT_INTERNAL://localhost:29093
KAFKA_OFFSETS_TOPIC_REPLICATION_FACTOR: 1

pulsar:
container_name: pulsar
pulsar-server:
container_name: pulsar-server
image: apachepulsar/pulsar:latest
command: bin/pulsar standalone
ports:
@@ -216,3 +319,9 @@
interval: 5s
timeout: 5s
retries: 5
networks:
mynetwork:
ipam:
config:
- subnet: 172.121.80.0/16
default:
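
Both Doris and StarRocks frontends speak the MySQL protocol, and the port mappings above expose them to the host. A quick connectivity check from the host might look like this (a sketch, assuming the default root user with an empty password):

    mysql -h 127.0.0.1 -P 29030 -uroot -e 'SHOW BACKENDS\G'   # StarRocks FE, host port 29030
    mysql -h 127.0.0.1 -P 9030 -uroot -e 'SHOW BACKENDS\G'    # Doris FE, host port 9030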
65 changes: 65 additions & 0 deletions ci/scripts/e2e-cassandra-sink-test.sh
@@ -0,0 +1,65 @@
#!/usr/bin/env bash

# Exits as soon as any line fails.
set -euo pipefail

source ci/scripts/common.sh

# prepare environment
export CONNECTOR_LIBS_PATH="./connector-node/libs"

while getopts 'p:' opt; do
case ${opt} in
p )
profile=$OPTARG
;;
\? )
echo "Invalid Option: -$OPTARG" 1>&2
exit 1
;;
: )
echo "Invalid option: $OPTARG requires an argument" 1>&2
;;
esac
done
shift $((OPTIND -1))

download_and_prepare_rw "$profile" source

echo "--- Download connector node package"
buildkite-agent artifact download risingwave-connector.tar.gz ./
mkdir ./connector-node
tar xf ./risingwave-connector.tar.gz -C ./connector-node

echo "--- starting risingwave cluster"
cargo make ci-start ci-sink-test
sleep 1

echo "--- create cassandra table"
curl https://downloads.apache.org/cassandra/4.1.3/apache-cassandra-4.1.3-bin.tar.gz --output apache-cassandra-4.1.3-bin.tar.gz
tar xfvz apache-cassandra-4.1.3-bin.tar.gz
cd apache-cassandra-4.1.3/bin
export CQLSH_HOST=cassandra-server
export CQLSH_PORT=9042
./cqlsh -e "CREATE KEYSPACE demo WITH replication = {'class': 'SimpleStrategy', 'replication_factor': 1};use demo;
CREATE table demo_bhv_table(v1 int primary key,v2 smallint,v3 bigint,v4 float,v5 double,v6 text,v7 date,v8 timestamp,v9 boolean);"

echo "--- testing sinks"
cd ../../
sqllogictest -p 4566 -d dev './e2e_test/sink/cassandra_sink.slt'
sleep 1
cd apache-cassandra-4.1.3/bin
./cqlsh -e "COPY demo.demo_bhv_table TO './query_result.csv' WITH HEADER = false AND ENCODING = 'UTF-8';"

if cat ./query_result.csv | awk -F "," '{
exit !($1 == 1 && $2 == 1 && $3 == 1 && $4 == 1.1 && $5 == 1.2 && $6 == "test" && $7 == "2013-01-01" && $8 == "2013-01-01 01:01:01.000+0000" && $9 == "False\r"); }'; then
echo "Cassandra sink check passed"
else
cat ./query_result.csv
echo "The output is not as expected."
exit 1
fi

echo "--- Kill cluster"
cd ../../
cargo make ci-kill
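
The awk check above works because awk's exit status doubles as the test result: "exit !(cond)" exits 0 (success) exactly when cond holds, so the surrounding if branches on whether the exported CSV matched the expected row. The idiom in isolation:

    echo "1,test" | awk -F "," '{ exit !($1 == 1 && $2 == "test") }' && echo "row matches"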
18 changes: 9 additions & 9 deletions ci/scripts/e2e-clickhouse-sink-test.sh
@@ -24,14 +24,14 @@ shift $((OPTIND -1))
download_and_prepare_rw "$profile" source

echo "--- starting risingwave cluster"
cargo make ci-start ci-clickhouse-test
cargo make ci-start ci-sink-test
sleep 1


echo "--- create clickhouse table"
curl https://clickhouse.com/ | sh
sleep 2
./clickhouse client --host=clickhouse-server --port=9000 --query="CREATE table demo_test(v1 Int32,v2 Int64,v3 String)ENGINE = ReplacingMergeTree PRIMARY KEY (v1);"
./clickhouse client --host=clickhouse-server --port=9000 --query="CREATE table demo_test(v1 Int32,v2 Int64,v3 String,v4 Enum16('A'=1,'B'=2))ENGINE = ReplacingMergeTree PRIMARY KEY (v1);"

echo "--- testing sinks"
sqllogictest -p 4566 -d dev './e2e_test/sink/clickhouse_sink.slt'
@@ -41,13 +41,13 @@ sleep 5

# check sink destination using shell
if cat ./query_result.csv | sort | awk -F "," '{
if ($1 == 1 && $2 == 50 && $3 == "\"1-50\"") c1++;
if ($1 == 13 && $2 == 2 && $3 == "\"13-2\"") c2++;
if ($1 == 2 && $2 == 2 && $3 == "\"2-2\"") c3++;
if ($1 == 21 && $2 == 2 && $3 == "\"21-2\"") c4++;
if ($1 == 3 && $2 == 2 && $3 == "\"3-2\"") c5++;
if ($1 == 5 && $2 == 2 && $3 == "\"5-2\"") c6++;
if ($1 == 8 && $2 == 2 && $3 == "\"8-2\"") c7++; }
if ($1 == 1 && $2 == 50 && $3 == "\"1-50\"" && $4 == "\"A\"") c1++;
if ($1 == 13 && $2 == 2 && $3 == "\"13-2\"" && $4 == "\"B\"") c2++;
if ($1 == 2 && $2 == 2 && $3 == "\"2-2\"" && $4 == "\"B\"") c3++;
if ($1 == 21 && $2 == 2 && $3 == "\"21-2\"" && $4 == "\"A\"") c4++;
if ($1 == 3 && $2 == 2 && $3 == "\"3-2\"" && $4 == "\"A\"") c5++;
if ($1 == 5 && $2 == 2 && $3 == "\"5-2\"" && $4 == "\"B\"") c6++;
if ($1 == 8 && $2 == 2 && $3 == "\"8-2\"" && $4 == "\"A\"") c7++; }
END { exit !(c1 == 1 && c2 == 1 && c3 == 1 && c4 == 1 && c5 == 1 && c6 == 1 && c7 == 1); }'; then
echo "Clickhouse sink check passed"
else
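
The new v4 column explains the extra "\"A\"" / "\"B\"" comparisons: ClickHouse serializes Enum16 values by name rather than by number in CSV output. A one-liner against a local server illustrates this (a sketch):

    ./clickhouse client --query="SELECT CAST(1 AS Enum16('A' = 1, 'B' = 2)) FORMAT CSV"   # prints "A"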
(The remaining changed files are not shown.)
