From ff60a7f95e1efb616cf1b6c9c9ffd314d304a2e4 Mon Sep 17 00:00:00 2001 From: Amos Folarin Date: Fri, 9 Dec 2016 12:32:19 +0000 Subject: [PATCH 001/197] move to confluent v3.1.1 and radar components --- .../radar-cp-stack/docker-compose.yml | 54 +++++++++++++++---- 1 file changed, 45 insertions(+), 9 deletions(-) diff --git a/dcompose-stack/radar-cp-stack/docker-compose.yml b/dcompose-stack/radar-cp-stack/docker-compose.yml index f1b8c4cf2..d108b7e8e 100644 --- a/dcompose-stack/radar-cp-stack/docker-compose.yml +++ b/dcompose-stack/radar-cp-stack/docker-compose.yml @@ -7,7 +7,7 @@ services: # *NB Zookeeper clust should prob go on separate network # #---------------------------------------------------------------------------# zookeeper-1: - image: confluentinc/cp-zookeeper:3.1.0 + image: confluentinc/cp-zookeeper:3.1.1 environment: ZOOKEEPER_SERVER_ID: 1 ZOOKEEPER_CLIENT_PORT: 22181 @@ -18,7 +18,7 @@ services: network_mode: host zookeeper-2: - image: confluentinc/cp-zookeeper:3.1.0 + image: confluentinc/cp-zookeeper:3.1.1 environment: ZOOKEEPER_SERVER_ID: 2 ZOOKEEPER_CLIENT_PORT: 32181 @@ -29,7 +29,7 @@ services: network_mode: host zookeeper-3: - image: confluentinc/cp-zookeeper:3.1.0 + image: confluentinc/cp-zookeeper:3.1.1 environment: ZOOKEEPER_SERVER_ID: 3 ZOOKEEPER_CLIENT_PORT: 42181 @@ -44,7 +44,7 @@ services: # Kafka Cluster # #---------------------------------------------------------------------------# kafka-1: - image: confluentinc/cp-kafka:3.1.0 + image: confluentinc/cp-kafka:3.1.1 network_mode: host depends_on: - zookeeper-1 @@ -56,7 +56,7 @@ services: KAFKA_ADVERTISED_LISTENERS: PLAINTEXT://localhost:19092 kafka-2: - image: confluentinc/cp-kafka:3.1.0 + image: confluentinc/cp-kafka:3.1.1 network_mode: host depends_on: - zookeeper-1 @@ -68,7 +68,7 @@ services: KAFKA_ADVERTISED_LISTENERS: PLAINTEXT://localhost:29092 kafka-3: - image: confluentinc/cp-kafka:3.1.0 + image: confluentinc/cp-kafka:3.1.1 network_mode: host depends_on: - zookeeper-1 @@ -79,13 +79,16 @@ services: KAFKA_ZOOKEEPER_CONNECT: localhost:22181,localhost:32181,localhost:42181 KAFKA_ADVERTISED_LISTENERS: PLAINTEXT://localhost:39092 - + #---------------------------------------------------------------------------# + # Kafka Connector # + #---------------------------------------------------------------------------# + #---------------------------------------------------------------------------# # Schema Registry # #---------------------------------------------------------------------------# schema-registry-1: - image: confluentinc/cp-schema-registry:3.1.0 + image: confluentinc/cp-schema-registry:3.1.1 network_mode: host depends_on: - "zookeeper-1" @@ -104,7 +107,7 @@ services: # REST proxy # #---------------------------------------------------------------------------# rest-proxy-1: - image: confluentinc/cp-kafka-rest:3.1.0 + image: confluentinc/cp-kafka-rest:3.1.1 network_mode: host depends_on: - "kafka-1" @@ -122,3 +125,36 @@ services: KAFKA_REST_HOST_NAME: "localhost" + + + #---------------------------------------------------------------------------# + # RADRA Storage # + #---------------------------------------------------------------------------# + mongo: + image: mongo:3.2.10 + ports: + - "27017:27017" + + #---------------------------------------------------------------------------# + # RADAR REST API # + #---------------------------------------------------------------------------# + tomcat: + image: tomcat:8.0.37 + ports: + - "8080:8080" + depends_on: + - mongo + + 
#---------------------------------------------------------------------------# + # RADRA Dashboard # + #---------------------------------------------------------------------------# +# nodejs: +# image: node:7.0.0 +# ports: +# - "80:8888" +# depends_on: +# - tomcat + + + + From 8353001419a54bc826f73890a8163c03df273b6b Mon Sep 17 00:00:00 2001 From: Amos Folarin Date: Fri, 9 Dec 2016 13:20:27 +0000 Subject: [PATCH 002/197] add kafka connector to the stack --- .../radar-cp-stack/docker-compose.yml | 38 ++++++++++++++++--- 1 file changed, 33 insertions(+), 5 deletions(-) diff --git a/dcompose-stack/radar-cp-stack/docker-compose.yml b/dcompose-stack/radar-cp-stack/docker-compose.yml index d108b7e8e..3d850cfdd 100644 --- a/dcompose-stack/radar-cp-stack/docker-compose.yml +++ b/dcompose-stack/radar-cp-stack/docker-compose.yml @@ -79,11 +79,6 @@ services: KAFKA_ZOOKEEPER_CONNECT: localhost:22181,localhost:32181,localhost:42181 KAFKA_ADVERTISED_LISTENERS: PLAINTEXT://localhost:39092 - #---------------------------------------------------------------------------# - # Kafka Connector # - #---------------------------------------------------------------------------# - - #---------------------------------------------------------------------------# # Schema Registry # #---------------------------------------------------------------------------# @@ -103,6 +98,39 @@ services: SCHEMA_REGISTRY_HOST_NAME: "localhost" SCHEMA_REGISTRY_LISTENERS: "http://localhost:8081" + #---------------------------------------------------------------------------# + # Kafka Connector # + #---------------------------------------------------------------------------# + connect: + image: confluentinc/cp-kafka-connect:3.1.1 + network_mode: host + depends_on: + - zookeeper-1 + - zookeeper-2 + - zookeeper-3 + - kafka-1 + - kafka-2 + - kafka-3 + - schema-registry-1 + ports: + - "8083:8083" + environment: + CONNECT_BOOTSTRAP_SERVERS: 'localhost:19092' + CONNECT_REST_ADVERTISED_HOST_NAME: connect + CONNECT_REST_PORT: 8083 + CONNECT_GROUP_ID: compose-connect-group + CONNECT_CONFIG_STORAGE_TOPIC: docker-connect-configs + CONNECT_OFFSET_STORAGE_TOPIC: docker-connect-offsets + CONNECT_STATUS_STORAGE_TOPIC: docker-connect-status + CONNECT_KEY_CONVERTER: io.confluent.connect.avro.AvroConverter + CONNECT_KEY_CONVERTER_SCHEMA_REGISTRY_URL: 'http://localhost:8081' + CONNECT_VALUE_CONVERTER: io.confluent.connect.avro.AvroConverter + CONNECT_VALUE_CONVERTER_SCHEMA_REGISTRY_URL: 'http://localhost:8081' + CONNECT_INTERNAL_KEY_CONVERTER: org.apache.kafka.connect.json.JsonConverter + CONNECT_INTERNAL_VALUE_CONVERTER: org.apache.kafka.connect.json.JsonConverter + CONNECT_ZOOKEEPER_CONNECT: 'zookeeper-1:2181' + + #---------------------------------------------------------------------------# # REST proxy # #---------------------------------------------------------------------------# From de8a862b82dceb82001569e475dcae20006ba28d Mon Sep 17 00:00:00 2001 From: Amos Folarin Date: Tue, 3 Jan 2017 16:03:11 +0000 Subject: [PATCH 003/197] schema registry container so depends_on kafka --- dcompose-stack/radar-cp-stack/docker-compose.yml | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/dcompose-stack/radar-cp-stack/docker-compose.yml b/dcompose-stack/radar-cp-stack/docker-compose.yml index 3d850cfdd..76d1c6103 100644 --- a/dcompose-stack/radar-cp-stack/docker-compose.yml +++ b/dcompose-stack/radar-cp-stack/docker-compose.yml @@ -86,9 +86,9 @@ services: image: confluentinc/cp-schema-registry:3.1.1 network_mode: host depends_on: - - "zookeeper-1" 
- - "zookeeper-2" - - "zookeeper-3" + - "kafka-1" + - "kafka-2" + - "kafka-3" restart: always ports: - "8081:8081" From 4bf345fd722ee0e9d8b081989f256f601c9c7d29 Mon Sep 17 00:00:00 2001 From: Francesco Nobilia Date: Wed, 4 Jan 2017 11:20:44 +0000 Subject: [PATCH 004/197] modified dependencies container order for all containers and zookeeper references for schema-registry --- .../radar-cp-stack/docker-compose.yml | 121 ++++++++---------- 1 file changed, 54 insertions(+), 67 deletions(-) diff --git a/dcompose-stack/radar-cp-stack/docker-compose.yml b/dcompose-stack/radar-cp-stack/docker-compose.yml index 76d1c6103..0b0d89adb 100644 --- a/dcompose-stack/radar-cp-stack/docker-compose.yml +++ b/dcompose-stack/radar-cp-stack/docker-compose.yml @@ -39,7 +39,6 @@ services: ZOOKEEPER_SERVERS: localhost:22888:23888;localhost:32888:33888;localhost:42888:43888 network_mode: host - #---------------------------------------------------------------------------# # Kafka Cluster # #---------------------------------------------------------------------------# @@ -59,9 +58,7 @@ services: image: confluentinc/cp-kafka:3.1.1 network_mode: host depends_on: - - zookeeper-1 - - zookeeper-2 - - zookeeper-3 + - kafka-1 environment: KAFKA_BROKER_ID: 2 KAFKA_ZOOKEEPER_CONNECT: localhost:22181,localhost:32181,localhost:42181 @@ -71,9 +68,7 @@ services: image: confluentinc/cp-kafka:3.1.1 network_mode: host depends_on: - - zookeeper-1 - - zookeeper-2 - - zookeeper-3 + - kafka-2 environment: KAFKA_BROKER_ID: 3 KAFKA_ZOOKEEPER_CONNECT: localhost:22181,localhost:32181,localhost:42181 @@ -86,50 +81,49 @@ services: image: confluentinc/cp-schema-registry:3.1.1 network_mode: host depends_on: - - "kafka-1" - - "kafka-2" - - "kafka-3" + - kafka-1 + - kafka-2 + - kafka-3 restart: always ports: - "8081:8081" environment: #SR_KAFKASTORE_CONNECTION_URL: "zookeeper-sasl-1:2181" - SCHEMA_REGISTRY_KAFKASTORE_CONNECTION_URL: "localhost:32181" - SCHEMA_REGISTRY_HOST_NAME: "localhost" - SCHEMA_REGISTRY_LISTENERS: "http://localhost:8081" + SCHEMA_REGISTRY_KAFKASTORE_CONNECTION_URL: localhost:32181 + SCHEMA_REGISTRY_HOST_NAME: localhost + SCHEMA_REGISTRY_LISTENERS: http://localhost:8081 #---------------------------------------------------------------------------# # Kafka Connector # #---------------------------------------------------------------------------# - connect: - image: confluentinc/cp-kafka-connect:3.1.1 - network_mode: host - depends_on: - - zookeeper-1 - - zookeeper-2 - - zookeeper-3 - - kafka-1 - - kafka-2 - - kafka-3 - - schema-registry-1 - ports: - - "8083:8083" - environment: - CONNECT_BOOTSTRAP_SERVERS: 'localhost:19092' - CONNECT_REST_ADVERTISED_HOST_NAME: connect - CONNECT_REST_PORT: 8083 - CONNECT_GROUP_ID: compose-connect-group - CONNECT_CONFIG_STORAGE_TOPIC: docker-connect-configs - CONNECT_OFFSET_STORAGE_TOPIC: docker-connect-offsets - CONNECT_STATUS_STORAGE_TOPIC: docker-connect-status - CONNECT_KEY_CONVERTER: io.confluent.connect.avro.AvroConverter - CONNECT_KEY_CONVERTER_SCHEMA_REGISTRY_URL: 'http://localhost:8081' - CONNECT_VALUE_CONVERTER: io.confluent.connect.avro.AvroConverter - CONNECT_VALUE_CONVERTER_SCHEMA_REGISTRY_URL: 'http://localhost:8081' - CONNECT_INTERNAL_KEY_CONVERTER: org.apache.kafka.connect.json.JsonConverter - CONNECT_INTERNAL_VALUE_CONVERTER: org.apache.kafka.connect.json.JsonConverter - CONNECT_ZOOKEEPER_CONNECT: 'zookeeper-1:2181' - +# connect: +# image: confluentinc/cp-kafka-connect:3.1.1 +# network_mode: host +# depends_on: +# - zookeeper-1 +# - zookeeper-2 +# - zookeeper-3 +# - kafka-1 +# - 
kafka-2 +# - kafka-3 +# - schema-registry-1 +# ports: +# - "8083:8083" +# environment: +# CONNECT_BOOTSTRAP_SERVERS: localhost:19092,localhost:29092,localhost:39092 +# CONNECT_REST_ADVERTISED_HOST_NAME: connect +# CONNECT_REST_PORT: 8083 +# CONNECT_GROUP_ID: compose-connect-group +# CONNECT_CONFIG_STORAGE_TOPIC: docker-connect-configs +# CONNECT_OFFSET_STORAGE_TOPIC: docker-connect-offsets +# CONNECT_STATUS_STORAGE_TOPIC: docker-connect-status +# CONNECT_KEY_CONVERTER: io.confluent.connect.avro.AvroConverter +# CONNECT_KEY_CONVERTER_SCHEMA_REGISTRY_URL: 'http://localhost:8081' +# CONNECT_VALUE_CONVERTER: io.confluent.connect.avro.AvroConverter +# CONNECT_VALUE_CONVERTER_SCHEMA_REGISTRY_URL: 'http://localhost:8081' +# CONNECT_INTERNAL_KEY_CONVERTER: org.apache.kafka.connect.json.JsonConverter +# CONNECT_INTERNAL_VALUE_CONVERTER: org.apache.kafka.connect.json.JsonConverter +# CONNECT_ZOOKEEPER_CONNECT: localhost:22181,localhost:32181,localhost:42181 #---------------------------------------------------------------------------# # REST proxy # @@ -138,40 +132,37 @@ services: image: confluentinc/cp-kafka-rest:3.1.1 network_mode: host depends_on: - - "kafka-1" - - "kafka-2" - - "kafka-3" - - "schema-registry-1" + - kafka-1 + - kafka-2 + - kafka-3 + - schema-registry-1 ports: - "8082:8082" environment: #RP_SCHEMA_REGISTRY_URL: "http://schema-registry:8081" #RP_ZOOKEEPER_CONNECT: "zookeeper-sasl-1:2181" - KAFKA_REST_ZOOKEEPER_CONNECT: "localhost:32181" - KAFKA_REST_LISTENERS: "http://localhost:8082" - KAFKA_REST_SCHEMA_REGISTRY_URL: "http://localhost:8081" - KAFKA_REST_HOST_NAME: "localhost" - - - + KAFKA_REST_ZOOKEEPER_CONNECT: localhost:22181,localhost:32181,localhost:42181 + KAFKA_REST_LISTENERS: http://localhost:8082 + KAFKA_REST_SCHEMA_REGISTRY_URL: http://localhost:8081 + KAFKA_REST_HOST_NAME: localhost #---------------------------------------------------------------------------# # RADRA Storage # #---------------------------------------------------------------------------# - mongo: - image: mongo:3.2.10 - ports: - - "27017:27017" +# mongo: +# image: mongo:3.2.10 +# ports: +# - "27017:27017" #---------------------------------------------------------------------------# # RADAR REST API # #---------------------------------------------------------------------------# - tomcat: - image: tomcat:8.0.37 - ports: - - "8080:8080" - depends_on: - - mongo +# tomcat: +# image: tomcat:8.0.37 +# ports: +# - "8080:8080" +# depends_on: +# - mongo #---------------------------------------------------------------------------# # RADRA Dashboard # @@ -182,7 +173,3 @@ services: # - "80:8888" # depends_on: # - tomcat - - - - From 1396d8c1f458e61a1f2156e39cef48bb0f8e13b3 Mon Sep 17 00:00:00 2001 From: Francesco Nobilia Date: Wed, 4 Jan 2017 14:46:30 +0000 Subject: [PATCH 005/197] Added connector image --- .../radar-cp-stack/docker-compose.yml | 56 +++++++++---------- 1 file changed, 28 insertions(+), 28 deletions(-) diff --git a/dcompose-stack/radar-cp-stack/docker-compose.yml b/dcompose-stack/radar-cp-stack/docker-compose.yml index 0b0d89adb..21f0166cd 100644 --- a/dcompose-stack/radar-cp-stack/docker-compose.yml +++ b/dcompose-stack/radar-cp-stack/docker-compose.yml @@ -96,34 +96,34 @@ services: #---------------------------------------------------------------------------# # Kafka Connector # #---------------------------------------------------------------------------# -# connect: -# image: confluentinc/cp-kafka-connect:3.1.1 -# network_mode: host -# depends_on: -# - zookeeper-1 -# - zookeeper-2 -# - zookeeper-3 -# 
- kafka-1 -# - kafka-2 -# - kafka-3 -# - schema-registry-1 -# ports: -# - "8083:8083" -# environment: -# CONNECT_BOOTSTRAP_SERVERS: localhost:19092,localhost:29092,localhost:39092 -# CONNECT_REST_ADVERTISED_HOST_NAME: connect -# CONNECT_REST_PORT: 8083 -# CONNECT_GROUP_ID: compose-connect-group -# CONNECT_CONFIG_STORAGE_TOPIC: docker-connect-configs -# CONNECT_OFFSET_STORAGE_TOPIC: docker-connect-offsets -# CONNECT_STATUS_STORAGE_TOPIC: docker-connect-status -# CONNECT_KEY_CONVERTER: io.confluent.connect.avro.AvroConverter -# CONNECT_KEY_CONVERTER_SCHEMA_REGISTRY_URL: 'http://localhost:8081' -# CONNECT_VALUE_CONVERTER: io.confluent.connect.avro.AvroConverter -# CONNECT_VALUE_CONVERTER_SCHEMA_REGISTRY_URL: 'http://localhost:8081' -# CONNECT_INTERNAL_KEY_CONVERTER: org.apache.kafka.connect.json.JsonConverter -# CONNECT_INTERNAL_VALUE_CONVERTER: org.apache.kafka.connect.json.JsonConverter -# CONNECT_ZOOKEEPER_CONNECT: localhost:22181,localhost:32181,localhost:42181 + connect: + image: confluentinc/cp-kafka-connect:3.1.1 + network_mode: host + depends_on: + - zookeeper-1 + - zookeeper-2 + - zookeeper-3 + - kafka-1 + - kafka-2 + - kafka-3 + - schema-registry-1 + ports: + - "8083:8083" + environment: + CONNECT_BOOTSTRAP_SERVERS: localhost:19092,localhost:29092,localhost:39092 + CONNECT_REST_ADVERTISED_HOST_NAME: connect + CONNECT_REST_PORT: 8083 + CONNECT_GROUP_ID: compose-connect-group + CONNECT_CONFIG_STORAGE_TOPIC: docker-connect-configs + CONNECT_OFFSET_STORAGE_TOPIC: docker-connect-offsets + CONNECT_STATUS_STORAGE_TOPIC: docker-connect-status + CONNECT_KEY_CONVERTER: io.confluent.connect.avro.AvroConverter + CONNECT_KEY_CONVERTER_SCHEMA_REGISTRY_URL: 'http://localhost:8081' + CONNECT_VALUE_CONVERTER: io.confluent.connect.avro.AvroConverter + CONNECT_VALUE_CONVERTER_SCHEMA_REGISTRY_URL: 'http://localhost:8081' + CONNECT_INTERNAL_KEY_CONVERTER: org.apache.kafka.connect.json.JsonConverter + CONNECT_INTERNAL_VALUE_CONVERTER: org.apache.kafka.connect.json.JsonConverter + CONNECT_ZOOKEEPER_CONNECT: localhost:22181,localhost:32181,localhost:42181 #---------------------------------------------------------------------------# # REST proxy # From 8b7ce616ba1d1b2259ac83562511ece29cadb3ae Mon Sep 17 00:00:00 2001 From: Joris Borgdorff Date: Mon, 9 Jan 2017 11:41:20 +0100 Subject: [PATCH 006/197] First stab at HDFS docker containers --- .../radar-cp-hadoop-stack/README.md | 8 ++++ .../radar-cp-hadoop-stack/docker-compose.yml | 37 +++++++++++++++++++ 2 files changed, 45 insertions(+) create mode 100644 dcompose-stack/radar-cp-hadoop-stack/README.md create mode 100644 dcompose-stack/radar-cp-hadoop-stack/docker-compose.yml diff --git a/dcompose-stack/radar-cp-hadoop-stack/README.md b/dcompose-stack/radar-cp-hadoop-stack/README.md new file mode 100644 index 000000000..077ae3bdf --- /dev/null +++ b/dcompose-stack/radar-cp-hadoop-stack/README.md @@ -0,0 +1,8 @@ +# RADAR-CNS with a HDFS connector + +In the Dockerfile 3 HDFS volumes are mounted. 
Create those before running: +``` +mkdir -p /usr/local/var/lib/docker/hadoop-data1 +mkdir -p /usr/local/var/lib/docker/hadoop-data2 +mkdir -p /usr/local/var/lib/docker/hadoop-data3 +``` diff --git a/dcompose-stack/radar-cp-hadoop-stack/docker-compose.yml b/dcompose-stack/radar-cp-hadoop-stack/docker-compose.yml new file mode 100644 index 000000000..fb5b67ece --- /dev/null +++ b/dcompose-stack/radar-cp-hadoop-stack/docker-compose.yml @@ -0,0 +1,37 @@ +datanode1: + image: uhopper/hadoop-datanode + domainname: hadoop + net: hadoop + volumes: + - /usr/local/var/lib/docker/hadoop-data1:/hadoop/dfs/data + environment: + - CORE_CONF_fs_defaultFS=hdfs://namenode:8020 + - HDFS_CONF_dfs_replication=3 +datanode2: + image: uhopper/hadoop-datanode + domainname: hadoop + net: hadoop + volumes: + - /usr/local/var/lib/docker/hadoop-data2:/hadoop/dfs/data + environment: + - CORE_CONF_fs_defaultFS=hdfs://namenode:8020 + - HDFS_CONF_dfs_replication=3 +datanode3: + image: uhopper/hadoop-datanode + domainname: hadoop + net: hadoop + volumes: + - /usr/local/var/lib/docker/hadoop-data3:/hadoop/dfs/data + environment: + - CORE_CONF_fs_defaultFS=hdfs://namenode:8020 + - HDFS_CONF_dfs_replication=3 +namenode: + image: uhopper/hadoop-namenode + hostname: namenode + container_name: namenode + domainname: hadoop + net: hadoop + volumes: + - /usr/local/var/lib/docker/hadoop-name:/hadoop/dfs/name + environment: + - CLUSTER_NAME=radar-cns From a0d20e046d677e90fb3527d2418c210569a97b8d Mon Sep 17 00:00:00 2001 From: Joris Borgdorff Date: Mon, 9 Jan 2017 12:50:53 +0100 Subject: [PATCH 007/197] Hadoop setup with extraction working --- .../radar-cp-hadoop-stack/README.md | 24 ++++-- .../radar-cp-hadoop-stack/docker-compose.yml | 83 ++++++++++--------- 2 files changed, 65 insertions(+), 42 deletions(-) diff --git a/dcompose-stack/radar-cp-hadoop-stack/README.md b/dcompose-stack/radar-cp-hadoop-stack/README.md index 077ae3bdf..ce2fd3235 100644 --- a/dcompose-stack/radar-cp-hadoop-stack/README.md +++ b/dcompose-stack/radar-cp-hadoop-stack/README.md @@ -1,8 +1,22 @@ # RADAR-CNS with a HDFS connector -In the Dockerfile 3 HDFS volumes are mounted. Create those before running: -``` -mkdir -p /usr/local/var/lib/docker/hadoop-data1 -mkdir -p /usr/local/var/lib/docker/hadoop-data2 -mkdir -p /usr/local/var/lib/docker/hadoop-data3 +In the Dockerfile 3 HDFS volumes and the name directory are mounted. Create those before running. Also create a docker `hadoop` network. 
+ +```shell +DATA_DIR=/usr/local/var/lib/docker +mkdir -p "$DATA_DIR/hadoop-data1" "$DATA_DIR/hadoop-data2" "$DATA_DIR/hadoop-data3" "$DATA_DIR/hadoop-name" +docker network create hadoop ``` + +Data can be extracted from this setup by running +```shell +# Directory to write output to +OUTPUT_DIR=$PWD/output +# HDFS filename to get +HDFS_FILE=/abc/test.txt +# HDFS command to run +HDFS_COMMAND="hdfs dfs -get $HDFS_FILE /home/output" + +mkdir -p $OUTPUT_DIR +docker run --rm --network hadoop -v "$OUTPUT_DIR:/home/output" -e CLUSTER_NAME=radar-cns -e CORE_CONF_fs_defaultFS=hdfs://namenode:8020 uhopper/hadoop $HDFS_COMMAND +``` diff --git a/dcompose-stack/radar-cp-hadoop-stack/docker-compose.yml b/dcompose-stack/radar-cp-hadoop-stack/docker-compose.yml index fb5b67ece..941cfeff7 100644 --- a/dcompose-stack/radar-cp-hadoop-stack/docker-compose.yml +++ b/dcompose-stack/radar-cp-hadoop-stack/docker-compose.yml @@ -1,37 +1,46 @@ -datanode1: - image: uhopper/hadoop-datanode - domainname: hadoop - net: hadoop - volumes: - - /usr/local/var/lib/docker/hadoop-data1:/hadoop/dfs/data - environment: - - CORE_CONF_fs_defaultFS=hdfs://namenode:8020 - - HDFS_CONF_dfs_replication=3 -datanode2: - image: uhopper/hadoop-datanode - domainname: hadoop - net: hadoop - volumes: - - /usr/local/var/lib/docker/hadoop-data2:/hadoop/dfs/data - environment: - - CORE_CONF_fs_defaultFS=hdfs://namenode:8020 - - HDFS_CONF_dfs_replication=3 -datanode3: - image: uhopper/hadoop-datanode - domainname: hadoop - net: hadoop - volumes: - - /usr/local/var/lib/docker/hadoop-data3:/hadoop/dfs/data - environment: - - CORE_CONF_fs_defaultFS=hdfs://namenode:8020 - - HDFS_CONF_dfs_replication=3 -namenode: - image: uhopper/hadoop-namenode - hostname: namenode - container_name: namenode - domainname: hadoop - net: hadoop - volumes: - - /usr/local/var/lib/docker/hadoop-name:/hadoop/dfs/name - environment: - - CLUSTER_NAME=radar-cns +version: '2' +services: + datanode1: + image: uhopper/hadoop-datanode + domainname: hadoop + networks: + - hadoop + volumes: + - /usr/local/var/lib/docker/hadoop-data1:/hadoop/dfs/data + environment: + - CORE_CONF_fs_defaultFS=hdfs://namenode:8020 + - HDFS_CONF_dfs_replication=3 + datanode2: + image: uhopper/hadoop-datanode + domainname: hadoop + networks: + - hadoop + volumes: + - /usr/local/var/lib/docker/hadoop-data2:/hadoop/dfs/data + environment: + - CORE_CONF_fs_defaultFS=hdfs://namenode:8020 + - HDFS_CONF_dfs_replication=3 + datanode3: + image: uhopper/hadoop-datanode + domainname: hadoop + networks: + - hadoop + volumes: + - /usr/local/var/lib/docker/hadoop-data3:/hadoop/dfs/data + environment: + - CORE_CONF_fs_defaultFS=hdfs://namenode:8020 + - HDFS_CONF_dfs_replication=3 + namenode: + image: uhopper/hadoop-namenode + hostname: namenode + container_name: namenode + domainname: hadoop + networks: + - hadoop + volumes: + - /usr/local/var/lib/docker/hadoop-name:/hadoop/dfs/name + environment: + - CLUSTER_NAME=radar-cns +networks: + hadoop: + external: true From c3b89b013e2558548890591de3031aaeb12ed069 Mon Sep 17 00:00:00 2001 From: Joris Borgdorff Date: Mon, 9 Jan 2017 13:44:33 +0100 Subject: [PATCH 008/197] Make data extraction a separate command --- dcompose-stack/radar-cp-hadoop-stack/README.md | 18 ++++++------------ .../radar-cp-hadoop-stack/docker-compose.yml | 16 ++++------------ .../radar-cp-hadoop-stack/extract_from_hdfs.sh | 18 ++++++++++++++++++ 3 files changed, 28 insertions(+), 24 deletions(-) create mode 100755 dcompose-stack/radar-cp-hadoop-stack/extract_from_hdfs.sh diff --git 
a/dcompose-stack/radar-cp-hadoop-stack/README.md b/dcompose-stack/radar-cp-hadoop-stack/README.md index ce2fd3235..bf378aad0 100644 --- a/dcompose-stack/radar-cp-hadoop-stack/README.md +++ b/dcompose-stack/radar-cp-hadoop-stack/README.md @@ -1,22 +1,16 @@ # RADAR-CNS with a HDFS connector -In the Dockerfile 3 HDFS volumes and the name directory are mounted. Create those before running. Also create a docker `hadoop` network. +In the Dockerfile, 2 redundant HDFS volumes and the name directory are mounted. The local paths for those volumes have to be created before the first run. Also, create a docker `hadoop` network. ```shell DATA_DIR=/usr/local/var/lib/docker -mkdir -p "$DATA_DIR/hadoop-data1" "$DATA_DIR/hadoop-data2" "$DATA_DIR/hadoop-data3" "$DATA_DIR/hadoop-name" +mkdir -p "$DATA_DIR/hadoop-data1" "$DATA_DIR/hadoop-data2" "$DATA_DIR/hadoop-name" docker network create hadoop ``` -Data can be extracted from this setup by running -```shell -# Directory to write output to -OUTPUT_DIR=$PWD/output -# HDFS filename to get -HDFS_FILE=/abc/test.txt -# HDFS command to run -HDFS_COMMAND="hdfs dfs -get $HDFS_FILE /home/output" +Data can be extracted from this setup by running: -mkdir -p $OUTPUT_DIR -docker run --rm --network hadoop -v "$OUTPUT_DIR:/home/output" -e CLUSTER_NAME=radar-cns -e CORE_CONF_fs_defaultFS=hdfs://namenode:8020 uhopper/hadoop $HDFS_COMMAND +```shell +./extract_from_hdfs ``` +This command will not overwrite data in the destination directory. diff --git a/dcompose-stack/radar-cp-hadoop-stack/docker-compose.yml b/dcompose-stack/radar-cp-hadoop-stack/docker-compose.yml index 941cfeff7..7647290bb 100644 --- a/dcompose-stack/radar-cp-hadoop-stack/docker-compose.yml +++ b/dcompose-stack/radar-cp-hadoop-stack/docker-compose.yml @@ -1,4 +1,5 @@ version: '2' + services: datanode1: image: uhopper/hadoop-datanode @@ -9,7 +10,7 @@ services: - /usr/local/var/lib/docker/hadoop-data1:/hadoop/dfs/data environment: - CORE_CONF_fs_defaultFS=hdfs://namenode:8020 - - HDFS_CONF_dfs_replication=3 + - HDFS_CONF_dfs_replication=2 datanode2: image: uhopper/hadoop-datanode domainname: hadoop @@ -19,17 +20,7 @@ services: - /usr/local/var/lib/docker/hadoop-data2:/hadoop/dfs/data environment: - CORE_CONF_fs_defaultFS=hdfs://namenode:8020 - - HDFS_CONF_dfs_replication=3 - datanode3: - image: uhopper/hadoop-datanode - domainname: hadoop - networks: - - hadoop - volumes: - - /usr/local/var/lib/docker/hadoop-data3:/hadoop/dfs/data - environment: - - CORE_CONF_fs_defaultFS=hdfs://namenode:8020 - - HDFS_CONF_dfs_replication=3 + - HDFS_CONF_dfs_replication=2 namenode: image: uhopper/hadoop-namenode hostname: namenode @@ -41,6 +32,7 @@ services: - /usr/local/var/lib/docker/hadoop-name:/hadoop/dfs/name environment: - CLUSTER_NAME=radar-cns + networks: hadoop: external: true diff --git a/dcompose-stack/radar-cp-hadoop-stack/extract_from_hdfs.sh b/dcompose-stack/radar-cp-hadoop-stack/extract_from_hdfs.sh new file mode 100755 index 000000000..fa7cde875 --- /dev/null +++ b/dcompose-stack/radar-cp-hadoop-stack/extract_from_hdfs.sh @@ -0,0 +1,18 @@ +#!/bin/bash + +if [[ $# -lt 1 || $1 = "-h" || $1 = "--help" ]]; then + printf "Usage:\n$0 []\nThe destination directory defaults to ./output\n" + exit 1 +fi + +# HDFS filename to get +HDFS_FILE=$1 +# Directory to write output to +OUTPUT_DIR=${2:-$PWD/output} +# Internal docker directory to write output to +HDFS_OUTPUT_DIR=/home/output +# HDFS command to run +HDFS_COMMAND="hdfs dfs -get $HDFS_FILE $HDFS_OUTPUT_DIR" + +mkdir -p $OUTPUT_DIR +docker run --rm --network hadoop -v 
"$OUTPUT_DIR:$HDFS_OUTPUT_DIR" -e CLUSTER_NAME=radar-cns -e CORE_CONF_fs_defaultFS=hdfs://namenode:8020 uhopper/hadoop $HDFS_COMMAND From 37eb4af9172c2000b2eea7eec0137518d93b038d Mon Sep 17 00:00:00 2001 From: Joris Borgdorff Date: Mon, 9 Jan 2017 14:52:17 +0100 Subject: [PATCH 009/197] Use kafka docker network instead of --net=host It turns out the docker images function fine when using their own network instead of the host network. This way the docker setup is much less liable to attacks (ports are not opened), and it also works on Mac. --- .../radar-cp-hadoop-stack/docker-compose.yml | 108 +++++++++++++++++- 1 file changed, 106 insertions(+), 2 deletions(-) diff --git a/dcompose-stack/radar-cp-hadoop-stack/docker-compose.yml b/dcompose-stack/radar-cp-hadoop-stack/docker-compose.yml index 7647290bb..05abc85d2 100644 --- a/dcompose-stack/radar-cp-hadoop-stack/docker-compose.yml +++ b/dcompose-stack/radar-cp-hadoop-stack/docker-compose.yml @@ -1,7 +1,107 @@ version: '2' services: - datanode1: + + zookeeper-1: + image: confluentinc/cp-zookeeper:3.1.1 + domainname: zookeeper + container_name: zookeeper-1 + networks: + - kafka + environment: + ZOOKEEPER_SERVER_ID: 1 + ZOOKEEPER_CLIENT_PORT: 22181 + ZOOKEEPER_TICK_TIME: 2000 + ZOOKEEPER_INIT_LIMIT: 5 + ZOOKEEPER_SYNC_LIMIT: 2 + ZOOKEEPER_SERVERS: zookeeper-1:22888:23888 + + #---------------------------------------------------------------------------# + # Kafka Cluster # + #---------------------------------------------------------------------------# + kafka-1: + image: confluentinc/cp-kafka:3.1.1 + domainname: kafka + container_name: kafka-1 + networks: + - kafka + depends_on: + - zookeeper-1 + environment: + KAFKA_BROKER_ID: 1 + KAFKA_ZOOKEEPER_CONNECT: zookeeper-1:22181 + KAFKA_ADVERTISED_LISTENERS: PLAINTEXT://kafka-1:19092 + + kafka-2: + image: confluentinc/cp-kafka:3.1.1 + domainname: kafka + container_name: kafka-2 + networks: + - kafka + depends_on: + - zookeeper-1 + environment: + KAFKA_BROKER_ID: 2 + KAFKA_ZOOKEEPER_CONNECT: zookeeper-1:22181 + KAFKA_ADVERTISED_LISTENERS: PLAINTEXT://kafka-2:19092 + + kafka-3: + image: confluentinc/cp-kafka:3.1.1 + domainname: kafka + container_name: kafka-3 + networks: + - kafka + depends_on: + - zookeeper-1 + environment: + KAFKA_BROKER_ID: 3 + KAFKA_ZOOKEEPER_CONNECT: zookeeper-1:22181 + KAFKA_ADVERTISED_LISTENERS: PLAINTEXT://kafka-3:19092 + + #---------------------------------------------------------------------------# + # Schema Registry # + #---------------------------------------------------------------------------# + schema-registry-1: + image: confluentinc/cp-schema-registry:3.1.1 + container_name: schema-registry-1 + domainname: kafka + networks: + - kafka + depends_on: + - kafka-1 + - kafka-2 + - kafka-3 + restart: always + ports: + - "8081:8081" + environment: + SCHEMA_REGISTRY_KAFKASTORE_CONNECTION_URL: zookeeper-1:22181 + SCHEMA_REGISTRY_HOST_NAME: schema-registry-1 + SCHEMA_REGISTRY_LISTENERS: http://schema-registry-1:8081 + + #---------------------------------------------------------------------------# + # REST proxy # + #---------------------------------------------------------------------------# + rest-proxy-1: + image: confluentinc/cp-kafka-rest:3.1.1 + domainname: kafka + container_name: rest-proxy-1 + networks: + - kafka + depends_on: + - kafka-1 + - kafka-2 + - kafka-3 + - schema-registry-1 + ports: + - "8082:8082" + environment: + KAFKA_REST_ZOOKEEPER_CONNECT: zookeeper-1:22181 + KAFKA_REST_LISTENERS: http://rest-proxy-1:8082 + KAFKA_REST_SCHEMA_REGISTRY_URL: 
http://schema-registry-1:8081 + KAFKA_REST_HOST_NAME: rest-proxy-1 + + datanode-1: image: uhopper/hadoop-datanode domainname: hadoop networks: @@ -11,7 +111,7 @@ services: environment: - CORE_CONF_fs_defaultFS=hdfs://namenode:8020 - HDFS_CONF_dfs_replication=2 - datanode2: + datanode-2: image: uhopper/hadoop-datanode domainname: hadoop networks: @@ -36,3 +136,7 @@ services: networks: hadoop: external: true + zookeeper: + driver: bridge + kafka: + driver: bridge From 1ca6a7a8d9776523584eafbe0dca076bc2c3028b Mon Sep 17 00:00:00 2001 From: Joris Borgdorff Date: Mon, 9 Jan 2017 15:04:36 +0100 Subject: [PATCH 010/197] Tweaks - Using hadoop version 2.7.2 everywhere - moved network list to the top - added tomcat/nodejs/mongo - added some more documentation headers --- .../radar-cp-hadoop-stack/docker-compose.yml | 66 +++++++++++++++---- .../extract_from_hdfs.sh | 2 +- 2 files changed, 56 insertions(+), 12 deletions(-) diff --git a/dcompose-stack/radar-cp-hadoop-stack/docker-compose.yml b/dcompose-stack/radar-cp-hadoop-stack/docker-compose.yml index 05abc85d2..ab37c5a36 100644 --- a/dcompose-stack/radar-cp-hadoop-stack/docker-compose.yml +++ b/dcompose-stack/radar-cp-hadoop-stack/docker-compose.yml @@ -1,13 +1,26 @@ version: '2' +networks: + hadoop: + external: true + zookeeper: + driver: bridge + api: + driver: bridge + kafka: + driver: bridge + services: + #---------------------------------------------------------------------------# + # Zookeeper Cluster + #---------------------------------------------------------------------------# zookeeper-1: image: confluentinc/cp-zookeeper:3.1.1 domainname: zookeeper container_name: zookeeper-1 networks: - - kafka + - zookeeper environment: ZOOKEEPER_SERVER_ID: 1 ZOOKEEPER_CLIENT_PORT: 22181 @@ -25,6 +38,7 @@ services: container_name: kafka-1 networks: - kafka + - zookeeper depends_on: - zookeeper-1 environment: @@ -38,6 +52,7 @@ services: container_name: kafka-2 networks: - kafka + - zookeeper depends_on: - zookeeper-1 environment: @@ -51,6 +66,7 @@ services: container_name: kafka-3 networks: - kafka + - zookeeper depends_on: - zookeeper-1 environment: @@ -67,6 +83,7 @@ services: domainname: kafka networks: - kafka + - zookeeper depends_on: - kafka-1 - kafka-2 @@ -88,6 +105,7 @@ services: container_name: rest-proxy-1 networks: - kafka + - zookeeper depends_on: - kafka-1 - kafka-2 @@ -101,8 +119,41 @@ services: KAFKA_REST_SCHEMA_REGISTRY_URL: http://schema-registry-1:8081 KAFKA_REST_HOST_NAME: rest-proxy-1 + + #---------------------------------------------------------------------------# + # RADAR Hot-Storage # + #---------------------------------------------------------------------------# + mongo: + image: mongo:3.2.10 + ports: + - "27017:27017" + + #---------------------------------------------------------------------------# + # RADAR REST API # + #---------------------------------------------------------------------------# + tomcat: + image: tomcat:8.0.37 + ports: + - "8080:8080" + depends_on: + - mongo + + #---------------------------------------------------------------------------# + # RADAR Dashboard # + #---------------------------------------------------------------------------# + nodejs: + image: node:7.0.0 + ports: + - "80:8888" + depends_on: + - tomcat + + + #---------------------------------------------------------------------------# + # RADAR Cold Storage # + #---------------------------------------------------------------------------# datanode-1: - image: uhopper/hadoop-datanode + image: uhopper/hadoop-datanode:2.7.2 domainname: hadoop 
networks: - hadoop @@ -112,7 +163,7 @@ services: - CORE_CONF_fs_defaultFS=hdfs://namenode:8020 - HDFS_CONF_dfs_replication=2 datanode-2: - image: uhopper/hadoop-datanode + image: uhopper/hadoop-datanode:2.7.2 domainname: hadoop networks: - hadoop @@ -122,7 +173,7 @@ services: - CORE_CONF_fs_defaultFS=hdfs://namenode:8020 - HDFS_CONF_dfs_replication=2 namenode: - image: uhopper/hadoop-namenode + image: uhopper/hadoop-namenode:2.7.2 hostname: namenode container_name: namenode domainname: hadoop @@ -133,10 +184,3 @@ services: environment: - CLUSTER_NAME=radar-cns -networks: - hadoop: - external: true - zookeeper: - driver: bridge - kafka: - driver: bridge diff --git a/dcompose-stack/radar-cp-hadoop-stack/extract_from_hdfs.sh b/dcompose-stack/radar-cp-hadoop-stack/extract_from_hdfs.sh index fa7cde875..fbe46e2a3 100755 --- a/dcompose-stack/radar-cp-hadoop-stack/extract_from_hdfs.sh +++ b/dcompose-stack/radar-cp-hadoop-stack/extract_from_hdfs.sh @@ -15,4 +15,4 @@ HDFS_OUTPUT_DIR=/home/output HDFS_COMMAND="hdfs dfs -get $HDFS_FILE $HDFS_OUTPUT_DIR" mkdir -p $OUTPUT_DIR -docker run --rm --network hadoop -v "$OUTPUT_DIR:$HDFS_OUTPUT_DIR" -e CLUSTER_NAME=radar-cns -e CORE_CONF_fs_defaultFS=hdfs://namenode:8020 uhopper/hadoop $HDFS_COMMAND +docker run --rm --network hadoop -v "$OUTPUT_DIR:$HDFS_OUTPUT_DIR" -e CLUSTER_NAME=radar-cns -e CORE_CONF_fs_defaultFS=hdfs://namenode:8020 uhopper/hadoop:2.7.2 $HDFS_COMMAND From 406eaeb8fddc84ca9601a989744412d3dc6939fa Mon Sep 17 00:00:00 2001 From: Joris Borgdorff Date: Mon, 9 Jan 2017 15:51:53 +0100 Subject: [PATCH 011/197] Removed fixed container and domain name --- .../radar-cp-hadoop-stack/docker-compose.yml | 55 +++++----- .../radar-cp-stack/docker-compose.yml | 103 +++++++++++------- 2 files changed, 90 insertions(+), 68 deletions(-) diff --git a/dcompose-stack/radar-cp-hadoop-stack/docker-compose.yml b/dcompose-stack/radar-cp-hadoop-stack/docker-compose.yml index ab37c5a36..74d59f020 100644 --- a/dcompose-stack/radar-cp-hadoop-stack/docker-compose.yml +++ b/dcompose-stack/radar-cp-hadoop-stack/docker-compose.yml @@ -1,41 +1,38 @@ +--- version: '2' networks: - hadoop: - external: true zookeeper: driver: bridge - api: - driver: bridge kafka: driver: bridge + api: + driver: bridge + hadoop: + external: true services: #---------------------------------------------------------------------------# - # Zookeeper Cluster + # Zookeeper Cluster # #---------------------------------------------------------------------------# zookeeper-1: image: confluentinc/cp-zookeeper:3.1.1 - domainname: zookeeper - container_name: zookeeper-1 networks: - zookeeper environment: ZOOKEEPER_SERVER_ID: 1 - ZOOKEEPER_CLIENT_PORT: 22181 + ZOOKEEPER_CLIENT_PORT: 2181 ZOOKEEPER_TICK_TIME: 2000 ZOOKEEPER_INIT_LIMIT: 5 ZOOKEEPER_SYNC_LIMIT: 2 - ZOOKEEPER_SERVERS: zookeeper-1:22888:23888 + ZOOKEEPER_SERVERS: zookeeper-1:2888:3888 #---------------------------------------------------------------------------# # Kafka Cluster # #---------------------------------------------------------------------------# kafka-1: image: confluentinc/cp-kafka:3.1.1 - domainname: kafka - container_name: kafka-1 networks: - kafka - zookeeper @@ -43,44 +40,38 @@ services: - zookeeper-1 environment: KAFKA_BROKER_ID: 1 - KAFKA_ZOOKEEPER_CONNECT: zookeeper-1:22181 - KAFKA_ADVERTISED_LISTENERS: PLAINTEXT://kafka-1:19092 + KAFKA_ZOOKEEPER_CONNECT: zookeeper-1:2181 + KAFKA_ADVERTISED_LISTENERS: PLAINTEXT://kafka-1:9092 kafka-2: image: confluentinc/cp-kafka:3.1.1 - domainname: kafka - container_name: kafka-2 networks: - 
kafka - zookeeper depends_on: - - zookeeper-1 + - kafka-1 environment: KAFKA_BROKER_ID: 2 - KAFKA_ZOOKEEPER_CONNECT: zookeeper-1:22181 - KAFKA_ADVERTISED_LISTENERS: PLAINTEXT://kafka-2:19092 + KAFKA_ZOOKEEPER_CONNECT: zookeeper-1:2181 + KAFKA_ADVERTISED_LISTENERS: PLAINTEXT://kafka-2:9092 kafka-3: image: confluentinc/cp-kafka:3.1.1 - domainname: kafka - container_name: kafka-3 networks: - kafka - zookeeper depends_on: - - zookeeper-1 + - kafka-2 environment: KAFKA_BROKER_ID: 3 - KAFKA_ZOOKEEPER_CONNECT: zookeeper-1:22181 - KAFKA_ADVERTISED_LISTENERS: PLAINTEXT://kafka-3:19092 + KAFKA_ZOOKEEPER_CONNECT: zookeeper-1:2181 + KAFKA_ADVERTISED_LISTENERS: PLAINTEXT://kafka-3:9092 #---------------------------------------------------------------------------# # Schema Registry # #---------------------------------------------------------------------------# schema-registry-1: image: confluentinc/cp-schema-registry:3.1.1 - container_name: schema-registry-1 - domainname: kafka networks: - kafka - zookeeper @@ -92,7 +83,7 @@ services: ports: - "8081:8081" environment: - SCHEMA_REGISTRY_KAFKASTORE_CONNECTION_URL: zookeeper-1:22181 + SCHEMA_REGISTRY_KAFKASTORE_CONNECTION_URL: zookeeper-1:2181 SCHEMA_REGISTRY_HOST_NAME: schema-registry-1 SCHEMA_REGISTRY_LISTENERS: http://schema-registry-1:8081 @@ -101,8 +92,6 @@ services: #---------------------------------------------------------------------------# rest-proxy-1: image: confluentinc/cp-kafka-rest:3.1.1 - domainname: kafka - container_name: rest-proxy-1 networks: - kafka - zookeeper @@ -114,17 +103,19 @@ services: ports: - "8082:8082" environment: - KAFKA_REST_ZOOKEEPER_CONNECT: zookeeper-1:22181 + KAFKA_REST_ZOOKEEPER_CONNECT: zookeeper-1:2181 KAFKA_REST_LISTENERS: http://rest-proxy-1:8082 KAFKA_REST_SCHEMA_REGISTRY_URL: http://schema-registry-1:8081 KAFKA_REST_HOST_NAME: rest-proxy-1 #---------------------------------------------------------------------------# - # RADAR Hot-Storage # + # RADAR Hot Storage # #---------------------------------------------------------------------------# mongo: image: mongo:3.2.10 + networks: + - api ports: - "27017:27017" @@ -133,6 +124,8 @@ services: #---------------------------------------------------------------------------# tomcat: image: tomcat:8.0.37 + networks: + - api ports: - "8080:8080" depends_on: @@ -143,6 +136,8 @@ services: #---------------------------------------------------------------------------# nodejs: image: node:7.0.0 + networks: + - api ports: - "80:8888" depends_on: diff --git a/dcompose-stack/radar-cp-stack/docker-compose.yml b/dcompose-stack/radar-cp-stack/docker-compose.yml index 21f0166cd..94a1b5def 100644 --- a/dcompose-stack/radar-cp-stack/docker-compose.yml +++ b/dcompose-stack/radar-cp-stack/docker-compose.yml @@ -1,85 +1,104 @@ --- version: '2' + +networks: + zookeeper: + driver: bridge + kafka: + driver: bridge + api: + driver: bridge + services: #---------------------------------------------------------------------------# # Zookeeper Cluster # - # *NB Zookeeper clust should prob go on separate network # #---------------------------------------------------------------------------# zookeeper-1: image: confluentinc/cp-zookeeper:3.1.1 + networks: + - zookeeper environment: ZOOKEEPER_SERVER_ID: 1 - ZOOKEEPER_CLIENT_PORT: 22181 + ZOOKEEPER_CLIENT_PORT: 2181 ZOOKEEPER_TICK_TIME: 2000 ZOOKEEPER_INIT_LIMIT: 5 ZOOKEEPER_SYNC_LIMIT: 2 - ZOOKEEPER_SERVERS: localhost:22888:23888;localhost:32888:33888;localhost:42888:43888 - network_mode: host + ZOOKEEPER_SERVERS: 
zookeeper-1:2888:3888;zookeeper-2:2888:3888;zookeeper-3:2888:3888 zookeeper-2: image: confluentinc/cp-zookeeper:3.1.1 + networks: + - zookeeper environment: ZOOKEEPER_SERVER_ID: 2 - ZOOKEEPER_CLIENT_PORT: 32181 + ZOOKEEPER_CLIENT_PORT: 2181 ZOOKEEPER_TICK_TIME: 2000 ZOOKEEPER_INIT_LIMIT: 5 ZOOKEEPER_SYNC_LIMIT: 2 - ZOOKEEPER_SERVERS: localhost:22888:23888;localhost:32888:33888;localhost:42888:43888 - network_mode: host + ZOOKEEPER_SERVERS: zookeeper-1:2888:3888;zookeeper-2:2888:3888;zookeeper-3:2888:3888 zookeeper-3: image: confluentinc/cp-zookeeper:3.1.1 + networks: + - zookeeper environment: ZOOKEEPER_SERVER_ID: 3 - ZOOKEEPER_CLIENT_PORT: 42181 + ZOOKEEPER_CLIENT_PORT: 2181 ZOOKEEPER_TICK_TIME: 2000 ZOOKEEPER_INIT_LIMIT: 5 ZOOKEEPER_SYNC_LIMIT: 2 - ZOOKEEPER_SERVERS: localhost:22888:23888;localhost:32888:33888;localhost:42888:43888 - network_mode: host + ZOOKEEPER_SERVERS: zookeeper-1:2888:3888;zookeeper-2:2888:3888;zookeeper-3:2888:3888 #---------------------------------------------------------------------------# # Kafka Cluster # #---------------------------------------------------------------------------# kafka-1: image: confluentinc/cp-kafka:3.1.1 - network_mode: host + networks: + - kafka + - zookeeper depends_on: - zookeeper-1 - zookeeper-2 - zookeeper-3 environment: KAFKA_BROKER_ID: 1 - KAFKA_ZOOKEEPER_CONNECT: localhost:22181,localhost:32181,localhost:42181 - KAFKA_ADVERTISED_LISTENERS: PLAINTEXT://localhost:19092 + KAFKA_ZOOKEEPER_CONNECT: zookeeper-1:2181,zookeeper-2:2181,zookeeper-3:2181 + KAFKA_ADVERTISED_LISTENERS: PLAINTEXT://kafka-1:9092 kafka-2: image: confluentinc/cp-kafka:3.1.1 - network_mode: host + networks: + - kafka + - zookeeper depends_on: - kafka-1 environment: KAFKA_BROKER_ID: 2 - KAFKA_ZOOKEEPER_CONNECT: localhost:22181,localhost:32181,localhost:42181 - KAFKA_ADVERTISED_LISTENERS: PLAINTEXT://localhost:29092 + KAFKA_ZOOKEEPER_CONNECT: zookeeper-1:2181,zookeeper-2:2181,zookeeper-3:2181 + KAFKA_ADVERTISED_LISTENERS: PLAINTEXT://kafka-2:9092 kafka-3: image: confluentinc/cp-kafka:3.1.1 - network_mode: host + networks: + - kafka + - zookeeper depends_on: - kafka-2 environment: KAFKA_BROKER_ID: 3 - KAFKA_ZOOKEEPER_CONNECT: localhost:22181,localhost:32181,localhost:42181 - KAFKA_ADVERTISED_LISTENERS: PLAINTEXT://localhost:39092 + KAFKA_ZOOKEEPER_CONNECT: zookeeper-1:2181,zookeeper-2:2181,zookeeper-3:2181 + KAFKA_ADVERTISED_LISTENERS: PLAINTEXT://kafka-3:9092 #---------------------------------------------------------------------------# # Schema Registry # #---------------------------------------------------------------------------# schema-registry-1: image: confluentinc/cp-schema-registry:3.1.1 - network_mode: host + networks: + - kafka + - zookeeper depends_on: - kafka-1 - kafka-2 @@ -88,17 +107,18 @@ services: ports: - "8081:8081" environment: - #SR_KAFKASTORE_CONNECTION_URL: "zookeeper-sasl-1:2181" - SCHEMA_REGISTRY_KAFKASTORE_CONNECTION_URL: localhost:32181 - SCHEMA_REGISTRY_HOST_NAME: localhost - SCHEMA_REGISTRY_LISTENERS: http://localhost:8081 + SCHEMA_REGISTRY_KAFKASTORE_CONNECTION_URL: zookeeper-1:2181 + SCHEMA_REGISTRY_HOST_NAME: schema-registry-1 + SCHEMA_REGISTRY_LISTENERS: http://schema-registry-1:8081 #---------------------------------------------------------------------------# # Kafka Connector # #---------------------------------------------------------------------------# connect: image: confluentinc/cp-kafka-connect:3.1.1 - network_mode: host + networks: + - kafka + - zookeeper depends_on: - zookeeper-1 - zookeeper-2 @@ -110,7 +130,7 @@ services: ports: 
- "8083:8083" environment: - CONNECT_BOOTSTRAP_SERVERS: localhost:19092,localhost:29092,localhost:39092 + CONNECT_BOOTSTRAP_SERVERS: kafka-1:9092,kafka-2:9092,kafka-3:9092 CONNECT_REST_ADVERTISED_HOST_NAME: connect CONNECT_REST_PORT: 8083 CONNECT_GROUP_ID: compose-connect-group @@ -118,19 +138,21 @@ services: CONNECT_OFFSET_STORAGE_TOPIC: docker-connect-offsets CONNECT_STATUS_STORAGE_TOPIC: docker-connect-status CONNECT_KEY_CONVERTER: io.confluent.connect.avro.AvroConverter - CONNECT_KEY_CONVERTER_SCHEMA_REGISTRY_URL: 'http://localhost:8081' + CONNECT_KEY_CONVERTER_SCHEMA_REGISTRY_URL: 'http://schema-registry-1:8081' CONNECT_VALUE_CONVERTER: io.confluent.connect.avro.AvroConverter - CONNECT_VALUE_CONVERTER_SCHEMA_REGISTRY_URL: 'http://localhost:8081' + CONNECT_VALUE_CONVERTER_SCHEMA_REGISTRY_URL: 'http://schema-registry-1:8081' CONNECT_INTERNAL_KEY_CONVERTER: org.apache.kafka.connect.json.JsonConverter CONNECT_INTERNAL_VALUE_CONVERTER: org.apache.kafka.connect.json.JsonConverter - CONNECT_ZOOKEEPER_CONNECT: localhost:22181,localhost:32181,localhost:42181 + CONNECT_ZOOKEEPER_CONNECT: zookeeper-1:2181,zookeeper-2:2181,zookeeper-3:2181 #---------------------------------------------------------------------------# # REST proxy # #---------------------------------------------------------------------------# rest-proxy-1: image: confluentinc/cp-kafka-rest:3.1.1 - network_mode: host + networks: + - kafka + - zookeeper depends_on: - kafka-1 - kafka-2 @@ -139,18 +161,19 @@ services: ports: - "8082:8082" environment: - #RP_SCHEMA_REGISTRY_URL: "http://schema-registry:8081" - #RP_ZOOKEEPER_CONNECT: "zookeeper-sasl-1:2181" - KAFKA_REST_ZOOKEEPER_CONNECT: localhost:22181,localhost:32181,localhost:42181 - KAFKA_REST_LISTENERS: http://localhost:8082 - KAFKA_REST_SCHEMA_REGISTRY_URL: http://localhost:8081 - KAFKA_REST_HOST_NAME: localhost + KAFKA_REST_ZOOKEEPER_CONNECT: zookeeper-1:2181,zookeeper-2:2181,zookeeper-3:2181 + KAFKA_REST_LISTENERS: http://rest-proxy-1:8082 + KAFKA_REST_SCHEMA_REGISTRY_URL: http://schema-registry-1:8081 + KAFKA_REST_HOST_NAME: rest-proxy-1 + #---------------------------------------------------------------------------# - # RADRA Storage # + # RADAR Hot Storage # #---------------------------------------------------------------------------# # mongo: # image: mongo:3.2.10 +# networks: +# - api # ports: # - "27017:27017" @@ -159,16 +182,20 @@ services: #---------------------------------------------------------------------------# # tomcat: # image: tomcat:8.0.37 +# networks: +# - api # ports: # - "8080:8080" # depends_on: # - mongo #---------------------------------------------------------------------------# - # RADRA Dashboard # + # RADAR Dashboard # #---------------------------------------------------------------------------# # nodejs: # image: node:7.0.0 +# networks: +# - api # ports: # - "80:8888" # depends_on: From 29dc38f492050ff9937cfe92cb2bca7958ace65e Mon Sep 17 00:00:00 2001 From: "Amos Folarin (home)" Date: Mon, 9 Jan 2017 16:31:02 +0000 Subject: [PATCH 012/197] add dashboard container to docker-compose.yml --- dcompose-stack/radar-cp-hadoop-stack/docker-compose.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/dcompose-stack/radar-cp-hadoop-stack/docker-compose.yml b/dcompose-stack/radar-cp-hadoop-stack/docker-compose.yml index 74d59f020..b307bd65a 100644 --- a/dcompose-stack/radar-cp-hadoop-stack/docker-compose.yml +++ b/dcompose-stack/radar-cp-hadoop-stack/docker-compose.yml @@ -135,11 +135,11 @@ services: # RADAR Dashboard # 
#---------------------------------------------------------------------------# nodejs: - image: node:7.0.0 + image: radarcns/radar-dashboard:latest networks: - api ports: - - "80:8888" + - "3000:3000" depends_on: - tomcat From 0753a7ac879e2939c78a3394ef031f3347f31519 Mon Sep 17 00:00:00 2001 From: Joris Borgdorff Date: Tue, 10 Jan 2017 16:38:45 +0100 Subject: [PATCH 013/197] Added docker-compose command in README --- dcompose-stack/radar-cp-hadoop-stack/README.md | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/dcompose-stack/radar-cp-hadoop-stack/README.md b/dcompose-stack/radar-cp-hadoop-stack/README.md index bf378aad0..7ba0a358e 100644 --- a/dcompose-stack/radar-cp-hadoop-stack/README.md +++ b/dcompose-stack/radar-cp-hadoop-stack/README.md @@ -8,6 +8,11 @@ mkdir -p "$DATA_DIR/hadoop-data1" "$DATA_DIR/hadoop-data2" "$DATA_DIR/hadoop-nam docker network create hadoop ``` +Run the setup with +```shell +sudo docker-compose up -d +``` + Data can be extracted from this setup by running: ```shell From 30bb34d6acfb604664efaadda97efdea6344e29c Mon Sep 17 00:00:00 2001 From: Joris Borgdorff Date: Wed, 11 Jan 2017 12:25:22 +0100 Subject: [PATCH 014/197] Added an SMTP server to the stack. --- dcompose-stack/radar-cp-hadoop-stack/.gitignore | 1 + dcompose-stack/radar-cp-hadoop-stack/README.md | 2 ++ .../radar-cp-hadoop-stack/docker-compose.yml | 13 +++++++++++++ .../radar-cp-hadoop-stack/mail.env.template | 4 ++++ 4 files changed, 20 insertions(+) create mode 100644 dcompose-stack/radar-cp-hadoop-stack/.gitignore create mode 100644 dcompose-stack/radar-cp-hadoop-stack/mail.env.template diff --git a/dcompose-stack/radar-cp-hadoop-stack/.gitignore b/dcompose-stack/radar-cp-hadoop-stack/.gitignore new file mode 100644 index 000000000..d23d053c7 --- /dev/null +++ b/dcompose-stack/radar-cp-hadoop-stack/.gitignore @@ -0,0 +1 @@ +/mail.env diff --git a/dcompose-stack/radar-cp-hadoop-stack/README.md b/dcompose-stack/radar-cp-hadoop-stack/README.md index 7ba0a358e..8a99d2a15 100644 --- a/dcompose-stack/radar-cp-hadoop-stack/README.md +++ b/dcompose-stack/radar-cp-hadoop-stack/README.md @@ -8,6 +8,8 @@ mkdir -p "$DATA_DIR/hadoop-data1" "$DATA_DIR/hadoop-data2" "$DATA_DIR/hadoop-nam docker network create hadoop ``` +Modify `mail.env.template` to set a SMTP host to send emails with, and move it to `mail.env`. The configuration settings are passed to a [namshi/smtp](https://hub.docker.com/r/namshi/smtp/) Docker container. 
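+For example (a sketch using the keys from `mail.env.template`; the values shown are the template's placeholders and should be replaced with your own SMTP relay details):
+
+```shell
+cp mail.env.template mail.env
+# then edit mail.env, e.g.:
+# SMARTHOST_ADDRESS=mail.example.com
+# SMARTHOST_PORT=587
+# SMARTHOST_USER=user@example.com
+# SMARTHOST_PASSWORD=XXXXXXXX
+```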
+ Run the setup with ```shell sudo docker-compose up -d diff --git a/dcompose-stack/radar-cp-hadoop-stack/docker-compose.yml b/dcompose-stack/radar-cp-hadoop-stack/docker-compose.yml index b307bd65a..b296340e5 100644 --- a/dcompose-stack/radar-cp-hadoop-stack/docker-compose.yml +++ b/dcompose-stack/radar-cp-hadoop-stack/docker-compose.yml @@ -10,6 +10,8 @@ networks: driver: bridge hadoop: external: true + mail: + driver: bridge services: @@ -179,3 +181,14 @@ services: environment: - CLUSTER_NAME=radar-cns + #---------------------------------------------------------------------------# + # Email server # + #---------------------------------------------------------------------------# + smtp: + image: namshi/smtp:latest + networks: + - mail + volumes: + - smtp_queue:/var/spool/exim + env_file: + - smtp.env diff --git a/dcompose-stack/radar-cp-hadoop-stack/mail.env.template b/dcompose-stack/radar-cp-hadoop-stack/mail.env.template new file mode 100644 index 000000000..ab7fe491d --- /dev/null +++ b/dcompose-stack/radar-cp-hadoop-stack/mail.env.template @@ -0,0 +1,4 @@ +SMARTHOST_ADDRESS=mail.example.com +SMARTHOST_PORT=587 +SMARTHOST_USER=user@example.com +SMARTHOST_PASSWORD=XXXXXXXX From 6b526fbe0c9b612ac4262dd5cd7672fb7f283437 Mon Sep 17 00:00:00 2001 From: Joris Borgdorff Date: Wed, 11 Jan 2017 16:04:22 +0100 Subject: [PATCH 015/197] Rewrite output path to absolute path --- dcompose-stack/radar-cp-hadoop-stack/extract_from_hdfs.sh | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/dcompose-stack/radar-cp-hadoop-stack/extract_from_hdfs.sh b/dcompose-stack/radar-cp-hadoop-stack/extract_from_hdfs.sh index fbe46e2a3..802d98393 100755 --- a/dcompose-stack/radar-cp-hadoop-stack/extract_from_hdfs.sh +++ b/dcompose-stack/radar-cp-hadoop-stack/extract_from_hdfs.sh @@ -7,8 +7,9 @@ fi # HDFS filename to get HDFS_FILE=$1 -# Directory to write output to -OUTPUT_DIR=${2:-$PWD/output} +# Absolute directory to write output to +OUTPUT_DIR=${2:-output} +OUTPUT_DIR="$(cd "$(dirname "$OUTPUT_DIR")"; pwd)/$(basename "$OUTPUT_DIR")" # Internal docker directory to write output to HDFS_OUTPUT_DIR=/home/output # HDFS command to run From b485c82fc7bf574ccac38db59ca2df3005661379 Mon Sep 17 00:00:00 2001 From: Joris Borgdorff Date: Wed, 11 Jan 2017 16:56:52 +0100 Subject: [PATCH 016/197] Updated docs and hdfs containers --- .../radar-cp-hadoop-stack/.gitignore | 2 +- .../radar-cp-hadoop-stack/README.md | 19 ++++++++--- .../radar-cp-hadoop-stack/docker-compose.yml | 33 +++++++++---------- .../{mail.env.template => smtp.env.template} | 0 4 files changed, 31 insertions(+), 23 deletions(-) rename dcompose-stack/radar-cp-hadoop-stack/{mail.env.template => smtp.env.template} (100%) diff --git a/dcompose-stack/radar-cp-hadoop-stack/.gitignore b/dcompose-stack/radar-cp-hadoop-stack/.gitignore index d23d053c7..db7bb65a8 100644 --- a/dcompose-stack/radar-cp-hadoop-stack/.gitignore +++ b/dcompose-stack/radar-cp-hadoop-stack/.gitignore @@ -1 +1 @@ -/mail.env +/smtp.env diff --git a/dcompose-stack/radar-cp-hadoop-stack/README.md b/dcompose-stack/radar-cp-hadoop-stack/README.md index 8a99d2a15..8491b9ff1 100644 --- a/dcompose-stack/radar-cp-hadoop-stack/README.md +++ b/dcompose-stack/radar-cp-hadoop-stack/README.md @@ -1,16 +1,27 @@ # RADAR-CNS with a HDFS connector -In the Dockerfile, 2 redundant HDFS volumes and the name directory are mounted. The local paths for those volumes have to be created before the first run. Also, create a docker `hadoop` network. 
+In the Dockerfile, 2 redundant HDFS volumes and the name directory are mounted. The local root path for those volumes has to be created before the first run. Also, create a docker `hadoop` network. ```shell -DATA_DIR=/usr/local/var/lib/docker -mkdir -p "$DATA_DIR/hadoop-data1" "$DATA_DIR/hadoop-data2" "$DATA_DIR/hadoop-name" +DOCKER_DATA=/usr/local/var/lib/docker +mkdir -p $DOCKER_DATA docker network create hadoop ``` +For a redundant data storage, instead of the directories created in `$DOCKER_DATA`, make symlinks to different volumes: + +```shell +DOCKER_DATA=/usr/local/var/lib/docker +VOLUME_1=/volume1 +VOLUME_2=/volume1 +mkdir -p $VOLUME_1/hdfs-data $VOLUME_2/hdfs-data +ln -s $VOLUME_1/hdfs-data $DOCKER_DATA/hdfs-data1 +ln -s $VOLUME_2/hdfs-data $DOCKER_DATA/hdfs-data2 +``` + Modify `mail.env.template` to set a SMTP host to send emails with, and move it to `mail.env`. The configuration settings are passed to a [namshi/smtp](https://hub.docker.com/r/namshi/smtp/) Docker container. -Run the setup with +Run the full setup with ```shell sudo docker-compose up -d ``` diff --git a/dcompose-stack/radar-cp-hadoop-stack/docker-compose.yml b/dcompose-stack/radar-cp-hadoop-stack/docker-compose.yml index b296340e5..21a935a27 100644 --- a/dcompose-stack/radar-cp-hadoop-stack/docker-compose.yml +++ b/dcompose-stack/radar-cp-hadoop-stack/docker-compose.yml @@ -136,7 +136,7 @@ services: #---------------------------------------------------------------------------# # RADAR Dashboard # #---------------------------------------------------------------------------# - nodejs: + dashboard: image: radarcns/radar-dashboard:latest networks: - api @@ -149,37 +149,34 @@ services: #---------------------------------------------------------------------------# # RADAR Cold Storage # #---------------------------------------------------------------------------# - datanode-1: + hdfs-datanode-1: image: uhopper/hadoop-datanode:2.7.2 - domainname: hadoop networks: - hadoop volumes: - - /usr/local/var/lib/docker/hadoop-data1:/hadoop/dfs/data + - /usr/local/var/lib/docker/hdfs-data1:/hadoop/dfs/data environment: - - CORE_CONF_fs_defaultFS=hdfs://namenode:8020 - - HDFS_CONF_dfs_replication=2 - datanode-2: + CORE_CONF_fs_defaultFS: hdfs://hdfs-namenode:8020 + HDFS_CONF_dfs_replication: 2 + + hdfs-datanode-2: image: uhopper/hadoop-datanode:2.7.2 - domainname: hadoop networks: - hadoop volumes: - - /usr/local/var/lib/docker/hadoop-data2:/hadoop/dfs/data + - /usr/local/var/lib/docker/hdfs-data2:/hadoop/dfs/data environment: - - CORE_CONF_fs_defaultFS=hdfs://namenode:8020 - - HDFS_CONF_dfs_replication=2 - namenode: + CORE_CONF_fs_defaultFS: hdfs://hdfs-namenode:8020 + HDFS_CONF_dfs_replication: 2 + + hdfs-namenode: image: uhopper/hadoop-namenode:2.7.2 - hostname: namenode - container_name: namenode - domainname: hadoop networks: - hadoop volumes: - - /usr/local/var/lib/docker/hadoop-name:/hadoop/dfs/name + - /usr/local/var/lib/docker/hdfs-name:/hadoop/dfs/name environment: - - CLUSTER_NAME=radar-cns + CLUSTER_NAME: radar-cns #---------------------------------------------------------------------------# # Email server # @@ -189,6 +186,6 @@ services: networks: - mail volumes: - - smtp_queue:/var/spool/exim + - /var/spool/exim env_file: - smtp.env diff --git a/dcompose-stack/radar-cp-hadoop-stack/mail.env.template b/dcompose-stack/radar-cp-hadoop-stack/smtp.env.template similarity index 100% rename from dcompose-stack/radar-cp-hadoop-stack/mail.env.template rename to dcompose-stack/radar-cp-hadoop-stack/smtp.env.template From 
c2865526755d6e0f143bb2169a091e61b261d9e1 Mon Sep 17 00:00:00 2001 From: Amos Folarin Date: Fri, 13 Jan 2017 15:25:15 +0000 Subject: [PATCH 017/197] create docker-compose.yml v3, rm hadoop from stack --- .../radar-cp-hadoop-stack/docker-compose.yml | 86 ++++++++----------- 1 file changed, 38 insertions(+), 48 deletions(-) diff --git a/dcompose-stack/radar-cp-hadoop-stack/docker-compose.yml b/dcompose-stack/radar-cp-hadoop-stack/docker-compose.yml index 21a935a27..70684123a 100644 --- a/dcompose-stack/radar-cp-hadoop-stack/docker-compose.yml +++ b/dcompose-stack/radar-cp-hadoop-stack/docker-compose.yml @@ -1,17 +1,15 @@ --- -version: '2' +version: '3' networks: zookeeper: - driver: bridge + driver: overlay kafka: - driver: bridge + driver: overlay api: - driver: bridge - hadoop: - external: true - mail: - driver: bridge + driver: overlay + #hadoop: + # external: true services: @@ -136,7 +134,7 @@ services: #---------------------------------------------------------------------------# # RADAR Dashboard # #---------------------------------------------------------------------------# - dashboard: + nodejs: image: radarcns/radar-dashboard:latest networks: - api @@ -149,43 +147,35 @@ services: #---------------------------------------------------------------------------# # RADAR Cold Storage # #---------------------------------------------------------------------------# - hdfs-datanode-1: - image: uhopper/hadoop-datanode:2.7.2 - networks: - - hadoop - volumes: - - /usr/local/var/lib/docker/hdfs-data1:/hadoop/dfs/data - environment: - CORE_CONF_fs_defaultFS: hdfs://hdfs-namenode:8020 - HDFS_CONF_dfs_replication: 2 - - hdfs-datanode-2: - image: uhopper/hadoop-datanode:2.7.2 - networks: - - hadoop - volumes: - - /usr/local/var/lib/docker/hdfs-data2:/hadoop/dfs/data - environment: - CORE_CONF_fs_defaultFS: hdfs://hdfs-namenode:8020 - HDFS_CONF_dfs_replication: 2 + #datanode-1: + # image: uhopper/hadoop-datanode:2.7.2 + # domainname: hadoop + # networks: + # - hadoop + # volumes: + # - /usr/local/var/lib/docker/hadoop-data1:/hadoop/dfs/data + # environment: + # - CORE_CONF_fs_defaultFS=hdfs://namenode:8020 + # - HDFS_CONF_dfs_replication=2 + #datanode-2: + # image: uhopper/hadoop-datanode:2.7.2 + # domainname: hadoop + # networks: + # - hadoop + # volumes: + # - /usr/local/var/lib/docker/hadoop-data2:/hadoop/dfs/data + # environment: + # - CORE_CONF_fs_defaultFS=hdfs://namenode:8020 + # - HDFS_CONF_dfs_replication=2 + #namenode: + # image: uhopper/hadoop-namenode:2.7.2 + # hostname: namenode + # container_name: namenode + # domainname: hadoop + # networks: + # - hadoop + # volumes: + # - /usr/local/var/lib/docker/hadoop-name:/hadoop/dfs/name + # environment: + # - CLUSTER_NAME=radar-cns - hdfs-namenode: - image: uhopper/hadoop-namenode:2.7.2 - networks: - - hadoop - volumes: - - /usr/local/var/lib/docker/hdfs-name:/hadoop/dfs/name - environment: - CLUSTER_NAME: radar-cns - - #---------------------------------------------------------------------------# - # Email server # - #---------------------------------------------------------------------------# - smtp: - image: namshi/smtp:latest - networks: - - mail - volumes: - - /var/spool/exim - env_file: - - smtp.env From d92164ec5de137f7a004ef4ebc93511ba407225d Mon Sep 17 00:00:00 2001 From: Joris Borgdorff Date: Mon, 16 Jan 2017 10:40:12 +0100 Subject: [PATCH 018/197] Support for multiple name node directories. This way, the name node stores the directory information (+- like filesystem inodes) in duplicate. 
By mounting the name directory on different volumes, some redundancy in data can be achieved. --- .../radar-cp-hadoop-stack/README.md | 9 ++++--- .../radar-cp-hadoop-stack/docker-compose.yml | 7 ++++-- .../hdfs-namenode/Dockerfile | 4 ++++ .../hdfs-namenode/run.sh | 24 +++++++++++++++++++ 4 files changed, 39 insertions(+), 5 deletions(-) create mode 100644 dcompose-stack/radar-cp-hadoop-stack/hdfs-namenode/Dockerfile create mode 100644 dcompose-stack/radar-cp-hadoop-stack/hdfs-namenode/run.sh diff --git a/dcompose-stack/radar-cp-hadoop-stack/README.md b/dcompose-stack/radar-cp-hadoop-stack/README.md index 8491b9ff1..946e0374b 100644 --- a/dcompose-stack/radar-cp-hadoop-stack/README.md +++ b/dcompose-stack/radar-cp-hadoop-stack/README.md @@ -14,9 +14,12 @@ For a redundant data storage, instead of the directories created in `$DOCKER_DAT DOCKER_DATA=/usr/local/var/lib/docker VOLUME_1=/volume1 VOLUME_2=/volume1 -mkdir -p $VOLUME_1/hdfs-data $VOLUME_2/hdfs-data -ln -s $VOLUME_1/hdfs-data $DOCKER_DATA/hdfs-data1 -ln -s $VOLUME_2/hdfs-data $DOCKER_DATA/hdfs-data2 +mkdir -p "$VOLUME_1/hdfs-data" "$VOLUME_1/hdfs-name" +mkdir -p "$VOLUME_2/hdfs-data" "$VOLUME_2/hdfs-name" +ln -s "$VOLUME_1/hdfs-data" "$DOCKER_DATA/hdfs-data1" +ln -s "$VOLUME_2/hdfs-data" "$DOCKER_DATA/hdfs-data2" +ln -s "$VOLUME_1/hdfs-name" "$DOCKER_DATA/hdfs-name1" +ln -s "$VOLUME_2/hdfs-name" "$DOCKER_DATA/hdfs-name2" ``` Modify `mail.env.template` to set a SMTP host to send emails with, and move it to `mail.env`. The configuration settings are passed to a [namshi/smtp](https://hub.docker.com/r/namshi/smtp/) Docker container. diff --git a/dcompose-stack/radar-cp-hadoop-stack/docker-compose.yml b/dcompose-stack/radar-cp-hadoop-stack/docker-compose.yml index 21a935a27..b9d214838 100644 --- a/dcompose-stack/radar-cp-hadoop-stack/docker-compose.yml +++ b/dcompose-stack/radar-cp-hadoop-stack/docker-compose.yml @@ -170,13 +170,16 @@ services: HDFS_CONF_dfs_replication: 2 hdfs-namenode: - image: uhopper/hadoop-namenode:2.7.2 + build: hdfs-namenode + image: radarcns/hdfs-namenode:2.7.2 networks: - hadoop volumes: - - /usr/local/var/lib/docker/hdfs-name:/hadoop/dfs/name + - /usr/local/var/lib/docker/hdfs-name1:/hadoop/dfs/name/1 + - /usr/local/var/lib/docker/hdfs-name2:/hadoop/dfs/name/2 environment: CLUSTER_NAME: radar-cns + HDFS_CONF_dfs_namenode_name_dir: file:///hadoop/dfs/name/1,file:///hadoop/dfs/name/2 #---------------------------------------------------------------------------# # Email server # diff --git a/dcompose-stack/radar-cp-hadoop-stack/hdfs-namenode/Dockerfile b/dcompose-stack/radar-cp-hadoop-stack/hdfs-namenode/Dockerfile new file mode 100644 index 000000000..82f947883 --- /dev/null +++ b/dcompose-stack/radar-cp-hadoop-stack/hdfs-namenode/Dockerfile @@ -0,0 +1,4 @@ +FROM uhopper/hadoop-namenode:2.7.2 + +ADD run.sh /run.sh +RUN chmod a+x /run.sh diff --git a/dcompose-stack/radar-cp-hadoop-stack/hdfs-namenode/run.sh b/dcompose-stack/radar-cp-hadoop-stack/hdfs-namenode/run.sh new file mode 100644 index 000000000..c4f7bbb2e --- /dev/null +++ b/dcompose-stack/radar-cp-hadoop-stack/hdfs-namenode/run.sh @@ -0,0 +1,24 @@ +#!/bin/bash + +if [ -z "$CLUSTER_NAME" ]; then + echo "Cluster name not specified" + exit 2 +fi + +IFS=',' read -r -a namedirs <<< $(echo "$HDFS_CONF_dfs_namenode_name_dir" | sed -e 's#file://##g') + +for namedir in "${namedirs[@]}"; do + mkdir -p "$namedir" + if [ ! 
-d "$namedir" ]; then + echo "Namenode name directory not found: $namedir" + exit 2 + fi + + if [ -z "$(ls -A "$namedir")" ]; then + echo "Formatting namenode name directory: $namedir is not yet formatted" + $HADOOP_PREFIX/bin/hdfs --config $HADOOP_CONF_DIR namenode -format $CLUSTER_NAME + break + fi +done + +$HADOOP_PREFIX/bin/hdfs --config $HADOOP_CONF_DIR namenode From 306d360f9ee0840f1ed5b71f8a4f9e95babe88aa Mon Sep 17 00:00:00 2001 From: "Amos Folarin (home)" Date: Mon, 16 Jan 2017 09:44:24 +0000 Subject: [PATCH 019/197] move swarm deploy to different folder on different branch --- .../radar-cp-hadoop-stack/docker-compose.yml | 86 +++++---- dcompose-stack/radar-cp-swarm-stack/README.md | 7 + .../radar-cp-swarm-stack/docker-compose.yml | 181 ++++++++++++++++++ 3 files changed, 236 insertions(+), 38 deletions(-) create mode 100644 dcompose-stack/radar-cp-swarm-stack/README.md create mode 100644 dcompose-stack/radar-cp-swarm-stack/docker-compose.yml diff --git a/dcompose-stack/radar-cp-hadoop-stack/docker-compose.yml b/dcompose-stack/radar-cp-hadoop-stack/docker-compose.yml index 70684123a..21a935a27 100644 --- a/dcompose-stack/radar-cp-hadoop-stack/docker-compose.yml +++ b/dcompose-stack/radar-cp-hadoop-stack/docker-compose.yml @@ -1,15 +1,17 @@ --- -version: '3' +version: '2' networks: zookeeper: - driver: overlay + driver: bridge kafka: - driver: overlay + driver: bridge api: - driver: overlay - #hadoop: - # external: true + driver: bridge + hadoop: + external: true + mail: + driver: bridge services: @@ -134,7 +136,7 @@ services: #---------------------------------------------------------------------------# # RADAR Dashboard # #---------------------------------------------------------------------------# - nodejs: + dashboard: image: radarcns/radar-dashboard:latest networks: - api @@ -147,35 +149,43 @@ services: #---------------------------------------------------------------------------# # RADAR Cold Storage # #---------------------------------------------------------------------------# - #datanode-1: - # image: uhopper/hadoop-datanode:2.7.2 - # domainname: hadoop - # networks: - # - hadoop - # volumes: - # - /usr/local/var/lib/docker/hadoop-data1:/hadoop/dfs/data - # environment: - # - CORE_CONF_fs_defaultFS=hdfs://namenode:8020 - # - HDFS_CONF_dfs_replication=2 - #datanode-2: - # image: uhopper/hadoop-datanode:2.7.2 - # domainname: hadoop - # networks: - # - hadoop - # volumes: - # - /usr/local/var/lib/docker/hadoop-data2:/hadoop/dfs/data - # environment: - # - CORE_CONF_fs_defaultFS=hdfs://namenode:8020 - # - HDFS_CONF_dfs_replication=2 - #namenode: - # image: uhopper/hadoop-namenode:2.7.2 - # hostname: namenode - # container_name: namenode - # domainname: hadoop - # networks: - # - hadoop - # volumes: - # - /usr/local/var/lib/docker/hadoop-name:/hadoop/dfs/name - # environment: - # - CLUSTER_NAME=radar-cns + hdfs-datanode-1: + image: uhopper/hadoop-datanode:2.7.2 + networks: + - hadoop + volumes: + - /usr/local/var/lib/docker/hdfs-data1:/hadoop/dfs/data + environment: + CORE_CONF_fs_defaultFS: hdfs://hdfs-namenode:8020 + HDFS_CONF_dfs_replication: 2 + + hdfs-datanode-2: + image: uhopper/hadoop-datanode:2.7.2 + networks: + - hadoop + volumes: + - /usr/local/var/lib/docker/hdfs-data2:/hadoop/dfs/data + environment: + CORE_CONF_fs_defaultFS: hdfs://hdfs-namenode:8020 + HDFS_CONF_dfs_replication: 2 + hdfs-namenode: + image: uhopper/hadoop-namenode:2.7.2 + networks: + - hadoop + volumes: + - /usr/local/var/lib/docker/hdfs-name:/hadoop/dfs/name + environment: + CLUSTER_NAME: radar-cns + 
+ #---------------------------------------------------------------------------# + # Email server # + #---------------------------------------------------------------------------# + smtp: + image: namshi/smtp:latest + networks: + - mail + volumes: + - /var/spool/exim + env_file: + - smtp.env diff --git a/dcompose-stack/radar-cp-swarm-stack/README.md b/dcompose-stack/radar-cp-swarm-stack/README.md new file mode 100644 index 000000000..5a9061220 --- /dev/null +++ b/dcompose-stack/radar-cp-swarm-stack/README.md @@ -0,0 +1,7 @@ +# RADAR-CNS with multi-node cluster using Docker Swarm + +# Run the full setup with +```shell +docker deploy --compose-file docker-compose.yml radar-cns-stack +``` + diff --git a/dcompose-stack/radar-cp-swarm-stack/docker-compose.yml b/dcompose-stack/radar-cp-swarm-stack/docker-compose.yml new file mode 100644 index 000000000..70684123a --- /dev/null +++ b/dcompose-stack/radar-cp-swarm-stack/docker-compose.yml @@ -0,0 +1,181 @@ +--- +version: '3' + +networks: + zookeeper: + driver: overlay + kafka: + driver: overlay + api: + driver: overlay + #hadoop: + # external: true + +services: + + #---------------------------------------------------------------------------# + # Zookeeper Cluster # + #---------------------------------------------------------------------------# + zookeeper-1: + image: confluentinc/cp-zookeeper:3.1.1 + networks: + - zookeeper + environment: + ZOOKEEPER_SERVER_ID: 1 + ZOOKEEPER_CLIENT_PORT: 2181 + ZOOKEEPER_TICK_TIME: 2000 + ZOOKEEPER_INIT_LIMIT: 5 + ZOOKEEPER_SYNC_LIMIT: 2 + ZOOKEEPER_SERVERS: zookeeper-1:2888:3888 + + #---------------------------------------------------------------------------# + # Kafka Cluster # + #---------------------------------------------------------------------------# + kafka-1: + image: confluentinc/cp-kafka:3.1.1 + networks: + - kafka + - zookeeper + depends_on: + - zookeeper-1 + environment: + KAFKA_BROKER_ID: 1 + KAFKA_ZOOKEEPER_CONNECT: zookeeper-1:2181 + KAFKA_ADVERTISED_LISTENERS: PLAINTEXT://kafka-1:9092 + + kafka-2: + image: confluentinc/cp-kafka:3.1.1 + networks: + - kafka + - zookeeper + depends_on: + - kafka-1 + environment: + KAFKA_BROKER_ID: 2 + KAFKA_ZOOKEEPER_CONNECT: zookeeper-1:2181 + KAFKA_ADVERTISED_LISTENERS: PLAINTEXT://kafka-2:9092 + + kafka-3: + image: confluentinc/cp-kafka:3.1.1 + networks: + - kafka + - zookeeper + depends_on: + - kafka-2 + environment: + KAFKA_BROKER_ID: 3 + KAFKA_ZOOKEEPER_CONNECT: zookeeper-1:2181 + KAFKA_ADVERTISED_LISTENERS: PLAINTEXT://kafka-3:9092 + + #---------------------------------------------------------------------------# + # Schema Registry # + #---------------------------------------------------------------------------# + schema-registry-1: + image: confluentinc/cp-schema-registry:3.1.1 + networks: + - kafka + - zookeeper + depends_on: + - kafka-1 + - kafka-2 + - kafka-3 + restart: always + ports: + - "8081:8081" + environment: + SCHEMA_REGISTRY_KAFKASTORE_CONNECTION_URL: zookeeper-1:2181 + SCHEMA_REGISTRY_HOST_NAME: schema-registry-1 + SCHEMA_REGISTRY_LISTENERS: http://schema-registry-1:8081 + + #---------------------------------------------------------------------------# + # REST proxy # + #---------------------------------------------------------------------------# + rest-proxy-1: + image: confluentinc/cp-kafka-rest:3.1.1 + networks: + - kafka + - zookeeper + depends_on: + - kafka-1 + - kafka-2 + - kafka-3 + - schema-registry-1 + ports: + - "8082:8082" + environment: + KAFKA_REST_ZOOKEEPER_CONNECT: zookeeper-1:2181 + KAFKA_REST_LISTENERS: http://rest-proxy-1:8082 + 
KAFKA_REST_SCHEMA_REGISTRY_URL: http://schema-registry-1:8081 + KAFKA_REST_HOST_NAME: rest-proxy-1 + + + #---------------------------------------------------------------------------# + # RADAR Hot Storage # + #---------------------------------------------------------------------------# + mongo: + image: mongo:3.2.10 + networks: + - api + ports: + - "27017:27017" + + #---------------------------------------------------------------------------# + # RADAR REST API # + #---------------------------------------------------------------------------# + tomcat: + image: tomcat:8.0.37 + networks: + - api + ports: + - "8080:8080" + depends_on: + - mongo + + #---------------------------------------------------------------------------# + # RADAR Dashboard # + #---------------------------------------------------------------------------# + nodejs: + image: radarcns/radar-dashboard:latest + networks: + - api + ports: + - "3000:3000" + depends_on: + - tomcat + + + #---------------------------------------------------------------------------# + # RADAR Cold Storage # + #---------------------------------------------------------------------------# + #datanode-1: + # image: uhopper/hadoop-datanode:2.7.2 + # domainname: hadoop + # networks: + # - hadoop + # volumes: + # - /usr/local/var/lib/docker/hadoop-data1:/hadoop/dfs/data + # environment: + # - CORE_CONF_fs_defaultFS=hdfs://namenode:8020 + # - HDFS_CONF_dfs_replication=2 + #datanode-2: + # image: uhopper/hadoop-datanode:2.7.2 + # domainname: hadoop + # networks: + # - hadoop + # volumes: + # - /usr/local/var/lib/docker/hadoop-data2:/hadoop/dfs/data + # environment: + # - CORE_CONF_fs_defaultFS=hdfs://namenode:8020 + # - HDFS_CONF_dfs_replication=2 + #namenode: + # image: uhopper/hadoop-namenode:2.7.2 + # hostname: namenode + # container_name: namenode + # domainname: hadoop + # networks: + # - hadoop + # volumes: + # - /usr/local/var/lib/docker/hadoop-name:/hadoop/dfs/name + # environment: + # - CLUSTER_NAME=radar-cns + From 957ecf9055b573a720bc7c7f7c4786d01b0f04b7 Mon Sep 17 00:00:00 2001 From: Joris Borgdorff Date: Tue, 17 Jan 2017 10:21:33 +0100 Subject: [PATCH 020/197] Moved HDFS volume configuration to a separate `.env` file --- dcompose-stack/radar-cp-hadoop-stack/.env | 4 ++++ .../radar-cp-hadoop-stack/README.md | 22 ++++--------------- .../radar-cp-hadoop-stack/docker-compose.yml | 8 +++---- 3 files changed, 12 insertions(+), 22 deletions(-) create mode 100644 dcompose-stack/radar-cp-hadoop-stack/.env diff --git a/dcompose-stack/radar-cp-hadoop-stack/.env b/dcompose-stack/radar-cp-hadoop-stack/.env new file mode 100644 index 000000000..7a1cbdf58 --- /dev/null +++ b/dcompose-stack/radar-cp-hadoop-stack/.env @@ -0,0 +1,4 @@ +HDFS_DATA_DIR_1=/usr/local/var/lib/docker/hdfs-data-1 +HDFS_DATA_DIR_2=/usr/local/var/lib/docker/hdfs-data-2 +HDFS_NAME_DIR_1=/usr/local/var/lib/docker/hdfs-name-1 +HDFS_NAME_DIR_2=/usr/local/var/lib/docker/hdfs-name-2 diff --git a/dcompose-stack/radar-cp-hadoop-stack/README.md b/dcompose-stack/radar-cp-hadoop-stack/README.md index 946e0374b..6fb51c7b6 100644 --- a/dcompose-stack/radar-cp-hadoop-stack/README.md +++ b/dcompose-stack/radar-cp-hadoop-stack/README.md @@ -1,29 +1,15 @@ # RADAR-CNS with a HDFS connector -In the Dockerfile, 2 redundant HDFS volumes and the name directory are mounted. The local root path for those volumes has to be created before the first run. Also, create a docker `hadoop` network. +In the Dockerfile, 2 redundant HDFS volumes and 2 redundant directories are mounted. 
Set these directories in the `.env` file, and ensure that their parent directory exists. For proper redundancy, the directories should be set to different physical volumes. -```shell -DOCKER_DATA=/usr/local/var/lib/docker -mkdir -p $DOCKER_DATA -docker network create hadoop -``` +Modify `smtp.env.template` to set a SMTP host to send emails with, and move it to `smtp.env`. The configuration settings are passed to a [namshi/smtp](https://hub.docker.com/r/namshi/smtp/) Docker container. This container supports a.o. regular SMTP and GMail. -For a redundant data storage, instead of the directories created in `$DOCKER_DATA`, make symlinks to different volumes: +Then, create a docker `hadoop` network. ```shell -DOCKER_DATA=/usr/local/var/lib/docker -VOLUME_1=/volume1 -VOLUME_2=/volume1 -mkdir -p "$VOLUME_1/hdfs-data" "$VOLUME_1/hdfs-name" -mkdir -p "$VOLUME_2/hdfs-data" "$VOLUME_2/hdfs-name" -ln -s "$VOLUME_1/hdfs-data" "$DOCKER_DATA/hdfs-data1" -ln -s "$VOLUME_2/hdfs-data" "$DOCKER_DATA/hdfs-data2" -ln -s "$VOLUME_1/hdfs-name" "$DOCKER_DATA/hdfs-name1" -ln -s "$VOLUME_2/hdfs-name" "$DOCKER_DATA/hdfs-name2" +docker network create hadoop ``` -Modify `mail.env.template` to set a SMTP host to send emails with, and move it to `mail.env`. The configuration settings are passed to a [namshi/smtp](https://hub.docker.com/r/namshi/smtp/) Docker container. - Run the full setup with ```shell sudo docker-compose up -d diff --git a/dcompose-stack/radar-cp-hadoop-stack/docker-compose.yml b/dcompose-stack/radar-cp-hadoop-stack/docker-compose.yml index b9d214838..ea1e9cc67 100644 --- a/dcompose-stack/radar-cp-hadoop-stack/docker-compose.yml +++ b/dcompose-stack/radar-cp-hadoop-stack/docker-compose.yml @@ -154,7 +154,7 @@ services: networks: - hadoop volumes: - - /usr/local/var/lib/docker/hdfs-data1:/hadoop/dfs/data + - "${HDFS_DATA_DIR_1}:/hadoop/dfs/data" environment: CORE_CONF_fs_defaultFS: hdfs://hdfs-namenode:8020 HDFS_CONF_dfs_replication: 2 @@ -164,7 +164,7 @@ services: networks: - hadoop volumes: - - /usr/local/var/lib/docker/hdfs-data2:/hadoop/dfs/data + - "${HDFS_DATA_DIR_2}:/hadoop/dfs/data" environment: CORE_CONF_fs_defaultFS: hdfs://hdfs-namenode:8020 HDFS_CONF_dfs_replication: 2 @@ -175,8 +175,8 @@ services: networks: - hadoop volumes: - - /usr/local/var/lib/docker/hdfs-name1:/hadoop/dfs/name/1 - - /usr/local/var/lib/docker/hdfs-name2:/hadoop/dfs/name/2 + - "${HDFS_NAME_DIR_1}:/hadoop/dfs/name/1" + - "${HDFS_NAME_DIR_2}:/hadoop/dfs/name/2" environment: CLUSTER_NAME: radar-cns HDFS_CONF_dfs_namenode_name_dir: file:///hadoop/dfs/name/1,file:///hadoop/dfs/name/2 From 2db2f3b5760ee8b44025cddf00712ebe2a4d70ba Mon Sep 17 00:00:00 2001 From: Joris Borgdorff Date: Tue, 17 Jan 2017 10:26:58 +0100 Subject: [PATCH 021/197] Added Travis test This does not include any testing on whether the containers actually run, only whether the docker-compose commands run succesfully. 
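A minimal smoke test along these lines can also check that the services stay up, not just that the compose files parse; the sketch below assumes the docker-compose v1 `ps` table output, whose two header lines are skipped with `tail -n +3`:

```shell
sudo docker-compose up -d --build
sleep 15
# Fail the run if any service is in the "Exit" state.
if sudo docker-compose ps | tail -n +3 | grep -q " Exit "; then
  echo "one or more containers exited" >&2
  exit 1
fi
sudo docker-compose down
```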
--- .travis.yml | 27 +++++++++++++++++++++++++++ 1 file changed, 27 insertions(+) create mode 100644 .travis.yml diff --git a/.travis.yml b/.travis.yml new file mode 100644 index 000000000..d040affba --- /dev/null +++ b/.travis.yml @@ -0,0 +1,27 @@ +sudo: required +services: + - docker +env: + DOCKER_COMPOSE_VERSION: 1.5.0 + +before_install: + - mkdir -p "$HOME/bin"; + - export PATH="$PATH:$HOME/bin"; + - curl -L https://github.com/docker/compose/releases/download/${DOCKER_COMPOSE_VERSION}/docker-compose-`uname -s`-`uname -m` > "$HOME/bin/docker-compose"; + - chmod +x "$HOME/bin/docker-compose"; +script: + - cd dcompose-stack/radar-cp-stack + - sudo docker-compose up -d build + - sudo docker-compose down + - cd ../radar-cp-sasl-stack + - sudo docker-compose up -d build + - sudo docker-compose down + - cd ../radar-cp-hadoop-stack + - sudo docker network create hadoop + - export HDFS_DATA_DIR_1=$PWD/hdfs-data1 + - export HDFS_DATA_DIR_2=$PWD/hdfs-data2 + - export HDFS_NAME_DIR_1=$PWD/hdfs-name1 + - export HDFS_NAME_DIR_2=$PWD/hdfs-name2 + - sudo docker-compose up -d build + - sudo docker-compose down + From e788990eab0d67dbfa3a1a9a27cf9181117b9316 Mon Sep 17 00:00:00 2001 From: Joris Borgdorff Date: Tue, 17 Jan 2017 10:30:05 +0100 Subject: [PATCH 022/197] Fixed docker-compose flag --- .travis.yml | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/.travis.yml b/.travis.yml index d040affba..5c73a0e38 100644 --- a/.travis.yml +++ b/.travis.yml @@ -11,10 +11,10 @@ before_install: - chmod +x "$HOME/bin/docker-compose"; script: - cd dcompose-stack/radar-cp-stack - - sudo docker-compose up -d build + - sudo docker-compose up -d --build - sudo docker-compose down - cd ../radar-cp-sasl-stack - - sudo docker-compose up -d build + - sudo docker-compose up -d --build - sudo docker-compose down - cd ../radar-cp-hadoop-stack - sudo docker network create hadoop @@ -22,6 +22,6 @@ script: - export HDFS_DATA_DIR_2=$PWD/hdfs-data2 - export HDFS_NAME_DIR_1=$PWD/hdfs-name1 - export HDFS_NAME_DIR_2=$PWD/hdfs-name2 - - sudo docker-compose up -d build + - sudo docker-compose up -d --build - sudo docker-compose down From ff91277e2b9494618fd594d48fd9816a6d18b6bb Mon Sep 17 00:00:00 2001 From: Joris Borgdorff Date: Tue, 17 Jan 2017 10:43:38 +0100 Subject: [PATCH 023/197] Test whether the docker containers are actually run --- .travis.yml | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/.travis.yml b/.travis.yml index 5c73a0e38..0e111d558 100644 --- a/.travis.yml +++ b/.travis.yml @@ -11,10 +11,10 @@ before_install: - chmod +x "$HOME/bin/docker-compose"; script: - cd dcompose-stack/radar-cp-stack - - sudo docker-compose up -d --build + - sudo docker-compose up -d --build && sleep 15 && [ -z "$(docker-compose ps | tail -n +3 | grep " Exit ")" ] - sudo docker-compose down - cd ../radar-cp-sasl-stack - - sudo docker-compose up -d --build + - sudo docker-compose up -d --build && sleep 15 && [ -z "$(docker-compose ps | tail -n +3 | grep " Exit ")" ] - sudo docker-compose down - cd ../radar-cp-hadoop-stack - sudo docker network create hadoop @@ -22,6 +22,7 @@ script: - export HDFS_DATA_DIR_2=$PWD/hdfs-data2 - export HDFS_NAME_DIR_1=$PWD/hdfs-name1 - export HDFS_NAME_DIR_2=$PWD/hdfs-name2 - - sudo docker-compose up -d --build + - echo $"SMARTHOST_ADDRESS=mail.example.com\nSMARTHOST_PORT=587\nSMARTHOST_USER=user@example.com\nSMARTHOST_PASSWORD=XXXXXXXX" > smtp.env + - sudo docker-compose up -d --build && sleep 15 && [ -z "$(docker-compose ps | tail -n +3 | grep " Exit ")" ] - sudo 
docker-compose down From 5825152c21b2b9542db3422c53cdbb1b57fec22a Mon Sep 17 00:00:00 2001 From: Joris Borgdorff Date: Tue, 17 Jan 2017 10:48:59 +0100 Subject: [PATCH 024/197] Using Travis "generic" language See https://github.com/travis-ci/travis-ci/issues/2867#issuecomment-59262795 --- .travis.yml | 1 + 1 file changed, 1 insertion(+) diff --git a/.travis.yml b/.travis.yml index 0e111d558..c9a766e1c 100644 --- a/.travis.yml +++ b/.travis.yml @@ -1,3 +1,4 @@ +language: generic sudo: required services: - docker From 3f75a97ba2461a212c8402d2855eea2b4a276e56 Mon Sep 17 00:00:00 2001 From: Joris Borgdorff Date: Tue, 17 Jan 2017 10:55:12 +0100 Subject: [PATCH 025/197] Using latest stable version of docker-compose and added documentation --- .travis.yml | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/.travis.yml b/.travis.yml index c9a766e1c..a3dcedca6 100644 --- a/.travis.yml +++ b/.travis.yml @@ -3,7 +3,7 @@ sudo: required services: - docker env: - DOCKER_COMPOSE_VERSION: 1.5.0 + DOCKER_COMPOSE_VERSION: 1.9.0 before_install: - mkdir -p "$HOME/bin"; @@ -11,12 +11,17 @@ before_install: - curl -L https://github.com/docker/compose/releases/download/${DOCKER_COMPOSE_VERSION}/docker-compose-`uname -s`-`uname -m` > "$HOME/bin/docker-compose"; - chmod +x "$HOME/bin/docker-compose"; script: + # Standard stack - cd dcompose-stack/radar-cp-stack - sudo docker-compose up -d --build && sleep 15 && [ -z "$(docker-compose ps | tail -n +3 | grep " Exit ")" ] - sudo docker-compose down + + # With kerberos support - cd ../radar-cp-sasl-stack - sudo docker-compose up -d --build && sleep 15 && [ -z "$(docker-compose ps | tail -n +3 | grep " Exit ")" ] - sudo docker-compose down + + # With email and HDFS support - cd ../radar-cp-hadoop-stack - sudo docker network create hadoop - export HDFS_DATA_DIR_1=$PWD/hdfs-data1 From db1f1a7843a26dbbda0670fc3d0d4aeccdf02e3a Mon Sep 17 00:00:00 2001 From: Joris Borgdorff Date: Tue, 17 Jan 2017 11:15:04 +0100 Subject: [PATCH 026/197] Do not test docker-compose files that are known to fail --- .travis.yml | 14 +++++++++++--- 1 file changed, 11 insertions(+), 3 deletions(-) diff --git a/.travis.yml b/.travis.yml index a3dcedca6..8587b95c7 100644 --- a/.travis.yml +++ b/.travis.yml @@ -17,9 +17,10 @@ script: - sudo docker-compose down # With kerberos support - - cd ../radar-cp-sasl-stack - - sudo docker-compose up -d --build && sleep 15 && [ -z "$(docker-compose ps | tail -n +3 | grep " Exit ")" ] - - sudo docker-compose down + # NOT SUPPORTED: kerberos image cannot be found + #- cd ../radar-cp-sasl-stack + #- sudo docker-compose up -d --build && sleep 15 && [ -z "$(docker-compose ps | tail -n +3 | grep " Exit ")" ] + #- sudo docker-compose down # With email and HDFS support - cd ../radar-cp-hadoop-stack @@ -31,4 +32,11 @@ script: - echo $"SMARTHOST_ADDRESS=mail.example.com\nSMARTHOST_PORT=587\nSMARTHOST_USER=user@example.com\nSMARTHOST_PASSWORD=XXXXXXXX" > smtp.env - sudo docker-compose up -d --build && sleep 15 && [ -z "$(docker-compose ps | tail -n +3 | grep " Exit ")" ] - sudo docker-compose down + - sudo docker network rm hadoop + # With Docker Swarm support + # NOT SUPPORTED: docker swarm and docker beta features are not available in Travis + #- cd ../radar-cp-swarm-stack + #- sudo docker network create --attachable hadoop + #- sudo docker-compose up -d --build && sleep 15 && [ -z "$(docker-compose ps | tail -n +3 | grep " Exit ")" ] + #- sudo docker-compose down From fd23b3b7b521ce7422127a15f8218021ff807d6a Mon Sep 17 00:00:00 2001 From: Joris 
Borgdorff Date: Tue, 17 Jan 2017 11:26:08 +0100 Subject: [PATCH 027/197] Moved work in progress stacks to wip folder --- {dcompose-stack => wip}/radar-cp-sasl-stack/docker-compose.yml | 0 .../radar-cp-sasl-stack/secrets/broker1_jaas.conf | 0 .../radar-cp-sasl-stack/secrets/broker2_jaas.conf | 0 .../radar-cp-sasl-stack/secrets/broker3_jaas.conf | 0 .../radar-cp-sasl-stack/secrets/consumer_jaas.conf | 0 .../radar-cp-sasl-stack/secrets/create-certs.sh | 0 .../radar-cp-sasl-stack/secrets/host.consumer.ssl.config | 0 .../radar-cp-sasl-stack/secrets/host.consumer.ssl.sasl.config | 0 .../radar-cp-sasl-stack/secrets/host.producer.ssl.config | 0 .../radar-cp-sasl-stack/secrets/host.producer.ssl.sasl.config | 0 {dcompose-stack => wip}/radar-cp-sasl-stack/secrets/krb.conf | 0 .../radar-cp-sasl-stack/secrets/producer_jaas.conf | 0 .../radar-cp-sasl-stack/secrets/zookeeper_1_jaas.conf | 0 .../radar-cp-sasl-stack/secrets/zookeeper_2_jaas.conf | 0 .../radar-cp-sasl-stack/secrets/zookeeper_3_jaas.conf | 0 {dcompose-stack => wip}/radar-cp-swarm-stack/README.md | 0 {dcompose-stack => wip}/radar-cp-swarm-stack/docker-compose.yml | 0 17 files changed, 0 insertions(+), 0 deletions(-) rename {dcompose-stack => wip}/radar-cp-sasl-stack/docker-compose.yml (100%) rename {dcompose-stack => wip}/radar-cp-sasl-stack/secrets/broker1_jaas.conf (100%) rename {dcompose-stack => wip}/radar-cp-sasl-stack/secrets/broker2_jaas.conf (100%) rename {dcompose-stack => wip}/radar-cp-sasl-stack/secrets/broker3_jaas.conf (100%) rename {dcompose-stack => wip}/radar-cp-sasl-stack/secrets/consumer_jaas.conf (100%) rename {dcompose-stack => wip}/radar-cp-sasl-stack/secrets/create-certs.sh (100%) rename {dcompose-stack => wip}/radar-cp-sasl-stack/secrets/host.consumer.ssl.config (100%) rename {dcompose-stack => wip}/radar-cp-sasl-stack/secrets/host.consumer.ssl.sasl.config (100%) rename {dcompose-stack => wip}/radar-cp-sasl-stack/secrets/host.producer.ssl.config (100%) rename {dcompose-stack => wip}/radar-cp-sasl-stack/secrets/host.producer.ssl.sasl.config (100%) rename {dcompose-stack => wip}/radar-cp-sasl-stack/secrets/krb.conf (100%) rename {dcompose-stack => wip}/radar-cp-sasl-stack/secrets/producer_jaas.conf (100%) rename {dcompose-stack => wip}/radar-cp-sasl-stack/secrets/zookeeper_1_jaas.conf (100%) rename {dcompose-stack => wip}/radar-cp-sasl-stack/secrets/zookeeper_2_jaas.conf (100%) rename {dcompose-stack => wip}/radar-cp-sasl-stack/secrets/zookeeper_3_jaas.conf (100%) rename {dcompose-stack => wip}/radar-cp-swarm-stack/README.md (100%) rename {dcompose-stack => wip}/radar-cp-swarm-stack/docker-compose.yml (100%) diff --git a/dcompose-stack/radar-cp-sasl-stack/docker-compose.yml b/wip/radar-cp-sasl-stack/docker-compose.yml similarity index 100% rename from dcompose-stack/radar-cp-sasl-stack/docker-compose.yml rename to wip/radar-cp-sasl-stack/docker-compose.yml diff --git a/dcompose-stack/radar-cp-sasl-stack/secrets/broker1_jaas.conf b/wip/radar-cp-sasl-stack/secrets/broker1_jaas.conf similarity index 100% rename from dcompose-stack/radar-cp-sasl-stack/secrets/broker1_jaas.conf rename to wip/radar-cp-sasl-stack/secrets/broker1_jaas.conf diff --git a/dcompose-stack/radar-cp-sasl-stack/secrets/broker2_jaas.conf b/wip/radar-cp-sasl-stack/secrets/broker2_jaas.conf similarity index 100% rename from dcompose-stack/radar-cp-sasl-stack/secrets/broker2_jaas.conf rename to wip/radar-cp-sasl-stack/secrets/broker2_jaas.conf diff --git a/dcompose-stack/radar-cp-sasl-stack/secrets/broker3_jaas.conf 
b/wip/radar-cp-sasl-stack/secrets/broker3_jaas.conf similarity index 100% rename from dcompose-stack/radar-cp-sasl-stack/secrets/broker3_jaas.conf rename to wip/radar-cp-sasl-stack/secrets/broker3_jaas.conf diff --git a/dcompose-stack/radar-cp-sasl-stack/secrets/consumer_jaas.conf b/wip/radar-cp-sasl-stack/secrets/consumer_jaas.conf similarity index 100% rename from dcompose-stack/radar-cp-sasl-stack/secrets/consumer_jaas.conf rename to wip/radar-cp-sasl-stack/secrets/consumer_jaas.conf diff --git a/dcompose-stack/radar-cp-sasl-stack/secrets/create-certs.sh b/wip/radar-cp-sasl-stack/secrets/create-certs.sh similarity index 100% rename from dcompose-stack/radar-cp-sasl-stack/secrets/create-certs.sh rename to wip/radar-cp-sasl-stack/secrets/create-certs.sh diff --git a/dcompose-stack/radar-cp-sasl-stack/secrets/host.consumer.ssl.config b/wip/radar-cp-sasl-stack/secrets/host.consumer.ssl.config similarity index 100% rename from dcompose-stack/radar-cp-sasl-stack/secrets/host.consumer.ssl.config rename to wip/radar-cp-sasl-stack/secrets/host.consumer.ssl.config diff --git a/dcompose-stack/radar-cp-sasl-stack/secrets/host.consumer.ssl.sasl.config b/wip/radar-cp-sasl-stack/secrets/host.consumer.ssl.sasl.config similarity index 100% rename from dcompose-stack/radar-cp-sasl-stack/secrets/host.consumer.ssl.sasl.config rename to wip/radar-cp-sasl-stack/secrets/host.consumer.ssl.sasl.config diff --git a/dcompose-stack/radar-cp-sasl-stack/secrets/host.producer.ssl.config b/wip/radar-cp-sasl-stack/secrets/host.producer.ssl.config similarity index 100% rename from dcompose-stack/radar-cp-sasl-stack/secrets/host.producer.ssl.config rename to wip/radar-cp-sasl-stack/secrets/host.producer.ssl.config diff --git a/dcompose-stack/radar-cp-sasl-stack/secrets/host.producer.ssl.sasl.config b/wip/radar-cp-sasl-stack/secrets/host.producer.ssl.sasl.config similarity index 100% rename from dcompose-stack/radar-cp-sasl-stack/secrets/host.producer.ssl.sasl.config rename to wip/radar-cp-sasl-stack/secrets/host.producer.ssl.sasl.config diff --git a/dcompose-stack/radar-cp-sasl-stack/secrets/krb.conf b/wip/radar-cp-sasl-stack/secrets/krb.conf similarity index 100% rename from dcompose-stack/radar-cp-sasl-stack/secrets/krb.conf rename to wip/radar-cp-sasl-stack/secrets/krb.conf diff --git a/dcompose-stack/radar-cp-sasl-stack/secrets/producer_jaas.conf b/wip/radar-cp-sasl-stack/secrets/producer_jaas.conf similarity index 100% rename from dcompose-stack/radar-cp-sasl-stack/secrets/producer_jaas.conf rename to wip/radar-cp-sasl-stack/secrets/producer_jaas.conf diff --git a/dcompose-stack/radar-cp-sasl-stack/secrets/zookeeper_1_jaas.conf b/wip/radar-cp-sasl-stack/secrets/zookeeper_1_jaas.conf similarity index 100% rename from dcompose-stack/radar-cp-sasl-stack/secrets/zookeeper_1_jaas.conf rename to wip/radar-cp-sasl-stack/secrets/zookeeper_1_jaas.conf diff --git a/dcompose-stack/radar-cp-sasl-stack/secrets/zookeeper_2_jaas.conf b/wip/radar-cp-sasl-stack/secrets/zookeeper_2_jaas.conf similarity index 100% rename from dcompose-stack/radar-cp-sasl-stack/secrets/zookeeper_2_jaas.conf rename to wip/radar-cp-sasl-stack/secrets/zookeeper_2_jaas.conf diff --git a/dcompose-stack/radar-cp-sasl-stack/secrets/zookeeper_3_jaas.conf b/wip/radar-cp-sasl-stack/secrets/zookeeper_3_jaas.conf similarity index 100% rename from dcompose-stack/radar-cp-sasl-stack/secrets/zookeeper_3_jaas.conf rename to wip/radar-cp-sasl-stack/secrets/zookeeper_3_jaas.conf diff --git a/dcompose-stack/radar-cp-swarm-stack/README.md 
b/wip/radar-cp-swarm-stack/README.md similarity index 100% rename from dcompose-stack/radar-cp-swarm-stack/README.md rename to wip/radar-cp-swarm-stack/README.md diff --git a/dcompose-stack/radar-cp-swarm-stack/docker-compose.yml b/wip/radar-cp-swarm-stack/docker-compose.yml similarity index 100% rename from dcompose-stack/radar-cp-swarm-stack/docker-compose.yml rename to wip/radar-cp-swarm-stack/docker-compose.yml From b37a35a098b1fde554170852828ab61a7899267a Mon Sep 17 00:00:00 2001 From: Joris Borgdorff Date: Tue, 17 Jan 2017 13:23:13 +0100 Subject: [PATCH 028/197] Added more information in the root README --- README.md | 36 ++++++++++++++++++++++++++---------- 1 file changed, 26 insertions(+), 10 deletions(-) diff --git a/README.md b/README.md index e677329b1..cedda4b3a 100644 --- a/README.md +++ b/README.md @@ -1,23 +1,39 @@ # RADAR-Docker + The dockerized RADAR stack or deploying the RADAR-CNS platform. Component repositories can be found here [RADAR-CNS DockerHub org](https://hub.docker.com/u/radarcns/dashboard/) -# Overview +## Installation instructions + +First install Docker and `docker-compose` for your respective platform. Docker has installers for [macOS](https://docs.docker.com/engine/installation/mac/) and [Windows](https://docs.docker.com/engine/installation/windows/). For Ubuntu, see our [wiki page](https://github.com/RADAR-CNS/RADAR-Docker/wiki/How-to-set-up-docker-on-ubuntu). For other Linux distributions, see [the list by Docker](https://docs.docker.com/engine/installation/). + +## Usage +We currently have two stacks available to run, one just for Kafka, and another for the complete RADAR-CNS platform. -# Deployment Instructions +### Kafka platform -## Non-Kerberized stack -```sh -$ cd RADAR-Docker/dcompose-stack/radar-cp-stack/ +In this stack, only the Confluent platform is set up + +```shell +$ cd dcompose-stack/radar-cp-stack/ $ docker-compose up ``` -## Kerberized stack +### RADAR-CNS platform + +In this stack, the Confluent platform is set up with a Hadoop data storage, email server, dashboard, MongoDB, and a REST API. See the README for the platform, in the `dcompose-stack/radar-hadoop-cp-stack` directory for more information on how to run it. -```sh -$ cd RADAR-Docker/dcompose-stack/radar-cp-sasl-stack/ +## Work in progress + +### Kerberized stack + +In this setup, Kerberos is used to secure the connections between the Kafka brokers, Zookeeper and the Kafka REST API. Unfortunately, the Kerberos container from Confluent is not publicly available, so an alternative has to be found here. + +```shell +$ cd wip/radar-cp-sasl-stack/ $ docker-compose up ``` -* still WIP * -# Multi-host setup +## Multi-host setup + +In the end, we aim to deploy the platform in a multi-host environment. We are currently aiming for a deployment with Docker Swarm. This setup uses features that are not yet released in the stable Docker Engine. Once they are, this stack may become the main Docker stack. See the `wip/radar-swarm-cp-stack/` directory for more information. From a2176c0d433ad4df9d19caf1013dfd47533d2a74 Mon Sep 17 00:00:00 2001 From: Joris Borgdorff Date: Tue, 17 Jan 2017 13:27:31 +0100 Subject: [PATCH 029/197] Update README.md --- README.md | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/README.md b/README.md index cedda4b3a..6a1cc89a0 100644 --- a/README.md +++ b/README.md @@ -8,11 +8,11 @@ First install Docker and `docker-compose` for your respective platform. 
Docker h ## Usage -We currently have two stacks available to run, one just for Kafka, and another for the complete RADAR-CNS platform. +We currently have two stacks available to run, one for the community parts of the Confluent Kafka Platform and another for the complete RADAR-CNS platform. -### Kafka platform +### Confluent Kafka platform -In this stack, only the Confluent platform is set up +In this stack, only the Confluent Kafka Platform is set up. ```shell $ cd dcompose-stack/radar-cp-stack/ @@ -21,7 +21,7 @@ $ docker-compose up ### RADAR-CNS platform -In this stack, the Confluent platform is set up with a Hadoop data storage, email server, dashboard, MongoDB, and a REST API. See the README for the platform, in the `dcompose-stack/radar-hadoop-cp-stack` directory for more information on how to run it. +In this stack, the Confluent platform is set up with a Hadoop data storage, email server, RADAR-Dashboard, RADAR-HotStorage, and a REST API. See the README in the `dcompose-stack/radar-hadoop-cp-stack` directory for more information on how to run it. ## Work in progress @@ -34,6 +34,6 @@ $ cd wip/radar-cp-sasl-stack/ $ docker-compose up ``` -## Multi-host setup +### Multi-host setup In the end, we aim to deploy the platform in a multi-host environment. We are currently aiming for a deployment with Docker Swarm. This setup uses features that are not yet released in the stable Docker Engine. Once they are, this stack may become the main Docker stack. See the `wip/radar-swarm-cp-stack/` directory for more information. From f6c569b8eb81c37171ea286a8429b2530359aef1 Mon Sep 17 00:00:00 2001 From: Joris Borgdorff Date: Tue, 17 Jan 2017 14:27:02 +0100 Subject: [PATCH 030/197] Monitoring of the docker-compose stack --- .../radar-cp-hadoop-stack/docker-compose.yml | 19 +++++++++++++++++++ 1 file changed, 19 insertions(+) diff --git a/dcompose-stack/radar-cp-hadoop-stack/docker-compose.yml b/dcompose-stack/radar-cp-hadoop-stack/docker-compose.yml index ea1e9cc67..49de42b51 100644 --- a/dcompose-stack/radar-cp-hadoop-stack/docker-compose.yml +++ b/dcompose-stack/radar-cp-hadoop-stack/docker-compose.yml @@ -192,3 +192,22 @@ services: - /var/spool/exim env_file: - smtp.env + + #---------------------------------------------------------------------------# + # Monitoring # + #---------------------------------------------------------------------------# + cadvisor: + image: google/cadvisor:v0.24.1 + ports: + - "8181:8080" + volumes: + - "/:/rootfs:ro" + - "/var/run:/var/run:rw" + - "/sys:/sys:ro" + - "/var/lib/docker/:/var/lib/docker:ro" + portainer: + image: portainer/portainer:1.11.1 + ports: + - "8182:9000" + volumes: + - "/var/run/docker.sock:/var/run/docker.sock" From 19e1c2e5cd8ed0a35136b77c3ecb26540e102df0 Mon Sep 17 00:00:00 2001 From: nivethika Date: Wed, 18 Jan 2017 16:26:03 +0100 Subject: [PATCH 031/197] docker-compose with backend --- .../radar-cp-hadoop-stack/docker-compose.yml | 117 ++++++++++++++++++ .../radar-cp-hadoop-stack/radar.yml | 47 +++++++ .../sink-hdfs.properties | 8 ++ .../sink-radar.properties | 25 ++++ 4 files changed, 197 insertions(+) create mode 100644 dcompose-stack/radar-cp-hadoop-stack/radar.yml create mode 100644 dcompose-stack/radar-cp-hadoop-stack/sink-hdfs.properties create mode 100644 dcompose-stack/radar-cp-hadoop-stack/sink-radar.properties diff --git a/dcompose-stack/radar-cp-hadoop-stack/docker-compose.yml b/dcompose-stack/radar-cp-hadoop-stack/docker-compose.yml index ea1e9cc67..60c284c37 100644 --- a/dcompose-stack/radar-cp-hadoop-stack/docker-compose.yml +++ 
b/dcompose-stack/radar-cp-hadoop-stack/docker-compose.yml @@ -192,3 +192,120 @@ services: - /var/spool/exim env_file: - smtp.env + + #---------------------------------------------------------------------------# + # RADAR mongo connector # + #---------------------------------------------------------------------------# + radar-mongodb-connector: + image: radarcns/radar-mongodb-connector:0.1 + restart: on-failure:3 + volumes: + - ./sink-radar.properties:/etc/kafka-connect/sink.properties + networks: + - zookeeper + - kafka + - api + depends_on: + - mongo + - zookeeper-1 + - kafka-1 + - kafka-2 + - kafka-3 + - schema-registry-1 + - rest-proxy-1 + environment: + CONNECT_BOOTSTRAP_SERVERS: PLAINTEXT://kafka-1:9092,PLAINTEXT://kafka-2:9092,PLAINTEXT://kafka-3:9092 + CONNECT_REST_PORT: 8082 + CONNECT_GROUP_ID: "default" + CONNECT_CONFIG_STORAGE_TOPIC: "default.config" + CONNECT_OFFSET_STORAGE_TOPIC: "default.offsets" + CONNECT_STATUS_STORAGE_TOPIC: "default.status" + CONNECT_KEY_CONVERTER: "io.confluent.connect.avro.AvroConverter" + CONNECT_VALUE_CONVERTER: "io.confluent.connect.avro.AvroConverter" + CONNECT_KEY_CONVERTER_SCHEMA_REGISTRY_URL: "http://schema-registry-1:8081" + CONNECT_VALUE_CONVERTER_SCHEMA_REGISTRY_URL: "http://schema-registry-1:8081" + CONNECT_INTERNAL_KEY_CONVERTER: "org.apache.kafka.connect.json.JsonConverter" + CONNECT_INTERNAL_VALUE_CONVERTER: "org.apache.kafka.connect.json.JsonConverter" + CONNECT_OFFSET_STORAGE_FILE_FILENAME: "/tmp/connect2.offset" + CONNECT_REST_ADVERTISED_HOST_NAME: "radar-mongodb-connector" + CONNECT_ZOOKEEPER_CONNECT: zookeeper:2181 + + #---------------------------------------------------------------------------# + # RADAR HDFS connector # + #---------------------------------------------------------------------------# + radar-hdfs-connector: + image: radarcns/radar-hdfs-connector:0.1 + restart: on-failure:3 + volumes: + - ./sink-hdfs.properties:/etc/kafka-connect/sink.properties + networks: + - zookeeper + - kafka + - api + - hadoop + depends_on: + - zookeeper-1 + - kafka-1 + - kafka-2 + - kafka-3 + - schema-registry-1 + - rest-proxy-1 + - hdfs-datanode-1 + - hdfs-datanode-2 + - hdfs-namenode + environment: + CONNECT_BOOTSTRAP_SERVERS: PLAINTEXT://kafka-1:9092,PLAINTEXT://kafka-2:9092,PLAINTEXT://kafka-3:9092 + CONNECT_REST_PORT: 8082 + CONNECT_GROUP_ID: "default" + CONNECT_CONFIG_STORAGE_TOPIC: "default.config" + CONNECT_OFFSET_STORAGE_TOPIC: "default.offsets" + CONNECT_STATUS_STORAGE_TOPIC: "default.status" + CONNECT_KEY_CONVERTER: "io.confluent.connect.avro.AvroConverter" + CONNECT_VALUE_CONVERTER: "io.confluent.connect.avro.AvroConverter" + CONNECT_KEY_CONVERTER_SCHEMA_REGISTRY_URL: "http://schema-registry-1:8081" + CONNECT_VALUE_CONVERTER_SCHEMA_REGISTRY_URL: "http://schema-registry-1:8081" + CONNECT_INTERNAL_KEY_CONVERTER: "org.apache.kafka.connect.json.JsonConverter" + CONNECT_INTERNAL_VALUE_CONVERTER: "org.apache.kafka.connect.json.JsonConverter" + CONNECT_OFFSET_STORAGE_FILE_FILENAME: "/tmp/connect2.offset" + CONNECT_REST_ADVERTISED_HOST_NAME: "radar-hdfs-connector" + CONNECT_ZOOKEEPER_CONNECT: zookeeper:2181 + + #---------------------------------------------------------------------------# + # RADAR backend streams # + #---------------------------------------------------------------------------# + radar-backend-stream: + image: radarcns/radar-backend-kafka:0.1 + command: + - stream + networks: + - zookeeper + - kafka + depends_on: + - zookeeper-1 + - kafka-1 + - kafka-2 + - kafka-3 + - schema-registry-1 + volumes: + - ./radar.yml:/etc/radar.yml 
+ + #---------------------------------------------------------------------------# + # RADAR backend monitor # + #---------------------------------------------------------------------------# + radar-backend-monitor: + image: radarcns/radar-backend-kafka:0.1 + command: + - monitor + networks: + - zookeeper + - kafka + - mail + depends_on: + - zookeeper-1 + - kafka-1 + - kafka-2 + - kafka-3 + - schema-registry-1 + - smtp + volumes: + - ./radar.yml:/etc/radar.yml diff --git a/dcompose-stack/radar-cp-hadoop-stack/radar.yml b/dcompose-stack/radar-cp-hadoop-stack/radar.yml new file mode 100644 index 000000000..a372883e2 --- /dev/null +++ b/dcompose-stack/radar-cp-hadoop-stack/radar.yml @@ -0,0 +1,47 @@ +version: 1.0 +released: 2016-11-27 + +#============================= Application =============================# +log_path: +#Possible value are standalone or high_performance +mode: standalone + +#============================== Zookeeper ==============================# +#List of Zookeeper instances +zookeeper: + - host: zookeeper-1 + port: 2181 + +#================================ Kafka ================================# +#List of Kafka brokers +broker: + - host: kafka-1 + port: 9092 + +#Kafka internal parameters +auto_commit_interval_ms: 1000 +session_timeout_ms: 10000 + +#============================ Kafka Streams ============================# +#The number of threads that a stream must be run according is priority +stream_priority: + low: 1 + normal: 2 + high: 4 + +#=========================== Schema Registry ===========================# +#List of Schema Registry instances +schema_registry: + - host: schema-registry-1 + port: 8081 + protocol: http + +#========================= Battery level monitor=========================# +battery_monitor: + level: CRITICAL + email_address: notifiersemail + email_host: smtp + email_port: 25 + email_user: user@example.com + topics: + - android_empatica_e4_battery_level \ No newline at end of file diff --git a/dcompose-stack/radar-cp-hadoop-stack/sink-hdfs.properties b/dcompose-stack/radar-cp-hadoop-stack/sink-hdfs.properties new file mode 100644 index 000000000..d5d598c5f --- /dev/null +++ b/dcompose-stack/radar-cp-hadoop-stack/sink-hdfs.properties @@ -0,0 +1,8 @@ +name=radar-hdfs-sink-android-15000 +connector.class=io.confluent.connect.hdfs.HdfsSinkConnector +tasks.max=4 +topics=android_empatica_e4_electrodermal_activity,android_empatica_e4_blood_volume_pulse,android_empatica_e4_temperature +flush.size=150 +hdfs.url=hdfs://hdfs-namenode:8020 +format.class=org.radarcns.sink.hdfs.AvroFormatRadar +topics.dir=topicAndroidNew \ No newline at end of file diff --git a/dcompose-stack/radar-cp-hadoop-stack/sink-radar.properties b/dcompose-stack/radar-cp-hadoop-stack/sink-radar.properties new file mode 100644 index 000000000..f4b641e20 --- /dev/null +++ b/dcompose-stack/radar-cp-hadoop-stack/sink-radar.properties @@ -0,0 +1,25 @@ +# Kafka consumer configuration +name=radar-connector-mongodb-sink + +# Kafka connector configuration +connector.class=org.radarcns.mongodb.MongoDbSinkConnector +tasks.max=1 + +# Topics that will be consumed +topics=android_empatica_e4_battery_level,android_empatica_e4_battery_level_output + +# MongoDB server +mongo.host=mongo +mongo.port=27017 + +# MongoDB configuration +#mongo.username= +#mongo.password= +mongo.database=mydbase + +# Collection name for putting data into the MongoDB database. The {$topic} token will be replaced +# by the Kafka topic name. 
+#mongo.collection.format={$topic} + +# Factory class to do the actual record conversion +record.converter.class=org.radarcns.sink.mongodb.RecordConverterFactoryRadar From 19381c827956a95a728de546431f4d3135365ca9 Mon Sep 17 00:00:00 2001 From: Nivethika Mahasivam Date: Fri, 20 Jan 2017 12:17:27 +0100 Subject: [PATCH 032/197] Update README.md Update documentation. --- README.md | 22 ++++++++++++++++++---- 1 file changed, 18 insertions(+), 4 deletions(-) diff --git a/README.md b/README.md index 6a1cc89a0..e353d457b 100644 --- a/README.md +++ b/README.md @@ -3,12 +3,26 @@ The dockerized RADAR stack or deploying the RADAR-CNS platform. Component repositories can be found here [RADAR-CNS DockerHub org](https://hub.docker.com/u/radarcns/dashboard/) ## Installation instructions - -First install Docker and `docker-compose` for your respective platform. Docker has installers for [macOS](https://docs.docker.com/engine/installation/mac/) and [Windows](https://docs.docker.com/engine/installation/windows/). For Ubuntu, see our [wiki page](https://github.com/RADAR-CNS/RADAR-Docker/wiki/How-to-set-up-docker-on-ubuntu). For other Linux distributions, see [the list by Docker](https://docs.docker.com/engine/installation/). - +To install RADAR-CNS stack, do the following: + +1. Install Docker Engine and verify your installation. + * Installation for macOS (Follow [installer](https://docs.docker.com/engine/installation/mac/) from Docker) + * Installation for Windows ( Follow [installer](https://github.com/RADAR-CNS/RADAR-Docker/wiki/How-to-set-up-docker-on-ubuntu) from Docker) + * Installation for Ubuntu (Follow our [wiki](https://github.com/RADAR-CNS/RADAR-Docker/wiki/How-to-set-up-docker-on-ubuntu) page) + * For other Linux distributions, see [the list by Docker](https://docs.docker.com/engine/installation/). +2. Install `docker-compose` using the [installation guide](https://docs.docker.com/compose/install/) or by following the [wiki](https://github.com/RADAR-CNS/RADAR-Docker/wiki/How-to-set-up-docker-on-ubuntu#install-docker-compose). +3. Install [git](https://git-scm.com/book/en/v2/Getting-Started-Installing-Git) for your platform. +3. Clone [RADAR-Docker](https://github.com/RADAR-CNS/RADAR-Docker) repository from GitHub. + + ``` + git clone https://github.com/RADAR-CNS/RADAR-Docker.git + ``` ## Usage -We currently have two stacks available to run, one for the community parts of the Confluent Kafka Platform and another for the complete RADAR-CNS platform. +RADAR-Docker currently offers two component stacks to run. + +1. A Docker-compose for components from [Confluent Kafka Platform](http://docs.confluent.io/3.1.1/) community +2. A Docker-compose for components from RADAR-CNS platform. ### Confluent Kafka platform From 3858bbe5d417223c6b596c1956890725e491e5a4 Mon Sep 17 00:00:00 2001 From: Nivethika Mahasivam Date: Fri, 20 Jan 2017 13:44:59 +0100 Subject: [PATCH 033/197] update current chang --- README.md | 1 + 1 file changed, 1 insertion(+) diff --git a/README.md b/README.md index e353d457b..f0128c2e7 100644 --- a/README.md +++ b/README.md @@ -17,6 +17,7 @@ To install RADAR-CNS stack, do the following: ``` git clone https://github.com/RADAR-CNS/RADAR-Docker.git ``` + ## Usage RADAR-Docker currently offers two component stacks to run. 
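Once the Confluent stack is up, a quick way to see whether the Schema Registry and the REST proxy are actually serving requests is to query their HTTP APIs; this is only a sketch and assumes the default ports 8081 and 8082 used in the compose files:

```shell
# List registered Avro subjects (an empty list [] on a fresh install).
curl -s http://localhost:8081/subjects
# List Kafka topics visible through the REST proxy.
curl -s http://localhost:8082/topics
```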
From 0ccfab04457130e689e2a465780f1060074b6c67 Mon Sep 17 00:00:00 2001 From: Nivethika Mahasivam Date: Fri, 20 Jan 2017 14:58:43 +0100 Subject: [PATCH 034/197] Updated installation guide --- README.md | 84 ++++++++++++++++++++++++++++++++++++++++++++++++------- 1 file changed, 74 insertions(+), 10 deletions(-) diff --git a/README.md b/README.md index f0128c2e7..ae1a9e411 100644 --- a/README.md +++ b/README.md @@ -12,34 +12,98 @@ To install RADAR-CNS stack, do the following: * For other Linux distributions, see [the list by Docker](https://docs.docker.com/engine/installation/). 2. Install `docker-compose` using the [installation guide](https://docs.docker.com/compose/install/) or by following the [wiki](https://github.com/RADAR-CNS/RADAR-Docker/wiki/How-to-set-up-docker-on-ubuntu#install-docker-compose). 3. Install [git](https://git-scm.com/book/en/v2/Getting-Started-Installing-Git) for your platform. -3. Clone [RADAR-Docker](https://github.com/RADAR-CNS/RADAR-Docker) repository from GitHub. +4. Clone [RADAR-Docker](https://github.com/RADAR-CNS/RADAR-Docker) repository from GitHub. ``` git clone https://github.com/RADAR-CNS/RADAR-Docker.git ``` - +5. Install required component stack following the instructions below. + ## Usage RADAR-Docker currently offers two component stacks to run. -1. A Docker-compose for components from [Confluent Kafka Platform](http://docs.confluent.io/3.1.1/) community +1. A Docker-compose for components from [Confluent Kafka Platform](http://docs.confluent.io/3.1.0/) community 2. A Docker-compose for components from RADAR-CNS platform. ### Confluent Kafka platform +Confluent Kafka platform offers integration of the basic components for streaming such as Zookeeper, Kafka brokers, Schema registry and REST-Proxy. -In this stack, only the Confluent Kafka Platform is set up. - +To run this stack on a single-node setup: ```shell -$ cd dcompose-stack/radar-cp-stack/ +$ cd RADAR-Docker/dcompose-stack/radar-cp-stack/ $ docker-compose up ``` +To stop this stack: +```shell +$ docker-compose down +``` ### RADAR-CNS platform -In this stack, the Confluent platform is set up with a Hadoop data storage, email server, RADAR-Dashboard, RADAR-HotStorage, and a REST API. See the README in the `dcompose-stack/radar-hadoop-cp-stack` directory for more information on how to run it. - -## Work in progress - +In addition to Confluent Kafka platform compoents, RADAR-CNS platform offers +* RADAR-HDFS-Connector - Cold storage of selected streams in Hadoop data storage, +* RADAR-MongoDB-Connector - Hot storage of selected streams in MongoDB, +* [RADAR-Dashboard](https://github.com/RADAR-CNS/RADAR-Dashboard), +* RADAR-Streams - real-time aggregated streams, +* RADAR-Monitor - Status monitors, +* [RADAR-HotStorage](https://github.com/RADAR-CNS/RADAR-HotStorage) via MongoDB, +* [RADAR-REST API](https://github.com/RADAR-CNS/RADAR-RestApi), +* a Hadoop cluster, and +* an email server. + +To run RADAR-CNS stack on a single node setup: + 1. Navigate to `radar-hadoop-cp-stack` + + ```shell + $ cd RADAR-Docker/dcompose-stack/radar-hadoop-cp-stack/ + ``` + 2. Hadoop requires an external network. Create a network named `hadoop` + + ```shell + $ docker network create hadoop + ``` + 3. Configure monitor settings in radar.yml + + ``` + battery_monitor: + level: CRITICAL + email_address: + email_host: smtp + email_port: 25 + email_user: user@example.com + topics: + - android_empatica_e4_battery_level + ``` + 4. Create `smtp.env` and configure your email settings following `smtp.env.template` + 5. 
(Optional) Modify topics, flush.size and HDFS direcotory for Cold storage in `sink-hdfs.properties` + + ``` + topics= + flush.size= + topics.dir= + ``` + 6. (Optional) Modify topics and mongo db configuration for Cold storage in `sink-radar.properties` + + ``` + # Topics that will be consumed + topics= + # MongoDB configuration + mongo.username= + mongo.password= + mongo.database=mydbase + ``` + 7. Start the stack + + ``` + $ sudo docker-compose up -d + ``` + +To stop RADAR-CNS stack on a single node setup: +```shell +$ cd RADAR-Docker/dcompose-stack/radar-hadoop-cp-stack/ +$ docker-compose down +``` ### Kerberized stack In this setup, Kerberos is used to secure the connections between the Kafka brokers, Zookeeper and the Kafka REST API. Unfortunately, the Kerberos container from Confluent is not publicly available, so an alternative has to be found here. From 06f41191333ba25261cd11120e4ae753a8759ff3 Mon Sep 17 00:00:00 2001 From: Nivethika Mahasivam Date: Fri, 20 Jan 2017 15:31:18 +0100 Subject: [PATCH 035/197] typos --- README.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/README.md b/README.md index ae1a9e411..5752449e1 100644 --- a/README.md +++ b/README.md @@ -1,13 +1,13 @@ # RADAR-Docker -The dockerized RADAR stack or deploying the RADAR-CNS platform. Component repositories can be found here [RADAR-CNS DockerHub org](https://hub.docker.com/u/radarcns/dashboard/) +The dockerized RADAR stack for deploying the RADAR-CNS platform. Component repositories can be found here [RADAR-CNS DockerHub org](https://hub.docker.com/u/radarcns/dashboard/) ## Installation instructions To install RADAR-CNS stack, do the following: 1. Install Docker Engine and verify your installation. * Installation for macOS (Follow [installer](https://docs.docker.com/engine/installation/mac/) from Docker) - * Installation for Windows ( Follow [installer](https://github.com/RADAR-CNS/RADAR-Docker/wiki/How-to-set-up-docker-on-ubuntu) from Docker) + * Installation for Windows ( Follow [installer](https://docs.docker.com/docker-for-windows/ from Docker) * Installation for Ubuntu (Follow our [wiki](https://github.com/RADAR-CNS/RADAR-Docker/wiki/How-to-set-up-docker-on-ubuntu) page) * For other Linux distributions, see [the list by Docker](https://docs.docker.com/engine/installation/). 2. Install `docker-compose` using the [installation guide](https://docs.docker.com/compose/install/) or by following the [wiki](https://github.com/RADAR-CNS/RADAR-Docker/wiki/How-to-set-up-docker-on-ubuntu#install-docker-compose). From b2f13d5c0f5e8d190d55c42ab0ccf0cf71acca88 Mon Sep 17 00:00:00 2001 From: Francesco Nobilia Date: Fri, 20 Jan 2017 16:22:23 +0000 Subject: [PATCH 036/197] Rest-API and HotStorage integration --- README.md | 21 +++++++++++++-- .../radar-cp-hadoop-stack/docker-compose.yml | 26 +++++++++++++------ ...radar.properties => sink-mongo.properties} | 8 +++--- 3 files changed, 41 insertions(+), 14 deletions(-) rename dcompose-stack/radar-cp-hadoop-stack/{sink-radar.properties => sink-mongo.properties} (89%) diff --git a/README.md b/README.md index 5752449e1..a0052b54b 100644 --- a/README.md +++ b/README.md @@ -83,7 +83,23 @@ To run RADAR-CNS stack on a single node setup: flush.size= topics.dir= ``` - 6. (Optional) Modify topics and mongo db configuration for Cold storage in `sink-radar.properties` + 6. 
Configure the Hot Storage settings in `docker-compose.yml` + + ``` + RADAR_USER: + RADAR_PWD: + RADAR_DB: + ``` + > **Note**: These properties are used to initialise a MongoDb database from scratch + 7. Configure the Rest-API settings in `docker-compose.yml` to connect to Hot Storage + + ``` + MONGODB_USER: + MONGODB_PASS: + MONGODB_DATABASE: + ``` + > **Note**: These properties must have the same values stated at point 6 + 8. Modify topics and mongo db configuration for Cold storage in `sink-mongo.properties` ``` # Topics that will be consumed @@ -91,8 +107,9 @@ To run RADAR-CNS stack on a single node setup: # MongoDB configuration mongo.username= mongo.password= - mongo.database=mydbase + mongo.database= ``` + > **Note**: These properties must have the same values stated at point 6 7. Start the stack ``` diff --git a/dcompose-stack/radar-cp-hadoop-stack/docker-compose.yml b/dcompose-stack/radar-cp-hadoop-stack/docker-compose.yml index 60c284c37..8143bb2ed 100644 --- a/dcompose-stack/radar-cp-hadoop-stack/docker-compose.yml +++ b/dcompose-stack/radar-cp-hadoop-stack/docker-compose.yml @@ -114,24 +114,34 @@ services: #---------------------------------------------------------------------------# # RADAR Hot Storage # #---------------------------------------------------------------------------# - mongo: - image: mongo:3.2.10 + hotstorage: + image: radarcns/radar-hotstorage:0.1 networks: - api ports: - "27017:27017" + - "28017:28017" + environment: + RADAR_USER: + RADAR_PWD: + RADAR_DB: #---------------------------------------------------------------------------# # RADAR REST API # #---------------------------------------------------------------------------# - tomcat: - image: tomcat:8.0.37 + rest-api: + image: radarcns/radar-restapi:0.1 networks: - api ports: - "8080:8080" depends_on: - - mongo + - hotstorage + environment: + MONGODB_USER: + MONGODB_PASS: + MONGODB_DATABASE: + MONGODB_HOST: hotstorage:27017 #---------------------------------------------------------------------------# # RADAR Dashboard # @@ -143,7 +153,7 @@ services: ports: - "3000:3000" depends_on: - - tomcat + - rest-api #---------------------------------------------------------------------------# @@ -200,13 +210,13 @@ services: image: radarcns/radar-mongodb-connector:0.1 restart: on-failure:3 volumes: - - ./sink-radar.properties:/etc/kafka-connect/sink.properties + - ./sink-mongo.properties:/etc/kafka-connect/sink.properties networks: - zookeeper - kafka - api depends_on: - - mongo + - hotstorage - zookeeper-1 - kafka-1 - kafka-2 diff --git a/dcompose-stack/radar-cp-hadoop-stack/sink-radar.properties b/dcompose-stack/radar-cp-hadoop-stack/sink-mongo.properties similarity index 89% rename from dcompose-stack/radar-cp-hadoop-stack/sink-radar.properties rename to dcompose-stack/radar-cp-hadoop-stack/sink-mongo.properties index f4b641e20..05aee18d8 100644 --- a/dcompose-stack/radar-cp-hadoop-stack/sink-radar.properties +++ b/dcompose-stack/radar-cp-hadoop-stack/sink-mongo.properties @@ -9,13 +9,13 @@ tasks.max=1 topics=android_empatica_e4_battery_level,android_empatica_e4_battery_level_output # MongoDB server -mongo.host=mongo +mongo.host=hotstorage mongo.port=27017 # MongoDB configuration -#mongo.username= -#mongo.password= -mongo.database=mydbase +mongo.username= +mongo.password= +mongo.database= # Collection name for putting data into the MongoDB database. The {$topic} token will be replaced # by the Kafka topic name. 
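With the Hot Storage and REST API services wired together as in PATCH 036, the pairing can be smoke-tested from the host once the stack is up. The snippet below is a minimal sketch and not part of the repository: it assumes a MongoDB shell is installed on the host, `radar-user`, `XXXXXXXX` and `radar-db` are placeholder credentials standing in for whatever `RADAR_USER`, `RADAR_PWD` and `RADAR_DB` (and the matching `MONGODB_*` variables) are set to, and the REST API is only probed for an HTTP response rather than a specific endpoint.

```shell
# Probe the REST API (Tomcat published on port 8080); any HTTP status code
# printed here means the container is reachable.
curl -s -o /dev/null -w 'rest-api HTTP status: %{http_code}\n' http://localhost:8080/

# Verify that the Hot Storage MongoDB accepts the configured credentials.
# radar-user / XXXXXXXX / radar-db are placeholders for RADAR_USER, RADAR_PWD
# and RADAR_DB as set in docker-compose.yml.
mongo --host localhost --port 27017 \
      --username radar-user --password XXXXXXXX \
      --authenticationDatabase radar-db \
      --eval 'printjson(db.stats())' radar-db
```

If either command fails, `sudo docker-compose logs rest-api hotstorage` is the usual first place to look; the REST API will not be able to reach MongoDB unless the `MONGODB_*` values mirror the Hot Storage credentials.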
From 09a78f42ae7a3317c36284bfecd4930735ab79b8 Mon Sep 17 00:00:00 2001 From: Francesco Nobilia Date: Fri, 20 Jan 2017 17:24:16 +0000 Subject: [PATCH 037/197] New MongoDB configuration --- README.md | 24 +++++++------------ dcompose-stack/radar-cp-hadoop-stack/.env | 3 +++ .../radar-cp-hadoop-stack/docker-compose.yml | 12 +++++----- 3 files changed, 17 insertions(+), 22 deletions(-) diff --git a/README.md b/README.md index a0052b54b..e15bfe320 100644 --- a/README.md +++ b/README.md @@ -83,23 +83,15 @@ To run RADAR-CNS stack on a single node setup: flush.size= topics.dir= ``` - 6. Configure the Hot Storage settings in `docker-compose.yml` + 6. Configure Hot Storage settings in `.env` file ``` - RADAR_USER: - RADAR_PWD: - RADAR_DB: + HOTSTORAGE_USERNAME= + HOTSTORAGE_PASSWORD=XXXXXXXX + HOTSTORAGE_NAME= ``` - > **Note**: These properties are used to initialise a MongoDb database from scratch - 7. Configure the Rest-API settings in `docker-compose.yml` to connect to Hot Storage - - ``` - MONGODB_USER: - MONGODB_PASS: - MONGODB_DATABASE: - ``` - > **Note**: These properties must have the same values stated at point 6 - 8. Modify topics and mongo db configuration for Cold storage in `sink-mongo.properties` + > **Note**: These properties are used to initialise a MongoDB database from scratch and to establish a connection for Rest-API + 7. Modify topics and MongoDB configuration for Hot storage in `sink-mongo.properties` ``` # Topics that will be consumed @@ -109,8 +101,8 @@ To run RADAR-CNS stack on a single node setup: mongo.password= mongo.database= ``` - > **Note**: These properties must have the same values stated at point 6 - 7. Start the stack + > **Note**: The MongoDB configuration must mirror `.env` file parameters stated at point 6 + 8. Start the stack ``` $ sudo docker-compose up -d diff --git a/dcompose-stack/radar-cp-hadoop-stack/.env b/dcompose-stack/radar-cp-hadoop-stack/.env index 7a1cbdf58..6fc6d8c40 100644 --- a/dcompose-stack/radar-cp-hadoop-stack/.env +++ b/dcompose-stack/radar-cp-hadoop-stack/.env @@ -2,3 +2,6 @@ HDFS_DATA_DIR_1=/usr/local/var/lib/docker/hdfs-data-1 HDFS_DATA_DIR_2=/usr/local/var/lib/docker/hdfs-data-2 HDFS_NAME_DIR_1=/usr/local/var/lib/docker/hdfs-name-1 HDFS_NAME_DIR_2=/usr/local/var/lib/docker/hdfs-name-2 +HOTSTORAGE_USERNAME= +HOTSTORAGE_PASSWORD=XXXXXXXX +HOTSTORAGE_NAME= diff --git a/dcompose-stack/radar-cp-hadoop-stack/docker-compose.yml b/dcompose-stack/radar-cp-hadoop-stack/docker-compose.yml index 8143bb2ed..3ebc960c2 100644 --- a/dcompose-stack/radar-cp-hadoop-stack/docker-compose.yml +++ b/dcompose-stack/radar-cp-hadoop-stack/docker-compose.yml @@ -122,9 +122,9 @@ services: - "27017:27017" - "28017:28017" environment: - RADAR_USER: - RADAR_PWD: - RADAR_DB: + RADAR_USER: ${HOTSTORAGE_USERNAME} + RADAR_PWD: ${HOTSTORAGE_PASSWORD} + RADAR_DB: ${HOTSTORAGE_NAME} #---------------------------------------------------------------------------# # RADAR REST API # @@ -138,9 +138,9 @@ services: depends_on: - hotstorage environment: - MONGODB_USER: - MONGODB_PASS: - MONGODB_DATABASE: + MONGODB_USER: ${HOTSTORAGE_USERNAME} + MONGODB_PASS: ${HOTSTORAGE_PASSWORD} + MONGODB_DATABASE: ${HOTSTORAGE_NAME} MONGODB_HOST: hotstorage:27017 #---------------------------------------------------------------------------# From f5746f4879dfd40843c41c80e2d3b7d295730f97 Mon Sep 17 00:00:00 2001 From: Francesco Nobilia Date: Fri, 20 Jan 2017 17:27:17 +0000 Subject: [PATCH 038/197] Fix notes --- README.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git 
a/README.md b/README.md index e15bfe320..9631a7e5f 100644 --- a/README.md +++ b/README.md @@ -90,7 +90,7 @@ To run RADAR-CNS stack on a single node setup: HOTSTORAGE_PASSWORD=XXXXXXXX HOTSTORAGE_NAME= ``` - > **Note**: These properties are used to initialise a MongoDB database from scratch and to establish a connection for Rest-API + > **Note**: These properties are used to initialise a MongoDB database from scratch and to establish a connection between MongoDB and Rest-API 7. Modify topics and MongoDB configuration for Hot storage in `sink-mongo.properties` ``` @@ -101,7 +101,7 @@ To run RADAR-CNS stack on a single node setup: mongo.password= mongo.database= ``` - > **Note**: The MongoDB configuration must mirror `.env` file parameters stated at point 6 + > **Note**: The MongoDB configuration must mirror `.env` file parameters configurated at point 6 8. Start the stack ``` From 19a9b79e2d468602d9bae1ae5688b76568611dfe Mon Sep 17 00:00:00 2001 From: Francesco Nobilia Date: Fri, 20 Jan 2017 17:28:52 +0000 Subject: [PATCH 039/197] Fix mongo parameter hints --- README.md | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/README.md b/README.md index 9631a7e5f..53ba78c67 100644 --- a/README.md +++ b/README.md @@ -97,9 +97,9 @@ To run RADAR-CNS stack on a single node setup: # Topics that will be consumed topics= # MongoDB configuration - mongo.username= - mongo.password= - mongo.database= + mongo.username= + mongo.password=XXXXXXXX + mongo.database= ``` > **Note**: The MongoDB configuration must mirror `.env` file parameters configurated at point 6 8. Start the stack From 09686fee4ff30fac8128363e612e360d9232ccd3 Mon Sep 17 00:00:00 2001 From: Joris Borgdorff Date: Mon, 23 Jan 2017 09:54:04 +0100 Subject: [PATCH 040/197] Review of the README file --- README.md | 117 +++++++++++++++++++++++++++++++----------------------- 1 file changed, 68 insertions(+), 49 deletions(-) diff --git a/README.md b/README.md index 53ba78c67..f6cec349b 100644 --- a/README.md +++ b/README.md @@ -5,19 +5,25 @@ The dockerized RADAR stack for deploying the RADAR-CNS platform. Component repos ## Installation instructions To install RADAR-CNS stack, do the following: -1. Install Docker Engine and verify your installation. - * Installation for macOS (Follow [installer](https://docs.docker.com/engine/installation/mac/) from Docker) - * Installation for Windows ( Follow [installer](https://docs.docker.com/docker-for-windows/ from Docker) - * Installation for Ubuntu (Follow our [wiki](https://github.com/RADAR-CNS/RADAR-Docker/wiki/How-to-set-up-docker-on-ubuntu) page) - * For other Linux distributions, see [the list by Docker](https://docs.docker.com/engine/installation/). -2. Install `docker-compose` using the [installation guide](https://docs.docker.com/compose/install/) or by following the [wiki](https://github.com/RADAR-CNS/RADAR-Docker/wiki/How-to-set-up-docker-on-ubuntu#install-docker-compose). -3. Install [git](https://git-scm.com/book/en/v2/Getting-Started-Installing-Git) for your platform. +1. Install Docker Engine + * Installation for macOS (Follow [installer](https://docs.docker.com/engine/installation/mac/) from Docker) + * Installation for Windows ( Follow [installer](https://docs.docker.com/docker-for-windows/ from Docker) + * Installation for Ubuntu (Follow our [wiki](https://github.com/RADAR-CNS/RADAR-Docker/wiki/How-to-set-up-docker-on-ubuntu) page) + * For other Linux distributions, install Docker engine from [the list by Docker](https://docs.docker.com/engine/installation/). 
Install `docker-compose` using the [installation guide](https://docs.docker.com/compose/install/) or by following the [wiki](https://github.com/RADAR-CNS/RADAR-Docker/wiki/How-to-set-up-docker-on-ubuntu#install-docker-compose). +2. Verify the Docker installation by running on the command-line: + + ```shell + sudo docker --version + sudo docker-compose --version + ``` + This should show Docker version 1.12 or later and docker-compose version 1.9.0 or later. +3. Install [git](https://git-scm.com/book/en/v2/Getting-Started-Installing-Git) for your platform. 4. Clone [RADAR-Docker](https://github.com/RADAR-CNS/RADAR-Docker) repository from GitHub. - ``` - git clone https://github.com/RADAR-CNS/RADAR-Docker.git - ``` -5. Install required component stack following the instructions below. + ```shell + git clone https://github.com/RADAR-CNS/RADAR-Docker.git + ``` +5. Install required component stack following the instructions below. ## Usage @@ -26,22 +32,28 @@ RADAR-Docker currently offers two component stacks to run. 1. A Docker-compose for components from [Confluent Kafka Platform](http://docs.confluent.io/3.1.0/) community 2. A Docker-compose for components from RADAR-CNS platform. +> **Note**: on macOS, remove `sudo` from all `docker` and `docker-compose` commands in the usage instructions below. + ### Confluent Kafka platform Confluent Kafka platform offers integration of the basic components for streaming such as Zookeeper, Kafka brokers, Schema registry and REST-Proxy. -To run this stack on a single-node setup: +Run this stack in a single-node setup on the command-line: + ```shell -$ cd RADAR-Docker/dcompose-stack/radar-cp-stack/ -$ docker-compose up +cd RADAR-Docker/dcompose-stack/radar-cp-stack/ +sudo docker-compose up -d ``` -To stop this stack: +To stop this stack, run: + ```shell -$ docker-compose down +sudo docker-compose down ``` + ### RADAR-CNS platform -In addition to Confluent Kafka platform compoents, RADAR-CNS platform offers +In addition to Confluent Kafka platform components, RADAR-CNS platform offers + * RADAR-HDFS-Connector - Cold storage of selected streams in Hadoop data storage, * RADAR-MongoDB-Connector - Hot storage of selected streams in MongoDB, * [RADAR-Dashboard](https://github.com/RADAR-CNS/RADAR-Dashboard), @@ -52,67 +64,74 @@ In addition to Confluent Kafka platform compoents, RADAR-CNS platform offers * a Hadoop cluster, and * an email server. -To run RADAR-CNS stack on a single node setup: - 1. Navigate to `radar-hadoop-cp-stack` - - ```shell - $ cd RADAR-Docker/dcompose-stack/radar-hadoop-cp-stack/ - ``` - 2. Hadoop requires an external network. Create a network named `hadoop` +To run RADAR-CNS stack in a single node setup: + +1. Navigate to `radar-hadoop-cp-stack`: + + ```shell + cd RADAR-Docker/dcompose-stack/radar-hadoop-cp-stack/ + ``` +2. Hadoop requires an external network. Create a network named `hadoop`: - ```shell - $ docker network create hadoop - ``` - 3. Configure monitor settings in radar.yml + ```shell + sudo docker network create hadoop + ``` +3. Configure monitor settings in radar.yml ``` battery_monitor: level: CRITICAL - email_address: + email_address: notify-me@example.com email_host: smtp email_port: 25 email_user: user@example.com topics: - android_empatica_e4_battery_level ``` - 4. Create `smtp.env` and configure your email settings following `smtp.env.template` - 5. (Optional) Modify topics, flush.size and HDFS direcotory for Cold storage in `sink-hdfs.properties` +4. 
Create `smtp.env` and configure your email settings following `smtp.env.template`. Configure alternative mail providers like Amazon SES or Gmail by using the parameters of the [`namshi/smtp` Docker image](https://hub.docker.com/r/namshi/smtp/). +5. (Optional) Modify topics, flush.size and HDFS direcotory for Cold storage in `sink-hdfs.properties` - ``` - topics= + ```ini + topics=topic1,topic2 flush.size= - topics.dir= + topics.dir=/path/to/data ``` - 6. Configure Hot Storage settings in `.env` file +6. Configure Hot Storage settings in `.env` file - ``` - HOTSTORAGE_USERNAME= + ```ini + HOTSTORAGE_USERNAME=mongodb-user HOTSTORAGE_PASSWORD=XXXXXXXX - HOTSTORAGE_NAME= + HOTSTORAGE_NAME=mongodb-database ``` > **Note**: These properties are used to initialise a MongoDB database from scratch and to establish a connection between MongoDB and Rest-API - 7. Modify topics and MongoDB configuration for Hot storage in `sink-mongo.properties` +7. Modify topics and MongoDB configuration for Hot storage in `sink-mongo.properties` - ``` + ```ini # Topics that will be consumed - topics= + topics=topic1,topic2 # MongoDB configuration - mongo.username= + mongo.username=mongodb-user mongo.password=XXXXXXXX - mongo.database= + mongo.database=mongodb-database ``` > **Note**: The MongoDB configuration must mirror `.env` file parameters configurated at point 6 - 8. Start the stack +8. Start the stack + ```shell + sudo docker-compose up -d --build ``` - $ sudo docker-compose up -d - ``` -To stop RADAR-CNS stack on a single node setup: +To stop RADAR-CNS stack on a single node setup, run + ```shell -$ cd RADAR-Docker/dcompose-stack/radar-hadoop-cp-stack/ -$ docker-compose down +cd RADAR-Docker/dcompose-stack/radar-hadoop-cp-stack/ +sudo docker-compose down ``` + +## Work in progress + +The two following stacks will not work on with only Docker and docker-compose. For the Kerberos stack, the Kerberos image is not public. For the multi-host setup, also docker-swarm and Docker beta versions are needed. + ### Kerberized stack In this setup, Kerberos is used to secure the connections between the Kafka brokers, Zookeeper and the Kafka REST API. Unfortunately, the Kerberos container from Confluent is not publicly available, so an alternative has to be found here. From 531e38862609f028ad4acb640065b31054460be9 Mon Sep 17 00:00:00 2001 From: Joris Borgdorff Date: Mon, 23 Jan 2017 09:54:40 +0100 Subject: [PATCH 041/197] README code coloring --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index f6cec349b..4a1aea68e 100644 --- a/README.md +++ b/README.md @@ -78,7 +78,7 @@ To run RADAR-CNS stack in a single node setup: ``` 3. Configure monitor settings in radar.yml - ``` + ```yaml battery_monitor: level: CRITICAL email_address: notify-me@example.com From 0d91fbecb8f9c920eff55332df20fc26a92036d5 Mon Sep 17 00:00:00 2001 From: Joris Borgdorff Date: Mon, 23 Jan 2017 10:46:40 +0100 Subject: [PATCH 042/197] Added disconnect monitor settings to file --- README.md | 13 ++++++++++++- 1 file changed, 12 insertions(+), 1 deletion(-) diff --git a/README.md b/README.md index 4a1aea68e..c191bc636 100644 --- a/README.md +++ b/README.md @@ -76,7 +76,7 @@ To run RADAR-CNS stack in a single node setup: ```shell sudo docker network create hadoop ``` -3. Configure monitor settings in radar.yml +3. 
Configure monitor settings in `radar.yml`: ```yaml battery_monitor: @@ -87,6 +87,17 @@ To run RADAR-CNS stack in a single node setup: email_user: user@example.com topics: - android_empatica_e4_battery_level + disconnect_monitor: + # timeout in milliseconds -> 5 minutes + timeout: 300000 + email_address: notify-me@example.com + email_host: smtp + email_port: 25 + email_user: user@example.com + # temperature readings are sent very regularly, but + # not too often. + topics: + - android_empatica_e4_temperature ``` 4. Create `smtp.env` and configure your email settings following `smtp.env.template`. Configure alternative mail providers like Amazon SES or Gmail by using the parameters of the [`namshi/smtp` Docker image](https://hub.docker.com/r/namshi/smtp/). 5. (Optional) Modify topics, flush.size and HDFS direcotory for Cold storage in `sink-hdfs.properties` From 39ca8fe2d347cd9efec814b13877a3a62f9e3822 Mon Sep 17 00:00:00 2001 From: Francesco Nobilia Date: Mon, 23 Jan 2017 16:01:25 +0000 Subject: [PATCH 043/197] Added container to initialise topics --- .../kafka-radarinit/Dockerfile | 7 +++ .../kafka-radarinit/topic_init.sh | 48 +++++++++++++++++++ 2 files changed, 55 insertions(+) create mode 100644 dcompose-stack/radar-cp-hadoop-stack/kafka-radarinit/Dockerfile create mode 100755 dcompose-stack/radar-cp-hadoop-stack/kafka-radarinit/topic_init.sh diff --git a/dcompose-stack/radar-cp-hadoop-stack/kafka-radarinit/Dockerfile b/dcompose-stack/radar-cp-hadoop-stack/kafka-radarinit/Dockerfile new file mode 100644 index 000000000..75bd1c1f6 --- /dev/null +++ b/dcompose-stack/radar-cp-hadoop-stack/kafka-radarinit/Dockerfile @@ -0,0 +1,7 @@ +FROM confluentinc/cp-kafka:3.1.1 + +COPY ./topic_init.sh /home/ + +RUN chmod +x /home/topic_init.sh + +CMD ["/etc/confluent/docker/run", "./home/topic_init.sh", "shutdown -h now"] diff --git a/dcompose-stack/radar-cp-hadoop-stack/kafka-radarinit/topic_init.sh b/dcompose-stack/radar-cp-hadoop-stack/kafka-radarinit/topic_init.sh new file mode 100755 index 000000000..a6f0b7e6e --- /dev/null +++ b/dcompose-stack/radar-cp-hadoop-stack/kafka-radarinit/topic_init.sh @@ -0,0 +1,48 @@ +#!/bin/bash + +# Check if first execution +if [ -f /home/.radar_topic_set ]; then + echo "*********************************************" + echo "** RADAR-CNS topics are ready to be used **" + echo "*********************************************" + exit 0 +fi + +# Check if variables exist +if [ -z "$RADAR_TOPICS" ]; then + echo "$RADAR_TOPICS is not defined" + exit 126 +fi + +if [ -z "$KAFKA_ZOOKEEPER_CONNECT" ]; then + echo "$KAFKA_ZOOKEEPER_CONNECT is not defined" + exit 126 +fi + +if [ -z "$RADAR_PARTITIONS" ]; then + echo "$PARTITIONS is not defined" + exit 126 +fi + +if [ -z "$RADAR_REPLICATION_FACTOR" ]; then + echo "$PARTITIONS is not defined" + exit 126 +fi + +# Create topics +echo "Creating RADAR-CNS topicsi..." +IFS=', ' read -r -a array <<< "$RADAR_TOPICS" + +for element in "${array[@]}" +do + echo "===> Creating $element" + kafka-topics --zookeeper $KAFKA_ZOOKEEPER_CONNECT --create --topic $element --partitions $RADAR_PARTITIONS --replication-factor $RADAR_REPLICATION_FACTOR --if-not-exists +done + +touch /home/.radar_topic_set + +echo "Topics created!" 
+ +echo "*******************************************" +echo "** RADAR-CNS topics have been created **" +echo "*******************************************" From 1737524d2720391be2adf362269b70f385af6d14 Mon Sep 17 00:00:00 2001 From: Francesco Nobilia Date: Mon, 23 Jan 2017 18:58:21 +0000 Subject: [PATCH 044/197] Kafka-Init waits that all brokers are up&running before creating topics --- .../radar-cp-hadoop-stack/docker-compose.yml | 22 +++++++++++++++++++ .../kafka-radarinit/Dockerfile | 10 +++++++-- .../kafka-radarinit/topic_init.sh | 20 ++++++++++++++--- 3 files changed, 47 insertions(+), 5 deletions(-) diff --git a/dcompose-stack/radar-cp-hadoop-stack/docker-compose.yml b/dcompose-stack/radar-cp-hadoop-stack/docker-compose.yml index 3a4300477..c46cd44c9 100644 --- a/dcompose-stack/radar-cp-hadoop-stack/docker-compose.yml +++ b/dcompose-stack/radar-cp-hadoop-stack/docker-compose.yml @@ -44,6 +44,7 @@ services: KAFKA_BROKER_ID: 1 KAFKA_ZOOKEEPER_CONNECT: zookeeper-1:2181 KAFKA_ADVERTISED_LISTENERS: PLAINTEXT://kafka-1:9092 + KAFKA_AUTO_CREATE_TOPICS_ENABLE: "false" kafka-2: image: confluentinc/cp-kafka:3.1.1 @@ -56,6 +57,7 @@ services: KAFKA_BROKER_ID: 2 KAFKA_ZOOKEEPER_CONNECT: zookeeper-1:2181 KAFKA_ADVERTISED_LISTENERS: PLAINTEXT://kafka-2:9092 + KAFKA_AUTO_CREATE_TOPICS_ENABLE: "false" kafka-3: image: confluentinc/cp-kafka:3.1.1 @@ -68,6 +70,7 @@ services: KAFKA_BROKER_ID: 3 KAFKA_ZOOKEEPER_CONNECT: zookeeper-1:2181 KAFKA_ADVERTISED_LISTENERS: PLAINTEXT://kafka-3:9092 + KAFKA_AUTO_CREATE_TOPICS_ENABLE: "false" #---------------------------------------------------------------------------# # Schema Registry # @@ -110,6 +113,25 @@ services: KAFKA_REST_SCHEMA_REGISTRY_URL: http://schema-registry-1:8081 KAFKA_REST_HOST_NAME: rest-proxy-1 + #---------------------------------------------------------------------------# + # Kafka Init # + #---------------------------------------------------------------------------# + kafka-init: + build: kafka-radarinit + image: radarcns/kafka-init:3.1.1 + networks: + - kafka + - zookeeper + depends_on: + - rest-proxy-1 + environment: + KAFKA_ZOOKEEPER_CONNECT: zookeeper-1:2181 + KAFKA_REST_PROXY: http://rest-proxy-1:8082 + KAFKA_BROKERS: 3 + RADAR_TOPICS: "android_empatica_e4_acceleration, android_empatica_e4_acceleration_output, android_empatica_e4_battery_level, android_empatica_e4_battery_level_output, android_empatica_e4_blood_volume_pulse, android_empatica_e4_blood_volume_pulse_output, android_empatica_e4_electrodermal_activity, android_empatica_e4_electrodermal_activity_output, android_empatica_e4_heartrate_output, android_empatica_e4_inter_beat_interval, android_empatica_e4_inter_beat_interval_output, android_empatica_e4_sensor_status, android_empatica_e4_sensor_status_output, android_empatica_e4_temperature, android_empatica_e4_temperature_output" + RADAR_PARTITIONS: 3 + RADAR_REPLICATION_FACTOR: 3 + #---------------------------------------------------------------------------# # RADAR Hot Storage # diff --git a/dcompose-stack/radar-cp-hadoop-stack/kafka-radarinit/Dockerfile b/dcompose-stack/radar-cp-hadoop-stack/kafka-radarinit/Dockerfile index 75bd1c1f6..47e9a7c19 100644 --- a/dcompose-stack/radar-cp-hadoop-stack/kafka-radarinit/Dockerfile +++ b/dcompose-stack/radar-cp-hadoop-stack/kafka-radarinit/Dockerfile @@ -1,7 +1,13 @@ FROM confluentinc/cp-kafka:3.1.1 -COPY ./topic_init.sh /home/ +# Update aptitude with new repo +RUN apt-get update + +# Install jq to parse JSON +RUN apt-get install -y jq +# Copy bash file +COPY ./topic_init.sh /home/ RUN chmod +x 
/home/topic_init.sh -CMD ["/etc/confluent/docker/run", "./home/topic_init.sh", "shutdown -h now"] +CMD ["./home/topic_init.sh", "shutdown -h now"] \ No newline at end of file diff --git a/dcompose-stack/radar-cp-hadoop-stack/kafka-radarinit/topic_init.sh b/dcompose-stack/radar-cp-hadoop-stack/kafka-radarinit/topic_init.sh index a6f0b7e6e..ddb625145 100755 --- a/dcompose-stack/radar-cp-hadoop-stack/kafka-radarinit/topic_init.sh +++ b/dcompose-stack/radar-cp-hadoop-stack/kafka-radarinit/topic_init.sh @@ -8,6 +8,20 @@ if [ -f /home/.radar_topic_set ]; then exit 0 fi +# Wait untill all brokers are up & running +while [ "$LENGTH" != "$KAFKA_BROKERS" ]; do + BROKERS=$(curl -sS $KAFKA_REST_PROXY/brokers | jq '.brokers') + BROKERS="$(echo -e "${BROKERS}" | tr -d '[:space:]')" + BROKERS="${BROKERS:1}" + + LENGTH=0 + IFS=',' read -r -a array <<< $BROKERS + for element in "${array[@]}" + do + LENGTH=${#array[@]} + done +done + # Check if variables exist if [ -z "$RADAR_TOPICS" ]; then echo "$RADAR_TOPICS is not defined" @@ -20,17 +34,17 @@ if [ -z "$KAFKA_ZOOKEEPER_CONNECT" ]; then fi if [ -z "$RADAR_PARTITIONS" ]; then - echo "$PARTITIONS is not defined" + echo "$RADAR_PARTITIONS is not defined" exit 126 fi if [ -z "$RADAR_REPLICATION_FACTOR" ]; then - echo "$PARTITIONS is not defined" + echo "$RADAR_REPLICATION_FACTOR is not defined" exit 126 fi # Create topics -echo "Creating RADAR-CNS topicsi..." +echo "Creating RADAR-CNS topics..." IFS=', ' read -r -a array <<< "$RADAR_TOPICS" for element in "${array[@]}" From bef85bb95e10601136369862c0aefd8d00530fee Mon Sep 17 00:00:00 2001 From: Francesco Nobilia Date: Mon, 23 Jan 2017 19:06:28 +0000 Subject: [PATCH 045/197] Fix dependences --- .../radar-cp-hadoop-stack/docker-compose.yml | 10 +++++++++- 1 file changed, 9 insertions(+), 1 deletion(-) diff --git a/dcompose-stack/radar-cp-hadoop-stack/docker-compose.yml b/dcompose-stack/radar-cp-hadoop-stack/docker-compose.yml index c46cd44c9..09ee355ad 100644 --- a/dcompose-stack/radar-cp-hadoop-stack/docker-compose.yml +++ b/dcompose-stack/radar-cp-hadoop-stack/docker-compose.yml @@ -123,6 +123,9 @@ services: - kafka - zookeeper depends_on: + - kafka-1 + - kafka-2 + - kafka-3 - rest-proxy-1 environment: KAFKA_ZOOKEEPER_CONNECT: zookeeper-1:2181 @@ -238,13 +241,14 @@ services: - kafka - api depends_on: - - hotstorage - zookeeper-1 - kafka-1 - kafka-2 - kafka-3 - schema-registry-1 - rest-proxy-1 + - kafka-init + - hotstorage environment: CONNECT_BOOTSTRAP_SERVERS: PLAINTEXT://kafka-1:9092,PLAINTEXT://kafka-2:9092,PLAINTEXT://kafka-3:9092 CONNECT_REST_PORT: 8082 @@ -282,6 +286,7 @@ services: - kafka-3 - schema-registry-1 - rest-proxy-1 + - kafka-init - hdfs-datanode-1 - hdfs-datanode-2 - hdfs-namenode @@ -318,6 +323,7 @@ services: - kafka-2 - kafka-3 - schema-registry-1 + - kafka-init volumes: - ./radar.yml:/etc/radar.yml @@ -338,6 +344,7 @@ services: - kafka-2 - kafka-3 - schema-registry-1 + - kafka-init - smtp volumes: - ./radar.yml:/etc/radar.yml @@ -354,6 +361,7 @@ services: - "/var/run:/var/run:rw" - "/sys:/sys:ro" - "/var/lib/docker/:/var/lib/docker:ro" + portainer: image: portainer/portainer:1.11.1 ports: From 5fdd47750eba28f3b9efbb19d18243930b293982 Mon Sep 17 00:00:00 2001 From: Francesco Nobilia Date: Mon, 23 Jan 2017 19:22:39 +0000 Subject: [PATCH 046/197] Moved topic list in .env file and updated README --- README.md | 11 ++++++++++- dcompose-stack/radar-cp-hadoop-stack/.env | 1 + .../radar-cp-hadoop-stack/docker-compose.yml | 4 ++-- 3 files changed, 13 insertions(+), 3 deletions(-) diff --git 
a/README.md b/README.md index c191bc636..99b50865a 100644 --- a/README.md +++ b/README.md @@ -126,7 +126,16 @@ To run RADAR-CNS stack in a single node setup: mongo.database=mongodb-database ``` > **Note**: The MongoDB configuration must mirror `.env` file parameters configurated at point 6 -8. Start the stack +8. For secuirity reasons, the `auto.creation.topics.enable` has been set to `false`. To create the needed topic, modify the comma separated list parametern `RADAR_TOPIC_LIST` in `.env` file + + ```ini + RADAR_TOPIC_LIST=topic1, topic2 + ``` + > **Note**: In order to support Empatica E4 integration `RADAR_TOPIC_LIST` has to be set to + ```ini + RADAR_TOPICS: android_empatica_e4_acceleration, android_empatica_e4_acceleration_output, android_empatica_e4_battery_level, android_empatica_e4_battery_level_output, android_empatica_e4_blood_volume_pulse, android_empatica_e4_blood_volume_pulse_output, android_empatica_e4_electrodermal_activity, android_empatica_e4_electrodermal_activity_output, android_empatica_e4_heartrate_output, android_empatica_e4_inter_beat_interval, android_empatica_e4_inter_beat_interval_output, android_empatica_e4_sensor_status, android_empatica_e4_sensor_status_output, android_empatica_e4_temperature, android_empatica_e4_temperature_output + ``` +9. Start the stack ```shell sudo docker-compose up -d --build diff --git a/dcompose-stack/radar-cp-hadoop-stack/.env b/dcompose-stack/radar-cp-hadoop-stack/.env index 6fc6d8c40..4cb8eac66 100644 --- a/dcompose-stack/radar-cp-hadoop-stack/.env +++ b/dcompose-stack/radar-cp-hadoop-stack/.env @@ -5,3 +5,4 @@ HDFS_NAME_DIR_2=/usr/local/var/lib/docker/hdfs-name-2 HOTSTORAGE_USERNAME= HOTSTORAGE_PASSWORD=XXXXXXXX HOTSTORAGE_NAME= +RADAR_TOPIC_LIST=topic1, topic2 diff --git a/dcompose-stack/radar-cp-hadoop-stack/docker-compose.yml b/dcompose-stack/radar-cp-hadoop-stack/docker-compose.yml index 09ee355ad..c98e1bac5 100644 --- a/dcompose-stack/radar-cp-hadoop-stack/docker-compose.yml +++ b/dcompose-stack/radar-cp-hadoop-stack/docker-compose.yml @@ -131,7 +131,7 @@ services: KAFKA_ZOOKEEPER_CONNECT: zookeeper-1:2181 KAFKA_REST_PROXY: http://rest-proxy-1:8082 KAFKA_BROKERS: 3 - RADAR_TOPICS: "android_empatica_e4_acceleration, android_empatica_e4_acceleration_output, android_empatica_e4_battery_level, android_empatica_e4_battery_level_output, android_empatica_e4_blood_volume_pulse, android_empatica_e4_blood_volume_pulse_output, android_empatica_e4_electrodermal_activity, android_empatica_e4_electrodermal_activity_output, android_empatica_e4_heartrate_output, android_empatica_e4_inter_beat_interval, android_empatica_e4_inter_beat_interval_output, android_empatica_e4_sensor_status, android_empatica_e4_sensor_status_output, android_empatica_e4_temperature, android_empatica_e4_temperature_output" + RADAR_TOPICS: ${RADAR_TOPIC_LIST} RADAR_PARTITIONS: 3 RADAR_REPLICATION_FACTOR: 3 @@ -361,7 +361,7 @@ services: - "/var/run:/var/run:rw" - "/sys:/sys:ro" - "/var/lib/docker/:/var/lib/docker:ro" - + portainer: image: portainer/portainer:1.11.1 ports: From f408ae1566ce8b8c59f071635df6a9930dd6600a Mon Sep 17 00:00:00 2001 From: Francesco Nobilia Date: Mon, 23 Jan 2017 19:41:40 +0000 Subject: [PATCH 047/197] Fix param name and added sleep interval to busy waiting loop --- README.md | 2 +- .../radar-cp-hadoop-stack/kafka-radarinit/topic_init.sh | 8 ++++++++ 2 files changed, 9 insertions(+), 1 deletion(-) diff --git a/README.md b/README.md index 99b50865a..b8f6caf5a 100644 --- a/README.md +++ b/README.md @@ -133,7 +133,7 @@ To run RADAR-CNS stack in a 
single node setup: ``` > **Note**: In order to support Empatica E4 integration `RADAR_TOPIC_LIST` has to be set to ```ini - RADAR_TOPICS: android_empatica_e4_acceleration, android_empatica_e4_acceleration_output, android_empatica_e4_battery_level, android_empatica_e4_battery_level_output, android_empatica_e4_blood_volume_pulse, android_empatica_e4_blood_volume_pulse_output, android_empatica_e4_electrodermal_activity, android_empatica_e4_electrodermal_activity_output, android_empatica_e4_heartrate_output, android_empatica_e4_inter_beat_interval, android_empatica_e4_inter_beat_interval_output, android_empatica_e4_sensor_status, android_empatica_e4_sensor_status_output, android_empatica_e4_temperature, android_empatica_e4_temperature_output + RADAR_TOPIC_LIST=android_empatica_e4_acceleration, android_empatica_e4_acceleration_output, android_empatica_e4_battery_level, android_empatica_e4_battery_level_output, android_empatica_e4_blood_volume_pulse, android_empatica_e4_blood_volume_pulse_output, android_empatica_e4_electrodermal_activity, android_empatica_e4_electrodermal_activity_output, android_empatica_e4_heartrate_output, android_empatica_e4_inter_beat_interval, android_empatica_e4_inter_beat_interval_output, android_empatica_e4_sensor_status, android_empatica_e4_sensor_status_output, android_empatica_e4_temperature, android_empatica_e4_temperature_output ``` 9. Start the stack diff --git a/dcompose-stack/radar-cp-hadoop-stack/kafka-radarinit/topic_init.sh b/dcompose-stack/radar-cp-hadoop-stack/kafka-radarinit/topic_init.sh index ddb625145..d771604c7 100755 --- a/dcompose-stack/radar-cp-hadoop-stack/kafka-radarinit/topic_init.sh +++ b/dcompose-stack/radar-cp-hadoop-stack/kafka-radarinit/topic_init.sh @@ -9,6 +9,7 @@ if [ -f /home/.radar_topic_set ]; then fi # Wait untill all brokers are up & running +INTERVAL=1 while [ "$LENGTH" != "$KAFKA_BROKERS" ]; do BROKERS=$(curl -sS $KAFKA_REST_PROXY/brokers | jq '.brokers') BROKERS="$(echo -e "${BROKERS}" | tr -d '[:space:]')" @@ -20,6 +21,13 @@ while [ "$LENGTH" != "$KAFKA_BROKERS" ]; do do LENGTH=${#array[@]} done + + if [ "$LENGTH" != "$KAFKA_BROKERS" ]; then + sleep $INTERVAL + if (( INTERVAL < 3 )); then + ((INTERVAL++)) + fi + fi done # Check if variables exist From 6f263d67b38c4f101723bd8d72d41bbd6e0be471 Mon Sep 17 00:00:00 2001 From: Francesco Nobilia Date: Tue, 24 Jan 2017 14:01:10 +0000 Subject: [PATCH 048/197] Fix error messages --- .../radar-cp-hadoop-stack/kafka-radarinit/topic_init.sh | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/dcompose-stack/radar-cp-hadoop-stack/kafka-radarinit/topic_init.sh b/dcompose-stack/radar-cp-hadoop-stack/kafka-radarinit/topic_init.sh index d771604c7..356fc7b68 100755 --- a/dcompose-stack/radar-cp-hadoop-stack/kafka-radarinit/topic_init.sh +++ b/dcompose-stack/radar-cp-hadoop-stack/kafka-radarinit/topic_init.sh @@ -32,22 +32,22 @@ done # Check if variables exist if [ -z "$RADAR_TOPICS" ]; then - echo "$RADAR_TOPICS is not defined" + echo "RADAR_TOPICS is not defined" exit 126 fi if [ -z "$KAFKA_ZOOKEEPER_CONNECT" ]; then - echo "$KAFKA_ZOOKEEPER_CONNECT is not defined" + echo "KAFKA_ZOOKEEPER_CONNECT is not defined" exit 126 fi if [ -z "$RADAR_PARTITIONS" ]; then - echo "$RADAR_PARTITIONS is not defined" + echo "RADAR_PARTITIONS is not defined" exit 126 fi if [ -z "$RADAR_REPLICATION_FACTOR" ]; then - echo "$RADAR_REPLICATION_FACTOR is not defined" + echo "RADAR_REPLICATION_FACTOR is not defined" exit 126 fi From cf11562a980ca5289e046e750556a7d3ca31a7fd Mon Sep 17 00:00:00 2001 
From: Francesco Nobilia Date: Tue, 24 Jan 2017 17:15:58 +0000 Subject: [PATCH 049/197] Fixed env variable to support the topic validation script For more details check RADAR-CNS/RADAR-Backend@c101695 --- README.md | 7 ++----- dcompose-stack/radar-cp-hadoop-stack/.env | 2 +- .../radar-cp-hadoop-stack/docker-compose.yml | 14 ++++++++++++-- 3 files changed, 15 insertions(+), 8 deletions(-) diff --git a/README.md b/README.md index b8f6caf5a..d10b870cb 100644 --- a/README.md +++ b/README.md @@ -126,15 +126,12 @@ To run RADAR-CNS stack in a single node setup: mongo.database=mongodb-database ``` > **Note**: The MongoDB configuration must mirror `.env` file parameters configurated at point 6 -8. For secuirity reasons, the `auto.creation.topics.enable` has been set to `false`. To create the needed topic, modify the comma separated list parametern `RADAR_TOPIC_LIST` in `.env` file +8. (Optional) For secuirity reasons, the `auto.creation.topics.enable` has been set to `false`. To create the required topics, modify the comma separated list parameter `RADAR_TOPIC_LIST` in `.env` file ```ini RADAR_TOPIC_LIST=topic1, topic2 ``` - > **Note**: In order to support Empatica E4 integration `RADAR_TOPIC_LIST` has to be set to - ```ini - RADAR_TOPIC_LIST=android_empatica_e4_acceleration, android_empatica_e4_acceleration_output, android_empatica_e4_battery_level, android_empatica_e4_battery_level_output, android_empatica_e4_blood_volume_pulse, android_empatica_e4_blood_volume_pulse_output, android_empatica_e4_electrodermal_activity, android_empatica_e4_electrodermal_activity_output, android_empatica_e4_heartrate_output, android_empatica_e4_inter_beat_interval, android_empatica_e4_inter_beat_interval_output, android_empatica_e4_sensor_status, android_empatica_e4_sensor_status_output, android_empatica_e4_temperature, android_empatica_e4_temperature_output - ``` + > **Note**: The parameter has been already set up to support Empatica E4 integration. 9. 
Start the stack ```shell diff --git a/dcompose-stack/radar-cp-hadoop-stack/.env b/dcompose-stack/radar-cp-hadoop-stack/.env index 4cb8eac66..fab55092f 100644 --- a/dcompose-stack/radar-cp-hadoop-stack/.env +++ b/dcompose-stack/radar-cp-hadoop-stack/.env @@ -5,4 +5,4 @@ HDFS_NAME_DIR_2=/usr/local/var/lib/docker/hdfs-name-2 HOTSTORAGE_USERNAME= HOTSTORAGE_PASSWORD=XXXXXXXX HOTSTORAGE_NAME= -RADAR_TOPIC_LIST=topic1, topic2 +RADAR_TOPIC_LIST=android_empatica_e4_acceleration, android_empatica_e4_acceleration_output, android_empatica_e4_battery_level, android_empatica_e4_battery_level_output, android_empatica_e4_blood_volume_pulse, android_empatica_e4_blood_volume_pulse_output, android_empatica_e4_electrodermal_activity, android_empatica_e4_electrodermal_activity_output, android_empatica_e4_heartrate_output, android_empatica_e4_inter_beat_interval, android_empatica_e4_inter_beat_interval_output, android_empatica_e4_sensor_status, android_empatica_e4_sensor_status_output, android_empatica_e4_temperature, android_empatica_e4_temperature_output diff --git a/dcompose-stack/radar-cp-hadoop-stack/docker-compose.yml b/dcompose-stack/radar-cp-hadoop-stack/docker-compose.yml index c98e1bac5..21644305f 100644 --- a/dcompose-stack/radar-cp-hadoop-stack/docker-compose.yml +++ b/dcompose-stack/radar-cp-hadoop-stack/docker-compose.yml @@ -251,7 +251,7 @@ services: - hotstorage environment: CONNECT_BOOTSTRAP_SERVERS: PLAINTEXT://kafka-1:9092,PLAINTEXT://kafka-2:9092,PLAINTEXT://kafka-3:9092 - CONNECT_REST_PORT: 8082 + CONNECT_REST_PORT: 8083 CONNECT_GROUP_ID: "default" CONNECT_CONFIG_STORAGE_TOPIC: "default.config" CONNECT_OFFSET_STORAGE_TOPIC: "default.offsets" @@ -265,6 +265,8 @@ services: CONNECT_OFFSET_STORAGE_FILE_FILENAME: "/tmp/connect2.offset" CONNECT_REST_ADVERTISED_HOST_NAME: "radar-mongodb-connector" CONNECT_ZOOKEEPER_CONNECT: zookeeper:2181 + KAFKA_REST_PROXY: http://rest-proxy-1:8082 + TOPIC_LIST: ${RADAR_TOPIC_LIST} #---------------------------------------------------------------------------# # RADAR HDFS connector # @@ -292,7 +294,7 @@ services: - hdfs-namenode environment: CONNECT_BOOTSTRAP_SERVERS: PLAINTEXT://kafka-1:9092,PLAINTEXT://kafka-2:9092,PLAINTEXT://kafka-3:9092 - CONNECT_REST_PORT: 8082 + CONNECT_REST_PORT: 8083 CONNECT_GROUP_ID: "default" CONNECT_CONFIG_STORAGE_TOPIC: "default.config" CONNECT_OFFSET_STORAGE_TOPIC: "default.offsets" @@ -306,6 +308,8 @@ services: CONNECT_OFFSET_STORAGE_FILE_FILENAME: "/tmp/connect2.offset" CONNECT_REST_ADVERTISED_HOST_NAME: "radar-hdfs-connector" CONNECT_ZOOKEEPER_CONNECT: zookeeper:2181 + KAFKA_REST_PROXY: http://rest-proxy-1:8082 + TOPIC_LIST: ${RADAR_TOPIC_LIST} #---------------------------------------------------------------------------# # RADAR backend streams # @@ -326,6 +330,9 @@ services: - kafka-init volumes: - ./radar.yml:/etc/radar.yml + environment: + KAFKA_REST_PROXY: http://rest-proxy-1:8082 + TOPIC_LIST: ${RADAR_TOPIC_LIST} #---------------------------------------------------------------------------# # RADAR backend monitor # @@ -348,6 +355,9 @@ services: - smtp volumes: - ./radar.yml:/etc/radar.yml + environment: + KAFKA_REST_PROXY: http://rest-proxy-1:8082 + TOPIC_LIST: ${RADAR_TOPIC_LIST} #---------------------------------------------------------------------------# # Docker Monitoring # From 1d2d535817da3bb278d07a9899f6047372bf0f18 Mon Sep 17 00:00:00 2001 From: Francesco Nobilia Date: Wed, 25 Jan 2017 06:46:31 +0000 Subject: [PATCH 050/197] Fixed Rest-Proxy parameter for HDFS connector --- 
dcompose-stack/radar-cp-hadoop-stack/docker-compose.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/dcompose-stack/radar-cp-hadoop-stack/docker-compose.yml b/dcompose-stack/radar-cp-hadoop-stack/docker-compose.yml index 21644305f..c56ba9539 100644 --- a/dcompose-stack/radar-cp-hadoop-stack/docker-compose.yml +++ b/dcompose-stack/radar-cp-hadoop-stack/docker-compose.yml @@ -294,7 +294,7 @@ services: - hdfs-namenode environment: CONNECT_BOOTSTRAP_SERVERS: PLAINTEXT://kafka-1:9092,PLAINTEXT://kafka-2:9092,PLAINTEXT://kafka-3:9092 - CONNECT_REST_PORT: 8083 + CONNECT_REST_PORT: 8084 CONNECT_GROUP_ID: "default" CONNECT_CONFIG_STORAGE_TOPIC: "default.config" CONNECT_OFFSET_STORAGE_TOPIC: "default.offsets" @@ -308,7 +308,7 @@ services: CONNECT_OFFSET_STORAGE_FILE_FILENAME: "/tmp/connect2.offset" CONNECT_REST_ADVERTISED_HOST_NAME: "radar-hdfs-connector" CONNECT_ZOOKEEPER_CONNECT: zookeeper:2181 - KAFKA_REST_PROXY: http://rest-proxy-1:8082 + KAFKA_REST_PROXY: "http://rest-proxy-1:8082" TOPIC_LIST: ${RADAR_TOPIC_LIST} #---------------------------------------------------------------------------# From 09a8798ce3c6a2d59a9a2987857041a08bfac583 Mon Sep 17 00:00:00 2001 From: Francesco Nobilia Date: Wed, 25 Jan 2017 09:32:42 +0000 Subject: [PATCH 051/197] New Backend repositories --- dcompose-stack/radar-cp-hadoop-stack/docker-compose.yml | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/dcompose-stack/radar-cp-hadoop-stack/docker-compose.yml b/dcompose-stack/radar-cp-hadoop-stack/docker-compose.yml index c56ba9539..2067a978d 100644 --- a/dcompose-stack/radar-cp-hadoop-stack/docker-compose.yml +++ b/dcompose-stack/radar-cp-hadoop-stack/docker-compose.yml @@ -232,7 +232,7 @@ services: # RADAR mongo connector # #---------------------------------------------------------------------------# radar-mongodb-connector: - image: radarcns/radar-mongodb-connector:0.1 + image: radarcns/radar-mongodb-connector-auto:0.1 restart: on-failure:3 volumes: - ./sink-mongo.properties:/etc/kafka-connect/sink.properties @@ -272,7 +272,7 @@ services: # RADAR HDFS connector # #---------------------------------------------------------------------------# radar-hdfs-connector: - image: radarcns/radar-hdfs-connector:0.1 + image: radarcns/radar-hdfs-connector-auto:0.1 restart: on-failure:3 volumes: - ./sink-hdfs.properties:/etc/kafka-connect/sink.properties @@ -315,7 +315,7 @@ services: # RADAR backend streams # #---------------------------------------------------------------------------# radar-backend-stream: - image: radarcns/radar-backend-kafka:0.1 + image: radarcns/radar-backend-kafka-auto:0.1 command: - stream networks: @@ -338,7 +338,7 @@ services: # RADAR backend monitor # #---------------------------------------------------------------------------# radar-backend-monitor: - image: radarcns/radar-backend-kafka:0.1 + image: radarcns/radar-backend-kafka-auto:0.1 command: - monitor networks: From 53a1a81e3504998b78d72530d6667d065b5e5855 Mon Sep 17 00:00:00 2001 From: nivethika Date: Wed, 25 Jan 2017 10:58:06 +0100 Subject: [PATCH 052/197] add disconnect-monitor template to radar.yml --- .../radar-cp-hadoop-stack/docker-compose.yml | 4 ++-- dcompose-stack/radar-cp-hadoop-stack/radar.yml | 18 ++++++++++++++++-- 2 files changed, 18 insertions(+), 4 deletions(-) diff --git a/dcompose-stack/radar-cp-hadoop-stack/docker-compose.yml b/dcompose-stack/radar-cp-hadoop-stack/docker-compose.yml index 2067a978d..5e34ba85c 100644 --- a/dcompose-stack/radar-cp-hadoop-stack/docker-compose.yml +++ 
b/dcompose-stack/radar-cp-hadoop-stack/docker-compose.yml @@ -264,7 +264,7 @@ services: CONNECT_INTERNAL_VALUE_CONVERTER: "org.apache.kafka.connect.json.JsonConverter" CONNECT_OFFSET_STORAGE_FILE_FILENAME: "/tmp/connect2.offset" CONNECT_REST_ADVERTISED_HOST_NAME: "radar-mongodb-connector" - CONNECT_ZOOKEEPER_CONNECT: zookeeper:2181 + CONNECT_ZOOKEEPER_CONNECT: zookeeper-1:2181 KAFKA_REST_PROXY: http://rest-proxy-1:8082 TOPIC_LIST: ${RADAR_TOPIC_LIST} @@ -307,7 +307,7 @@ services: CONNECT_INTERNAL_VALUE_CONVERTER: "org.apache.kafka.connect.json.JsonConverter" CONNECT_OFFSET_STORAGE_FILE_FILENAME: "/tmp/connect2.offset" CONNECT_REST_ADVERTISED_HOST_NAME: "radar-hdfs-connector" - CONNECT_ZOOKEEPER_CONNECT: zookeeper:2181 + CONNECT_ZOOKEEPER_CONNECT: zookeeper-1:2181 KAFKA_REST_PROXY: "http://rest-proxy-1:8082" TOPIC_LIST: ${RADAR_TOPIC_LIST} diff --git a/dcompose-stack/radar-cp-hadoop-stack/radar.yml b/dcompose-stack/radar-cp-hadoop-stack/radar.yml index a372883e2..462878443 100644 --- a/dcompose-stack/radar-cp-hadoop-stack/radar.yml +++ b/dcompose-stack/radar-cp-hadoop-stack/radar.yml @@ -39,9 +39,23 @@ schema_registry: #========================= Battery level monitor=========================# battery_monitor: level: CRITICAL - email_address: notifiersemail + email_address: + - notifiersemail + - notifier2@email email_host: smtp email_port: 25 email_user: user@example.com topics: - - android_empatica_e4_battery_level \ No newline at end of file + - android_empatica_e4_battery_level + +#========================= Battery level monitor=========================# +disconnect-monitor: + email_address: + - notifier@email + - notifier2@email + email_host: smtp + email_port: 25 + email_user: user@example.com + topics: + - android_empatica_e4_battery_level + timeout: 60000 \ No newline at end of file From ebcb89f66d5a559bb02f1a35bf0fdc0a251a5c31 Mon Sep 17 00:00:00 2001 From: nivethika Date: Wed, 25 Jan 2017 11:04:25 +0100 Subject: [PATCH 053/197] correct hdfs url --- dcompose-stack/radar-cp-hadoop-stack/extract_from_hdfs.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/dcompose-stack/radar-cp-hadoop-stack/extract_from_hdfs.sh b/dcompose-stack/radar-cp-hadoop-stack/extract_from_hdfs.sh index 802d98393..5f72edb3a 100755 --- a/dcompose-stack/radar-cp-hadoop-stack/extract_from_hdfs.sh +++ b/dcompose-stack/radar-cp-hadoop-stack/extract_from_hdfs.sh @@ -16,4 +16,4 @@ HDFS_OUTPUT_DIR=/home/output HDFS_COMMAND="hdfs dfs -get $HDFS_FILE $HDFS_OUTPUT_DIR" mkdir -p $OUTPUT_DIR -docker run --rm --network hadoop -v "$OUTPUT_DIR:$HDFS_OUTPUT_DIR" -e CLUSTER_NAME=radar-cns -e CORE_CONF_fs_defaultFS=hdfs://namenode:8020 uhopper/hadoop:2.7.2 $HDFS_COMMAND +docker run --rm --network hadoop -v "$OUTPUT_DIR:$HDFS_OUTPUT_DIR" -e CLUSTER_NAME=radar-cns -e CORE_CONF_fs_defaultFS=hdfs://hdfs-namenode:8020 uhopper/hadoop:2.7.2 $HDFS_COMMAND From 1a105df83b17911301c1ed058c804131d5840593 Mon Sep 17 00:00:00 2001 From: nivethika Date: Wed, 25 Jan 2017 12:31:05 +0100 Subject: [PATCH 054/197] typos --- dcompose-stack/radar-cp-hadoop-stack/radar.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/dcompose-stack/radar-cp-hadoop-stack/radar.yml b/dcompose-stack/radar-cp-hadoop-stack/radar.yml index 462878443..91888b681 100644 --- a/dcompose-stack/radar-cp-hadoop-stack/radar.yml +++ b/dcompose-stack/radar-cp-hadoop-stack/radar.yml @@ -49,7 +49,7 @@ battery_monitor: - android_empatica_e4_battery_level #========================= Battery level monitor=========================# -disconnect-monitor: 
+disconnect_monitor: email_address: - notifier@email - notifier2@email From 444a0f7603b161f0df7094dd16992cca3c50a400 Mon Sep 17 00:00:00 2001 From: nivethika Date: Wed, 25 Jan 2017 12:32:18 +0100 Subject: [PATCH 055/197] add docker-files of backend images --- images/radar-backend-kafka/Dockerfile | 33 ++++++++++ images/radar-backend-kafka/README.md | 30 +++++++++ images/radar-backend-kafka/init.sh | 9 +++ images/radar-backend-kafka/kafka_status.sh | 44 +++++++++++++ images/radar-backend-kafka/radar.yml | 37 +++++++++++ images/radar-hdfs-connector/Dockerfile | 27 ++++++++ images/radar-hdfs-connector/README.md | 44 +++++++++++++ images/radar-hdfs-connector/kafka_status.sh | 44 +++++++++++++ images/radar-hdfs-connector/launch | 47 ++++++++++++++ .../radar-hdfs-connector/sink-hdfs.properties | 8 +++ images/radar-mongodb-connector/Dockerfile | 27 ++++++++ images/radar-mongodb-connector/README.md | 61 +++++++++++++++++++ .../radar-mongodb-connector/kafka_status.sh | 44 +++++++++++++ images/radar-mongodb-connector/launch | 47 ++++++++++++++ .../sink-mongodb.properties | 25 ++++++++ 15 files changed, 527 insertions(+) create mode 100644 images/radar-backend-kafka/Dockerfile create mode 100644 images/radar-backend-kafka/README.md create mode 100755 images/radar-backend-kafka/init.sh create mode 100755 images/radar-backend-kafka/kafka_status.sh create mode 100644 images/radar-backend-kafka/radar.yml create mode 100644 images/radar-hdfs-connector/Dockerfile create mode 100644 images/radar-hdfs-connector/README.md create mode 100755 images/radar-hdfs-connector/kafka_status.sh create mode 100755 images/radar-hdfs-connector/launch create mode 100644 images/radar-hdfs-connector/sink-hdfs.properties create mode 100644 images/radar-mongodb-connector/Dockerfile create mode 100644 images/radar-mongodb-connector/README.md create mode 100755 images/radar-mongodb-connector/kafka_status.sh create mode 100755 images/radar-mongodb-connector/launch create mode 100644 images/radar-mongodb-connector/sink-mongodb.properties diff --git a/images/radar-backend-kafka/Dockerfile b/images/radar-backend-kafka/Dockerfile new file mode 100644 index 000000000..43fe1596e --- /dev/null +++ b/images/radar-backend-kafka/Dockerfile @@ -0,0 +1,33 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ + +FROM confluentinc/cp-base:3.1.1 + +MAINTAINER Nivethika M , Joris B , Francesco N + +LABEL description="RADAR-CNS Backend streams and monitor" + +# Install RADAR-Backend +RUN echo && echo "==> Installing Components" \ + # Download Git RADAR-Backend release + && echo "==> Downloading RADAR-CNS/RADAR-Backend v0.1-beta.1 release from GitHub" \ + && cd /usr/local && mkdir RADAR-Backend + +ADD https://github.com/RADAR-CNS/RADAR-Backend/releases/download/0.1-beta.1/radarbackend-0.1.jar /usr/share/java/ + +# Load topics validator +COPY ["./init.sh", "./kafka_status.sh", "/home/"] + +VOLUME /etc/radar.yml + +ENTRYPOINT ["./home/init.sh"] diff --git a/images/radar-backend-kafka/README.md b/images/radar-backend-kafka/README.md new file mode 100644 index 000000000..16eaae16f --- /dev/null +++ b/images/radar-backend-kafka/README.md @@ -0,0 +1,30 @@ +# Dockerised RADAR-Backend-Kafka + +It runs the RADAR-CNS Backend Kafka solution based on Kafka Streams 3.1.1, for more details about Kafka Streams click [here](http://docs.confluent.io/3.1.1/streams/index.html). + +Create the docker image: +``` +$ docker build -t radarcns/radar-backend-kafka ./ +``` + +Or pull from dockerhub: +``` +$ docker pull radarcns/radar-backend-kafka:0.1 +``` + +## Configuration + +Edit the radar.yml file to configure either the streams or the monitor. + +## Runtime environment variables + +This container requires two environment variable: + +- `KAFKA_REST_PROXY`: a valid Rest-Proxy instance +- `TOPIC_LIST`: a comma separated list containing all required topic names + +Before starting the streams, it waits until all topics inside TOPIC_LIST are available. This check is performed using the /topic Rest-Proxy API, for more details click here. + +## How to run + +For a complete use case scenario, check the RADAR-CNS `docker-compose` file available [here](https://github.com/RADAR-CNS/RADAR-Docker/blob/backend-integration/dcompose-stack/radar-cp-hadoop-stack/docker-compose.yml) \ No newline at end of file diff --git a/images/radar-backend-kafka/init.sh b/images/radar-backend-kafka/init.sh new file mode 100755 index 000000000..788df36ee --- /dev/null +++ b/images/radar-backend-kafka/init.sh @@ -0,0 +1,9 @@ +#!/bin/bash + +# Busy waiting loop that waits untill all topic are available +echo "===> Waiting RADAR-CNS topics ... " +./home/kafka_status.sh + +# Start streams +echo "===> Starting " $1 "...." +./usr/bin/java -jar /usr/share/java/radarbackend-0.1.jar -c /etc/radar.yml $1 \ No newline at end of file diff --git a/images/radar-backend-kafka/kafka_status.sh b/images/radar-backend-kafka/kafka_status.sh new file mode 100755 index 000000000..dbed2e33f --- /dev/null +++ b/images/radar-backend-kafka/kafka_status.sh @@ -0,0 +1,44 @@ +#!/bin/bash + +# Check if variables exist +if [ -z "$KAFKA_REST_PROXY" ]; then + echo "KAFKA_REST_PROXY is not defined" + exit 126 +fi + +if [ -z "$TOPIC_LIST" ]; then + echo "TOPIC_LIST is not defined" + exit 126 +fi + +# Fetch env topic list +IFS=', ' read -r -a needed <<< $TOPIC_LIST + +# Fetch env topic list +count=0 +interval=1 +while [ "$count" != "${#needed[@]}" ] ; do + + echo "Waiting $interval second before retrying ..." 
+ sleep $interval + if (( interval < 30 )); then + ((interval=interval*2)) + fi + + count=0 + TOPICS=$(curl -sSX GET -H "Content-Type: application/json" "$KAFKA_REST_PROXY/topics") + TOPICS="$(echo -e "${TOPICS}" | tr -d '"' | tr -d '[' | tr -d ']' | tr -d '[:space:]' )" + + IFS=',' read -r -a array <<< $TOPICS + for topic in "${array[@]}" + do + for need in "${needed[@]}" + do + if [ "$topic" = "$need" ] ; then + ((count++)) + fi + done + done +done + +echo "All topics are now available. Ready to go!" \ No newline at end of file diff --git a/images/radar-backend-kafka/radar.yml b/images/radar-backend-kafka/radar.yml new file mode 100644 index 000000000..18941a5a1 --- /dev/null +++ b/images/radar-backend-kafka/radar.yml @@ -0,0 +1,37 @@ +version: 1.0 +released: 2016-11-27 + +#============================= Application =============================# +log_path: +#Possible value are standalone or high_performance +mode: standalone + +#============================== Zookeeper ==============================# +#List of Zookeeper instances +zookeeper: + - host: zookeeper-1 + port: 2181 + +#================================ Kafka ================================# +#List of Kafka brokers +broker: + - host: kafka-1 + port: 9092 + +#Kafka internal parameters +auto_commit_interval_ms: 1000 +session_timeout_ms: 10000 + +#============================ Kafka Streams ============================# +#The number of threads that a stream must be run according is priority +stream_priority: + low: 1 + normal: 2 + high: 4 + +#=========================== Schema Registry ===========================# +#List of Schema Registry instances +schema_registry: + - host: schema-registry-1 + port: 8081 + protocol: http \ No newline at end of file diff --git a/images/radar-hdfs-connector/Dockerfile b/images/radar-hdfs-connector/Dockerfile new file mode 100644 index 000000000..9a8b8ee2f --- /dev/null +++ b/images/radar-hdfs-connector/Dockerfile @@ -0,0 +1,27 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +FROM confluentinc/cp-kafka-connect:3.1.1 + +MAINTAINER Nivethika M , Joris B , Francesco N + +LABEL description="RADAR-CNS Backend- HDFS Sink Connector" + +# Deploy RADAR-Backend - Mongodb Sink Connector +ADD https://github.com/RADAR-CNS/RADAR-Backend/releases/download/0.1-beta.1/radar-hdfs-connector-0.1.jar /etc/kafka-connect/jars/ + +# Load topics validator +COPY ./kafka_status.sh /home/kafka_status.sh + +# Load modified launcher +COPY launch /etc/confluent/docker/launch diff --git a/images/radar-hdfs-connector/README.md b/images/radar-hdfs-connector/README.md new file mode 100644 index 000000000..6b9ef06dd --- /dev/null +++ b/images/radar-hdfs-connector/README.md @@ -0,0 +1,44 @@ +# Dockerised RADAR-HDFS-Connector + +It runs the Confluent HDFS Connector 3.1.1 using a custom [RecordWriterProvider](https://github.com/RADAR-CNS/RADAR-Backend/blob/dev/src/main/java/org/radarcns/sink/hdfs/AvroRecordWriterProviderRadar.java) to support RADAR-CNS Avro schemas. 
For more details about Confluent HDFS Connector click [here](http://docs.confluent.io/3.1.1/connect/connect-hdfs/docs/index.html). + +Create the docker image: +``` +$ docker build -t radarcns/radar-hdfs-connector ./ +``` + +Or pull from dockerhub: +``` +$ docker pull radarcns/radar-hdfs-connector:0.1 +``` + +## Configuration + +This image has to be extended with a volume with appropriate `sink.properties` + +Sample HDFS sink.properties +``` +name=radar-hdfs-sink-android-15000 +connector.class=io.confluent.connect.hdfs.HdfsSinkConnector +tasks.max=4 +topics=topic1, topic2, ... +flush.size=15000 +hdfs.url=hdfs://namenode:8020 +format.class=org.radarcns.sink.hdfs.AvroFormatRadar +topics.dir=topicAndroidNew +``` + +## Runtime environment variables + +This container requires two environment variable: + +- `KAFKA_REST_PROXY`: a valid Rest-Proxy instance +- `TOPIC_LIST`: a comma separated list containing all required topic names + +Before starting the streams, it waits until all topics inside TOPIC_LIST are available. This check is performed using the /topic Rest-Proxy API, for more details click here. + +Note that connector's REST_PORT must be different from the one used by Rest-Proxy. + +## How to run + +For a complete use case scenario, check the RADAR-CNS `docker-compose` file available [here](https://github.com/RADAR-CNS/RADAR-Docker/blob/backend-integration/dcompose-stack/radar-cp-hadoop-stack/docker-compose.yml) \ No newline at end of file diff --git a/images/radar-hdfs-connector/kafka_status.sh b/images/radar-hdfs-connector/kafka_status.sh new file mode 100755 index 000000000..dbed2e33f --- /dev/null +++ b/images/radar-hdfs-connector/kafka_status.sh @@ -0,0 +1,44 @@ +#!/bin/bash + +# Check if variables exist +if [ -z "$KAFKA_REST_PROXY" ]; then + echo "KAFKA_REST_PROXY is not defined" + exit 126 +fi + +if [ -z "$TOPIC_LIST" ]; then + echo "TOPIC_LIST is not defined" + exit 126 +fi + +# Fetch env topic list +IFS=', ' read -r -a needed <<< $TOPIC_LIST + +# Fetch env topic list +count=0 +interval=1 +while [ "$count" != "${#needed[@]}" ] ; do + + echo "Waiting $interval second before retrying ..." + sleep $interval + if (( interval < 30 )); then + ((interval=interval*2)) + fi + + count=0 + TOPICS=$(curl -sSX GET -H "Content-Type: application/json" "$KAFKA_REST_PROXY/topics") + TOPICS="$(echo -e "${TOPICS}" | tr -d '"' | tr -d '[' | tr -d ']' | tr -d '[:space:]' )" + + IFS=',' read -r -a array <<< $TOPICS + for topic in "${array[@]}" + do + for need in "${needed[@]}" + do + if [ "$topic" = "$need" ] ; then + ((count++)) + fi + done + done +done + +echo "All topics are now available. Ready to go!" \ No newline at end of file diff --git a/images/radar-hdfs-connector/launch b/images/radar-hdfs-connector/launch new file mode 100755 index 000000000..636534a33 --- /dev/null +++ b/images/radar-hdfs-connector/launch @@ -0,0 +1,47 @@ +#!/usr/bin/env bash +# +# Copyright 2016 Confluent Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ + +# Override this section from the script to include the com.sun.management.jmxremote.rmi.port property. +if [ -z "$KAFKA_JMX_OPTS" ]; then + export KAFKA_JMX_OPTS="-Dcom.sun.management.jmxremote=true -Dcom.sun.management.jmxremote.authenticate=false -Dcom.sun.management.jmxremote.ssl=false " +fi + +# The JMX client needs to be able to connect to java.rmi.server.hostname. +# The default for bridged n/w is the bridged IP so you will only be able to connect from another docker container. +# For host n/w, this is the IP that the hostname on the host resolves to. + +# If you have more that one n/w configured, hostname -i gives you all the IPs, +# the default is to pick the first IP (or network). +export KAFKA_JMX_HOSTNAME=${KAFKA_JMX_HOSTNAME:-$(hostname -i | cut -d" " -f1)} + +if [ "$KAFKA_JMX_PORT" ]; then + # This ensures that the "if" section for JMX_PORT in kafka launch script does not trigger. + export JMX_PORT=$KAFKA_JMX_PORT + export KAFKA_JMX_OPTS="$KAFKA_JMX_OPTS -Djava.rmi.server.hostname=$KAFKA_JMX_HOSTNAME -Dcom.sun.management.jmxremote.local.only=false -Dcom.sun.management.jmxremote.rmi.port=$JMX_PORT -Dcom.sun.management.jmxremote.port=$JMX_PORT" +fi + +# Busy waiting loop that waits untill all topic are available +echo "===> Waiting RADAR-CNS topics ... " +./home/kafka_status.sh + +echo "===> Launching ${COMPONENT} ... new" +# Add external jars to the classpath +# And this also makes sure that the CLASSPATH does not start with ":/etc/..." +# because this causes the plugin scanner to scan the entire disk. +export CLASSPATH="/etc/${COMPONENT}/jars/*" +echo $CLASSPATH +exec connect-standalone /etc/"${COMPONENT}"/"${COMPONENT}".properties /etc/"${COMPONENT}"/sink.properties diff --git a/images/radar-hdfs-connector/sink-hdfs.properties b/images/radar-hdfs-connector/sink-hdfs.properties new file mode 100644 index 000000000..7a8649520 --- /dev/null +++ b/images/radar-hdfs-connector/sink-hdfs.properties @@ -0,0 +1,8 @@ +name=radar-hdfs-sink-android-15000 +connector.class=io.confluent.connect.hdfs.HdfsSinkConnector +tasks.max=4 +topics=android_empatica_e4_electrodermal_activity,android_empatica_e4_inter_beat_interval,android_empatica_e4_temperature +flush.size=15000 +hdfs.url=hdfs://namenode:8020 +format.class=org.radarcns.sink.hdfs.AvroFormatRadar +topics.dir=topicAndroidNew \ No newline at end of file diff --git a/images/radar-mongodb-connector/Dockerfile b/images/radar-mongodb-connector/Dockerfile new file mode 100644 index 000000000..850732bbf --- /dev/null +++ b/images/radar-mongodb-connector/Dockerfile @@ -0,0 +1,27 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ + +FROM confluentinc/cp-kafka-connect:3.1.1 + +MAINTAINER Nivethika M , Joris B , Francesco N + +LABEL description="RADAR-CNS Backend- MongoDB Sink Connector" + +# Deploy RADAR-Backend - Mongodb Sink Connector +ADD https://github.com/RADAR-CNS/RADAR-Backend/releases/download/0.1-beta.1/radar-mongodb-connector-0.1.jar /etc/kafka-connect/jars/ + +# Load topics validator +COPY ./kafka_status.sh /home/kafka_status.sh + +# Load modified launcher +COPY launch /etc/confluent/docker/launch diff --git a/images/radar-mongodb-connector/README.md b/images/radar-mongodb-connector/README.md new file mode 100644 index 000000000..6745a6325 --- /dev/null +++ b/images/radar-mongodb-connector/README.md @@ -0,0 +1,61 @@ +# Dockerised RADAR-MongoDB-Connector + +It runs the RADAR-CNS MongoDB Connector project based on Confluent Platform 3.1.1, for more details check the [repository](https://github.com/RADAR-CNS/RADAR-MongoDbConnector). + +Create the docker image: +``` +$ docker build -t radarcns/radar-mongodb-connector ./ +``` + +Or pull from dockerhub: +``` +$ docker pull radarcns/radar-mongodb-connector:0.1 +``` + +## Configuration + +This image has to be extended with a volume with appropriate `sink.properties` + +Sample MongoDB sink.properties +``` +# Kafka consumer configuration +name=radar-connector-mongodb-sink + +# Kafka connector configuration +connector.class=org.radarcns.mongodb.MongoDbSinkConnector +tasks.max=1 + +# Topics that will be consumed +topics=topic1, topic2 + +# MongoDB server +mongo.host=mongo +mongo.port=27017 + +# MongoDB configuration +mongo.username= +mongo.password= +mongo.database=mydbase + +# Collection name for putting data into the MongoDB database. The {$topic} token will be replaced +# by the Kafka topic name. +#mongo.collection.format={$topic} + +# Factory class to do the actual record conversion +record.converter.class=org.radarcns.sink.mongodb.RecordConverterFactoryRadar +``` + +## Runtime environment variables + +This container requires two environment variable: + +- `KAFKA_REST_PROXY`: a valid Rest-Proxy instance +- `TOPIC_LIST`: a comma separated list containing all required topic names + +Before starting the streams, it waits until all topics inside TOPIC_LIST are available. This check is performed using the /topic Rest-Proxy API, for more details click here. + +Note that connector's REST_PORT must be different from the one used by Rest-Proxy. + +## How to run + +For a complete use case scenario, check the RADAR-CNS `docker-compose` file available [here](https://github.com/RADAR-CNS/RADAR-Docker/blob/backend-integration/dcompose-stack/radar-cp-hadoop-stack/docker-compose.yml) \ No newline at end of file diff --git a/images/radar-mongodb-connector/kafka_status.sh b/images/radar-mongodb-connector/kafka_status.sh new file mode 100755 index 000000000..dbed2e33f --- /dev/null +++ b/images/radar-mongodb-connector/kafka_status.sh @@ -0,0 +1,44 @@ +#!/bin/bash + +# Check if variables exist +if [ -z "$KAFKA_REST_PROXY" ]; then + echo "KAFKA_REST_PROXY is not defined" + exit 126 +fi + +if [ -z "$TOPIC_LIST" ]; then + echo "TOPIC_LIST is not defined" + exit 126 +fi + +# Fetch env topic list +IFS=', ' read -r -a needed <<< $TOPIC_LIST + +# Fetch env topic list +count=0 +interval=1 +while [ "$count" != "${#needed[@]}" ] ; do + + echo "Waiting $interval second before retrying ..." 
+ sleep $interval + if (( interval < 30 )); then + ((interval=interval*2)) + fi + + count=0 + TOPICS=$(curl -sSX GET -H "Content-Type: application/json" "$KAFKA_REST_PROXY/topics") + TOPICS="$(echo -e "${TOPICS}" | tr -d '"' | tr -d '[' | tr -d ']' | tr -d '[:space:]' )" + + IFS=',' read -r -a array <<< $TOPICS + for topic in "${array[@]}" + do + for need in "${needed[@]}" + do + if [ "$topic" = "$need" ] ; then + ((count++)) + fi + done + done +done + +echo "All topics are now available. Ready to go!" \ No newline at end of file diff --git a/images/radar-mongodb-connector/launch b/images/radar-mongodb-connector/launch new file mode 100755 index 000000000..636534a33 --- /dev/null +++ b/images/radar-mongodb-connector/launch @@ -0,0 +1,47 @@ +#!/usr/bin/env bash +# +# Copyright 2016 Confluent Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +# Override this section from the script to include the com.sun.management.jmxremote.rmi.port property. +if [ -z "$KAFKA_JMX_OPTS" ]; then + export KAFKA_JMX_OPTS="-Dcom.sun.management.jmxremote=true -Dcom.sun.management.jmxremote.authenticate=false -Dcom.sun.management.jmxremote.ssl=false " +fi + +# The JMX client needs to be able to connect to java.rmi.server.hostname. +# The default for bridged n/w is the bridged IP so you will only be able to connect from another docker container. +# For host n/w, this is the IP that the hostname on the host resolves to. + +# If you have more that one n/w configured, hostname -i gives you all the IPs, +# the default is to pick the first IP (or network). +export KAFKA_JMX_HOSTNAME=${KAFKA_JMX_HOSTNAME:-$(hostname -i | cut -d" " -f1)} + +if [ "$KAFKA_JMX_PORT" ]; then + # This ensures that the "if" section for JMX_PORT in kafka launch script does not trigger. + export JMX_PORT=$KAFKA_JMX_PORT + export KAFKA_JMX_OPTS="$KAFKA_JMX_OPTS -Djava.rmi.server.hostname=$KAFKA_JMX_HOSTNAME -Dcom.sun.management.jmxremote.local.only=false -Dcom.sun.management.jmxremote.rmi.port=$JMX_PORT -Dcom.sun.management.jmxremote.port=$JMX_PORT" +fi + +# Busy waiting loop that waits untill all topic are available +echo "===> Waiting RADAR-CNS topics ... " +./home/kafka_status.sh + +echo "===> Launching ${COMPONENT} ... new" +# Add external jars to the classpath +# And this also makes sure that the CLASSPATH does not start with ":/etc/..." +# because this causes the plugin scanner to scan the entire disk. 
+export CLASSPATH="/etc/${COMPONENT}/jars/*" +echo $CLASSPATH +exec connect-standalone /etc/"${COMPONENT}"/"${COMPONENT}".properties /etc/"${COMPONENT}"/sink.properties diff --git a/images/radar-mongodb-connector/sink-mongodb.properties b/images/radar-mongodb-connector/sink-mongodb.properties new file mode 100644 index 000000000..f4b641e20 --- /dev/null +++ b/images/radar-mongodb-connector/sink-mongodb.properties @@ -0,0 +1,25 @@ +# Kafka consumer configuration +name=radar-connector-mongodb-sink + +# Kafka connector configuration +connector.class=org.radarcns.mongodb.MongoDbSinkConnector +tasks.max=1 + +# Topics that will be consumed +topics=android_empatica_e4_battery_level,android_empatica_e4_battery_level_output + +# MongoDB server +mongo.host=mongo +mongo.port=27017 + +# MongoDB configuration +#mongo.username= +#mongo.password= +mongo.database=mydbase + +# Collection name for putting data into the MongoDB database. The {$topic} token will be replaced +# by the Kafka topic name. +#mongo.collection.format={$topic} + +# Factory class to do the actual record conversion +record.converter.class=org.radarcns.sink.mongodb.RecordConverterFactoryRadar From 491e05a9d61c4d5ec07e4b4d4202ddc3dbc7ecf2 Mon Sep 17 00:00:00 2001 From: Francesco Nobilia Date: Wed, 25 Jan 2017 12:31:18 +0000 Subject: [PATCH 056/197] Removed jq dependency --- .../kafka-radarinit/Dockerfile | 6 ----- .../kafka-radarinit/topic_init.sh | 24 +++++++++---------- 2 files changed, 11 insertions(+), 19 deletions(-) diff --git a/dcompose-stack/radar-cp-hadoop-stack/kafka-radarinit/Dockerfile b/dcompose-stack/radar-cp-hadoop-stack/kafka-radarinit/Dockerfile index 47e9a7c19..b0f34b88f 100644 --- a/dcompose-stack/radar-cp-hadoop-stack/kafka-radarinit/Dockerfile +++ b/dcompose-stack/radar-cp-hadoop-stack/kafka-radarinit/Dockerfile @@ -1,11 +1,5 @@ FROM confluentinc/cp-kafka:3.1.1 -# Update aptitude with new repo -RUN apt-get update - -# Install jq to parse JSON -RUN apt-get install -y jq - # Copy bash file COPY ./topic_init.sh /home/ RUN chmod +x /home/topic_init.sh diff --git a/dcompose-stack/radar-cp-hadoop-stack/kafka-radarinit/topic_init.sh b/dcompose-stack/radar-cp-hadoop-stack/kafka-radarinit/topic_init.sh index 356fc7b68..12f0e792f 100755 --- a/dcompose-stack/radar-cp-hadoop-stack/kafka-radarinit/topic_init.sh +++ b/dcompose-stack/radar-cp-hadoop-stack/kafka-radarinit/topic_init.sh @@ -9,27 +9,25 @@ if [ -f /home/.radar_topic_set ]; then fi # Wait untill all brokers are up & running -INTERVAL=1 +interval=1 while [ "$LENGTH" != "$KAFKA_BROKERS" ]; do - BROKERS=$(curl -sS $KAFKA_REST_PROXY/brokers | jq '.brokers') - BROKERS="$(echo -e "${BROKERS}" | tr -d '[:space:]')" - BROKERS="${BROKERS:1}" + BROKERS=$(curl -sS $KAFKA_REST_PROXY/brokers) + BROKERS="$(echo -e "${BROKERS:12}" | tr -d '[:space:]' | tr -d '}' | tr -d ']')" - LENGTH=0 IFS=',' read -r -a array <<< $BROKERS - for element in "${array[@]}" - do - LENGTH=${#array[@]} - done + LENGTH=${#array[@]} if [ "$LENGTH" != "$KAFKA_BROKERS" ]; then - sleep $INTERVAL - if (( INTERVAL < 3 )); then - ((INTERVAL++)) - fi + echo "Only $LENGTH over $KAFKA_BROKERS brokers are currently available" + echo "Waiting $interval second before retrying ..." 
+ sleep $interval + if (( interval < 30 )); then + ((interval=interval*2)) + fi fi done + # Check if variables exist if [ -z "$RADAR_TOPICS" ]; then echo "RADAR_TOPICS is not defined" From a5f5f5b549fa6501d3e427cfc613435ba8d9175f Mon Sep 17 00:00:00 2001 From: Nivethika Mahasivam Date: Wed, 25 Jan 2017 16:53:32 +0100 Subject: [PATCH 057/197] links updated --- README.md | 16 +++++++++++----- 1 file changed, 11 insertions(+), 5 deletions(-) diff --git a/README.md b/README.md index d10b870cb..ca0344635 100644 --- a/README.md +++ b/README.md @@ -8,22 +8,28 @@ To install RADAR-CNS stack, do the following: 1. Install Docker Engine * Installation for macOS (Follow [installer](https://docs.docker.com/engine/installation/mac/) from Docker) * Installation for Windows ( Follow [installer](https://docs.docker.com/docker-for-windows/ from Docker) - * Installation for Ubuntu (Follow our [wiki](https://github.com/RADAR-CNS/RADAR-Docker/wiki/How-to-set-up-docker-on-ubuntu) page) + * Installation for Ubuntu (Follow [Docker instructions](https://docs.docker.com/engine/installation/linux/ubuntu/)) * For other Linux distributions, install Docker engine from [the list by Docker](https://docs.docker.com/engine/installation/). Install `docker-compose` using the [installation guide](https://docs.docker.com/compose/install/) or by following the [wiki](https://github.com/RADAR-CNS/RADAR-Docker/wiki/How-to-set-up-docker-on-ubuntu#install-docker-compose). -2. Verify the Docker installation by running on the command-line: +2. Install `docker-compose` by following instructions [here](https://github.com/RADAR-CNS/RADAR-Docker/wiki/How-to-set-up-docker-on-ubuntu#install-docker-compose) +3. Verify the Docker installation by running on the command-line: ```shell sudo docker --version sudo docker-compose --version ``` This should show Docker version 1.12 or later and docker-compose version 1.9.0 or later. -3. Install [git](https://git-scm.com/book/en/v2/Getting-Started-Installing-Git) for your platform. -4. Clone [RADAR-Docker](https://github.com/RADAR-CNS/RADAR-Docker) repository from GitHub. +4. Install [git](https://git-scm.com/book/en/v2/Getting-Started-Installing-Git) for your platform. + 1. For Ubuntu + ```shell + sudo apt-get install git + ``` + +5. Clone [RADAR-Docker](https://github.com/RADAR-CNS/RADAR-Docker) repository from GitHub. ```shell git clone https://github.com/RADAR-CNS/RADAR-Docker.git ``` -5. Install required component stack following the instructions below. +6. Install required component stack following the instructions below. 
 ## Usage

From 1f5a46165e9ff368580a7495cd27ee473882cfb9 Mon Sep 17 00:00:00 2001
From: nivethika
Date: Fri, 27 Jan 2017 17:13:38 +0100
Subject: [PATCH 058/197] start and stop scripts

---
 .../start-radar-stack.sh                          | 19 +++++++++++++++++++
 .../radar-cp-hadoop-stack/stop-radar-stack.sh     |  4 ++++
 2 files changed, 23 insertions(+)
 create mode 100644 dcompose-stack/radar-cp-hadoop-stack/start-radar-stack.sh
 create mode 100644 dcompose-stack/radar-cp-hadoop-stack/stop-radar-stack.sh

diff --git a/dcompose-stack/radar-cp-hadoop-stack/start-radar-stack.sh b/dcompose-stack/radar-cp-hadoop-stack/start-radar-stack.sh
new file mode 100644
index 000000000..61d153cc3
--- /dev/null
+++ b/dcompose-stack/radar-cp-hadoop-stack/start-radar-stack.sh
@@ -0,0 +1,19 @@
+#!/bin/bash
+
+command_exists() {
+	command -v "$@" > /dev/null 2>&1
+}
+
+if command_exists docker; then
+  echo $(docker --version)
+fi
+
+if command_exists docker-compose; then
+  echo $(docker-compose --version)
+fi
+
+echo "==> Creating docker network - hadoop"
+sudo docker network create hadoop
+
+echo "==> Starting RADAR-CNS Platform"
+sudo docker-compose up -d
\ No newline at end of file
diff --git a/dcompose-stack/radar-cp-hadoop-stack/stop-radar-stack.sh b/dcompose-stack/radar-cp-hadoop-stack/stop-radar-stack.sh
new file mode 100644
index 000000000..de9b461a7
--- /dev/null
+++ b/dcompose-stack/radar-cp-hadoop-stack/stop-radar-stack.sh
@@ -0,0 +1,4 @@
+#!/bin/bash
+
+echo "==> Stopping RADAR-CNS Stack"
+sudo docker-compose down
\ No newline at end of file

From e8ec1e4dcce21b71b8c1539bb30f9c03a3b81ffe Mon Sep 17 00:00:00 2001
From: Francesco Nobilia
Date: Sat, 28 Jan 2017 16:40:48 +0000
Subject: [PATCH 059/197] Scripts to install, start, reboot and stop RADAR-CNS
 platform

This commit also fixes the network issue between the connectors and Rest-Proxy.

The new launch and kafka_status.sh files recover from the network issue by
rebooting the connectors until they succeed.
--- dcompose-stack/radar-cp-hadoop-stack/.env | 3 +- .../radar-cp-hadoop-stack/docker-compose.yml | 8 +-- .../install-radar-stack.sh | 56 +++++++++++++++++++ .../kafka-radarinit/topic_init.sh | 3 +- .../reboot-radar-stack.sh | 34 +++++++++++ .../start-radar-stack.sh | 28 +++++++--- .../radar-cp-hadoop-stack/stop-radar-stack.sh | 0 images/radar-hdfs-connector/kafka_status.sh | 14 ++++- images/radar-hdfs-connector/launch | 8 ++- .../radar-mongodb-connector/kafka_status.sh | 14 ++++- images/radar-mongodb-connector/launch | 8 ++- 11 files changed, 155 insertions(+), 21 deletions(-) create mode 100755 dcompose-stack/radar-cp-hadoop-stack/install-radar-stack.sh create mode 100755 dcompose-stack/radar-cp-hadoop-stack/reboot-radar-stack.sh mode change 100644 => 100755 dcompose-stack/radar-cp-hadoop-stack/start-radar-stack.sh mode change 100644 => 100755 dcompose-stack/radar-cp-hadoop-stack/stop-radar-stack.sh diff --git a/dcompose-stack/radar-cp-hadoop-stack/.env b/dcompose-stack/radar-cp-hadoop-stack/.env index fab55092f..468e23f32 100644 --- a/dcompose-stack/radar-cp-hadoop-stack/.env +++ b/dcompose-stack/radar-cp-hadoop-stack/.env @@ -2,7 +2,8 @@ HDFS_DATA_DIR_1=/usr/local/var/lib/docker/hdfs-data-1 HDFS_DATA_DIR_2=/usr/local/var/lib/docker/hdfs-data-2 HDFS_NAME_DIR_1=/usr/local/var/lib/docker/hdfs-name-1 HDFS_NAME_DIR_2=/usr/local/var/lib/docker/hdfs-name-2 +RADAR_TOPIC_LIST=android_empatica_e4_acceleration, android_empatica_e4_acceleration_output, android_empatica_e4_battery_level, android_empatica_e4_battery_level_output, android_empatica_e4_blood_volume_pulse, android_empatica_e4_blood_volume_pulse_output, android_empatica_e4_electrodermal_activity, android_empatica_e4_electrodermal_activity_output, android_empatica_e4_heartrate_output, android_empatica_e4_inter_beat_interval, android_empatica_e4_inter_beat_interval_output, android_empatica_e4_sensor_status, android_empatica_e4_sensor_status_output, android_empatica_e4_temperature, android_empatica_e4_temperature_output +RADAR_RAW_TOPIC_LIST=android_empatica_e4_acceleration, android_empatica_e4_battery_level, android_empatica_e4_blood_volume_pulse, android_empatica_e4_electrodermal_activity, android_empatica_e4_inter_beat_interval, android_empatica_e4_sensor_status, android_empatica_e4_temperature HOTSTORAGE_USERNAME= HOTSTORAGE_PASSWORD=XXXXXXXX HOTSTORAGE_NAME= -RADAR_TOPIC_LIST=android_empatica_e4_acceleration, android_empatica_e4_acceleration_output, android_empatica_e4_battery_level, android_empatica_e4_battery_level_output, android_empatica_e4_blood_volume_pulse, android_empatica_e4_blood_volume_pulse_output, android_empatica_e4_electrodermal_activity, android_empatica_e4_electrodermal_activity_output, android_empatica_e4_heartrate_output, android_empatica_e4_inter_beat_interval, android_empatica_e4_inter_beat_interval_output, android_empatica_e4_sensor_status, android_empatica_e4_sensor_status_output, android_empatica_e4_temperature, android_empatica_e4_temperature_output diff --git a/dcompose-stack/radar-cp-hadoop-stack/docker-compose.yml b/dcompose-stack/radar-cp-hadoop-stack/docker-compose.yml index 5e34ba85c..5bf995d44 100644 --- a/dcompose-stack/radar-cp-hadoop-stack/docker-compose.yml +++ b/dcompose-stack/radar-cp-hadoop-stack/docker-compose.yml @@ -233,7 +233,7 @@ services: #---------------------------------------------------------------------------# radar-mongodb-connector: image: radarcns/radar-mongodb-connector-auto:0.1 - restart: on-failure:3 + restart: on-failure volumes: - 
./sink-mongo.properties:/etc/kafka-connect/sink.properties networks: @@ -273,7 +273,7 @@ services: #---------------------------------------------------------------------------# radar-hdfs-connector: image: radarcns/radar-hdfs-connector-auto:0.1 - restart: on-failure:3 + restart: on-failure volumes: - ./sink-hdfs.properties:/etc/kafka-connect/sink.properties networks: @@ -294,7 +294,7 @@ services: - hdfs-namenode environment: CONNECT_BOOTSTRAP_SERVERS: PLAINTEXT://kafka-1:9092,PLAINTEXT://kafka-2:9092,PLAINTEXT://kafka-3:9092 - CONNECT_REST_PORT: 8084 + CONNECT_REST_PORT: 8083 CONNECT_GROUP_ID: "default" CONNECT_CONFIG_STORAGE_TOPIC: "default.config" CONNECT_OFFSET_STORAGE_TOPIC: "default.offsets" @@ -308,7 +308,7 @@ services: CONNECT_OFFSET_STORAGE_FILE_FILENAME: "/tmp/connect2.offset" CONNECT_REST_ADVERTISED_HOST_NAME: "radar-hdfs-connector" CONNECT_ZOOKEEPER_CONNECT: zookeeper-1:2181 - KAFKA_REST_PROXY: "http://rest-proxy-1:8082" + KAFKA_REST_PROXY: http://rest-proxy-1:8082 TOPIC_LIST: ${RADAR_TOPIC_LIST} #---------------------------------------------------------------------------# diff --git a/dcompose-stack/radar-cp-hadoop-stack/install-radar-stack.sh b/dcompose-stack/radar-cp-hadoop-stack/install-radar-stack.sh new file mode 100755 index 000000000..fe434b684 --- /dev/null +++ b/dcompose-stack/radar-cp-hadoop-stack/install-radar-stack.sh @@ -0,0 +1,56 @@ +#!/bin/bash + +command_exists() { + command -v "$@" > /dev/null 2>&1 +} + +echo "Linux version: "$(uname -a) + +if command_exists docker + then + echo "Docker version: "$(docker --version) + else + echo "RADAR-CNS cannot start without Docker. Please, install Docker and then try again" + exit 1 +fi + +if command_exists docker-compose + then + echo "Docker-compose version: "$(docker-compose --version) + else + echo "RADAR-CNS cannot start without docker-compose. Please, install docker-compose and then try again" + exit 1 +fi + +if [ ! -d /usr/local/var/lib/docker ]; then + echo "RADAR-CNS stores HDFS volumes at /usr/local/var/lib/docker. 
If this folder does not exist, please create the entire path and then try again" + exit 1 +fi + +echo "==> Creating docker network - hadoop" +sudo docker network create hadoop + +echo "==> Setting MongoDB Connector" +# Extract credentials from .env file +username=$(cat .env | grep HOTSTORAGE_USERNAME=radar) +password=$(cat .env | grep HOTSTORAGE_PASSWORD=radar) +database=$(cat .env | grep HOTSTORAGE_NAME=hotstorage) +username="$(echo -e "${username:20}" | tr -d '[:space:]' )" +password="$(echo -e "${password:20}" | tr -d '[:space:]' )" +database="$(echo -e "${database:16}" | tr -d '[:space:]' )" +# Update sink-mongo.properties +sed -i '/mongo.username=/c\mongo.username='$username sink-mongo.properties +sed -i '/mongo.password=/c\mongo.password='$password sink-mongo.properties +sed -i '/mongo.database=/c\mongo.database='$database sink-mongo.properties +# Set topics +topic_list=$(cat .env | grep RADAR_TOPIC_LIST) +topic_list="$(echo -e "${topic_list:17}")" +sed -i '/topics=/c\topics='"$topic_list" sink-mongo.properties + +echo "==> Setting HDFS Connector" +raw_topic=$(cat .env | grep RADAR_RAW_TOPIC_LIST) +raw_topic="$(echo -e "${raw_topic:21}")" +sed -i '/topics=/c\topics='"$raw_topic" sink-hdfs.properties + +echo "==> Starting RADAR-CNS Platform" +sudo docker-compose up -d diff --git a/dcompose-stack/radar-cp-hadoop-stack/kafka-radarinit/topic_init.sh b/dcompose-stack/radar-cp-hadoop-stack/kafka-radarinit/topic_init.sh index 12f0e792f..6398c7944 100755 --- a/dcompose-stack/radar-cp-hadoop-stack/kafka-radarinit/topic_init.sh +++ b/dcompose-stack/radar-cp-hadoop-stack/kafka-radarinit/topic_init.sh @@ -18,8 +18,7 @@ while [ "$LENGTH" != "$KAFKA_BROKERS" ]; do LENGTH=${#array[@]} if [ "$LENGTH" != "$KAFKA_BROKERS" ]; then - echo "Only $LENGTH over $KAFKA_BROKERS brokers are currently available" - echo "Waiting $interval second before retrying ..." + echo "Expected $KAFKA_BROKERS brokers but found only $LENGTH. Waiting $interval second before retrying ..." sleep $interval if (( interval < 30 )); then ((interval=interval*2)) diff --git a/dcompose-stack/radar-cp-hadoop-stack/reboot-radar-stack.sh b/dcompose-stack/radar-cp-hadoop-stack/reboot-radar-stack.sh new file mode 100755 index 000000000..0ba71bce7 --- /dev/null +++ b/dcompose-stack/radar-cp-hadoop-stack/reboot-radar-stack.sh @@ -0,0 +1,34 @@ +#!/bin/bash + +command_exists() { + command -v "$@" > /dev/null 2>&1 +} + +echo "Linux version: "$(uname -a) + +if command_exists docker + then + echo "Docker version: "$(docker --version) + else + echo "RADAR-CNS cannot start without Docker. Please, install Docker and then try again" + exit 1 +fi + +if command_exists docker-compose + then + echo "Docker-compose version: "$(docker-compose --version) + else + echo "RADAR-CNS cannot start without docker-compose. Please, install docker-compose and then try again" + exit 1 +fi + +if [ ! -d /usr/local/var/lib/docker ]; then + echo "RADAR-CNS stores HDFS volumes at /usr/local/var/lib/docker. 
If this folder does not exist, please create the entire path and then try again" + exit 1 +fi + +echo "==> Stopping RADAR-CNS Platform" +sudo docker-compose down + +echo "==> Starting RADAR-CNS Platform" +sudo docker-compose up -d diff --git a/dcompose-stack/radar-cp-hadoop-stack/start-radar-stack.sh b/dcompose-stack/radar-cp-hadoop-stack/start-radar-stack.sh old mode 100644 new mode 100755 index 61d153cc3..6fb028cef --- a/dcompose-stack/radar-cp-hadoop-stack/start-radar-stack.sh +++ b/dcompose-stack/radar-cp-hadoop-stack/start-radar-stack.sh @@ -1,19 +1,31 @@ #!/bin/bash command_exists() { - command -v "$@" > /dev/null 2>&1 + command -v "$@" > /dev/null 2>&1 } -if command_exists docker; then - echo $(docker --version) +echo "Linux version: "$(uname -a) + +if command_exists docker + then + echo "Docker version: "$(docker --version) + else + echo "RADAR-CNS cannot start without Docker. Please, install Docker and then try again" + exit 1 fi -if command_exists docker-compose; then - echo $(docker-compose --version) +if command_exists docker-compose + then + echo "Docker-compose version: "$(docker-compose --version) + else + echo "RADAR-CNS cannot start without docker-compose. Please, install docker-compose and then try again" + exit 1 fi -echo "==> Creating docker network - hadoop" -sudo docker network create hadoop +if [ ! -d /usr/local/var/lib/docker ]; then + echo "RADAR-CNS stores HDFS volumes at /usr/local/var/lib/docker. If this folder does not exist, please create the entire path and then try again" + exit 1 +fi echo "==> Starting RADAR-CNS Platform" -sudo docker-compose up -d \ No newline at end of file +sudo docker-compose up -d diff --git a/dcompose-stack/radar-cp-hadoop-stack/stop-radar-stack.sh b/dcompose-stack/radar-cp-hadoop-stack/stop-radar-stack.sh old mode 100644 new mode 100755 diff --git a/images/radar-hdfs-connector/kafka_status.sh b/images/radar-hdfs-connector/kafka_status.sh index dbed2e33f..baeb07bb4 100755 --- a/images/radar-hdfs-connector/kafka_status.sh +++ b/images/radar-hdfs-connector/kafka_status.sh @@ -17,8 +17,15 @@ IFS=', ' read -r -a needed <<< $TOPIC_LIST # Fetch env topic list count=0 interval=1 +max_retryes=5 while [ "$count" != "${#needed[@]}" ] ; do + if [ "$max_retryes" -eq "0" ] ; then + echo "Error connecting to Rest-Proxy ... " + echo "Rebooting ... " + exit 126 + fi + echo "Waiting $interval second before retrying ..." sleep $interval if (( interval < 30 )); then @@ -27,6 +34,7 @@ while [ "$count" != "${#needed[@]}" ] ; do count=0 TOPICS=$(curl -sSX GET -H "Content-Type: application/json" "$KAFKA_REST_PROXY/topics") + curl_result=$? TOPICS="$(echo -e "${TOPICS}" | tr -d '"' | tr -d '[' | tr -d ']' | tr -d '[:space:]' )" IFS=',' read -r -a array <<< $TOPICS @@ -39,6 +47,10 @@ while [ "$count" != "${#needed[@]}" ] ; do fi done done + + if [ "$curl_result" -ne "0" ] ; then + ((max_retryes--)) + fi done -echo "All topics are now available. Ready to go!" \ No newline at end of file +echo "All topics are now available. Ready to go!" 
diff --git a/images/radar-hdfs-connector/launch b/images/radar-hdfs-connector/launch index 636534a33..5b584b6f2 100755 --- a/images/radar-hdfs-connector/launch +++ b/images/radar-hdfs-connector/launch @@ -34,9 +34,13 @@ if [ "$KAFKA_JMX_PORT" ]; then export KAFKA_JMX_OPTS="$KAFKA_JMX_OPTS -Djava.rmi.server.hostname=$KAFKA_JMX_HOSTNAME -Dcom.sun.management.jmxremote.local.only=false -Dcom.sun.management.jmxremote.rmi.port=$JMX_PORT -Dcom.sun.management.jmxremote.port=$JMX_PORT" fi -# Busy waiting loop that waits untill all topic are available -echo "===> Waiting RADAR-CNS topics ... " +# Busy waiting loop that waits untill all topic are available +echo "===> Wait for infrastructure ..." ./home/kafka_status.sh +radar_check=$? +if [ "$radar_check" -ne 0 ]; then + exit $radar_check +fi echo "===> Launching ${COMPONENT} ... new" # Add external jars to the classpath diff --git a/images/radar-mongodb-connector/kafka_status.sh b/images/radar-mongodb-connector/kafka_status.sh index dbed2e33f..baeb07bb4 100755 --- a/images/radar-mongodb-connector/kafka_status.sh +++ b/images/radar-mongodb-connector/kafka_status.sh @@ -17,8 +17,15 @@ IFS=', ' read -r -a needed <<< $TOPIC_LIST # Fetch env topic list count=0 interval=1 +max_retryes=5 while [ "$count" != "${#needed[@]}" ] ; do + if [ "$max_retryes" -eq "0" ] ; then + echo "Error connecting to Rest-Proxy ... " + echo "Rebooting ... " + exit 126 + fi + echo "Waiting $interval second before retrying ..." sleep $interval if (( interval < 30 )); then @@ -27,6 +34,7 @@ while [ "$count" != "${#needed[@]}" ] ; do count=0 TOPICS=$(curl -sSX GET -H "Content-Type: application/json" "$KAFKA_REST_PROXY/topics") + curl_result=$? TOPICS="$(echo -e "${TOPICS}" | tr -d '"' | tr -d '[' | tr -d ']' | tr -d '[:space:]' )" IFS=',' read -r -a array <<< $TOPICS @@ -39,6 +47,10 @@ while [ "$count" != "${#needed[@]}" ] ; do fi done done + + if [ "$curl_result" -ne "0" ] ; then + ((max_retryes--)) + fi done -echo "All topics are now available. Ready to go!" \ No newline at end of file +echo "All topics are now available. Ready to go!" diff --git a/images/radar-mongodb-connector/launch b/images/radar-mongodb-connector/launch index 636534a33..5b584b6f2 100755 --- a/images/radar-mongodb-connector/launch +++ b/images/radar-mongodb-connector/launch @@ -34,9 +34,13 @@ if [ "$KAFKA_JMX_PORT" ]; then export KAFKA_JMX_OPTS="$KAFKA_JMX_OPTS -Djava.rmi.server.hostname=$KAFKA_JMX_HOSTNAME -Dcom.sun.management.jmxremote.local.only=false -Dcom.sun.management.jmxremote.rmi.port=$JMX_PORT -Dcom.sun.management.jmxremote.port=$JMX_PORT" fi -# Busy waiting loop that waits untill all topic are available -echo "===> Waiting RADAR-CNS topics ... " +# Busy waiting loop that waits untill all topic are available +echo "===> Wait for infrastructure ..." ./home/kafka_status.sh +radar_check=$? +if [ "$radar_check" -ne 0 ]; then + exit $radar_check +fi echo "===> Launching ${COMPONENT} ... 
new" # Add external jars to the classpath From 3cc30c4804ea33f1916bff25a2de1c671ed1f590 Mon Sep 17 00:00:00 2001 From: Francesco Nobilia Date: Sun, 29 Jan 2017 20:25:02 +0000 Subject: [PATCH 060/197] Fixed docker-compose command --- dcompose-stack/radar-cp-hadoop-stack/reboot-radar-stack.sh | 7 ++----- dcompose-stack/radar-cp-hadoop-stack/start-radar-stack.sh | 2 +- dcompose-stack/radar-cp-hadoop-stack/stop-radar-stack.sh | 2 +- 3 files changed, 4 insertions(+), 7 deletions(-) diff --git a/dcompose-stack/radar-cp-hadoop-stack/reboot-radar-stack.sh b/dcompose-stack/radar-cp-hadoop-stack/reboot-radar-stack.sh index 0ba71bce7..50768892b 100755 --- a/dcompose-stack/radar-cp-hadoop-stack/reboot-radar-stack.sh +++ b/dcompose-stack/radar-cp-hadoop-stack/reboot-radar-stack.sh @@ -27,8 +27,5 @@ if [ ! -d /usr/local/var/lib/docker ]; then exit 1 fi -echo "==> Stopping RADAR-CNS Platform" -sudo docker-compose down - -echo "==> Starting RADAR-CNS Platform" -sudo docker-compose up -d +echo "==> Restarting RADAR-CNS Platform" +sudo docker-compose restart \ No newline at end of file diff --git a/dcompose-stack/radar-cp-hadoop-stack/start-radar-stack.sh b/dcompose-stack/radar-cp-hadoop-stack/start-radar-stack.sh index 6fb028cef..4bfda70a0 100755 --- a/dcompose-stack/radar-cp-hadoop-stack/start-radar-stack.sh +++ b/dcompose-stack/radar-cp-hadoop-stack/start-radar-stack.sh @@ -28,4 +28,4 @@ if [ ! -d /usr/local/var/lib/docker ]; then fi echo "==> Starting RADAR-CNS Platform" -sudo docker-compose up -d +sudo docker-compose start diff --git a/dcompose-stack/radar-cp-hadoop-stack/stop-radar-stack.sh b/dcompose-stack/radar-cp-hadoop-stack/stop-radar-stack.sh index de9b461a7..505aa969b 100755 --- a/dcompose-stack/radar-cp-hadoop-stack/stop-radar-stack.sh +++ b/dcompose-stack/radar-cp-hadoop-stack/stop-radar-stack.sh @@ -1,4 +1,4 @@ #!/bin/bash echo "==> Stopping RADAR-CNS Stack" -sudo docker-compose down \ No newline at end of file +sudo docker-compose stop \ No newline at end of file From e3a796892cc6facc17f8adb40a90c973488854eb Mon Sep 17 00:00:00 2001 From: Francesco Nobilia Date: Sun, 29 Jan 2017 20:40:49 +0000 Subject: [PATCH 061/197] Added new instructions based on bash scripts --- README.md | 55 ++++++++++++++++++++++--------------------------------- 1 file changed, 22 insertions(+), 33 deletions(-) diff --git a/README.md b/README.md index ca0344635..ff8a1c497 100644 --- a/README.md +++ b/README.md @@ -1,6 +1,6 @@ # RADAR-Docker -The dockerized RADAR stack for deploying the RADAR-CNS platform. Component repositories can be found here [RADAR-CNS DockerHub org](https://hub.docker.com/u/radarcns/dashboard/) +The dockerized RADAR stack for deploying the RADAR-CNS platform. Component repositories can be found at [RADAR-CNS DockerHub org](https://hub.docker.com/u/radarcns/dashboard/) ## Installation instructions To install RADAR-CNS stack, do the following: @@ -77,12 +77,7 @@ To run RADAR-CNS stack in a single node setup: ```shell cd RADAR-Docker/dcompose-stack/radar-hadoop-cp-stack/ ``` -2. Hadoop requires an external network. Create a network named `hadoop`: - - ```shell - sudo docker network create hadoop - ``` -3. Configure monitor settings in `radar.yml`: +2. 
Configure monitor settings in `radar.yml`: ```yaml battery_monitor: @@ -93,6 +88,7 @@ To run RADAR-CNS stack in a single node setup: email_user: user@example.com topics: - android_empatica_e4_battery_level + disconnect_monitor: # timeout in milliseconds -> 5 minutes timeout: 300000 @@ -105,50 +101,43 @@ To run RADAR-CNS stack in a single node setup: topics: - android_empatica_e4_temperature ``` -4. Create `smtp.env` and configure your email settings following `smtp.env.template`. Configure alternative mail providers like Amazon SES or Gmail by using the parameters of the [`namshi/smtp` Docker image](https://hub.docker.com/r/namshi/smtp/). -5. (Optional) Modify topics, flush.size and HDFS direcotory for Cold storage in `sink-hdfs.properties` +3. Create `smtp.env` and configure your email settings following `smtp.env.template`. Configure alternative mail providers like Amazon SES or Gmail by using the parameters of the [`namshi/smtp` Docker image](https://hub.docker.com/r/namshi/smtp/). +4. (Optional) Modify flush.size and HDFS direcotory for Cold storage in `sink-hdfs.properties` ```ini - topics=topic1,topic2 flush.size= topics.dir=/path/to/data ``` -6. Configure Hot Storage settings in `.env` file +5. Configure Hot Storage settings in `.env` file ```ini HOTSTORAGE_USERNAME=mongodb-user HOTSTORAGE_PASSWORD=XXXXXXXX HOTSTORAGE_NAME=mongodb-database - ``` - > **Note**: These properties are used to initialise a MongoDB database from scratch and to establish a connection between MongoDB and Rest-API -7. Modify topics and MongoDB configuration for Hot storage in `sink-mongo.properties` - - ```ini - # Topics that will be consumed - topics=topic1,topic2 - # MongoDB configuration - mongo.username=mongodb-user - mongo.password=XXXXXXXX - mongo.database=mongodb-database - ``` - > **Note**: The MongoDB configuration must mirror `.env` file parameters configurated at point 6 -8. (Optional) For secuirity reasons, the `auto.creation.topics.enable` has been set to `false`. To create the required topics, modify the comma separated list parameter `RADAR_TOPIC_LIST` in `.env` file - - ```ini - RADAR_TOPIC_LIST=topic1, topic2 - ``` - > **Note**: The parameter has been already set up to support Empatica E4 integration. -9. Start the stack + ``` +6. 
To install the stack ```shell - sudo docker-compose up -d --build + sudo ./install-radar-stack.sh ``` To stop RADAR-CNS stack on a single node setup, run ```shell cd RADAR-Docker/dcompose-stack/radar-hadoop-cp-stack/ -sudo docker-compose down +sudo ./stop-radar-stack.sh +``` +To reboot RADAR-CNS stack on a single node setup, run + +```shell +cd RADAR-Docker/dcompose-stack/radar-hadoop-cp-stack/ +sudo ./reboot-radar-stack.sh +``` +To start RADAR-CNS stack on a single node setup after installing, run + +```shell +cd RADAR-Docker/dcompose-stack/radar-hadoop-cp-stack/ +sudo ./start-radar-stack.sh ``` ## Work in progress From 1c50fc2bc9987ab2e0b0071c4eccf43f18d15ebc Mon Sep 17 00:00:00 2001 From: Francesco Nobilia Date: Sun, 29 Jan 2017 21:29:37 +0000 Subject: [PATCH 062/197] New topic variables in install-radar-stack --- dcompose-stack/radar-cp-hadoop-stack/.env | 1 - .../radar-cp-hadoop-stack/install-radar-stack.sh | 11 +++++------ 2 files changed, 5 insertions(+), 7 deletions(-) diff --git a/dcompose-stack/radar-cp-hadoop-stack/.env b/dcompose-stack/radar-cp-hadoop-stack/.env index 468e23f32..5069036b7 100644 --- a/dcompose-stack/radar-cp-hadoop-stack/.env +++ b/dcompose-stack/radar-cp-hadoop-stack/.env @@ -3,7 +3,6 @@ HDFS_DATA_DIR_2=/usr/local/var/lib/docker/hdfs-data-2 HDFS_NAME_DIR_1=/usr/local/var/lib/docker/hdfs-name-1 HDFS_NAME_DIR_2=/usr/local/var/lib/docker/hdfs-name-2 RADAR_TOPIC_LIST=android_empatica_e4_acceleration, android_empatica_e4_acceleration_output, android_empatica_e4_battery_level, android_empatica_e4_battery_level_output, android_empatica_e4_blood_volume_pulse, android_empatica_e4_blood_volume_pulse_output, android_empatica_e4_electrodermal_activity, android_empatica_e4_electrodermal_activity_output, android_empatica_e4_heartrate_output, android_empatica_e4_inter_beat_interval, android_empatica_e4_inter_beat_interval_output, android_empatica_e4_sensor_status, android_empatica_e4_sensor_status_output, android_empatica_e4_temperature, android_empatica_e4_temperature_output -RADAR_RAW_TOPIC_LIST=android_empatica_e4_acceleration, android_empatica_e4_battery_level, android_empatica_e4_blood_volume_pulse, android_empatica_e4_electrodermal_activity, android_empatica_e4_inter_beat_interval, android_empatica_e4_sensor_status, android_empatica_e4_temperature HOTSTORAGE_USERNAME= HOTSTORAGE_PASSWORD=XXXXXXXX HOTSTORAGE_NAME= diff --git a/dcompose-stack/radar-cp-hadoop-stack/install-radar-stack.sh b/dcompose-stack/radar-cp-hadoop-stack/install-radar-stack.sh index fe434b684..3ee6aa941 100755 --- a/dcompose-stack/radar-cp-hadoop-stack/install-radar-stack.sh +++ b/dcompose-stack/radar-cp-hadoop-stack/install-radar-stack.sh @@ -1,5 +1,8 @@ #!/bin/bash +RADAR_RAW_TOPIC_LIST="android_empatica_e4_acceleration, android_empatica_e4_battery_level, android_empatica_e4_blood_volume_pulse, android_empatica_e4_electrodermal_activity, android_empatica_e4_inter_beat_interval, android_empatica_e4_sensor_status, android_empatica_e4_temperature" +RADAR_AGG_TOPIC_LIST="android_empatica_e4_acceleration_output, android_empatica_e4_battery_level_output, android_empatica_e4_blood_volume_pulse_output, android_empatica_e4_electrodermal_activity_output, android_empatica_e4_heartrate_output, android_empatica_e4_inter_beat_interval_output, android_empatica_e4_sensor_status_output, android_empatica_e4_temperature_output" + command_exists() { command -v "$@" > /dev/null 2>&1 } @@ -43,14 +46,10 @@ sed -i '/mongo.username=/c\mongo.username='$username sink-mongo.properties sed -i 
'/mongo.password=/c\mongo.password='$password sink-mongo.properties
 sed -i '/mongo.database=/c\mongo.database='$database sink-mongo.properties
 # Set topics
-topic_list=$(cat .env | grep RADAR_TOPIC_LIST)
-topic_list="$(echo -e "${topic_list:17}")"
-sed -i '/topics=/c\topics='"$topic_list" sink-mongo.properties
+sed -i '/topics=/c\topics='"$$RADAR_AGG_TOPIC_LIST" sink-mongo.properties
 
 echo "==> Setting HDFS Connector"
-raw_topic=$(cat .env | grep RADAR_RAW_TOPIC_LIST)
-raw_topic="$(echo -e "${raw_topic:21}")"
-sed -i '/topics=/c\topics='"$raw_topic" sink-hdfs.properties
+sed -i '/topics=/c\topics='"$RADAR_RAW_TOPIC_LIST" sink-hdfs.properties
 
 echo "==> Starting RADAR-CNS Platform"
 sudo docker-compose up -d

From b8f1f1cdaade59eae16e3ada476afda8b2fb63c9 Mon Sep 17 00:00:00 2001
From: Francesco Nobilia
Date: Sun, 29 Jan 2017 21:34:12 +0000
Subject: [PATCH 063/197] Fixed typo error

---
 dcompose-stack/radar-cp-hadoop-stack/install-radar-stack.sh | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/dcompose-stack/radar-cp-hadoop-stack/install-radar-stack.sh b/dcompose-stack/radar-cp-hadoop-stack/install-radar-stack.sh
index 3ee6aa941..e6fbabd6c 100755
--- a/dcompose-stack/radar-cp-hadoop-stack/install-radar-stack.sh
+++ b/dcompose-stack/radar-cp-hadoop-stack/install-radar-stack.sh
@@ -46,7 +46,7 @@ sed -i '/mongo.username=/c\mongo.username='$username sink-mongo.properties
 sed -i '/mongo.password=/c\mongo.password='$password sink-mongo.properties
 sed -i '/mongo.database=/c\mongo.database='$database sink-mongo.properties
 # Set topics
-sed -i '/topics=/c\topics='"$$RADAR_AGG_TOPIC_LIST" sink-mongo.properties
+sed -i '/topics=/c\topics='"$RADAR_AGG_TOPIC_LIST" sink-mongo.properties
 
 echo "==> Setting HDFS Connector"
 sed -i '/topics=/c\topics='"$RADAR_RAW_TOPIC_LIST" sink-hdfs.properties

From 09ea00e7c2bba6329f857ef22c98dbf6ab540e53 Mon Sep 17 00:00:00 2001
From: Francesco Nobilia
Date: Mon, 30 Jan 2017 09:50:27 +0000
Subject: [PATCH 064/197] Moved HotStorage Dockerfile in image folder

---
 images/radar-hotstorage-mongodb/Dockerfile | 10 ++++
 images/radar-hotstorage-mongodb/README.md  | 36 +++++++++++++
 images/radar-hotstorage-mongodb/db_init.sh | 61 ++++++++++++++++++++++
 images/radar-hotstorage-mongodb/init.sh    | 17 ++++++
 4 files changed, 124 insertions(+)
 create mode 100644 images/radar-hotstorage-mongodb/Dockerfile
 create mode 100644 images/radar-hotstorage-mongodb/README.md
 create mode 100644 images/radar-hotstorage-mongodb/db_init.sh
 create mode 100644 images/radar-hotstorage-mongodb/init.sh

diff --git a/images/radar-hotstorage-mongodb/Dockerfile b/images/radar-hotstorage-mongodb/Dockerfile
new file mode 100644
index 000000000..0bf224a09
--- /dev/null
+++ b/images/radar-hotstorage-mongodb/Dockerfile
@@ -0,0 +1,10 @@
+FROM mongo:3.2.10
+
+COPY ["./init.sh", "./db_init.sh", "./"]
+
+RUN chmod +x init.sh && chmod +x db_init.sh
+
+EXPOSE 27017
+EXPOSE 28017
+
+CMD ["./init.sh"]
\ No newline at end of file
diff --git a/images/radar-hotstorage-mongodb/README.md b/images/radar-hotstorage-mongodb/README.md
new file mode 100644
index 000000000..fc0c75dab
--- /dev/null
+++ b/images/radar-hotstorage-mongodb/README.md
@@ -0,0 +1,36 @@
+# Dockerised RADAR-HotStorage
+
+Upon the first start, this dockerised version of MongoDB 3.2.10 creates a db named `RADAR_DB` owned by user `RADAR_USER` with password `RADAR_PWD`.
+ +Create the docker image: +``` +$ docker build -t radarcns/radar-mongo ./ +``` + +Or pull from dockerhub: +``` +$ docker pull radarcns/radar-mongo:latest +``` + +Run the docker image locally: +``` +$ docker run -d -p 27017:27017 -p 28017:28017 --name radar-hotstorage radarcns/radar-mongo:latest -e RADAR_USER="restapi" -e RADAR_PWD="radar" -e RADAR_DB="hotstorage" +``` + +To test MongoDB, access the [Mongo Dashboard](http://localhost:28017) + +## Runtime environment variables + +Environment variables used by the RestApi + +```bash +# authentication flag for MongoDB +AUTH yes + +# mongoDb user and password +RADAR_USER restapi +RADAR_PWD radar + +# mongoDb database +RADAR_DB hotstorage +``` \ No newline at end of file diff --git a/images/radar-hotstorage-mongodb/db_init.sh b/images/radar-hotstorage-mongodb/db_init.sh new file mode 100644 index 000000000..7ff6690ed --- /dev/null +++ b/images/radar-hotstorage-mongodb/db_init.sh @@ -0,0 +1,61 @@ +#!/bin/bash + +RET=1 +while [[ RET -ne 0 ]]; do + echo "=> Waiting for confirmation of MongoDB service startup" + sleep 5 + mongo admin --eval "help" >/dev/null 2>&1 + RET=$? +done + +if [ -f /data/db/.radar_hotstorage_set ]; then + echo "**********************************************" + echo "** RADAR-CNS Hotstorage is up and running **" + echo "**********************************************" + exit 0 +fi + +if [ -z "$RADAR_USER" ]; then + echo "$RADAR_USER is not defined" + exit 126 +fi + +if [ -z "$RADAR_PWD" ]; then + echo "$RADAR_PWD is not defined" + exit 126 +fi + +if [ -z "$RADAR_DB" ]; then + echo "$RADAR_DB is not defined" + exit 126 +fi + +echo "=> MongoDB is ready" + +echo "=> Creating DB and User for RADAR-CNS Hot Storage" + +mongo admin --eval 'db.createUser( { user: "'${RADAR_USER}'", pwd: "'${RADAR_PWD}'", roles: [ { role: "root", db: "admin" } ] } )' +mongo admin -u $RADAR_USER -p $RADAR_PWD < Starting MongoDB" + +$cmd & + +/db_init.sh + +fg \ No newline at end of file From fef04a7c368019448ec8a8d61f1fa4d72baacf15 Mon Sep 17 00:00:00 2001 From: Francesco Nobilia Date: Tue, 31 Jan 2017 14:33:21 +0000 Subject: [PATCH 065/197] Added volumes for MongoDb --- dcompose-stack/radar-cp-hadoop-stack/.env | 1 + dcompose-stack/radar-cp-hadoop-stack/docker-compose.yml | 3 +++ 2 files changed, 4 insertions(+) diff --git a/dcompose-stack/radar-cp-hadoop-stack/.env b/dcompose-stack/radar-cp-hadoop-stack/.env index 5069036b7..cc839ab64 100644 --- a/dcompose-stack/radar-cp-hadoop-stack/.env +++ b/dcompose-stack/radar-cp-hadoop-stack/.env @@ -2,6 +2,7 @@ HDFS_DATA_DIR_1=/usr/local/var/lib/docker/hdfs-data-1 HDFS_DATA_DIR_2=/usr/local/var/lib/docker/hdfs-data-2 HDFS_NAME_DIR_1=/usr/local/var/lib/docker/hdfs-name-1 HDFS_NAME_DIR_2=/usr/local/var/lib/docker/hdfs-name-2 +MONGODB_DIR=/usr/local/var/lib/docker/mongodb RADAR_TOPIC_LIST=android_empatica_e4_acceleration, android_empatica_e4_acceleration_output, android_empatica_e4_battery_level, android_empatica_e4_battery_level_output, android_empatica_e4_blood_volume_pulse, android_empatica_e4_blood_volume_pulse_output, android_empatica_e4_electrodermal_activity, android_empatica_e4_electrodermal_activity_output, android_empatica_e4_heartrate_output, android_empatica_e4_inter_beat_interval, android_empatica_e4_inter_beat_interval_output, android_empatica_e4_sensor_status, android_empatica_e4_sensor_status_output, android_empatica_e4_temperature, android_empatica_e4_temperature_output HOTSTORAGE_USERNAME= HOTSTORAGE_PASSWORD=XXXXXXXX diff --git a/dcompose-stack/radar-cp-hadoop-stack/docker-compose.yml 
b/dcompose-stack/radar-cp-hadoop-stack/docker-compose.yml index 5bf995d44..833709fc5 100644 --- a/dcompose-stack/radar-cp-hadoop-stack/docker-compose.yml +++ b/dcompose-stack/radar-cp-hadoop-stack/docker-compose.yml @@ -143,6 +143,9 @@ services: image: radarcns/radar-hotstorage:0.1 networks: - api + volumes: + - "${MONGODB_DIR}/db:/data/db" + - "${MONGODB_DIR}/configdb:/data/configdb" ports: - "27017:27017" - "28017:28017" From 22ef52abf2e1babfa1cbb262152cd88b4dfd9ad0 Mon Sep 17 00:00:00 2001 From: Francesco Nobilia Date: Tue, 31 Jan 2017 14:38:12 +0000 Subject: [PATCH 066/197] Fixed exit code --- .../radar-cp-hadoop-stack/kafka-radarinit/topic_init.sh | 8 ++++---- images/radar-backend-kafka/kafka_status.sh | 4 ++-- images/radar-hdfs-connector/kafka_status.sh | 6 +++--- images/radar-hotstorage-mongodb/db_init.sh | 6 +++--- images/radar-mongodb-connector/kafka_status.sh | 6 +++--- 5 files changed, 15 insertions(+), 15 deletions(-) diff --git a/dcompose-stack/radar-cp-hadoop-stack/kafka-radarinit/topic_init.sh b/dcompose-stack/radar-cp-hadoop-stack/kafka-radarinit/topic_init.sh index 6398c7944..82e606e0b 100755 --- a/dcompose-stack/radar-cp-hadoop-stack/kafka-radarinit/topic_init.sh +++ b/dcompose-stack/radar-cp-hadoop-stack/kafka-radarinit/topic_init.sh @@ -30,22 +30,22 @@ done # Check if variables exist if [ -z "$RADAR_TOPICS" ]; then echo "RADAR_TOPICS is not defined" - exit 126 + exit 2 fi if [ -z "$KAFKA_ZOOKEEPER_CONNECT" ]; then echo "KAFKA_ZOOKEEPER_CONNECT is not defined" - exit 126 + exit 2 fi if [ -z "$RADAR_PARTITIONS" ]; then echo "RADAR_PARTITIONS is not defined" - exit 126 + exit 2 fi if [ -z "$RADAR_REPLICATION_FACTOR" ]; then echo "RADAR_REPLICATION_FACTOR is not defined" - exit 126 + exit 2 fi # Create topics diff --git a/images/radar-backend-kafka/kafka_status.sh b/images/radar-backend-kafka/kafka_status.sh index dbed2e33f..6f24ffc5f 100755 --- a/images/radar-backend-kafka/kafka_status.sh +++ b/images/radar-backend-kafka/kafka_status.sh @@ -3,12 +3,12 @@ # Check if variables exist if [ -z "$KAFKA_REST_PROXY" ]; then echo "KAFKA_REST_PROXY is not defined" - exit 126 + exit 2 fi if [ -z "$TOPIC_LIST" ]; then echo "TOPIC_LIST is not defined" - exit 126 + exit 2 fi # Fetch env topic list diff --git a/images/radar-hdfs-connector/kafka_status.sh b/images/radar-hdfs-connector/kafka_status.sh index baeb07bb4..132d0a06a 100755 --- a/images/radar-hdfs-connector/kafka_status.sh +++ b/images/radar-hdfs-connector/kafka_status.sh @@ -3,12 +3,12 @@ # Check if variables exist if [ -z "$KAFKA_REST_PROXY" ]; then echo "KAFKA_REST_PROXY is not defined" - exit 126 + exit 2 fi if [ -z "$TOPIC_LIST" ]; then echo "TOPIC_LIST is not defined" - exit 126 + exit 2 fi # Fetch env topic list @@ -23,7 +23,7 @@ while [ "$count" != "${#needed[@]}" ] ; do if [ "$max_retryes" -eq "0" ] ; then echo "Error connecting to Rest-Proxy ... " echo "Rebooting ... " - exit 126 + exit 2 fi echo "Waiting $interval second before retrying ..." 
diff --git a/images/radar-hotstorage-mongodb/db_init.sh b/images/radar-hotstorage-mongodb/db_init.sh index 7ff6690ed..19f8109f8 100644 --- a/images/radar-hotstorage-mongodb/db_init.sh +++ b/images/radar-hotstorage-mongodb/db_init.sh @@ -17,17 +17,17 @@ fi if [ -z "$RADAR_USER" ]; then echo "$RADAR_USER is not defined" - exit 126 + exit 2 fi if [ -z "$RADAR_PWD" ]; then echo "$RADAR_PWD is not defined" - exit 126 + exit 2 fi if [ -z "$RADAR_DB" ]; then echo "$RADAR_DB is not defined" - exit 126 + exit 2 fi echo "=> MongoDB is ready" diff --git a/images/radar-mongodb-connector/kafka_status.sh b/images/radar-mongodb-connector/kafka_status.sh index baeb07bb4..132d0a06a 100755 --- a/images/radar-mongodb-connector/kafka_status.sh +++ b/images/radar-mongodb-connector/kafka_status.sh @@ -3,12 +3,12 @@ # Check if variables exist if [ -z "$KAFKA_REST_PROXY" ]; then echo "KAFKA_REST_PROXY is not defined" - exit 126 + exit 2 fi if [ -z "$TOPIC_LIST" ]; then echo "TOPIC_LIST is not defined" - exit 126 + exit 2 fi # Fetch env topic list @@ -23,7 +23,7 @@ while [ "$count" != "${#needed[@]}" ] ; do if [ "$max_retryes" -eq "0" ] ; then echo "Error connecting to Rest-Proxy ... " echo "Rebooting ... " - exit 126 + exit 2 fi echo "Waiting $interval second before retrying ..." From a7da9967ef779c60a3f8d741228ad6d2c9e074ae Mon Sep 17 00:00:00 2001 From: Francesco Nobilia Date: Tue, 31 Jan 2017 15:00:35 +0000 Subject: [PATCH 067/197] Fixed radar.yml example --- README.md | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/README.md b/README.md index ff8a1c497..e246f4dee 100644 --- a/README.md +++ b/README.md @@ -82,7 +82,9 @@ To run RADAR-CNS stack in a single node setup: ```yaml battery_monitor: level: CRITICAL - email_address: notify-me@example.com + email_address: + - notify-1@example.com + - notify-2@example.com email_host: smtp email_port: 25 email_user: user@example.com @@ -92,7 +94,9 @@ To run RADAR-CNS stack in a single node setup: disconnect_monitor: # timeout in milliseconds -> 5 minutes timeout: 300000 - email_address: notify-me@example.com + email_address: + - notify-1@example.com + - notify-2@example.com email_host: smtp email_port: 25 email_user: user@example.com From 09ea00e7c2bba6329f857ef22c98dbf6ab540e53 Mon Sep 17 00:00:00 2001 From: nivethika Date: Wed, 1 Feb 2017 11:01:18 +0100 Subject: [PATCH 068/197] hdfs-connector image to allow multiple connector-group properties to run in standalone mode --- images/radar-hdfs-connector/kafka_status.sh | 4 ++++ images/radar-hdfs-connector/launch | 4 +++- 2 files changed, 7 insertions(+), 1 deletion(-) diff --git a/images/radar-hdfs-connector/kafka_status.sh b/images/radar-hdfs-connector/kafka_status.sh index 132d0a06a..a3264ea49 100755 --- a/images/radar-hdfs-connector/kafka_status.sh +++ b/images/radar-hdfs-connector/kafka_status.sh @@ -11,6 +11,10 @@ if [ -z "$TOPIC_LIST" ]; then exit 2 fi +if [ -z "$CONNECTOR_PROPERTY_FILE_PREFIX" ]; then + echo "CONNECTOR_PROPERTY_FILE_PREFIX is not defined" + exit 2 +fi # Fetch env topic list IFS=', ' read -r -a needed <<< $TOPIC_LIST diff --git a/images/radar-hdfs-connector/launch b/images/radar-hdfs-connector/launch index 5b584b6f2..d3333db15 100755 --- a/images/radar-hdfs-connector/launch +++ b/images/radar-hdfs-connector/launch @@ -48,4 +48,6 @@ echo "===> Launching ${COMPONENT} ... new" # because this causes the plugin scanner to scan the entire disk. 
export CLASSPATH="/etc/${COMPONENT}/jars/*" echo $CLASSPATH -exec connect-standalone /etc/"${COMPONENT}"/"${COMPONENT}".properties /etc/"${COMPONENT}"/sink.properties + +# execute connector in standalone mode +exec connect-standalone /etc/"${COMPONENT}"/"${COMPONENT}".properties $(find /etc/"${COMPONENT}"/ -type f -name "${CONNECTOR_PROPERTY_FILE_PREFIX}*.properties") From 9a16abbaec1a38dd635785ea6170b2a434ea9b18 Mon Sep 17 00:00:00 2001 From: nivethika Date: Wed, 1 Feb 2017 11:07:03 +0100 Subject: [PATCH 069/197] hdfs-connector with property-prefix --- dcompose-stack/radar-cp-hadoop-stack/docker-compose.yml | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/dcompose-stack/radar-cp-hadoop-stack/docker-compose.yml b/dcompose-stack/radar-cp-hadoop-stack/docker-compose.yml index 833709fc5..13bf92d07 100644 --- a/dcompose-stack/radar-cp-hadoop-stack/docker-compose.yml +++ b/dcompose-stack/radar-cp-hadoop-stack/docker-compose.yml @@ -278,7 +278,7 @@ services: image: radarcns/radar-hdfs-connector-auto:0.1 restart: on-failure volumes: - - ./sink-hdfs.properties:/etc/kafka-connect/sink.properties + - ./sink-hdfs.properties:/etc/kafka-connect/sink-hdfs.properties networks: - zookeeper - kafka @@ -313,6 +313,7 @@ services: CONNECT_ZOOKEEPER_CONNECT: zookeeper-1:2181 KAFKA_REST_PROXY: http://rest-proxy-1:8082 TOPIC_LIST: ${RADAR_TOPIC_LIST} + CONNECTOR_PROPERTY_FILE_PREFIX: "sink-hdfs" #---------------------------------------------------------------------------# # RADAR backend streams # From a7439b120c1a94a59040ce6d89520d02dba021c1 Mon Sep 17 00:00:00 2001 From: nivethika Date: Wed, 1 Feb 2017 12:37:38 +0100 Subject: [PATCH 070/197] multiple-connector properties --- .../radar-cp-hadoop-stack/sink-hdfs-high.properties | 8 ++++++++ .../radar-cp-hadoop-stack/sink-hdfs-low.properties | 8 ++++++++ .../radar-cp-hadoop-stack/sink-hdfs-med.properties | 8 ++++++++ 3 files changed, 24 insertions(+) create mode 100644 dcompose-stack/radar-cp-hadoop-stack/sink-hdfs-high.properties create mode 100644 dcompose-stack/radar-cp-hadoop-stack/sink-hdfs-low.properties create mode 100644 dcompose-stack/radar-cp-hadoop-stack/sink-hdfs-med.properties diff --git a/dcompose-stack/radar-cp-hadoop-stack/sink-hdfs-high.properties b/dcompose-stack/radar-cp-hadoop-stack/sink-hdfs-high.properties new file mode 100644 index 000000000..707ff0736 --- /dev/null +++ b/dcompose-stack/radar-cp-hadoop-stack/sink-hdfs-high.properties @@ -0,0 +1,8 @@ +name=radar-hdfs-sink-empatica-high-120000 +connector.class=io.confluent.connect.hdfs.HdfsSinkConnector +tasks.max=4 +topics=android_empatica_e4_blood_volume_pulse,android_empatica_e4_acceleration +flush.size=120000 +hdfs.url=hdfs://hdfs-namenode:8020 +format.class=org.radarcns.sink.hdfs.AvroFormatRadar +topics.dir=topicAndroidNew \ No newline at end of file diff --git a/dcompose-stack/radar-cp-hadoop-stack/sink-hdfs-low.properties b/dcompose-stack/radar-cp-hadoop-stack/sink-hdfs-low.properties new file mode 100644 index 000000000..ec81d423b --- /dev/null +++ b/dcompose-stack/radar-cp-hadoop-stack/sink-hdfs-low.properties @@ -0,0 +1,8 @@ +name=radar-hdfs-sink-empatica-low-3000 +connector.class=io.confluent.connect.hdfs.HdfsSinkConnector +tasks.max=4 +topics=android_empatica_e4_battery_level,android_empatica_e4_inter_beat_interval,android_empatica_e4_sensor_status +flush.size=3000 +hdfs.url=hdfs://hdfs-namenode:8020 +format.class=org.radarcns.sink.hdfs.AvroFormatRadar +topics.dir=topicAndroidNew \ No newline at end of file diff --git 
a/dcompose-stack/radar-cp-hadoop-stack/sink-hdfs-med.properties b/dcompose-stack/radar-cp-hadoop-stack/sink-hdfs-med.properties new file mode 100644 index 000000000..0799fbfda --- /dev/null +++ b/dcompose-stack/radar-cp-hadoop-stack/sink-hdfs-med.properties @@ -0,0 +1,8 @@ +name=radar-hdfs-sink-android-med-12000 +connector.class=io.confluent.connect.hdfs.HdfsSinkConnector +tasks.max=4 +topics=android_empatica_e4_electrodermal_activity,android_empatica_e4_temperature +flush.size=12000 +hdfs.url=hdfs://hdfs-namenode:8020 +format.class=org.radarcns.sink.hdfs.AvroFormatRadar +topics.dir=topicAndroidEmpatica \ No newline at end of file From 12f9846921d9a61773575d0868c953c358fbf64c Mon Sep 17 00:00:00 2001 From: Francesco Nobilia Date: Fri, 3 Feb 2017 10:49:36 +0000 Subject: [PATCH 071/197] Fixed install script --- dcompose-stack/radar-cp-hadoop-stack/sink-mongo.properties | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/dcompose-stack/radar-cp-hadoop-stack/sink-mongo.properties b/dcompose-stack/radar-cp-hadoop-stack/sink-mongo.properties index 05aee18d8..428a101ff 100644 --- a/dcompose-stack/radar-cp-hadoop-stack/sink-mongo.properties +++ b/dcompose-stack/radar-cp-hadoop-stack/sink-mongo.properties @@ -6,7 +6,7 @@ connector.class=org.radarcns.mongodb.MongoDbSinkConnector tasks.max=1 # Topics that will be consumed -topics=android_empatica_e4_battery_level,android_empatica_e4_battery_level_output +topics= # MongoDB server mongo.host=hotstorage From e61b58a9a0b2fbfed3ee1bf1e0bd9bb67c82d400 Mon Sep 17 00:00:00 2001 From: Francesco Nobilia Date: Mon, 6 Feb 2017 08:29:21 +0000 Subject: [PATCH 072/197] Tuning MongoDB connector --- dcompose-stack/radar-cp-hadoop-stack/docker-compose.yml | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/dcompose-stack/radar-cp-hadoop-stack/docker-compose.yml b/dcompose-stack/radar-cp-hadoop-stack/docker-compose.yml index 13bf92d07..371277fc2 100644 --- a/dcompose-stack/radar-cp-hadoop-stack/docker-compose.yml +++ b/dcompose-stack/radar-cp-hadoop-stack/docker-compose.yml @@ -268,6 +268,10 @@ services: CONNECT_OFFSET_STORAGE_FILE_FILENAME: "/tmp/connect2.offset" CONNECT_REST_ADVERTISED_HOST_NAME: "radar-mongodb-connector" CONNECT_ZOOKEEPER_CONNECT: zookeeper-1:2181 + CONNECT_CONSUMER_MAX_POLL_RECORDS: 500 + CONNECT_CONSUMER_MAX_POLL_INTERVAL_MS: 300000 + CONNECT_CONSUMER_SESSION_TIMEOUT_MS: 10000 + CONNECT_CONSUMER_HEARTBEAT_INTERVAL_MS: 3000 KAFKA_REST_PROXY: http://rest-proxy-1:8082 TOPIC_LIST: ${RADAR_TOPIC_LIST} From f07c9df31037ed8213781c0ec224112d7f6f9373 Mon Sep 17 00:00:00 2001 From: nivethika Date: Mon, 6 Feb 2017 14:42:37 +0100 Subject: [PATCH 073/197] fixing install-script space delimiter and mongo-connector property setting --- dcompose-stack/radar-cp-hadoop-stack/.env | 2 +- .../radar-cp-hadoop-stack/install-radar-stack.sh | 8 ++++---- .../radar-cp-hadoop-stack/kafka-radarinit/topic_init.sh | 2 +- 3 files changed, 6 insertions(+), 6 deletions(-) diff --git a/dcompose-stack/radar-cp-hadoop-stack/.env b/dcompose-stack/radar-cp-hadoop-stack/.env index cc839ab64..418de4da5 100644 --- a/dcompose-stack/radar-cp-hadoop-stack/.env +++ b/dcompose-stack/radar-cp-hadoop-stack/.env @@ -3,7 +3,7 @@ HDFS_DATA_DIR_2=/usr/local/var/lib/docker/hdfs-data-2 HDFS_NAME_DIR_1=/usr/local/var/lib/docker/hdfs-name-1 HDFS_NAME_DIR_2=/usr/local/var/lib/docker/hdfs-name-2 MONGODB_DIR=/usr/local/var/lib/docker/mongodb -RADAR_TOPIC_LIST=android_empatica_e4_acceleration, android_empatica_e4_acceleration_output, android_empatica_e4_battery_level, 
android_empatica_e4_battery_level_output, android_empatica_e4_blood_volume_pulse, android_empatica_e4_blood_volume_pulse_output, android_empatica_e4_electrodermal_activity, android_empatica_e4_electrodermal_activity_output, android_empatica_e4_heartrate_output, android_empatica_e4_inter_beat_interval, android_empatica_e4_inter_beat_interval_output, android_empatica_e4_sensor_status, android_empatica_e4_sensor_status_output, android_empatica_e4_temperature, android_empatica_e4_temperature_output +RADAR_TOPIC_LIST=android_empatica_e4_acceleration,android_empatica_e4_acceleration_output,android_empatica_e4_battery_level,android_empatica_e4_battery_level_output,android_empatica_e4_blood_volume_pulse,android_empatica_e4_blood_volume_pulse_output,android_empatica_e4_electrodermal_activity,android_empatica_e4_electrodermal_activity_output,android_empatica_e4_heartrate_output,android_empatica_e4_inter_beat_interval,android_empatica_e4_inter_beat_interval_output,android_empatica_e4_sensor_status,android_empatica_e4_sensor_status_output,android_empatica_e4_temperature,android_empatica_e4_temperature_output HOTSTORAGE_USERNAME= HOTSTORAGE_PASSWORD=XXXXXXXX HOTSTORAGE_NAME= diff --git a/dcompose-stack/radar-cp-hadoop-stack/install-radar-stack.sh b/dcompose-stack/radar-cp-hadoop-stack/install-radar-stack.sh index e6fbabd6c..8b714120d 100755 --- a/dcompose-stack/radar-cp-hadoop-stack/install-radar-stack.sh +++ b/dcompose-stack/radar-cp-hadoop-stack/install-radar-stack.sh @@ -1,6 +1,6 @@ #!/bin/bash -RADAR_RAW_TOPIC_LIST="android_empatica_e4_acceleration, android_empatica_e4_battery_level, android_empatica_e4_blood_volume_pulse, android_empatica_e4_electrodermal_activity, android_empatica_e4_inter_beat_interval, android_empatica_e4_sensor_status, android_empatica_e4_temperature" +RADAR_RAW_TOPIC_LIST="android_empatica_e4_acceleration,android_empatica_e4_battery_level,android_empatica_e4_blood_volume_pulse,android_empatica_e4_electrodermal_activity,android_empatica_e4_inter_beat_interval,android_empatica_e4_sensor_status,android_empatica_e4_temperature" RADAR_AGG_TOPIC_LIST="android_empatica_e4_acceleration_output, android_empatica_e4_battery_level_output, android_empatica_e4_blood_volume_pulse_output, android_empatica_e4_electrodermal_activity_output, android_empatica_e4_heartrate_output, android_empatica_e4_inter_beat_interval_output, android_empatica_e4_sensor_status_output, android_empatica_e4_temperature_output" command_exists() { @@ -35,9 +35,9 @@ sudo docker network create hadoop echo "==> Setting MongoDB Connector" # Extract credentials from .env file -username=$(cat .env | grep HOTSTORAGE_USERNAME=radar) -password=$(cat .env | grep HOTSTORAGE_PASSWORD=radar) -database=$(cat .env | grep HOTSTORAGE_NAME=hotstorage) +username=$(cat .env | grep HOTSTORAGE_USERNAME) +password=$(cat .env | grep HOTSTORAGE_PASSWORD) +database=$(cat .env | grep HOTSTORAGE_NAME) username="$(echo -e "${username:20}" | tr -d '[:space:]' )" password="$(echo -e "${password:20}" | tr -d '[:space:]' )" database="$(echo -e "${database:16}" | tr -d '[:space:]' )" diff --git a/dcompose-stack/radar-cp-hadoop-stack/kafka-radarinit/topic_init.sh b/dcompose-stack/radar-cp-hadoop-stack/kafka-radarinit/topic_init.sh index 82e606e0b..83a813653 100755 --- a/dcompose-stack/radar-cp-hadoop-stack/kafka-radarinit/topic_init.sh +++ b/dcompose-stack/radar-cp-hadoop-stack/kafka-radarinit/topic_init.sh @@ -50,7 +50,7 @@ fi # Create topics echo "Creating RADAR-CNS topics..." 
-IFS=', ' read -r -a array <<< "$RADAR_TOPICS" +IFS=',' read -r -a array <<< "$RADAR_TOPICS" for element in "${array[@]}" do From 54bfef2d078c651ba55ca0cb3b98aa3df8e9a278 Mon Sep 17 00:00:00 2001 From: Francesco Nobilia Date: Tue, 7 Feb 2017 16:48:20 +0000 Subject: [PATCH 074/197] Integrated status topics --- dcompose-stack/radar-cp-hadoop-stack/.env | 2 +- dcompose-stack/radar-cp-hadoop-stack/install-radar-stack.sh | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/dcompose-stack/radar-cp-hadoop-stack/.env b/dcompose-stack/radar-cp-hadoop-stack/.env index 418de4da5..1b3f6bc0a 100644 --- a/dcompose-stack/radar-cp-hadoop-stack/.env +++ b/dcompose-stack/radar-cp-hadoop-stack/.env @@ -3,7 +3,7 @@ HDFS_DATA_DIR_2=/usr/local/var/lib/docker/hdfs-data-2 HDFS_NAME_DIR_1=/usr/local/var/lib/docker/hdfs-name-1 HDFS_NAME_DIR_2=/usr/local/var/lib/docker/hdfs-name-2 MONGODB_DIR=/usr/local/var/lib/docker/mongodb -RADAR_TOPIC_LIST=android_empatica_e4_acceleration,android_empatica_e4_acceleration_output,android_empatica_e4_battery_level,android_empatica_e4_battery_level_output,android_empatica_e4_blood_volume_pulse,android_empatica_e4_blood_volume_pulse_output,android_empatica_e4_electrodermal_activity,android_empatica_e4_electrodermal_activity_output,android_empatica_e4_heartrate_output,android_empatica_e4_inter_beat_interval,android_empatica_e4_inter_beat_interval_output,android_empatica_e4_sensor_status,android_empatica_e4_sensor_status_output,android_empatica_e4_temperature,android_empatica_e4_temperature_output +RADAR_TOPIC_LIST=android_empatica_e4_acceleration,android_empatica_e4_acceleration_output,android_empatica_e4_battery_level,android_empatica_e4_battery_level_output,android_empatica_e4_blood_volume_pulse,android_empatica_e4_blood_volume_pulse_output,android_empatica_e4_electrodermal_activity,android_empatica_e4_electrodermal_activity_output,android_empatica_e4_heartrate_output,android_empatica_e4_inter_beat_interval,android_empatica_e4_inter_beat_interval_output,android_empatica_e4_sensor_status,android_empatica_e4_sensor_status_output,android_empatica_e4_temperature,android_empatica_e4_temperature_output,application_status_server,application_status_record_counts,application_status_uptime HOTSTORAGE_USERNAME= HOTSTORAGE_PASSWORD=XXXXXXXX HOTSTORAGE_NAME= diff --git a/dcompose-stack/radar-cp-hadoop-stack/install-radar-stack.sh b/dcompose-stack/radar-cp-hadoop-stack/install-radar-stack.sh index 8b714120d..2eb037393 100755 --- a/dcompose-stack/radar-cp-hadoop-stack/install-radar-stack.sh +++ b/dcompose-stack/radar-cp-hadoop-stack/install-radar-stack.sh @@ -1,7 +1,7 @@ #!/bin/bash RADAR_RAW_TOPIC_LIST="android_empatica_e4_acceleration,android_empatica_e4_battery_level,android_empatica_e4_blood_volume_pulse,android_empatica_e4_electrodermal_activity,android_empatica_e4_inter_beat_interval,android_empatica_e4_sensor_status,android_empatica_e4_temperature" -RADAR_AGG_TOPIC_LIST="android_empatica_e4_acceleration_output, android_empatica_e4_battery_level_output, android_empatica_e4_blood_volume_pulse_output, android_empatica_e4_electrodermal_activity_output, android_empatica_e4_heartrate_output, android_empatica_e4_inter_beat_interval_output, android_empatica_e4_sensor_status_output, android_empatica_e4_temperature_output" +RADAR_AGG_TOPIC_LIST="android_empatica_e4_acceleration_output, android_empatica_e4_battery_level_output, android_empatica_e4_blood_volume_pulse_output, android_empatica_e4_electrodermal_activity_output, android_empatica_e4_heartrate_output, 
android_empatica_e4_inter_beat_interval_output, android_empatica_e4_sensor_status_output, android_empatica_e4_temperature_output, application_status_server, application_status_record_counts, application_status_uptime" command_exists() { command -v "$@" > /dev/null 2>&1 From ffe361f8af39c4781083fcec698201b1e25ab6fd Mon Sep 17 00:00:00 2001 From: Nivethika Mahasivam Date: Mon, 13 Feb 2017 10:38:03 +0100 Subject: [PATCH 075/197] Update cadvisor and portainer documentation --- README.md | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/README.md b/README.md index e246f4dee..fbe8d970f 100644 --- a/README.md +++ b/README.md @@ -143,6 +143,13 @@ To start RADAR-CNS stack on a single node setup after installing, run cd RADAR-Docker/dcompose-stack/radar-hadoop-cp-stack/ sudo ./start-radar-stack.sh ``` +#### cAdvisor +cAdvisor (Container Advisor) provides container users an understanding of the resource usage and performance characteristics of their running containers. + +To view current resource performance,if running locally, try [http://localhost:8181](http://localhost:8181). This will bring up the built-in Web UI. Clicking on `/docker` in `Subcontainers` takes you to a new window with all of the Docker containers listed individually. + +#### Portainer +Portainer provides simple interactive UI-based docker management. If running locally, try [http://localhost:8182](http://localhost:8182) for portainer's UI. To set-up portainer follow this [link](https://www.ostechnix.com/portainer-an-easiest-way-to-manage-docker/). ## Work in progress From 00613599bf4723a1c3df8e8d4981dc38637bae1a Mon Sep 17 00:00:00 2001 From: Francesco Nobilia Date: Mon, 13 Feb 2017 14:50:55 +0000 Subject: [PATCH 076/197] Tuned disconnection monitor timeout --- dcompose-stack/radar-cp-hadoop-stack/radar.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/dcompose-stack/radar-cp-hadoop-stack/radar.yml b/dcompose-stack/radar-cp-hadoop-stack/radar.yml index 91888b681..e072c8975 100644 --- a/dcompose-stack/radar-cp-hadoop-stack/radar.yml +++ b/dcompose-stack/radar-cp-hadoop-stack/radar.yml @@ -48,7 +48,7 @@ battery_monitor: topics: - android_empatica_e4_battery_level -#========================= Battery level monitor=========================# +#======================= Disconnection monitor==========================# disconnect_monitor: email_address: - notifier@email @@ -58,4 +58,4 @@ disconnect_monitor: email_user: user@example.com topics: - android_empatica_e4_battery_level - timeout: 60000 \ No newline at end of file + timeout: 300000 From e356d7ea626bd00a8f6e6e6e73e940c9b388bd81 Mon Sep 17 00:00:00 2001 From: Joris Borgdorff Date: Tue, 14 Feb 2017 12:12:03 +0100 Subject: [PATCH 077/197] Switched to Confluent version 3.1.2 --- .../radar-cp-hadoop-stack/docker-compose.yml | 14 +++++++------- .../kafka-radarinit/Dockerfile | 4 ++-- .../radar-cp-stack/docker-compose.yml | 18 +++++++++--------- images/radar-backend-kafka/Dockerfile | 2 +- images/radar-backend-kafka/README.md | 4 ++-- images/radar-hdfs-connector/Dockerfile | 2 +- images/radar-hdfs-connector/README.md | 4 ++-- images/radar-mongodb-connector/Dockerfile | 2 +- images/radar-mongodb-connector/README.md | 4 ++-- 9 files changed, 27 insertions(+), 27 deletions(-) diff --git a/dcompose-stack/radar-cp-hadoop-stack/docker-compose.yml b/dcompose-stack/radar-cp-hadoop-stack/docker-compose.yml index 371277fc2..faabf934a 100644 --- a/dcompose-stack/radar-cp-hadoop-stack/docker-compose.yml +++ b/dcompose-stack/radar-cp-hadoop-stack/docker-compose.yml @@ 
-19,7 +19,7 @@ services: # Zookeeper Cluster # #---------------------------------------------------------------------------# zookeeper-1: - image: confluentinc/cp-zookeeper:3.1.1 + image: confluentinc/cp-zookeeper:3.1.2-1 networks: - zookeeper environment: @@ -34,7 +34,7 @@ services: # Kafka Cluster # #---------------------------------------------------------------------------# kafka-1: - image: confluentinc/cp-kafka:3.1.1 + image: confluentinc/cp-kafka:3.1.2-1 networks: - kafka - zookeeper @@ -47,7 +47,7 @@ services: KAFKA_AUTO_CREATE_TOPICS_ENABLE: "false" kafka-2: - image: confluentinc/cp-kafka:3.1.1 + image: confluentinc/cp-kafka:3.1.2-1 networks: - kafka - zookeeper @@ -60,7 +60,7 @@ services: KAFKA_AUTO_CREATE_TOPICS_ENABLE: "false" kafka-3: - image: confluentinc/cp-kafka:3.1.1 + image: confluentinc/cp-kafka:3.1.2-1 networks: - kafka - zookeeper @@ -76,7 +76,7 @@ services: # Schema Registry # #---------------------------------------------------------------------------# schema-registry-1: - image: confluentinc/cp-schema-registry:3.1.1 + image: confluentinc/cp-schema-registry:3.1.2-1 networks: - kafka - zookeeper @@ -96,7 +96,7 @@ services: # REST proxy # #---------------------------------------------------------------------------# rest-proxy-1: - image: confluentinc/cp-kafka-rest:3.1.1 + image: confluentinc/cp-kafka-rest:3.1.2-1 networks: - kafka - zookeeper @@ -118,7 +118,7 @@ services: #---------------------------------------------------------------------------# kafka-init: build: kafka-radarinit - image: radarcns/kafka-init:3.1.1 + image: radarcns/kafka-init:3.1.2-1 networks: - kafka - zookeeper diff --git a/dcompose-stack/radar-cp-hadoop-stack/kafka-radarinit/Dockerfile b/dcompose-stack/radar-cp-hadoop-stack/kafka-radarinit/Dockerfile index b0f34b88f..9ce900c4b 100644 --- a/dcompose-stack/radar-cp-hadoop-stack/kafka-radarinit/Dockerfile +++ b/dcompose-stack/radar-cp-hadoop-stack/kafka-radarinit/Dockerfile @@ -1,7 +1,7 @@ -FROM confluentinc/cp-kafka:3.1.1 +FROM confluentinc/cp-kafka:3.1.2-1 # Copy bash file COPY ./topic_init.sh /home/ RUN chmod +x /home/topic_init.sh -CMD ["./home/topic_init.sh", "shutdown -h now"] \ No newline at end of file +CMD ["./home/topic_init.sh", "shutdown -h now"] diff --git a/dcompose-stack/radar-cp-stack/docker-compose.yml b/dcompose-stack/radar-cp-stack/docker-compose.yml index 94a1b5def..3deb640c2 100644 --- a/dcompose-stack/radar-cp-stack/docker-compose.yml +++ b/dcompose-stack/radar-cp-stack/docker-compose.yml @@ -15,7 +15,7 @@ services: # Zookeeper Cluster # #---------------------------------------------------------------------------# zookeeper-1: - image: confluentinc/cp-zookeeper:3.1.1 + image: confluentinc/cp-zookeeper:3.1.2-1 networks: - zookeeper environment: @@ -27,7 +27,7 @@ services: ZOOKEEPER_SERVERS: zookeeper-1:2888:3888;zookeeper-2:2888:3888;zookeeper-3:2888:3888 zookeeper-2: - image: confluentinc/cp-zookeeper:3.1.1 + image: confluentinc/cp-zookeeper:3.1.2-1 networks: - zookeeper environment: @@ -39,7 +39,7 @@ services: ZOOKEEPER_SERVERS: zookeeper-1:2888:3888;zookeeper-2:2888:3888;zookeeper-3:2888:3888 zookeeper-3: - image: confluentinc/cp-zookeeper:3.1.1 + image: confluentinc/cp-zookeeper:3.1.2-1 networks: - zookeeper environment: @@ -54,7 +54,7 @@ services: # Kafka Cluster # #---------------------------------------------------------------------------# kafka-1: - image: confluentinc/cp-kafka:3.1.1 + image: confluentinc/cp-kafka:3.1.2-1 networks: - kafka - zookeeper @@ -68,7 +68,7 @@ services: KAFKA_ADVERTISED_LISTENERS: 
PLAINTEXT://kafka-1:9092 kafka-2: - image: confluentinc/cp-kafka:3.1.1 + image: confluentinc/cp-kafka:3.1.2-1 networks: - kafka - zookeeper @@ -80,7 +80,7 @@ services: KAFKA_ADVERTISED_LISTENERS: PLAINTEXT://kafka-2:9092 kafka-3: - image: confluentinc/cp-kafka:3.1.1 + image: confluentinc/cp-kafka:3.1.2-1 networks: - kafka - zookeeper @@ -95,7 +95,7 @@ services: # Schema Registry # #---------------------------------------------------------------------------# schema-registry-1: - image: confluentinc/cp-schema-registry:3.1.1 + image: confluentinc/cp-schema-registry:3.1.2-1 networks: - kafka - zookeeper @@ -115,7 +115,7 @@ services: # Kafka Connector # #---------------------------------------------------------------------------# connect: - image: confluentinc/cp-kafka-connect:3.1.1 + image: confluentinc/cp-kafka-connect:3.1.2-1 networks: - kafka - zookeeper @@ -149,7 +149,7 @@ services: # REST proxy # #---------------------------------------------------------------------------# rest-proxy-1: - image: confluentinc/cp-kafka-rest:3.1.1 + image: confluentinc/cp-kafka-rest:3.1.2-1 networks: - kafka - zookeeper diff --git a/images/radar-backend-kafka/Dockerfile b/images/radar-backend-kafka/Dockerfile index 43fe1596e..72cfbedb9 100644 --- a/images/radar-backend-kafka/Dockerfile +++ b/images/radar-backend-kafka/Dockerfile @@ -11,7 +11,7 @@ # limitations under the License. -FROM confluentinc/cp-base:3.1.1 +FROM confluentinc/cp-base:3.1.2-1 MAINTAINER Nivethika M , Joris B , Francesco N diff --git a/images/radar-backend-kafka/README.md b/images/radar-backend-kafka/README.md index 16eaae16f..5fca83d13 100644 --- a/images/radar-backend-kafka/README.md +++ b/images/radar-backend-kafka/README.md @@ -1,6 +1,6 @@ # Dockerised RADAR-Backend-Kafka -It runs the RADAR-CNS Backend Kafka solution based on Kafka Streams 3.1.1, for more details about Kafka Streams click [here](http://docs.confluent.io/3.1.1/streams/index.html). +It runs the RADAR-CNS Backend Kafka solution based on Kafka Streams 3.1.2, for more details about Kafka Streams click [here](http://docs.confluent.io/3.1.2/streams/index.html). Create the docker image: ``` @@ -27,4 +27,4 @@ Before starting the streams, it waits until all topics inside TOPIC_LIST are ava ## How to run -For a complete use case scenario, check the RADAR-CNS `docker-compose` file available [here](https://github.com/RADAR-CNS/RADAR-Docker/blob/backend-integration/dcompose-stack/radar-cp-hadoop-stack/docker-compose.yml) \ No newline at end of file +For a complete use case scenario, check the RADAR-CNS `docker-compose` file available [here](https://github.com/RADAR-CNS/RADAR-Docker/blob/backend-integration/dcompose-stack/radar-cp-hadoop-stack/docker-compose.yml) diff --git a/images/radar-hdfs-connector/Dockerfile b/images/radar-hdfs-connector/Dockerfile index 9a8b8ee2f..3a2a794de 100644 --- a/images/radar-hdfs-connector/Dockerfile +++ b/images/radar-hdfs-connector/Dockerfile @@ -11,7 +11,7 @@ # limitations under the License. 
-FROM confluentinc/cp-kafka-connect:3.1.1 +FROM confluentinc/cp-kafka-connect:3.1.2-1 MAINTAINER Nivethika M , Joris B , Francesco N diff --git a/images/radar-hdfs-connector/README.md b/images/radar-hdfs-connector/README.md index 6b9ef06dd..e055c859b 100644 --- a/images/radar-hdfs-connector/README.md +++ b/images/radar-hdfs-connector/README.md @@ -1,6 +1,6 @@ # Dockerised RADAR-HDFS-Connector -It runs the Confluent HDFS Connector 3.1.1 using a custom [RecordWriterProvider](https://github.com/RADAR-CNS/RADAR-Backend/blob/dev/src/main/java/org/radarcns/sink/hdfs/AvroRecordWriterProviderRadar.java) to support RADAR-CNS Avro schemas. For more details about Confluent HDFS Connector click [here](http://docs.confluent.io/3.1.1/connect/connect-hdfs/docs/index.html). +It runs the Confluent HDFS Connector 3.1.2 using a custom [RecordWriterProvider](https://github.com/RADAR-CNS/RADAR-Backend/blob/dev/src/main/java/org/radarcns/sink/hdfs/AvroRecordWriterProviderRadar.java) to support RADAR-CNS Avro schemas. For more details about Confluent HDFS Connector click [here](http://docs.confluent.io/3.1.2/connect/connect-hdfs/docs/index.html). Create the docker image: ``` @@ -41,4 +41,4 @@ Note that connector's REST_PORT must be different from the one used by Rest-Prox ## How to run -For a complete use case scenario, check the RADAR-CNS `docker-compose` file available [here](https://github.com/RADAR-CNS/RADAR-Docker/blob/backend-integration/dcompose-stack/radar-cp-hadoop-stack/docker-compose.yml) \ No newline at end of file +For a complete use case scenario, check the RADAR-CNS `docker-compose` file available [here](https://github.com/RADAR-CNS/RADAR-Docker/blob/backend-integration/dcompose-stack/radar-cp-hadoop-stack/docker-compose.yml) diff --git a/images/radar-mongodb-connector/Dockerfile b/images/radar-mongodb-connector/Dockerfile index 850732bbf..fdb3b02a3 100644 --- a/images/radar-mongodb-connector/Dockerfile +++ b/images/radar-mongodb-connector/Dockerfile @@ -11,7 +11,7 @@ # limitations under the License. -FROM confluentinc/cp-kafka-connect:3.1.1 +FROM confluentinc/cp-kafka-connect:3.1.2-1 MAINTAINER Nivethika M , Joris B , Francesco N diff --git a/images/radar-mongodb-connector/README.md b/images/radar-mongodb-connector/README.md index 6745a6325..7dee0c24a 100644 --- a/images/radar-mongodb-connector/README.md +++ b/images/radar-mongodb-connector/README.md @@ -1,6 +1,6 @@ # Dockerised RADAR-MongoDB-Connector -It runs the RADAR-CNS MongoDB Connector project based on Confluent Platform 3.1.1, for more details check the [repository](https://github.com/RADAR-CNS/RADAR-MongoDbConnector). +It runs the RADAR-CNS MongoDB Connector project based on Confluent Platform 3.1.2, for more details check the [repository](https://github.com/RADAR-CNS/RADAR-MongoDbConnector). 
Create the docker image: ``` @@ -58,4 +58,4 @@ Note that connector's REST_PORT must be different from the one used by Rest-Prox ## How to run -For a complete use case scenario, check the RADAR-CNS `docker-compose` file available [here](https://github.com/RADAR-CNS/RADAR-Docker/blob/backend-integration/dcompose-stack/radar-cp-hadoop-stack/docker-compose.yml) \ No newline at end of file +For a complete use case scenario, check the RADAR-CNS `docker-compose` file available [here](https://github.com/RADAR-CNS/RADAR-Docker/blob/backend-integration/dcompose-stack/radar-cp-hadoop-stack/docker-compose.yml) From a6804823f15b5ba86bf0f0781e4ced5c55b552b9 Mon Sep 17 00:00:00 2001 From: Francesco Nobilia Date: Fri, 24 Feb 2017 16:56:24 +0000 Subject: [PATCH 078/197] Fixed names for the application status topics --- dcompose-stack/radar-cp-hadoop-stack/.env | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/dcompose-stack/radar-cp-hadoop-stack/.env b/dcompose-stack/radar-cp-hadoop-stack/.env index 1b3f6bc0a..4b97bc9ca 100644 --- a/dcompose-stack/radar-cp-hadoop-stack/.env +++ b/dcompose-stack/radar-cp-hadoop-stack/.env @@ -3,7 +3,7 @@ HDFS_DATA_DIR_2=/usr/local/var/lib/docker/hdfs-data-2 HDFS_NAME_DIR_1=/usr/local/var/lib/docker/hdfs-name-1 HDFS_NAME_DIR_2=/usr/local/var/lib/docker/hdfs-name-2 MONGODB_DIR=/usr/local/var/lib/docker/mongodb -RADAR_TOPIC_LIST=android_empatica_e4_acceleration,android_empatica_e4_acceleration_output,android_empatica_e4_battery_level,android_empatica_e4_battery_level_output,android_empatica_e4_blood_volume_pulse,android_empatica_e4_blood_volume_pulse_output,android_empatica_e4_electrodermal_activity,android_empatica_e4_electrodermal_activity_output,android_empatica_e4_heartrate_output,android_empatica_e4_inter_beat_interval,android_empatica_e4_inter_beat_interval_output,android_empatica_e4_sensor_status,android_empatica_e4_sensor_status_output,android_empatica_e4_temperature,android_empatica_e4_temperature_output,application_status_server,application_status_record_counts,application_status_uptime +RADAR_TOPIC_LIST=android_empatica_e4_acceleration,android_empatica_e4_acceleration_output,android_empatica_e4_battery_level,android_empatica_e4_battery_level_output,android_empatica_e4_blood_volume_pulse,android_empatica_e4_blood_volume_pulse_output,android_empatica_e4_electrodermal_activity,android_empatica_e4_electrodermal_activity_output,android_empatica_e4_heartrate_output,android_empatica_e4_inter_beat_interval,android_empatica_e4_inter_beat_interval_output,android_empatica_e4_sensor_status,android_empatica_e4_sensor_status_output,android_empatica_e4_temperature,android_empatica_e4_temperature_output,application_server_status,application_record_counts,application_uptime HOTSTORAGE_USERNAME= HOTSTORAGE_PASSWORD=XXXXXXXX HOTSTORAGE_NAME= From c93c5bf1ec2e49bae2c24a03676ffe6a1cba24c2 Mon Sep 17 00:00:00 2001 From: Francesco Nobilia Date: Fri, 24 Feb 2017 17:16:15 +0000 Subject: [PATCH 079/197] Added Rest-Api container --- images/radar-restapi/Dockerfile | 36 +++++++++++++++++++++++++++++++++ images/radar-restapi/README.md | 34 +++++++++++++++++++++++++++++++ 2 files changed, 70 insertions(+) create mode 100644 images/radar-restapi/Dockerfile create mode 100644 images/radar-restapi/README.md diff --git a/images/radar-restapi/Dockerfile b/images/radar-restapi/Dockerfile new file mode 100644 index 000000000..7e607ff12 --- /dev/null +++ b/images/radar-restapi/Dockerfile @@ -0,0 +1,36 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not 
use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +FROM tomcat:8.0.37 + +MAINTAINER @fnobilia, @afolarin + +LABEL description="RADAR-CNS Rest Api docker container" + +# Install Rest API +RUN echo && echo "==> Installing Components" \ + # Download Git RestApi release + && echo "==> Downloading RADAR-CNS/RADAR-RestApi v0.1-beta.1 release from GitHub" \ + && cd /usr/local && mkdir RADAR-RestApi && cd /usr/local/RADAR-RestApi \ + && wget https://github.com/RADAR-CNS/RADAR-RestApi/releases/download/v0.1-beta.1/radar.war \ + # Deploy the war + && echo "==> Deploying the WAR" \ + && cp /usr/local/RADAR-RestApi/radar.war /usr/local/tomcat/webapps/ \ + # Remove repository + && echo "==> Cleaning up" \ + && cd /usr/local && rm -R /usr/local/RADAR-RestApi \ + && echo + +EXPOSE 8080 + +CMD ["catalina.sh", "run"] \ No newline at end of file diff --git a/images/radar-restapi/README.md b/images/radar-restapi/README.md new file mode 100644 index 000000000..e0988b5af --- /dev/null +++ b/images/radar-restapi/README.md @@ -0,0 +1,34 @@ +## Dockerised RADAR-RestApi + +Create the docker image: +``` +$ docker build -t radarcns/radar-restapi ./ +``` + +Or pull from dockerhub: +``` +$ docker pull radarcns/radar-restapi:latest +``` + +Run the docker image locally: +``` +$ docker run -d -p 8080:8080 --name radar-restapi radarcns/radar-restapi:0.1 +``` + +The RestApi will be running at http://localhost:8080. 
To test them, access the [Swagger Documentation](http://localhost:8080/radar/api/swagger.json) + +## Runtime environment variables + +Environment variables used by the RestApi + +```bash +# mongoDb user and password +MONGODB_USER='restapi' +MONGODB_PASS='radar' + +# mongoDb database +MONGODB_DATABASE='hotstorage' + +# mongoDb instance +MONGODB_HOST='localhost:27017' +``` From 56c9b19327ed56c87b0eaa4a0561638e937db137 Mon Sep 17 00:00:00 2001 From: Francesco Nobilia Date: Mon, 27 Feb 2017 11:50:03 +0000 Subject: [PATCH 080/197] Added new topics for Connector settings --- dcompose-stack/radar-cp-hadoop-stack/install-radar-stack.sh | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/dcompose-stack/radar-cp-hadoop-stack/install-radar-stack.sh b/dcompose-stack/radar-cp-hadoop-stack/install-radar-stack.sh index 2eb037393..bd55ef6dd 100755 --- a/dcompose-stack/radar-cp-hadoop-stack/install-radar-stack.sh +++ b/dcompose-stack/radar-cp-hadoop-stack/install-radar-stack.sh @@ -1,7 +1,7 @@ #!/bin/bash -RADAR_RAW_TOPIC_LIST="android_empatica_e4_acceleration,android_empatica_e4_battery_level,android_empatica_e4_blood_volume_pulse,android_empatica_e4_electrodermal_activity,android_empatica_e4_inter_beat_interval,android_empatica_e4_sensor_status,android_empatica_e4_temperature" -RADAR_AGG_TOPIC_LIST="android_empatica_e4_acceleration_output, android_empatica_e4_battery_level_output, android_empatica_e4_blood_volume_pulse_output, android_empatica_e4_electrodermal_activity_output, android_empatica_e4_heartrate_output, android_empatica_e4_inter_beat_interval_output, android_empatica_e4_sensor_status_output, android_empatica_e4_temperature_output, application_status_server, application_status_record_counts, application_status_uptime" +RADAR_RAW_TOPIC_LIST="android_empatica_e4_acceleration,android_empatica_e4_battery_level,android_empatica_e4_blood_volume_pulse,android_empatica_e4_electrodermal_activity,android_empatica_e4_inter_beat_interval,android_empatica_e4_sensor_status,android_empatica_e4_temperature,application_server_status,application_record_counts,application_uptime" +RADAR_AGG_TOPIC_LIST="android_empatica_e4_acceleration_output, android_empatica_e4_battery_level_output, android_empatica_e4_blood_volume_pulse_output, android_empatica_e4_electrodermal_activity_output, android_empatica_e4_heartrate_output, android_empatica_e4_inter_beat_interval_output, android_empatica_e4_sensor_status_output, android_empatica_e4_temperature_output, application_server_status, application_record_counts, application_uptime" command_exists() { command -v "$@" > /dev/null 2>&1 From 68b2c0390b7b614c6eca47a5fe7f030576401207 Mon Sep 17 00:00:00 2001 From: Francesco Nobilia Date: Tue, 28 Feb 2017 13:40:55 +0000 Subject: [PATCH 081/197] Fixed HR name topic --- dcompose-stack/radar-cp-hadoop-stack/.env | 2 +- dcompose-stack/radar-cp-hadoop-stack/install-radar-stack.sh | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/dcompose-stack/radar-cp-hadoop-stack/.env b/dcompose-stack/radar-cp-hadoop-stack/.env index 4b97bc9ca..f5d507424 100644 --- a/dcompose-stack/radar-cp-hadoop-stack/.env +++ b/dcompose-stack/radar-cp-hadoop-stack/.env @@ -3,7 +3,7 @@ HDFS_DATA_DIR_2=/usr/local/var/lib/docker/hdfs-data-2 HDFS_NAME_DIR_1=/usr/local/var/lib/docker/hdfs-name-1 HDFS_NAME_DIR_2=/usr/local/var/lib/docker/hdfs-name-2 MONGODB_DIR=/usr/local/var/lib/docker/mongodb 
-RADAR_TOPIC_LIST=android_empatica_e4_acceleration,android_empatica_e4_acceleration_output,android_empatica_e4_battery_level,android_empatica_e4_battery_level_output,android_empatica_e4_blood_volume_pulse,android_empatica_e4_blood_volume_pulse_output,android_empatica_e4_electrodermal_activity,android_empatica_e4_electrodermal_activity_output,android_empatica_e4_heartrate_output,android_empatica_e4_inter_beat_interval,android_empatica_e4_inter_beat_interval_output,android_empatica_e4_sensor_status,android_empatica_e4_sensor_status_output,android_empatica_e4_temperature,android_empatica_e4_temperature_output,application_server_status,application_record_counts,application_uptime +RADAR_TOPIC_LIST=android_empatica_e4_acceleration,android_empatica_e4_acceleration_output,android_empatica_e4_battery_level,android_empatica_e4_battery_level_output,android_empatica_e4_blood_volume_pulse,android_empatica_e4_blood_volume_pulse_output,android_empatica_e4_electrodermal_activity,android_empatica_e4_electrodermal_activity_output,android_empatica_e4_heartrate,android_empatica_e4_inter_beat_interval,android_empatica_e4_inter_beat_interval_output,android_empatica_e4_sensor_status,android_empatica_e4_sensor_status_output,android_empatica_e4_temperature,android_empatica_e4_temperature_output,application_server_status,application_record_counts,application_uptime HOTSTORAGE_USERNAME= HOTSTORAGE_PASSWORD=XXXXXXXX HOTSTORAGE_NAME= diff --git a/dcompose-stack/radar-cp-hadoop-stack/install-radar-stack.sh b/dcompose-stack/radar-cp-hadoop-stack/install-radar-stack.sh index bd55ef6dd..15986f4b2 100755 --- a/dcompose-stack/radar-cp-hadoop-stack/install-radar-stack.sh +++ b/dcompose-stack/radar-cp-hadoop-stack/install-radar-stack.sh @@ -1,7 +1,7 @@ #!/bin/bash RADAR_RAW_TOPIC_LIST="android_empatica_e4_acceleration,android_empatica_e4_battery_level,android_empatica_e4_blood_volume_pulse,android_empatica_e4_electrodermal_activity,android_empatica_e4_inter_beat_interval,android_empatica_e4_sensor_status,android_empatica_e4_temperature,application_server_status,application_record_counts,application_uptime" -RADAR_AGG_TOPIC_LIST="android_empatica_e4_acceleration_output, android_empatica_e4_battery_level_output, android_empatica_e4_blood_volume_pulse_output, android_empatica_e4_electrodermal_activity_output, android_empatica_e4_heartrate_output, android_empatica_e4_inter_beat_interval_output, android_empatica_e4_sensor_status_output, android_empatica_e4_temperature_output, application_server_status, application_record_counts, application_uptime" +RADAR_AGG_TOPIC_LIST="android_empatica_e4_acceleration_output, android_empatica_e4_battery_level_output, android_empatica_e4_blood_volume_pulse_output, android_empatica_e4_electrodermal_activity_output, android_empatica_e4_heartrate, android_empatica_e4_inter_beat_interval_output, android_empatica_e4_sensor_status_output, android_empatica_e4_temperature_output, application_server_status, application_record_counts, application_uptime" command_exists() { command -v "$@" > /dev/null 2>&1 From bd6225436ff72179994e870429d1617e0728932d Mon Sep 17 00:00:00 2001 From: nivethika Date: Mon, 6 Mar 2017 17:31:12 +0100 Subject: [PATCH 082/197] new images with new-repo releases --- images/radar-backend-kafka/Dockerfile | 4 ++-- images/radar-hdfs-connector/Dockerfile | 9 +++++++-- images/radar-mongodb-connector/Dockerfile | 7 ++++++- 3 files changed, 15 insertions(+), 5 deletions(-) diff --git a/images/radar-backend-kafka/Dockerfile b/images/radar-backend-kafka/Dockerfile index 72cfbedb9..9dc584430 
100644 --- a/images/radar-backend-kafka/Dockerfile +++ b/images/radar-backend-kafka/Dockerfile @@ -18,12 +18,12 @@ MAINTAINER Nivethika M , Joris B , Fra LABEL description="RADAR-CNS Backend streams and monitor" # Install RADAR-Backend -RUN echo && echo "==> Installing Components" \ +RUN echo "==> Installing Components" \ # Download Git RADAR-Backend release && echo "==> Downloading RADAR-CNS/RADAR-Backend v0.1-beta.1 release from GitHub" \ && cd /usr/local && mkdir RADAR-Backend -ADD https://github.com/RADAR-CNS/RADAR-Backend/releases/download/0.1-beta.1/radarbackend-0.1.jar /usr/share/java/ +ADD https://github.com/RADAR-CNS/RADAR-Backend/releases/download/0.1-alpha.2/radar-backend-0.1-alpha.2.jar /usr/share/java/ # Load topics validator COPY ["./init.sh", "./kafka_status.sh", "/home/"] diff --git a/images/radar-hdfs-connector/Dockerfile b/images/radar-hdfs-connector/Dockerfile index 3a2a794de..e83e0dc8a 100644 --- a/images/radar-hdfs-connector/Dockerfile +++ b/images/radar-hdfs-connector/Dockerfile @@ -17,8 +17,13 @@ MAINTAINER Nivethika M , Joris B , Fra LABEL description="RADAR-CNS Backend- HDFS Sink Connector" -# Deploy RADAR-Backend - Mongodb Sink Connector -ADD https://github.com/RADAR-CNS/RADAR-Backend/releases/download/0.1-beta.1/radar-hdfs-connector-0.1.jar /etc/kafka-connect/jars/ +# Install RADAR-Backend +RUN echo "==> Installing Components" \ + # Download Git RADAR-Backend release + && echo "==> Downloading RADAR-CNS/RADAR-HDFS-Sink-Connector 0.1-alpha.1 release from GitHub" + +# Deploy RADAR-Backend - HDFS Sink Connector +ADD https://github.com/RADAR-CNS/RADAR-HDFS-Sink-Connector/releases/download/0.1-alpha.1/radar-hdfs-sink-connector-0.1-alpha.1.jar /etc/kafka-connect/jars/ # Load topics validator COPY ./kafka_status.sh /home/kafka_status.sh diff --git a/images/radar-mongodb-connector/Dockerfile b/images/radar-mongodb-connector/Dockerfile index fdb3b02a3..bad4d8699 100644 --- a/images/radar-mongodb-connector/Dockerfile +++ b/images/radar-mongodb-connector/Dockerfile @@ -17,8 +17,13 @@ MAINTAINER Nivethika M , Joris B , Fra LABEL description="RADAR-CNS Backend- MongoDB Sink Connector" +# Install RADAR-Backend +RUN echo "==> Installing Components" \ + # Download Git RADAR-Backend release + && echo "==> Downloading RADAR-CNS/RADAR-MongoDB-Sink-Connector 0.1-alpha.1 release from GitHub" + # Deploy RADAR-Backend - Mongodb Sink Connector -ADD https://github.com/RADAR-CNS/RADAR-Backend/releases/download/0.1-beta.1/radar-mongodb-connector-0.1.jar /etc/kafka-connect/jars/ +ADD https://github.com/RADAR-CNS/RADAR-MongoDB-Sink-Connector/releases/download/0.1-alpha.1/radar-mongodb-sink-connector-0.1-alpha.1.jar /etc/kafka-connect/jars/ # Load topics validator COPY ./kafka_status.sh /home/kafka_status.sh From 162b310e65f056d3f25bf7dc914bad67aa887784 Mon Sep 17 00:00:00 2001 From: nivethika Date: Mon, 6 Mar 2017 18:16:17 +0100 Subject: [PATCH 083/197] new docker images with new-repo releases --- dcompose-stack/radar-cp-hadoop-stack/docker-compose.yml | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/dcompose-stack/radar-cp-hadoop-stack/docker-compose.yml b/dcompose-stack/radar-cp-hadoop-stack/docker-compose.yml index faabf934a..f228f99f5 100644 --- a/dcompose-stack/radar-cp-hadoop-stack/docker-compose.yml +++ b/dcompose-stack/radar-cp-hadoop-stack/docker-compose.yml @@ -235,7 +235,7 @@ services: # RADAR mongo connector # #---------------------------------------------------------------------------# radar-mongodb-connector: - image: 
radarcns/radar-mongodb-connector-auto:0.1 + image: radarcns/radar-mongodb-connector-auto:0.2 restart: on-failure volumes: - ./sink-mongo.properties:/etc/kafka-connect/sink.properties @@ -279,7 +279,7 @@ services: # RADAR HDFS connector # #---------------------------------------------------------------------------# radar-hdfs-connector: - image: radarcns/radar-hdfs-connector-auto:0.1 + image: radarcns/radar-hdfs-connector-auto:0.2 restart: on-failure volumes: - ./sink-hdfs.properties:/etc/kafka-connect/sink-hdfs.properties @@ -323,7 +323,7 @@ services: # RADAR backend streams # #---------------------------------------------------------------------------# radar-backend-stream: - image: radarcns/radar-backend-kafka-auto:0.1 + image: radarcns/radar-backend-kafka-auto:0.2 command: - stream networks: @@ -346,7 +346,7 @@ services: # RADAR backend monitor # #---------------------------------------------------------------------------# radar-backend-monitor: - image: radarcns/radar-backend-kafka-auto:0.1 + image: radarcns/radar-backend-kafka-auto:0.2 command: - monitor networks: From 0bb04363a9b1f7424990bfbc0af0c7454279d6a5 Mon Sep 17 00:00:00 2001 From: nivethika Date: Tue, 7 Mar 2017 12:28:58 +0100 Subject: [PATCH 084/197] update backend and hdfs-connector images --- images/radar-backend-kafka/init.sh | 2 +- images/radar-hdfs-connector/Dockerfile | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/images/radar-backend-kafka/init.sh b/images/radar-backend-kafka/init.sh index 788df36ee..3f36a6835 100755 --- a/images/radar-backend-kafka/init.sh +++ b/images/radar-backend-kafka/init.sh @@ -6,4 +6,4 @@ echo "===> Waiting RADAR-CNS topics ... " # Start streams echo "===> Starting " $1 "...." -./usr/bin/java -jar /usr/share/java/radarbackend-0.1.jar -c /etc/radar.yml $1 \ No newline at end of file +./usr/bin/java -jar /usr/share/java/radar-backend-*.jar -c /etc/radar.yml $1 \ No newline at end of file diff --git a/images/radar-hdfs-connector/Dockerfile b/images/radar-hdfs-connector/Dockerfile index e83e0dc8a..7ffad215d 100644 --- a/images/radar-hdfs-connector/Dockerfile +++ b/images/radar-hdfs-connector/Dockerfile @@ -23,7 +23,7 @@ RUN echo "==> Installing Components" \ && echo "==> Downloading RADAR-CNS/RADAR-HDFS-Sink-Connector 0.1-alpha.1 release from GitHub" # Deploy RADAR-Backend - HDFS Sink Connector -ADD https://github.com/RADAR-CNS/RADAR-HDFS-Sink-Connector/releases/download/0.1-alpha.1/radar-hdfs-sink-connector-0.1-alpha.1.jar /etc/kafka-connect/jars/ +ADD https://github.com/RADAR-CNS/RADAR-HDFS-Sink-Connector/releases/download/0.1-alpha.1/radar-hdfs-sink-connector-0.1-alpha.1-sources.jar /etc/kafka-connect/jars/ # Load topics validator COPY ./kafka_status.sh /home/kafka_status.sh From c741fbddd44d9b7d5c9f5f8e403a33647d20308a Mon Sep 17 00:00:00 2001 From: Francesco Nobilia Date: Mon, 27 Mar 2017 17:40:35 +0100 Subject: [PATCH 085/197] Network script to check network connectivity --- scripts/README.md | 5 +++ scripts/check-radar-network.sh | 77 ++++++++++++++++++++++++++++++++++ 2 files changed, 82 insertions(+) create mode 100644 scripts/README.md create mode 100644 scripts/check-radar-network.sh diff --git a/scripts/README.md b/scripts/README.md new file mode 100644 index 000000000..426638338 --- /dev/null +++ b/scripts/README.md @@ -0,0 +1,5 @@ +## Scripts + +This folder contains useful scripts to manage the server where the RADAR-CNS Platform is running. + +- `check-radar-network.sh` checks if the machine can reach internet. 
diff --git a/scripts/check-radar-network.sh b/scripts/check-radar-network.sh new file mode 100644 index 000000000..82b44e9f9 --- /dev/null +++ b/scripts/check-radar-network.sh @@ -0,0 +1,77 @@ +#!/bin/bash + +# network interface +nic=wlp5s1 +# lock file +lockfile=/home/radar/RADAR-Network/LOCK_RETRY +# log file +logfile=/home/radar/RADAR-Network/radar-network.log + +# maximum file size in byte to rotate log +minimumsize=10000000 + +# current time +timestamp=$(date '+%d/%m/%Y %H:%M:%S'); + +# write message in the log file +log_info() { + echo "$timestamp - $@" >> $logfile 2>&1 +} + +# check connection +isConnected() { + case "$(curl -s --max-time 5 -I http://www.kcl.ac.uk | sed 's/^[^ ]* *\([0-9]\).*/\1/; 1q')" in + [23]) log_info "HTTP connectivity is up" && return 0;; + 5) log_info "The web proxy won't let us through" && return 1;; + *) log_info "The network is down or very slow" && return 1;; +esac +} + +# force connection +connect() { + log_info "Forcing reconnection" + sudo ifconfig $nic down >> $logfile 2>&1 + log_info "Turning wifi NIC off" + sudo ifconfig $nic up >> $logfile 2>&1 + log_info "Turning wifi NIC on" + #sudo wpa_supplicant -i $nic -Dwext -c/etc/wpa_supplicant/wpa_supplicant.conf >> $logfile 2>&1 + #log_info "Authenticating" + #sudo dhclient $nic >> $logfile 2>&1 + #log_info "Getting IP address" + log_info "Completed" +} + +# remove old lock +checkLock() { + uptime=$( $logfile +fi \ No newline at end of file From 462a5492e3fdc99f97b9c840cb10f03e36e9a9bd Mon Sep 17 00:00:00 2001 From: Francesco Nobilia Date: Tue, 28 Mar 2017 09:53:25 +0100 Subject: [PATCH 086/197] removed race condition on network restart and added brute-force --- scripts/check-radar-network.sh | 18 ++++++++++++------ 1 file changed, 12 insertions(+), 6 deletions(-) diff --git a/scripts/check-radar-network.sh b/scripts/check-radar-network.sh index 82b44e9f9..87a4818e3 100644 --- a/scripts/check-radar-network.sh +++ b/scripts/check-radar-network.sh @@ -30,14 +30,20 @@ esac # force connection connect() { log_info "Forcing reconnection" - sudo ifconfig $nic down >> $logfile 2>&1 + sudo ifdown --force $nic >> $logfile 2>&1 log_info "Turning wifi NIC off" - sudo ifconfig $nic up >> $logfile 2>&1 + sleep 10 + sudo ifup $nic >> $logfile 2>&1 log_info "Turning wifi NIC on" - #sudo wpa_supplicant -i $nic -Dwext -c/etc/wpa_supplicant/wpa_supplicant.conf >> $logfile 2>&1 - #log_info "Authenticating" - #sudo dhclient $nic >> $logfile 2>&1 - #log_info "Getting IP address" + log_info "Double checking ..." + if ! isConnected; then + log_info "***** BRUTE-FORCE *****" + sudo systemctl restart networking >> $logfile 2>&1 + log_info "***** Restart network service *****" + sudo service ntp restart >> $logfile 2>&1 + log_info "***** Restart NTP *****" + ntpq -p >> $logfile 2>&1 + fi log_info "Completed" } From d80b48bdbf1f8939d7c166ace0ed324015bc1e6c Mon Sep 17 00:00:00 2001 From: Francesco Nobilia Date: Tue, 28 Mar 2017 10:12:10 +0100 Subject: [PATCH 087/197] Instruction to run the network script --- scripts/README.md | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/scripts/README.md b/scripts/README.md index 426638338..68910ac8b 100644 --- a/scripts/README.md +++ b/scripts/README.md @@ -2,4 +2,9 @@ This folder contains useful scripts to manage the server where the RADAR-CNS Platform is running. -- `check-radar-network.sh` checks if the machine can reach internet. +- `check-radar-network.sh` checks if the machine is connected to internet. The check is done "curling" `http://www.kcl.ac.uk`. 
The script can be parametrised with + - `nic` is the internet gateway + - `lockfile` lock usefull to check whether there is a previous instance still running + - `logfile` is the log file where the script logs each operation + +To add a script to `CRON` as `root`, run on the command-line `sudo crontab -e -u root` and add `*/2 * * * * /path/to/script-name.sh` at the end of the file. In this way, the script will be fired every `2` minutes. Before deploying the task, check that all paths used by the scritp are absolute. From 774bfb81ad238a4b34ec8caa0851eeef1296b9b6 Mon Sep 17 00:00:00 2001 From: Francesco Nobilia Date: Tue, 28 Mar 2017 11:50:12 +0100 Subject: [PATCH 088/197] Added url parameter --- scripts/README.md | 5 +++-- scripts/check-radar-network.sh | 4 +++- 2 files changed, 6 insertions(+), 3 deletions(-) diff --git a/scripts/README.md b/scripts/README.md index 68910ac8b..c0ef0a24a 100644 --- a/scripts/README.md +++ b/scripts/README.md @@ -2,9 +2,10 @@ This folder contains useful scripts to manage the server where the RADAR-CNS Platform is running. -- `check-radar-network.sh` checks if the machine is connected to internet. The check is done "curling" `http://www.kcl.ac.uk`. The script can be parametrised with +- `check-radar-network.sh` checks if the machine is connected to internet. The script can be parametrised with - `nic` is the internet gateway - `lockfile` lock usefull to check whether there is a previous instance still running - `logfile` is the log file where the script logs each operation + - `url` is the web site used to check the connectivity -To add a script to `CRON` as `root`, run on the command-line `sudo crontab -e -u root` and add `*/2 * * * * /path/to/script-name.sh` at the end of the file. In this way, the script will be fired every `2` minutes. Before deploying the task, check that all paths used by the scritp are absolute. +To add a script to `CRON` as `root`, run on the command-line `sudo crontab -e -u root` and add `*/2 * * * * /path/to/script-name.sh` at the end of the file. In this way, the script will be fired every `2` minutes. Before deploying the task, check that all paths used by the script are absolute. 
diff --git a/scripts/check-radar-network.sh b/scripts/check-radar-network.sh index 87a4818e3..93a5688eb 100644 --- a/scripts/check-radar-network.sh +++ b/scripts/check-radar-network.sh @@ -6,6 +6,8 @@ nic=wlp5s1 lockfile=/home/radar/RADAR-Network/LOCK_RETRY # log file logfile=/home/radar/RADAR-Network/radar-network.log +# url to check against +url=https://www.empatica.com # maximum file size in byte to rotate log minimumsize=10000000 @@ -20,7 +22,7 @@ log_info() { # check connection isConnected() { - case "$(curl -s --max-time 5 -I http://www.kcl.ac.uk | sed 's/^[^ ]* *\([0-9]\).*/\1/; 1q')" in + case "$(curl -s --max-time 5 -I $url | sed 's/^[^ ]* *\([0-9]\).*/\1/; 1q')" in [23]) log_info "HTTP connectivity is up" && return 0;; 5) log_info "The web proxy won't let us through" && return 1;; *) log_info "The network is down or very slow" && return 1;; From 83aa8edb1ffa84ed48133683be0a79a3c12cb559 Mon Sep 17 00:00:00 2001 From: Francesco Nobilia Date: Tue, 28 Mar 2017 13:46:44 +0100 Subject: [PATCH 089/197] Removed brute-force cause issue with docker --- scripts/check-radar-network.sh | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/scripts/check-radar-network.sh b/scripts/check-radar-network.sh index 93a5688eb..3164ba661 100644 --- a/scripts/check-radar-network.sh +++ b/scripts/check-radar-network.sh @@ -39,12 +39,12 @@ connect() { log_info "Turning wifi NIC on" log_info "Double checking ..." if ! isConnected; then - log_info "***** BRUTE-FORCE *****" - sudo systemctl restart networking >> $logfile 2>&1 - log_info "***** Restart network service *****" - sudo service ntp restart >> $logfile 2>&1 - log_info "***** Restart NTP *****" - ntpq -p >> $logfile 2>&1 + log_info "Forcing reconnection with a sleep time of 30 sec ..." 
+ sudo ifdown --force $nic >> $logfile 2>&1 + log_info "Turning wifi NIC off" + sleep 30 + sudo ifup $nic >> $logfile 2>&1 + log_info "Turning wifi NIC on" fi log_info "Completed" } From eaa059e27b35eedcc3080363e9af0b7e9e566e72 Mon Sep 17 00:00:00 2001 From: Francesco Nobilia Date: Wed, 29 Mar 2017 10:46:20 +0100 Subject: [PATCH 090/197] Added retry to curl --- scripts/check-radar-network.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/scripts/check-radar-network.sh b/scripts/check-radar-network.sh index 3164ba661..15713f09c 100644 --- a/scripts/check-radar-network.sh +++ b/scripts/check-radar-network.sh @@ -22,7 +22,7 @@ log_info() { # check connection isConnected() { - case "$(curl -s --max-time 5 -I $url | sed 's/^[^ ]* *\([0-9]\).*/\1/; 1q')" in + case "$(curl -s --max-time 10 --retry 5 -I $url | sed 's/^[^ ]* *\([0-9]\).*/\1/; 1q')" in [23]) log_info "HTTP connectivity is up" && return 0;; 5) log_info "The web proxy won't let us through" && return 1;; *) log_info "The network is down or very slow" && return 1;; From e2f0827842159738a9efd5e10b80cbc036995b99 Mon Sep 17 00:00:00 2001 From: Francesco Nobilia Date: Wed, 29 Mar 2017 16:50:21 +0100 Subject: [PATCH 091/197] Added topic application_external_time --- dcompose-stack/radar-cp-hadoop-stack/.env | 2 +- dcompose-stack/radar-cp-hadoop-stack/install-radar-stack.sh | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/dcompose-stack/radar-cp-hadoop-stack/.env b/dcompose-stack/radar-cp-hadoop-stack/.env index f5d507424..b9cff2694 100644 --- a/dcompose-stack/radar-cp-hadoop-stack/.env +++ b/dcompose-stack/radar-cp-hadoop-stack/.env @@ -3,7 +3,7 @@ HDFS_DATA_DIR_2=/usr/local/var/lib/docker/hdfs-data-2 HDFS_NAME_DIR_1=/usr/local/var/lib/docker/hdfs-name-1 HDFS_NAME_DIR_2=/usr/local/var/lib/docker/hdfs-name-2 MONGODB_DIR=/usr/local/var/lib/docker/mongodb -RADAR_TOPIC_LIST=android_empatica_e4_acceleration,android_empatica_e4_acceleration_output,android_empatica_e4_battery_level,android_empatica_e4_battery_level_output,android_empatica_e4_blood_volume_pulse,android_empatica_e4_blood_volume_pulse_output,android_empatica_e4_electrodermal_activity,android_empatica_e4_electrodermal_activity_output,android_empatica_e4_heartrate,android_empatica_e4_inter_beat_interval,android_empatica_e4_inter_beat_interval_output,android_empatica_e4_sensor_status,android_empatica_e4_sensor_status_output,android_empatica_e4_temperature,android_empatica_e4_temperature_output,application_server_status,application_record_counts,application_uptime +RADAR_TOPIC_LIST=android_empatica_e4_acceleration,android_empatica_e4_acceleration_output,android_empatica_e4_battery_level,android_empatica_e4_battery_level_output,android_empatica_e4_blood_volume_pulse,android_empatica_e4_blood_volume_pulse_output,android_empatica_e4_electrodermal_activity,android_empatica_e4_electrodermal_activity_output,android_empatica_e4_heartrate,android_empatica_e4_inter_beat_interval,android_empatica_e4_inter_beat_interval_output,android_empatica_e4_sensor_status,android_empatica_e4_sensor_status_output,android_empatica_e4_temperature,android_empatica_e4_temperature_output,application_server_status,application_record_counts,application_uptime,application_external_time HOTSTORAGE_USERNAME= HOTSTORAGE_PASSWORD=XXXXXXXX HOTSTORAGE_NAME= diff --git a/dcompose-stack/radar-cp-hadoop-stack/install-radar-stack.sh b/dcompose-stack/radar-cp-hadoop-stack/install-radar-stack.sh index 15986f4b2..2541eb836 100755 --- 
a/dcompose-stack/radar-cp-hadoop-stack/install-radar-stack.sh +++ b/dcompose-stack/radar-cp-hadoop-stack/install-radar-stack.sh @@ -1,7 +1,7 @@ #!/bin/bash -RADAR_RAW_TOPIC_LIST="android_empatica_e4_acceleration,android_empatica_e4_battery_level,android_empatica_e4_blood_volume_pulse,android_empatica_e4_electrodermal_activity,android_empatica_e4_inter_beat_interval,android_empatica_e4_sensor_status,android_empatica_e4_temperature,application_server_status,application_record_counts,application_uptime" -RADAR_AGG_TOPIC_LIST="android_empatica_e4_acceleration_output, android_empatica_e4_battery_level_output, android_empatica_e4_blood_volume_pulse_output, android_empatica_e4_electrodermal_activity_output, android_empatica_e4_heartrate, android_empatica_e4_inter_beat_interval_output, android_empatica_e4_sensor_status_output, android_empatica_e4_temperature_output, application_server_status, application_record_counts, application_uptime" +RADAR_RAW_TOPIC_LIST="android_empatica_e4_acceleration,android_empatica_e4_battery_level,android_empatica_e4_blood_volume_pulse,android_empatica_e4_electrodermal_activity,android_empatica_e4_inter_beat_interval,android_empatica_e4_sensor_status,android_empatica_e4_temperature,application_server_status,application_record_counts,application_uptime,application_external_time" +RADAR_AGG_TOPIC_LIST="android_empatica_e4_acceleration_output, android_empatica_e4_battery_level_output, android_empatica_e4_blood_volume_pulse_output, android_empatica_e4_electrodermal_activity_output, android_empatica_e4_heartrate, android_empatica_e4_inter_beat_interval_output, android_empatica_e4_sensor_status_output, android_empatica_e4_temperature_output, application_server_status, application_record_counts, application_uptime, application_external_time" command_exists() { command -v "$@" > /dev/null 2>&1 From 88dd83817c03e9cbefcc9af86f46e0c631f160de Mon Sep 17 00:00:00 2001 From: Nivethika Mahasivam Date: Thu, 30 Mar 2017 16:00:21 +0200 Subject: [PATCH 092/197] Update multipe hdfs connector properties doc --- README.md | 23 ++++++++++++++++++++++- 1 file changed, 22 insertions(+), 1 deletion(-) diff --git a/README.md b/README.md index fbe8d970f..31a2516e3 100644 --- a/README.md +++ b/README.md @@ -107,11 +107,32 @@ To run RADAR-CNS stack in a single node setup: ``` 3. Create `smtp.env` and configure your email settings following `smtp.env.template`. Configure alternative mail providers like Amazon SES or Gmail by using the parameters of the [`namshi/smtp` Docker image](https://hub.docker.com/r/namshi/smtp/). 4. (Optional) Modify flush.size and HDFS direcotory for Cold storage in `sink-hdfs.properties` - + ```ini flush.size= topics.dir=/path/to/data ``` + Note: To have different flush.size for different topics, you can create multipe property configurations for a single connector. To do that, + + 4.1 Create multipe property files that have different `flush.size` for given topics. + Examples [sink-hdfs-high.properties](https://github.com/RADAR-CNS/RADAR-Docker/blob/dev/dcompose-stack/radar-cp-hadoop-stack/sink-hdfs-high.properties) , [sink-hdfs-low.properties](https://github.com/RADAR-CNS/RADAR-Docker/blob/dev/dcompose-stack/radar-cp-hadoop-stack/sink-hdfs-low.properties) + + 4.2 Add `CONNECTOR_PROPERTY_FILE_PREFIX: ` enviornment variable to `radar-hdfs-connector` service in `docker-compose` file. 
+ + 4.3 Add created property files to the `radar-hdfs-connector` service in `docker-compose` with name abides to prefix-value mentioned in `CONNECTOR_PROPERTY_FILE_PREFIX` + + ```ini + radar-hdfs-connector: + image: radarcns/radar-hdfs-connector-auto:0.2 + restart: on-failure + volumes: + - ./sink-hdfs-high.properties:/etc/kafka-connect/sink-hdfs-high.properties + - ./sink-hdfs-low.properties:/etc/kafka-connect/sink-hdfs-low.properties + environment: + CONNECT_BOOTSTRAP_SERVERS: PLAINTEXT://kafka-1:9092,PLAINTEXT://kafka-2:9092,PLAINTEXT://kafka-3:9092 + CONNECTOR_PROPERTY_FILE_PREFIX: "sink-hdfs" + ``` + 5. Configure Hot Storage settings in `.env` file ```ini From cd25939966cc6a9892a8700ac14ecf052ad38f2b Mon Sep 17 00:00:00 2001 From: Francesco Nobilia Date: Thu, 6 Apr 2017 13:20:16 +0100 Subject: [PATCH 093/197] Admin Dashboard --- dashboard/README.md | 3 + dashboard/cadvisor.png | Bin 0 -> 25022 bytes dashboard/kafka.png | Bin 0 -> 22340 bytes dashboard/portainer.png | Bin 0 -> 7071 bytes dashboard/radaradmin.html | 212 ++++++++++++++++++++++++++++++++++++++ dashboard/tomcat.png | Bin 0 -> 18593 bytes 6 files changed, 215 insertions(+) create mode 100644 dashboard/README.md create mode 100644 dashboard/cadvisor.png create mode 100644 dashboard/kafka.png create mode 100644 dashboard/portainer.png create mode 100644 dashboard/radaradmin.html create mode 100644 dashboard/tomcat.png diff --git a/dashboard/README.md b/dashboard/README.md new file mode 100644 index 000000000..b3eaabbda --- /dev/null +++ b/dashboard/README.md @@ -0,0 +1,3 @@ +# RADAR-Admin Dashboard + +First basic example of RADAR-CNS administrative dashboard. The vast majority of information available on this dashboard is hardcoded. At present, only the IP address of the machine hosting the RADAR-CNS Platform can be parametrised. 
\ No newline at end of file
diff --git a/dashboard/cadvisor.png b/dashboard/cadvisor.png
new file mode 100644
index 0000000000000000000000000000000000000000..e70ce37bde92aaa5fa3c9d094412af256a3703f1
GIT binary patch
literal 25022
[base85-encoded PNG data omitted]

diff --git a/dashboard/kafka.png b/dashboard/kafka.png
new file mode 100644
GIT binary patch
literal 22340
[base85-encoded PNG data omitted]

diff --git a/dashboard/portainer.png b/dashboard/portainer.png
new file mode 100644
index 0000000000000000000000000000000000000000..f599f98dacfdd5938639a24dc94f673dc937b91f
GIT binary patch
literal 7071
[base85-encoded PNG data omitted]
diff --git a/dashboard/radaradmin.html b/dashboard/radaradmin.html
new file mode 100644
[static HTML page titled "RADAR-CNS"; the tables it renders are summarised below]

Platform components:

| Zookeeper | Kafka Brokers | Schema Registry | REST Proxy | HDFS | MongoDB-Connector | HDFS-Connector |
|---|---|---|---|---|---|---|
| zookeeper-1:2888:3888 | PLAINTEXT://kafka-1:9092, PLAINTEXT://kafka-2:9092, PLAINTEXT://kafka-3:9092 | http://schema-registry-1:8081 | http://rest-proxy-1:8082 | hdfs://hdfs-namenode:8020 | radar-mongodb-connector:8083 | radar-hdfs-connector:8083 |

| MongoDB | REST API | Dashboard | SMTP | cAdvisor | Portainer |
|---|---|---|---|---|---|
| hotstorage:27017:28017 | http://rest-api:8080 | http://dashboard:3000 | smtp:25 | cadvisor:8181 | portainer:8182 |

Test devices:

| Device | IP | User | Paired Empatica sensors |
|---|---|---|---|
| UDOO 110 | 192.168.1.110 | KCLTest110 | Empatica-A002E5 (00:07:80:1F:53:1E), Empatica-A0027E (00:07:80:1F:52:D7) |
| UDOO 111 | 192.168.1.111 | KCLTest111 | Empatica-A004FA (00:07:80:1F:52:F3), Empatica-A003D7 (00:07:80:1F:17:6B) |
| UDOO 112 | 192.168.1.112 | KCLTest112 | Empatica-A00505 (00:07:80:1F:53:51), Empatica-A00B1E (00:07:80:A7:BF:7B) |
| UDOO 113 | 192.168.1.113 | KCLTest113 | Empatica-A00285 (00:07:80:1F:53:1C), Empatica-A002EC (00:07:80:1F:17:68) |
diff --git a/dashboard/tomcat.png b/dashboard/tomcat.png
new file mode 100644
index 0000000000000000000000000000000000000000..50003ff9b63d73321ecf5772b58ebe9e2aae1df0
GIT binary patch
literal 18593
[base85-encoded PNG data omitted]

From 7da4208cbfadb63061d8b7eaf13757c4bc4e7b0d Mon Sep 17 00:00:00 2001
From: Joris Borgdorff
Date: Mon, 10 Apr 2017 12:34:17 +0200
Subject: [PATCH 094/197] Added a Graylog containerset

---
 dcompose-stack/logging/.gitignore         |  1 +
 dcompose-stack/logging/README.md          |  9 +++++
 dcompose-stack/logging/docker-compose.yml | 49 +++++++++++++++++++++++
 dcompose-stack/logging/env.template       | 10 +++++
 4 files changed, 69 insertions(+)
 create mode 100644 dcompose-stack/logging/.gitignore
 create mode 100644 dcompose-stack/logging/README.md
 create mode 100644 dcompose-stack/logging/docker-compose.yml
 create mode 100644 dcompose-stack/logging/env.template

diff --git a/dcompose-stack/logging/.gitignore b/dcompose-stack/logging/.gitignore
new file mode 100644
index 000000000..f10862a65
--- /dev/null
+++ b/dcompose-stack/logging/.gitignore
@@ -0,0 +1 @@
+/.env
diff --git a/dcompose-stack/logging/README.md b/dcompose-stack/logging/README.md
new file mode 100644
index 000000000..b0578428f
--- /dev/null
+++ b/dcompose-stack/logging/README.md
@@ -0,0 +1,9 @@
+# Docker logging with Graylog2
+
+This directory sets up a graylog2 instance that docker can stream data to.
+
+## Usage
+
+Set up this container by moving `env.template` to `.env` and editing it. See instructions inside the `env.template` on how to set each variable.
+
+Then go to the [Graylog dashboard](http://localhost:9000). Log in with your chosen password, and navigate to `System -> Inputs`. Choose `GELF UDP` as a source and click `Launch new input`. Set the option to allow Global logs, and name the input `RADAR-Docker`. Now your Graylog instance is ready to collect data from docker on the host it is running on, using the GELF driver with URL `udp://localhost:12201` (replace `localhost` with the hostname where the Graylog is running, if needed).
diff --git a/dcompose-stack/logging/docker-compose.yml b/dcompose-stack/logging/docker-compose.yml new file mode 100644 index 000000000..aa2cfd979 --- /dev/null +++ b/dcompose-stack/logging/docker-compose.yml @@ -0,0 +1,49 @@ +--- +version: '3' + +networks: + graylog: + driver: bridge + +volumes: + mongo: {} + elasticsearch: {} + graylog: {} + +services: + + mongo: + image: mongo:3.4.3 + networks: + - graylog + volumes: + - mongo:/data/db + + elasticsearch: + image: elasticsearch:2.4.4-alpine + command: elasticsearch -Des.cluster.name="graylog" + networks: + - graylog + volumes: + - elasticsearch:/usr/share/elasticsearch/data + + graylog: + image: graylog2/server:2.2.3-1 + networks: + - graylog + depends_on: + - mongo + - elasticsearch + links: + - mongo + - elasticsearch + environment: + - GRAYLOG_PASSWORD_SECRET + - GRAYLOG_ROOT_PASSWORD_SHA2 + - GRAYLOG_WEB_ENDPOINT_URI + ports: + - "9000:9000" + - "12201:12201/udp" + volumes: + - graylog:/usr/share/graylog/data/journal + diff --git a/dcompose-stack/logging/env.template b/dcompose-stack/logging/env.template new file mode 100644 index 000000000..e7ced3fb0 --- /dev/null +++ b/dcompose-stack/logging/env.template @@ -0,0 +1,10 @@ +# Set a secret pepper that the passwords will be hashed with +# Minimum length is 16 characters +GRAYLOG_PASSWORD_SECRET= + +# Set a password for the admin user. Obtain the SHA2 of the +# password by running echo -n "mypassword" | shasum -a 256 +GRAYLOG_ROOT_PASSWORD_SHA2= + +# Web address for Graylog to run on +GRAYLOG_WEB_ENDPOINT_URI=http://127.0.0.1:9000/api From 53053357dfde668b9a425d847380c4f8746124f9 Mon Sep 17 00:00:00 2001 From: Joris Borgdorff Date: Mon, 10 Apr 2017 12:38:19 +0200 Subject: [PATCH 095/197] Renamed .env to graylog.env --- dcompose-stack/logging/.gitignore | 2 +- dcompose-stack/logging/README.md | 2 +- dcompose-stack/logging/docker-compose.yml | 6 ++---- .../logging/{env.template => graylog.env.template} | 0 4 files changed, 4 insertions(+), 6 deletions(-) rename dcompose-stack/logging/{env.template => graylog.env.template} (100%) diff --git a/dcompose-stack/logging/.gitignore b/dcompose-stack/logging/.gitignore index f10862a65..03bd4129b 100644 --- a/dcompose-stack/logging/.gitignore +++ b/dcompose-stack/logging/.gitignore @@ -1 +1 @@ -/.env +*.env diff --git a/dcompose-stack/logging/README.md b/dcompose-stack/logging/README.md index b0578428f..aec30d55c 100644 --- a/dcompose-stack/logging/README.md +++ b/dcompose-stack/logging/README.md @@ -4,6 +4,6 @@ This directory sets up a graylog2 instance that docker can stream data to. ## Usage -Set up this container by moving `env.template` to `.env` and editing it. See instructions inside the `env.template` on how to set each variable. +Set up this container by moving `graylog.env.template` to `graylog.env` and editing it. See instructions inside the `graylog.env.template` on how to set each variable. Then go to the [Graylog dashboard](http://localhost:9000). Log in with your chosen password, and navigate to `System -> Inputs`. Choose `GELF UDP` as a source and click `Launch new input`. Set the option to allow Global logs, and name the input `RADAR-Docker`. Now your Graylog instance is ready to collect data from docker on the host it is running on, using the GELF driver with URL `udp://localhost:12201` (replace `localhost` with the hostname where the Graylog is running, if needed). 
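For reference, the two secrets that the renamed `graylog.env` file needs can be generated on the command line. This is only an illustrative sketch (not part of the patch): the variable names come from the template above, `openssl` and `shasum` are assumed to be available, and `mypassword` is a placeholder admin password.

```shell
# Random pepper of at least 16 characters, used by Graylog to hash passwords
GRAYLOG_PASSWORD_SECRET=$(openssl rand -base64 24)

# SHA-256 of the chosen admin password, as expected by GRAYLOG_ROOT_PASSWORD_SHA2
GRAYLOG_ROOT_PASSWORD_SHA2=$(echo -n "mypassword" | shasum -a 256 | awk '{print $1}')

# Write the environment file read by the graylog service
cat > graylog.env <<EOF
GRAYLOG_PASSWORD_SECRET=${GRAYLOG_PASSWORD_SECRET}
GRAYLOG_ROOT_PASSWORD_SHA2=${GRAYLOG_ROOT_PASSWORD_SHA2}
GRAYLOG_WEB_ENDPOINT_URI=http://127.0.0.1:9000/api
EOF
```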
diff --git a/dcompose-stack/logging/docker-compose.yml b/dcompose-stack/logging/docker-compose.yml index aa2cfd979..63088a2f1 100644 --- a/dcompose-stack/logging/docker-compose.yml +++ b/dcompose-stack/logging/docker-compose.yml @@ -37,10 +37,8 @@ services: links: - mongo - elasticsearch - environment: - - GRAYLOG_PASSWORD_SECRET - - GRAYLOG_ROOT_PASSWORD_SHA2 - - GRAYLOG_WEB_ENDPOINT_URI + env_file: + - ./graylog.env ports: - "9000:9000" - "12201:12201/udp" diff --git a/dcompose-stack/logging/env.template b/dcompose-stack/logging/graylog.env.template similarity index 100% rename from dcompose-stack/logging/env.template rename to dcompose-stack/logging/graylog.env.template From fcc2d0cc99d8cb57855452800a96eec95aabc75b Mon Sep 17 00:00:00 2001 From: Joris Borgdorff Date: Mon, 10 Apr 2017 12:43:34 +0200 Subject: [PATCH 096/197] Explicitly open the UDP port --- dcompose-stack/logging/docker-compose.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/dcompose-stack/logging/docker-compose.yml b/dcompose-stack/logging/docker-compose.yml index 63088a2f1..3a79393fa 100644 --- a/dcompose-stack/logging/docker-compose.yml +++ b/dcompose-stack/logging/docker-compose.yml @@ -41,7 +41,7 @@ services: - ./graylog.env ports: - "9000:9000" - - "12201:12201/udp" + - "12201/udp:12201/udp" volumes: - graylog:/usr/share/graylog/data/journal From 9434f8bf954701b7cc531b74873488fca26aa705 Mon Sep 17 00:00:00 2001 From: Joris Borgdorff Date: Mon, 10 Apr 2017 12:53:37 +0200 Subject: [PATCH 097/197] Added logging to readme --- README.md | 35 ++++++++++++++++++-------------- dcompose-stack/logging/README.md | 15 ++++++++++++++ 2 files changed, 35 insertions(+), 15 deletions(-) diff --git a/README.md b/README.md index 31a2516e3..b512e7e11 100644 --- a/README.md +++ b/README.md @@ -5,30 +5,28 @@ The dockerized RADAR stack for deploying the RADAR-CNS platform. Component repos ## Installation instructions To install RADAR-CNS stack, do the following: -1. Install Docker Engine - * Installation for macOS (Follow [installer](https://docs.docker.com/engine/installation/mac/) from Docker) - * Installation for Windows ( Follow [installer](https://docs.docker.com/docker-for-windows/ from Docker) - * Installation for Ubuntu (Follow [Docker instructions](https://docs.docker.com/engine/installation/linux/ubuntu/)) - * For other Linux distributions, install Docker engine from [the list by Docker](https://docs.docker.com/engine/installation/). Install `docker-compose` using the [installation guide](https://docs.docker.com/compose/install/) or by following the [wiki](https://github.com/RADAR-CNS/RADAR-Docker/wiki/How-to-set-up-docker-on-ubuntu#install-docker-compose). -2. Install `docker-compose` by following instructions [here](https://github.com/RADAR-CNS/RADAR-Docker/wiki/How-to-set-up-docker-on-ubuntu#install-docker-compose) +1. Install [Docker Engine](https://docs.docker.com/engine/installation/) +2. Install `docker-compose` using the [installation guide](https://docs.docker.com/compose/install/) or by following our [wiki](https://github.com/RADAR-CNS/RADAR-Docker/wiki/How-to-set-up-docker-on-ubuntu#install-docker-compose). 3. Verify the Docker installation by running on the command-line: - ```shell - sudo docker --version - sudo docker-compose --version - ``` - This should show Docker version 1.12 or later and docker-compose version 1.9.0 or later. + ```shell + docker --version + docker-compose --version + ``` + This should show Docker version 1.12 or later and docker-compose version 1.9.0 or later. 4. 
Install [git](https://git-scm.com/book/en/v2/Getting-Started-Installing-Git) for your platform. - 1. For Ubuntu - ```shell - sudo apt-get install git - ``` + 1. For Ubuntu + + ```shell + sudo apt-get install git + ``` 5. Clone [RADAR-Docker](https://github.com/RADAR-CNS/RADAR-Docker) repository from GitHub. ```shell git clone https://github.com/RADAR-CNS/RADAR-Docker.git ``` + 6. Install required component stack following the instructions below. ## Usage @@ -164,14 +162,21 @@ To start RADAR-CNS stack on a single node setup after installing, run cd RADAR-Docker/dcompose-stack/radar-hadoop-cp-stack/ sudo ./start-radar-stack.sh ``` + #### cAdvisor + cAdvisor (Container Advisor) provides container users an understanding of the resource usage and performance characteristics of their running containers. To view current resource performance,if running locally, try [http://localhost:8181](http://localhost:8181). This will bring up the built-in Web UI. Clicking on `/docker` in `Subcontainers` takes you to a new window with all of the Docker containers listed individually. #### Portainer + Portainer provides simple interactive UI-based docker management. If running locally, try [http://localhost:8182](http://localhost:8182) for portainer's UI. To set-up portainer follow this [link](https://www.ostechnix.com/portainer-an-easiest-way-to-manage-docker/). +### Logging + +Set up logging by going to the `dcompose-stack/logging` directory and follow the README there. + ## Work in progress The two following stacks will not work on with only Docker and docker-compose. For the Kerberos stack, the Kerberos image is not public. For the multi-host setup, also docker-swarm and Docker beta versions are needed. diff --git a/dcompose-stack/logging/README.md b/dcompose-stack/logging/README.md index aec30d55c..290d120cf 100644 --- a/dcompose-stack/logging/README.md +++ b/dcompose-stack/logging/README.md @@ -6,4 +6,19 @@ This directory sets up a graylog2 instance that docker can stream data to. Set up this container by moving `graylog.env.template` to `graylog.env` and editing it. See instructions inside the `graylog.env.template` on how to set each variable. +Start the logging container with +```shell +sudo docker-compose up -d +``` +On macOS, omit `sudo` in the command above. + Then go to the [Graylog dashboard](http://localhost:9000). Log in with your chosen password, and navigate to `System -> Inputs`. Choose `GELF UDP` as a source and click `Launch new input`. Set the option to allow Global logs, and name the input `RADAR-Docker`. Now your Graylog instance is ready to collect data from docker on the host it is running on, using the GELF driver with URL `udp://localhost:12201` (replace `localhost` with the hostname where the Graylog is running, if needed). + +Now, other docker containers can be configured to use the `gelf` log driver. 
In a docker-compose file, add the following lines to a service to let it use Graylog: +```yaml +logging: + driver: gelf + options: + gelf-address: udp://localhost:12201 +``` +Now all docker logs of that service will be forwarded to Graylog From 335dc19180185d6ad9112173411c84a9237d6703 Mon Sep 17 00:00:00 2001 From: Joris Borgdorff Date: Wed, 26 Apr 2017 11:12:20 +0200 Subject: [PATCH 098/197] Cleaned up scripts and environment variables --- dcompose-stack/radar-cp-hadoop-stack/.env | 9 --- .../radar-cp-hadoop-stack/.gitignore | 1 + .../radar-cp-hadoop-stack/README.md | 15 ++--- .../radar-cp-hadoop-stack/docker-compose.yml | 3 +- .../radar-cp-hadoop-stack/env.template | 12 ++++ .../install-radar-stack.sh | 67 +++++++------------ .../reboot-radar-stack.sh | 29 +------- .../start-radar-stack.sh | 31 +-------- .../radar-cp-hadoop-stack/stop-radar-stack.sh | 4 +- dcompose-stack/radar-cp-hadoop-stack/util.sh | 41 ++++++++++++ 10 files changed, 94 insertions(+), 118 deletions(-) delete mode 100644 dcompose-stack/radar-cp-hadoop-stack/.env create mode 100644 dcompose-stack/radar-cp-hadoop-stack/env.template create mode 100644 dcompose-stack/radar-cp-hadoop-stack/util.sh diff --git a/dcompose-stack/radar-cp-hadoop-stack/.env b/dcompose-stack/radar-cp-hadoop-stack/.env deleted file mode 100644 index b9cff2694..000000000 --- a/dcompose-stack/radar-cp-hadoop-stack/.env +++ /dev/null @@ -1,9 +0,0 @@ -HDFS_DATA_DIR_1=/usr/local/var/lib/docker/hdfs-data-1 -HDFS_DATA_DIR_2=/usr/local/var/lib/docker/hdfs-data-2 -HDFS_NAME_DIR_1=/usr/local/var/lib/docker/hdfs-name-1 -HDFS_NAME_DIR_2=/usr/local/var/lib/docker/hdfs-name-2 -MONGODB_DIR=/usr/local/var/lib/docker/mongodb -RADAR_TOPIC_LIST=android_empatica_e4_acceleration,android_empatica_e4_acceleration_output,android_empatica_e4_battery_level,android_empatica_e4_battery_level_output,android_empatica_e4_blood_volume_pulse,android_empatica_e4_blood_volume_pulse_output,android_empatica_e4_electrodermal_activity,android_empatica_e4_electrodermal_activity_output,android_empatica_e4_heartrate,android_empatica_e4_inter_beat_interval,android_empatica_e4_inter_beat_interval_output,android_empatica_e4_sensor_status,android_empatica_e4_sensor_status_output,android_empatica_e4_temperature,android_empatica_e4_temperature_output,application_server_status,application_record_counts,application_uptime,application_external_time -HOTSTORAGE_USERNAME= -HOTSTORAGE_PASSWORD=XXXXXXXX -HOTSTORAGE_NAME= diff --git a/dcompose-stack/radar-cp-hadoop-stack/.gitignore b/dcompose-stack/radar-cp-hadoop-stack/.gitignore index db7bb65a8..06a30dbe1 100644 --- a/dcompose-stack/radar-cp-hadoop-stack/.gitignore +++ b/dcompose-stack/radar-cp-hadoop-stack/.gitignore @@ -1 +1,2 @@ /smtp.env +/.env diff --git a/dcompose-stack/radar-cp-hadoop-stack/README.md b/dcompose-stack/radar-cp-hadoop-stack/README.md index 6fb51c7b6..847cced91 100644 --- a/dcompose-stack/radar-cp-hadoop-stack/README.md +++ b/dcompose-stack/radar-cp-hadoop-stack/README.md @@ -1,19 +1,18 @@ # RADAR-CNS with a HDFS connector -In the Dockerfile, 2 redundant HDFS volumes and 2 redundant directories are mounted. Set these directories in the `.env` file, and ensure that their parent directory exists. For proper redundancy, the directories should be set to different physical volumes. +## Installation -Modify `smtp.env.template` to set a SMTP host to send emails with, and move it to `smtp.env`. The configuration settings are passed to a [namshi/smtp](https://hub.docker.com/r/namshi/smtp/) Docker container. This container supports a.o. 
regular SMTP and GMail. +First move `env.template` file to .env and check and modify all its variables. -Then, create a docker `hadoop` network. +Modify `smtp.env.template` to set a SMTP host to send emails with, and move it to `smtp.env`. The configuration settings are passed to a [namshi/smtp](https://hub.docker.com/r/namshi/smtp/) Docker container. This container supports a.o. regular SMTP and GMail. -```shell -docker network create hadoop -``` +## Usage -Run the full setup with +Run ```shell -sudo docker-compose up -d +./install-radar-stack.sh ``` +to start all the RADAR services. Use the `(start|stop|reboot)-radar-stack.sh` to start, stop or reboot it. Data can be extracted from this setup by running: diff --git a/dcompose-stack/radar-cp-hadoop-stack/docker-compose.yml b/dcompose-stack/radar-cp-hadoop-stack/docker-compose.yml index f228f99f5..084934997 100644 --- a/dcompose-stack/radar-cp-hadoop-stack/docker-compose.yml +++ b/dcompose-stack/radar-cp-hadoop-stack/docker-compose.yml @@ -182,7 +182,8 @@ services: - "3000:3000" depends_on: - rest-api - + environment: + API_URI: http://${SERVER_NAME}:8080/api #---------------------------------------------------------------------------# # RADAR Cold Storage # diff --git a/dcompose-stack/radar-cp-hadoop-stack/env.template b/dcompose-stack/radar-cp-hadoop-stack/env.template new file mode 100644 index 000000000..36ca1cc35 --- /dev/null +++ b/dcompose-stack/radar-cp-hadoop-stack/env.template @@ -0,0 +1,12 @@ +SERVER_NAME=localhost +RADAR_TOPIC_LIST=android_empatica_e4_acceleration,android_empatica_e4_acceleration_output,android_empatica_e4_battery_level,android_empatica_e4_battery_level_output,android_empatica_e4_blood_volume_pulse,android_empatica_e4_blood_volume_pulse_output,android_empatica_e4_electrodermal_activity,android_empatica_e4_electrodermal_activity_output,android_empatica_e4_heartrate,android_empatica_e4_inter_beat_interval,android_empatica_e4_inter_beat_interval_output,android_empatica_e4_sensor_status,android_empatica_e4_sensor_status_output,android_empatica_e4_temperature,android_empatica_e4_temperature_output,application_server_status,application_record_counts,application_uptime,application_external_time,android_phone_battery_level,android_phone_acceleration,android_phone_light,android_pebble2_acceleration,android_pebble2_battery_level,android_pebble2_heart_rate,android_pebble2_heart_rate_filtered +RADAR_RAW_TOPIC_LIST=android_empatica_e4_acceleration,android_empatica_e4_battery_level,android_empatica_e4_blood_volume_pulse,android_empatica_e4_electrodermal_activity,android_empatica_e4_inter_beat_interval,android_empatica_e4_sensor_status,android_empatica_e4_temperature,application_server_status,application_record_counts,application_uptime,application_external_time,android_phone_battery_level,android_phone_acceleration,android_phone_light,android_pebble2_acceleration,android_pebble2_battery_level,android_pebble2_heart_rate,android_pebble2_heart_rate_filtered +RADAR_AGG_TOPIC_LIST=android_empatica_e4_acceleration_output, android_empatica_e4_battery_level_output, android_empatica_e4_blood_volume_pulse_output, android_empatica_e4_electrodermal_activity_output, android_empatica_e4_heartrate, android_empatica_e4_inter_beat_interval_output, android_empatica_e4_sensor_status_output, android_empatica_e4_temperature_output, application_server_status, application_record_counts, application_uptime, application_external_time +HOTSTORAGE_USERNAME= +HOTSTORAGE_PASSWORD=XXXXXXXX +HOTSTORAGE_NAME= 
+HDFS_DATA_DIR_1=/usr/local/var/lib/docker/hdfs-data-1 +HDFS_DATA_DIR_2=/usr/local/var/lib/docker/hdfs-data-2 +HDFS_NAME_DIR_1=/usr/local/var/lib/docker/hdfs-name-1 +HDFS_NAME_DIR_2=/usr/local/var/lib/docker/hdfs-name-2 +MONGODB_DIR=/usr/local/var/lib/docker/mongodb diff --git a/dcompose-stack/radar-cp-hadoop-stack/install-radar-stack.sh b/dcompose-stack/radar-cp-hadoop-stack/install-radar-stack.sh index 2541eb836..162fd4466 100755 --- a/dcompose-stack/radar-cp-hadoop-stack/install-radar-stack.sh +++ b/dcompose-stack/radar-cp-hadoop-stack/install-radar-stack.sh @@ -1,55 +1,34 @@ #!/bin/bash -RADAR_RAW_TOPIC_LIST="android_empatica_e4_acceleration,android_empatica_e4_battery_level,android_empatica_e4_blood_volume_pulse,android_empatica_e4_electrodermal_activity,android_empatica_e4_inter_beat_interval,android_empatica_e4_sensor_status,android_empatica_e4_temperature,application_server_status,application_record_counts,application_uptime,application_external_time" -RADAR_AGG_TOPIC_LIST="android_empatica_e4_acceleration_output, android_empatica_e4_battery_level_output, android_empatica_e4_blood_volume_pulse_output, android_empatica_e4_electrodermal_activity_output, android_empatica_e4_heartrate, android_empatica_e4_inter_beat_interval_output, android_empatica_e4_sensor_status_output, android_empatica_e4_temperature_output, application_server_status, application_record_counts, application_uptime, application_external_time" - -command_exists() { - command -v "$@" > /dev/null 2>&1 -} - -echo "Linux version: "$(uname -a) - -if command_exists docker - then - echo "Docker version: "$(docker --version) - else - echo "RADAR-CNS cannot start without Docker. Please, install Docker and then try again" - exit 1 -fi - -if command_exists docker-compose - then - echo "Docker-compose version: "$(docker-compose --version) - else - echo "RADAR-CNS cannot start without docker-compose. Please, install docker-compose and then try again" - exit 1 +. ./util.sh +. ./.env + +check_parent_exists HDFS_DATA_DIR_1 ${HDFS_DATA_DIR_1} +check_parent_exists HDFS_DATA_DIR_2 ${HDFS_DATA_DIR_2} +check_parent_exists HDFS_NAME_DIR_1 ${HDFS_NAME_DIR_1} +check_parent_exists HDFS_NAME_DIR_2 ${HDFS_NAME_DIR_2} +check_parent_exists MONGODB_DIR ${MONGODB_DIR} + +if [ -z $(sudo-docker network ls --format '{{.Name}}' | grep "^hadoop$") ]; then + echo "==> Creating docker network - hadoop" + sudo-docker network create hadoop +else + echo "==> Creating docker network - hadoop ALREADY EXISTS" fi -if [ ! -d /usr/local/var/lib/docker ]; then - echo "RADAR-CNS stores HDFS volumes at /usr/local/var/lib/docker. 
If this folder does not exist, please create the entire path and then try again" - exit 1 -fi - -echo "==> Creating docker network - hadoop" -sudo docker network create hadoop - echo "==> Setting MongoDB Connector" -# Extract credentials from .env file -username=$(cat .env | grep HOTSTORAGE_USERNAME) -password=$(cat .env | grep HOTSTORAGE_PASSWORD) -database=$(cat .env | grep HOTSTORAGE_NAME) -username="$(echo -e "${username:20}" | tr -d '[:space:]' )" -password="$(echo -e "${password:20}" | tr -d '[:space:]' )" -database="$(echo -e "${database:16}" | tr -d '[:space:]' )" + # Update sink-mongo.properties -sed -i '/mongo.username=/c\mongo.username='$username sink-mongo.properties -sed -i '/mongo.password=/c\mongo.password='$password sink-mongo.properties -sed -i '/mongo.database=/c\mongo.database='$database sink-mongo.properties +sed -i '' 's/\(mongo.username=\).*$/\1'${HOTSTORAGE_USERNAME}'/' sink-mongo.properties +sed -i '' 's/\(mongo.password=\).*$/\1'${HOTSTORAGE_PASSWORD}'/' sink-mongo.properties +sed -i '' 's/\(mongo.database=\).*$/\1'${HOTSTORAGE_NAME}'/' sink-mongo.properties +sed -i '' 's/\(server_name[[:space:]]*\).*$/\1'${SERVER_NAME}'/' nginx.conf + # Set topics -sed -i '/topics=/c\topics='"$RADAR_AGG_TOPIC_LIST" sink-mongo.properties +sed -i '' 's/\(topics=\).*$/\1'${RADAR_AGG_TOPIC_LIST}'/' sink-mongo.properties echo "==> Setting HDFS Connector" -sed -i '/topics=/c\topics='"$RADAR_RAW_TOPIC_LIST" sink-hdfs.properties +sed -i '' 's|\(topics=\).*$|\1'${RADAR_RAW_TOPIC_LIST}'|' sink-hdfs.properties echo "==> Starting RADAR-CNS Platform" -sudo docker-compose up -d +sudo-docker-compose up --force-recreate -d diff --git a/dcompose-stack/radar-cp-hadoop-stack/reboot-radar-stack.sh b/dcompose-stack/radar-cp-hadoop-stack/reboot-radar-stack.sh index 50768892b..1a2c76ae8 100755 --- a/dcompose-stack/radar-cp-hadoop-stack/reboot-radar-stack.sh +++ b/dcompose-stack/radar-cp-hadoop-stack/reboot-radar-stack.sh @@ -1,31 +1,6 @@ #!/bin/bash -command_exists() { - command -v "$@" > /dev/null 2>&1 -} - -echo "Linux version: "$(uname -a) - -if command_exists docker - then - echo "Docker version: "$(docker --version) - else - echo "RADAR-CNS cannot start without Docker. Please, install Docker and then try again" - exit 1 -fi - -if command_exists docker-compose - then - echo "Docker-compose version: "$(docker-compose --version) - else - echo "RADAR-CNS cannot start without docker-compose. Please, install docker-compose and then try again" - exit 1 -fi - -if [ ! -d /usr/local/var/lib/docker ]; then - echo "RADAR-CNS stores HDFS volumes at /usr/local/var/lib/docker. If this folder does not exist, please create the entire path and then try again" - exit 1 -fi +. ./util.sh echo "==> Restarting RADAR-CNS Platform" -sudo docker-compose restart \ No newline at end of file +sudo-docker-compose restart diff --git a/dcompose-stack/radar-cp-hadoop-stack/start-radar-stack.sh b/dcompose-stack/radar-cp-hadoop-stack/start-radar-stack.sh index 4bfda70a0..b88ec68d3 100755 --- a/dcompose-stack/radar-cp-hadoop-stack/start-radar-stack.sh +++ b/dcompose-stack/radar-cp-hadoop-stack/start-radar-stack.sh @@ -1,31 +1,6 @@ #!/bin/bash -command_exists() { - command -v "$@" > /dev/null 2>&1 -} +. ./util.sh -echo "Linux version: "$(uname -a) - -if command_exists docker - then - echo "Docker version: "$(docker --version) - else - echo "RADAR-CNS cannot start without Docker. 
Please, install Docker and then try again" - exit 1 -fi - -if command_exists docker-compose - then - echo "Docker-compose version: "$(docker-compose --version) - else - echo "RADAR-CNS cannot start without docker-compose. Please, install docker-compose and then try again" - exit 1 -fi - -if [ ! -d /usr/local/var/lib/docker ]; then - echo "RADAR-CNS stores HDFS volumes at /usr/local/var/lib/docker. If this folder does not exist, please create the entire path and then try again" - exit 1 -fi - -echo "==> Starting RADAR-CNS Platform" -sudo docker-compose start +echo "==> Starting radar-cns platform" +sudo-docker-compose start diff --git a/dcompose-stack/radar-cp-hadoop-stack/stop-radar-stack.sh b/dcompose-stack/radar-cp-hadoop-stack/stop-radar-stack.sh index 505aa969b..8dd22a4d5 100755 --- a/dcompose-stack/radar-cp-hadoop-stack/stop-radar-stack.sh +++ b/dcompose-stack/radar-cp-hadoop-stack/stop-radar-stack.sh @@ -1,4 +1,6 @@ #!/bin/bash +. ./util.sh + echo "==> Stopping RADAR-CNS Stack" -sudo docker-compose stop \ No newline at end of file +sudo-docker-compose stop diff --git a/dcompose-stack/radar-cp-hadoop-stack/util.sh b/dcompose-stack/radar-cp-hadoop-stack/util.sh new file mode 100644 index 000000000..0a49984ce --- /dev/null +++ b/dcompose-stack/radar-cp-hadoop-stack/util.sh @@ -0,0 +1,41 @@ +#!/bin/bash + +check_command_exists() { + if command -v "$1" > /dev/null 2>&1; then + echo "$1 version: $($1 --version)" + else + echo "RADAR-CNS cannot start without $1. Please, install $1 and then try again" + exit 1 + fi +} + +check_parent_exists() { + if [ -z "$2" ]; then + echo "Directory variable $1 is not set in .env" + fi + PARENT=$(dirname $2) + if [ ! -d "${PARENT}" ]; then + echo "RADAR-CNS stores volumes at ${PARENT}. If this folder does not exist, please create the entire path and then try again" + exit 1 + fi +} + +sudo-docker() { + if [ $(uname) == "Darwin" ]; then + docker "$@" + else + sudo docker "$@" + fi +} + +sudo-docker-compose() { + if [ $(uname) == "Darwin" ]; then + docker-compose "$@" + else + sudo docker-compose "$@" + fi +} + +echo "OS version: "$(uname -a) +check_command_exists docker +check_command_exists docker-compose From 6900a104a7d82da1e7b434c7e3532fa77b1b4414 Mon Sep 17 00:00:00 2001 From: Joris Borgdorff Date: Wed, 26 Apr 2017 11:15:55 +0200 Subject: [PATCH 099/197] Added volumes for the persistence of containers --- .../radar-cp-hadoop-stack/docker-compose.yml | 52 +++++++++++++++++-- 1 file changed, 49 insertions(+), 3 deletions(-) diff --git a/dcompose-stack/radar-cp-hadoop-stack/docker-compose.yml b/dcompose-stack/radar-cp-hadoop-stack/docker-compose.yml index 084934997..aef44b2a8 100644 --- a/dcompose-stack/radar-cp-hadoop-stack/docker-compose.yml +++ b/dcompose-stack/radar-cp-hadoop-stack/docker-compose.yml @@ -1,5 +1,5 @@ --- -version: '2' +version: '2.1' networks: zookeeper: @@ -12,6 +12,15 @@ networks: external: true mail: driver: bridge + monitor: + driver: bridge + hotstorage: + driver: bridge + +volumes: + kafka-1-data: {} + kafka-2-data: {} + kafka-3-data: {} services: @@ -22,6 +31,9 @@ services: image: confluentinc/cp-zookeeper:3.1.2-1 networks: - zookeeper + volumes: + - /var/lib/zookeeper/data + - /var/lib/zookeeper/logs environment: ZOOKEEPER_SERVER_ID: 1 ZOOKEEPER_CLIENT_PORT: 2181 @@ -38,8 +50,11 @@ services: networks: - kafka - zookeeper + volumes: + - kafka-1-data:/var/lib/kafka/data depends_on: - zookeeper-1 + restart: always environment: KAFKA_BROKER_ID: 1 KAFKA_ZOOKEEPER_CONNECT: zookeeper-1:2181 @@ -51,8 +66,11 @@ services: networks: - 
kafka - zookeeper + volumes: + - kafka-2-data:/var/lib/kafka/data depends_on: - kafka-1 + restart: always environment: KAFKA_BROKER_ID: 2 KAFKA_ZOOKEEPER_CONNECT: zookeeper-1:2181 @@ -64,6 +82,8 @@ services: networks: - kafka - zookeeper + volumes: + - kafka-3-data:/var/lib/kafka/data depends_on: - kafka-2 environment: @@ -107,6 +127,7 @@ services: - schema-registry-1 ports: - "8082:8082" + restart: always environment: KAFKA_REST_ZOOKEEPER_CONNECT: zookeeper-1:2181 KAFKA_REST_LISTENERS: http://rest-proxy-1:8082 @@ -142,13 +163,14 @@ services: hotstorage: image: radarcns/radar-hotstorage:0.1 networks: - - api + - hotstorage volumes: - "${MONGODB_DIR}/db:/data/db" - "${MONGODB_DIR}/configdb:/data/configdb" ports: - "27017:27017" - "28017:28017" + restart: always environment: RADAR_USER: ${HOTSTORAGE_USERNAME} RADAR_PWD: ${HOTSTORAGE_PASSWORD} @@ -160,16 +182,23 @@ services: rest-api: image: radarcns/radar-restapi:0.1 networks: + - hotstorage - api ports: - "8080:8080" depends_on: - hotstorage + restart: always environment: MONGODB_USER: ${HOTSTORAGE_USERNAME} MONGODB_PASS: ${HOTSTORAGE_PASSWORD} MONGODB_DATABASE: ${HOTSTORAGE_NAME} MONGODB_HOST: hotstorage:27017 + healthcheck: + test: ["CMD", "curl", "-IX", "HEAD", "http://localhost:8080/radar/api/"] + interval: 1m + timeout: 5s + retries: 3 #---------------------------------------------------------------------------# # RADAR Dashboard # @@ -182,8 +211,14 @@ services: - "3000:3000" depends_on: - rest-api + restart: always environment: API_URI: http://${SERVER_NAME}:8080/api + healthcheck: + test: ["CMD", "curl", "-IX", "HEAD", "http://localhost:3000/"] + interval: 1m + timeout: 5s + retries: 3 #---------------------------------------------------------------------------# # RADAR Cold Storage # @@ -194,6 +229,7 @@ services: - hadoop volumes: - "${HDFS_DATA_DIR_1}:/hadoop/dfs/data" + restart: always environment: CORE_CONF_fs_defaultFS: hdfs://hdfs-namenode:8020 HDFS_CONF_dfs_replication: 2 @@ -204,6 +240,7 @@ services: - hadoop volumes: - "${HDFS_DATA_DIR_2}:/hadoop/dfs/data" + restart: always environment: CORE_CONF_fs_defaultFS: hdfs://hdfs-namenode:8020 HDFS_CONF_dfs_replication: 2 @@ -216,6 +253,7 @@ services: volumes: - "${HDFS_NAME_DIR_1}:/hadoop/dfs/name/1" - "${HDFS_NAME_DIR_2}:/hadoop/dfs/name/2" + restart: always environment: CLUSTER_NAME: radar-cns HDFS_CONF_dfs_namenode_name_dir: file:///hadoop/dfs/name/1,file:///hadoop/dfs/name/2 @@ -229,6 +267,7 @@ services: - mail volumes: - /var/spool/exim + restart: always env_file: - smtp.env @@ -243,7 +282,7 @@ services: networks: - zookeeper - kafka - - api + - hotstorage depends_on: - zookeeper-1 - kafka-1 @@ -364,6 +403,7 @@ services: - smtp volumes: - ./radar.yml:/etc/radar.yml + restart: always environment: KAFKA_REST_PROXY: http://rest-proxy-1:8082 TOPIC_LIST: ${RADAR_TOPIC_LIST} @@ -373,6 +413,8 @@ services: #---------------------------------------------------------------------------# cadvisor: image: google/cadvisor:v0.24.1 + networks: + - monitor ports: - "8181:8080" volumes: @@ -380,10 +422,14 @@ services: - "/var/run:/var/run:rw" - "/sys:/sys:ro" - "/var/lib/docker/:/var/lib/docker:ro" + restart: always portainer: image: portainer/portainer:1.11.1 + networks: + - monitor ports: - "8182:9000" volumes: - "/var/run/docker.sock:/var/run/docker.sock" + restart: always From 5d8f9b5e1da2f4166e54ec584c5bbf0b79f4e01e Mon Sep 17 00:00:00 2001 From: Joris Borgdorff Date: Wed, 26 Apr 2017 11:30:42 +0200 Subject: [PATCH 100/197] Added nginx to the stack --- 
.../radar-cp-hadoop-stack/docker-compose.yml | 23 ++++++++ .../radar-cp-hadoop-stack/nginx.conf | 59 +++++++++++++++++++ 2 files changed, 82 insertions(+) create mode 100644 dcompose-stack/radar-cp-hadoop-stack/nginx.conf diff --git a/dcompose-stack/radar-cp-hadoop-stack/docker-compose.yml b/dcompose-stack/radar-cp-hadoop-stack/docker-compose.yml index aef44b2a8..519a1b859 100644 --- a/dcompose-stack/radar-cp-hadoop-stack/docker-compose.yml +++ b/dcompose-stack/radar-cp-hadoop-stack/docker-compose.yml @@ -100,6 +100,7 @@ services: networks: - kafka - zookeeper + - api depends_on: - kafka-1 - kafka-2 @@ -120,6 +121,7 @@ services: networks: - kafka - zookeeper + - api depends_on: - kafka-1 - kafka-2 @@ -433,3 +435,24 @@ services: volumes: - "/var/run/docker.sock:/var/run/docker.sock" restart: always + + #---------------------------------------------------------------------------# + # Webserver # + #---------------------------------------------------------------------------# + webserver: + image: nginx:1.12-alpine + restart: always + networks: + - api + - monitor + depends_on: + - cadvisor + - portainer + - rest-api + - schema-registry-1 + - rest-proxy-1 + - dashboard + ports: + - "80:80" + volumes: + - "./nginx.conf:/etc/nginx/nginx.conf:ro" diff --git a/dcompose-stack/radar-cp-hadoop-stack/nginx.conf b/dcompose-stack/radar-cp-hadoop-stack/nginx.conf new file mode 100644 index 000000000..ad867be23 --- /dev/null +++ b/dcompose-stack/radar-cp-hadoop-stack/nginx.conf @@ -0,0 +1,59 @@ +worker_rlimit_nofile 8192; + +events { + worker_connections 4096; ## Default: 1024 +} + +http { + index index.html index.htm index.php; + + default_type application/octet-stream; + log_format main '$remote_addr - $remote_user [$time_local] $status ' + '"$request" $body_bytes_sent "$http_referer" ' + '"$http_user_agent" "$http_x_forwarded_for"'; + tcp_nodelay on; + + server { # simple reverse-proxy + listen 80; + server_name localhost + access_log /var/log/nginx/access.log; + error_log /var/log/nginx/error.log; + + location /kafka/ { + rewrite /kafka/(.*) /$1 break; + proxy_pass http://rest-proxy-1:8082; + proxy_redirect off; + proxy_set_header Host $host; + } + location /schema/ { + rewrite /schema/(.*) /$1 break; + proxy_pass http://schema-registry-1:8081; + proxy_redirect off; + proxy_set_header Host $host; + } + location /dashboard/ { + rewrite /dashboard(.*) /$1 break; + proxy_pass http://dashboard:3000; + proxy_redirect off; + proxy_set_header Host $host; + } + location /cadvisor/ { + rewrite /cadvisor/(.*) /$1 break; + proxy_pass http://cadvisor:8080; + proxy_redirect off; + proxy_set_header Host $host; + } + location /portainer/ { + rewrite /portainer/(.*) /$1 break; + proxy_pass http://portainer:9000; + proxy_redirect off; + proxy_set_header Host $host; + } + location /api/ { + rewrite /api/(.*) /radar/api/$1 break; + proxy_pass http://rest-api:8080; + proxy_redirect off; + proxy_set_header Host $host; + } + } +} From 945e62d203fff06b85803430178e9361c2b4dd08 Mon Sep 17 00:00:00 2001 From: Joris Borgdorff Date: Wed, 26 Apr 2017 11:31:12 +0200 Subject: [PATCH 101/197] Ignore output folder --- dcompose-stack/radar-cp-hadoop-stack/.gitignore | 1 + 1 file changed, 1 insertion(+) diff --git a/dcompose-stack/radar-cp-hadoop-stack/.gitignore b/dcompose-stack/radar-cp-hadoop-stack/.gitignore index 06a30dbe1..4be7db9b4 100644 --- a/dcompose-stack/radar-cp-hadoop-stack/.gitignore +++ b/dcompose-stack/radar-cp-hadoop-stack/.gitignore @@ -1,2 +1,3 @@ /smtp.env /.env +/output/ From 
18799b043ef066fd5c5fc504b56adb5a3871ce02 Mon Sep 17 00:00:00 2001 From: Joris Borgdorff Date: Wed, 26 Apr 2017 11:33:57 +0200 Subject: [PATCH 102/197] Removed all open ports except nginx; that is the gateway now. --- .../radar-cp-hadoop-stack/docker-compose.yml | 15 --------------- 1 file changed, 15 deletions(-) diff --git a/dcompose-stack/radar-cp-hadoop-stack/docker-compose.yml b/dcompose-stack/radar-cp-hadoop-stack/docker-compose.yml index 519a1b859..73b401363 100644 --- a/dcompose-stack/radar-cp-hadoop-stack/docker-compose.yml +++ b/dcompose-stack/radar-cp-hadoop-stack/docker-compose.yml @@ -106,8 +106,6 @@ services: - kafka-2 - kafka-3 restart: always - ports: - - "8081:8081" environment: SCHEMA_REGISTRY_KAFKASTORE_CONNECTION_URL: zookeeper-1:2181 SCHEMA_REGISTRY_HOST_NAME: schema-registry-1 @@ -127,8 +125,6 @@ services: - kafka-2 - kafka-3 - schema-registry-1 - ports: - - "8082:8082" restart: always environment: KAFKA_REST_ZOOKEEPER_CONNECT: zookeeper-1:2181 @@ -169,9 +165,6 @@ services: volumes: - "${MONGODB_DIR}/db:/data/db" - "${MONGODB_DIR}/configdb:/data/configdb" - ports: - - "27017:27017" - - "28017:28017" restart: always environment: RADAR_USER: ${HOTSTORAGE_USERNAME} @@ -186,8 +179,6 @@ services: networks: - hotstorage - api - ports: - - "8080:8080" depends_on: - hotstorage restart: always @@ -209,8 +200,6 @@ services: image: radarcns/radar-dashboard:latest networks: - api - ports: - - "3000:3000" depends_on: - rest-api restart: always @@ -417,8 +406,6 @@ services: image: google/cadvisor:v0.24.1 networks: - monitor - ports: - - "8181:8080" volumes: - "/:/rootfs:ro" - "/var/run:/var/run:rw" @@ -430,8 +417,6 @@ services: image: portainer/portainer:1.11.1 networks: - monitor - ports: - - "8182:9000" volumes: - "/var/run/docker.sock:/var/run/docker.sock" restart: always From 1386f84971008781cb513879242e9eac2c69d879 Mon Sep 17 00:00:00 2001 From: Joris Borgdorff Date: Wed, 26 Apr 2017 11:38:24 +0200 Subject: [PATCH 103/197] Use the latest stable docker-compose --- .travis.yml | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/.travis.yml b/.travis.yml index 8587b95c7..a24a7453c 100644 --- a/.travis.yml +++ b/.travis.yml @@ -3,7 +3,7 @@ sudo: required services: - docker env: - DOCKER_COMPOSE_VERSION: 1.9.0 + DOCKER_COMPOSE_VERSION: 1.11.2 before_install: - mkdir -p "$HOME/bin"; @@ -25,6 +25,7 @@ script: # With email and HDFS support - cd ../radar-cp-hadoop-stack - sudo docker network create hadoop + - export SERVER_NAME=localhost - export HDFS_DATA_DIR_1=$PWD/hdfs-data1 - export HDFS_DATA_DIR_2=$PWD/hdfs-data2 - export HDFS_NAME_DIR_1=$PWD/hdfs-name1 From 1ab8d0353c1def60a686b2b065b6d026077e8676 Mon Sep 17 00:00:00 2001 From: Joris Borgdorff Date: Wed, 26 Apr 2017 11:44:35 +0200 Subject: [PATCH 104/197] Use new docker version when doing tests --- .travis.yml | 2 ++ 1 file changed, 2 insertions(+) diff --git a/.travis.yml b/.travis.yml index a24a7453c..111e264cb 100644 --- a/.travis.yml +++ b/.travis.yml @@ -6,6 +6,8 @@ env: DOCKER_COMPOSE_VERSION: 1.11.2 before_install: + - sudo apt-get update + - sudo apt-get -y -o Dpkg::Options::="--force-confnew" install docker-engine - mkdir -p "$HOME/bin"; - export PATH="$PATH:$HOME/bin"; - curl -L https://github.com/docker/compose/releases/download/${DOCKER_COMPOSE_VERSION}/docker-compose-`uname -s`-`uname -m` > "$HOME/bin/docker-compose"; From f39706716512e27a6599eff1c89dec4c1fd4cc10 Mon Sep 17 00:00:00 2001 From: Joris Borgdorff Date: Wed, 26 Apr 2017 11:45:38 +0200 Subject: [PATCH 105/197] Remove spaces from env 
variables --- dcompose-stack/radar-cp-hadoop-stack/env.template | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/dcompose-stack/radar-cp-hadoop-stack/env.template b/dcompose-stack/radar-cp-hadoop-stack/env.template index 36ca1cc35..9d093f8b2 100644 --- a/dcompose-stack/radar-cp-hadoop-stack/env.template +++ b/dcompose-stack/radar-cp-hadoop-stack/env.template @@ -1,7 +1,7 @@ SERVER_NAME=localhost RADAR_TOPIC_LIST=android_empatica_e4_acceleration,android_empatica_e4_acceleration_output,android_empatica_e4_battery_level,android_empatica_e4_battery_level_output,android_empatica_e4_blood_volume_pulse,android_empatica_e4_blood_volume_pulse_output,android_empatica_e4_electrodermal_activity,android_empatica_e4_electrodermal_activity_output,android_empatica_e4_heartrate,android_empatica_e4_inter_beat_interval,android_empatica_e4_inter_beat_interval_output,android_empatica_e4_sensor_status,android_empatica_e4_sensor_status_output,android_empatica_e4_temperature,android_empatica_e4_temperature_output,application_server_status,application_record_counts,application_uptime,application_external_time,android_phone_battery_level,android_phone_acceleration,android_phone_light,android_pebble2_acceleration,android_pebble2_battery_level,android_pebble2_heart_rate,android_pebble2_heart_rate_filtered RADAR_RAW_TOPIC_LIST=android_empatica_e4_acceleration,android_empatica_e4_battery_level,android_empatica_e4_blood_volume_pulse,android_empatica_e4_electrodermal_activity,android_empatica_e4_inter_beat_interval,android_empatica_e4_sensor_status,android_empatica_e4_temperature,application_server_status,application_record_counts,application_uptime,application_external_time,android_phone_battery_level,android_phone_acceleration,android_phone_light,android_pebble2_acceleration,android_pebble2_battery_level,android_pebble2_heart_rate,android_pebble2_heart_rate_filtered -RADAR_AGG_TOPIC_LIST=android_empatica_e4_acceleration_output, android_empatica_e4_battery_level_output, android_empatica_e4_blood_volume_pulse_output, android_empatica_e4_electrodermal_activity_output, android_empatica_e4_heartrate, android_empatica_e4_inter_beat_interval_output, android_empatica_e4_sensor_status_output, android_empatica_e4_temperature_output, application_server_status, application_record_counts, application_uptime, application_external_time +RADAR_AGG_TOPIC_LIST=android_empatica_e4_acceleration_output,android_empatica_e4_battery_level_output,android_empatica_e4_blood_volume_pulse_output,android_empatica_e4_electrodermal_activity_output,android_empatica_e4_heartrate,android_empatica_e4_inter_beat_interval_output,android_empatica_e4_sensor_status_output,android_empatica_e4_temperature_output,application_server_status,application_record_counts,application_uptime,application_external_time HOTSTORAGE_USERNAME= HOTSTORAGE_PASSWORD=XXXXXXXX HOTSTORAGE_NAME= From 9d8636a54352c0e855d258f4979280639f010939 Mon Sep 17 00:00:00 2001 From: Joris Borgdorff Date: Wed, 26 Apr 2017 11:48:27 +0200 Subject: [PATCH 106/197] Also mention radar.yml in the README --- dcompose-stack/radar-cp-hadoop-stack/README.md | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/dcompose-stack/radar-cp-hadoop-stack/README.md b/dcompose-stack/radar-cp-hadoop-stack/README.md index 847cced91..acb3422c6 100644 --- a/dcompose-stack/radar-cp-hadoop-stack/README.md +++ b/dcompose-stack/radar-cp-hadoop-stack/README.md @@ -1,11 +1,13 @@ # RADAR-CNS with a HDFS connector -## Installation +## Configuration First move `env.template` file to .env and check 
and modify all its variables. Modify `smtp.env.template` to set a SMTP host to send emails with, and move it to `smtp.env`. The configuration settings are passed to a [namshi/smtp](https://hub.docker.com/r/namshi/smtp/) Docker container. This container supports a.o. regular SMTP and GMail. +Finally, edit `radar.yml`, especially concerning the monitor email address configuration. + ## Usage Run From 49d0c2329170755ca366ababad870a909eaf13ee Mon Sep 17 00:00:00 2001 From: Joris Borgdorff Date: Wed, 26 Apr 2017 11:58:14 +0200 Subject: [PATCH 107/197] Use OS independent sed --- .../radar-cp-hadoop-stack/install-radar-stack.sh | 12 ++++++------ dcompose-stack/radar-cp-hadoop-stack/util.sh | 8 ++++++++ 2 files changed, 14 insertions(+), 6 deletions(-) diff --git a/dcompose-stack/radar-cp-hadoop-stack/install-radar-stack.sh b/dcompose-stack/radar-cp-hadoop-stack/install-radar-stack.sh index 162fd4466..a5563c4a1 100755 --- a/dcompose-stack/radar-cp-hadoop-stack/install-radar-stack.sh +++ b/dcompose-stack/radar-cp-hadoop-stack/install-radar-stack.sh @@ -19,16 +19,16 @@ fi echo "==> Setting MongoDB Connector" # Update sink-mongo.properties -sed -i '' 's/\(mongo.username=\).*$/\1'${HOTSTORAGE_USERNAME}'/' sink-mongo.properties -sed -i '' 's/\(mongo.password=\).*$/\1'${HOTSTORAGE_PASSWORD}'/' sink-mongo.properties -sed -i '' 's/\(mongo.database=\).*$/\1'${HOTSTORAGE_NAME}'/' sink-mongo.properties -sed -i '' 's/\(server_name[[:space:]]*\).*$/\1'${SERVER_NAME}'/' nginx.conf +sedi 's/\(mongo.username=\).*$/\1'${HOTSTORAGE_USERNAME}'/' sink-mongo.properties +sedi 's/\(mongo.password=\).*$/\1'${HOTSTORAGE_PASSWORD}'/' sink-mongo.properties +sedi 's/\(mongo.database=\).*$/\1'${HOTSTORAGE_NAME}'/' sink-mongo.properties +sedi 's/\(server_name[[:space:]]*\).*$/\1'${SERVER_NAME}'/' nginx.conf # Set topics -sed -i '' 's/\(topics=\).*$/\1'${RADAR_AGG_TOPIC_LIST}'/' sink-mongo.properties +sedi 's/\(topics=\).*$/\1'${RADAR_AGG_TOPIC_LIST}'/' sink-mongo.properties echo "==> Setting HDFS Connector" -sed -i '' 's|\(topics=\).*$|\1'${RADAR_RAW_TOPIC_LIST}'|' sink-hdfs.properties +sedi 's|\(topics=\).*$|\1'${RADAR_RAW_TOPIC_LIST}'|' sink-hdfs.properties echo "==> Starting RADAR-CNS Platform" sudo-docker-compose up --force-recreate -d diff --git a/dcompose-stack/radar-cp-hadoop-stack/util.sh b/dcompose-stack/radar-cp-hadoop-stack/util.sh index 0a49984ce..e018230d5 100644 --- a/dcompose-stack/radar-cp-hadoop-stack/util.sh +++ b/dcompose-stack/radar-cp-hadoop-stack/util.sh @@ -36,6 +36,14 @@ sudo-docker-compose() { fi } +sedi() { + if [ $(uname) == "Darwin" ]; then + sed -i '' "$@" + else + sed -i -- "$@" + fi +} + echo "OS version: "$(uname -a) check_command_exists docker check_command_exists docker-compose From e59a37d1ac2fd0e44d79e9d6ec69b08c66769f82 Mon Sep 17 00:00:00 2001 From: Joris Borgdorff Date: Wed, 26 Apr 2017 12:08:49 +0200 Subject: [PATCH 108/197] Use function instead of sed replace --- .../radar-cp-hadoop-stack/install-radar-stack.sh | 12 ++++++------ dcompose-stack/radar-cp-hadoop-stack/util.sh | 13 ++++++++++--- 2 files changed, 16 insertions(+), 9 deletions(-) diff --git a/dcompose-stack/radar-cp-hadoop-stack/install-radar-stack.sh b/dcompose-stack/radar-cp-hadoop-stack/install-radar-stack.sh index a5563c4a1..bf3e3e952 100755 --- a/dcompose-stack/radar-cp-hadoop-stack/install-radar-stack.sh +++ b/dcompose-stack/radar-cp-hadoop-stack/install-radar-stack.sh @@ -19,16 +19,16 @@ fi echo "==> Setting MongoDB Connector" # Update sink-mongo.properties -sedi 's/\(mongo.username=\).*$/\1'${HOTSTORAGE_USERNAME}'/' 
sink-mongo.properties -sedi 's/\(mongo.password=\).*$/\1'${HOTSTORAGE_PASSWORD}'/' sink-mongo.properties -sedi 's/\(mongo.database=\).*$/\1'${HOTSTORAGE_NAME}'/' sink-mongo.properties -sedi 's/\(server_name[[:space:]]*\).*$/\1'${SERVER_NAME}'/' nginx.conf +inline_variable 'mongo.username=' $HOTSTORAGE_USERNAME sink-mongo.properties +inline_variable 'mongo.password=' $HOTSTORAGE_PASSWORD sink-mongo.properties +inline_variable 'mongo.database=' $HOTSTORAGE_NAME sink-mongo.properties +inline_variable 'server_name[[:space:]]*' $SERVER_NAME nginx.conf # Set topics -sedi 's/\(topics=\).*$/\1'${RADAR_AGG_TOPIC_LIST}'/' sink-mongo.properties +inline_variable 'topics=' "${RADAR_AGG_TOPIC_LIST}" sink-mongo.properties echo "==> Setting HDFS Connector" -sedi 's|\(topics=\).*$|\1'${RADAR_RAW_TOPIC_LIST}'|' sink-hdfs.properties +inline_variable 'topics=' "${RADAR_RAW_TOPIC_LIST}" sink-hdfs.properties echo "==> Starting RADAR-CNS Platform" sudo-docker-compose up --force-recreate -d diff --git a/dcompose-stack/radar-cp-hadoop-stack/util.sh b/dcompose-stack/radar-cp-hadoop-stack/util.sh index e018230d5..e820673ed 100644 --- a/dcompose-stack/radar-cp-hadoop-stack/util.sh +++ b/dcompose-stack/radar-cp-hadoop-stack/util.sh @@ -36,11 +36,18 @@ sudo-docker-compose() { fi } -sedi() { +# Inline variable into a file, keeping indentation. +# Usage: +# inline_variable VARIABLE_SET VALUE FILE +# where VARIABLE_SET is a regex of the pattern currently used in given file to set a variable to a value. +# Example: +# inline_variable 'a=' 123 test.txt +# will replace a line ' a=232 ' with ' a=123' +inline_variable() { if [ $(uname) == "Darwin" ]; then - sed -i '' "$@" + sed -i '' 's/^\([[:space:]]*'$1'\).*$/\1'$2'/' $3 else - sed -i -- "$@" + sed -i -- 's/^\([[:space:]]*'$1'\).*$/\1'$2'/' $3 fi } From 81b9e0e1082e229b52739894fb71b7bc74ec7ad8 Mon Sep 17 00:00:00 2001 From: Joris Borgdorff Date: Wed, 26 Apr 2017 12:38:05 +0200 Subject: [PATCH 109/197] Access cadvisor directly: it doesn't like reverse proxies --- dcompose-stack/radar-cp-hadoop-stack/docker-compose.yml | 2 ++ dcompose-stack/radar-cp-hadoop-stack/nginx.conf | 6 ------ 2 files changed, 2 insertions(+), 6 deletions(-) diff --git a/dcompose-stack/radar-cp-hadoop-stack/docker-compose.yml b/dcompose-stack/radar-cp-hadoop-stack/docker-compose.yml index 73b401363..13cd459de 100644 --- a/dcompose-stack/radar-cp-hadoop-stack/docker-compose.yml +++ b/dcompose-stack/radar-cp-hadoop-stack/docker-compose.yml @@ -406,6 +406,8 @@ services: image: google/cadvisor:v0.24.1 networks: - monitor + ports: + - "8080:8080" volumes: - "/:/rootfs:ro" - "/var/run:/var/run:rw" diff --git a/dcompose-stack/radar-cp-hadoop-stack/nginx.conf b/dcompose-stack/radar-cp-hadoop-stack/nginx.conf index ad867be23..c33eba2e7 100644 --- a/dcompose-stack/radar-cp-hadoop-stack/nginx.conf +++ b/dcompose-stack/radar-cp-hadoop-stack/nginx.conf @@ -37,12 +37,6 @@ http { proxy_redirect off; proxy_set_header Host $host; } - location /cadvisor/ { - rewrite /cadvisor/(.*) /$1 break; - proxy_pass http://cadvisor:8080; - proxy_redirect off; - proxy_set_header Host $host; - } location /portainer/ { rewrite /portainer/(.*) /$1 break; proxy_pass http://portainer:9000; From e982e7586b546301a22ee7e92c9b38ea9cd3282d Mon Sep 17 00:00:00 2001 From: Joris Borgdorff Date: Wed, 26 Apr 2017 12:38:35 +0200 Subject: [PATCH 110/197] Fixed portainer reverse proxy setup --- dcompose-stack/radar-cp-hadoop-stack/nginx.conf | 11 ++++++++++- 1 file changed, 10 insertions(+), 1 deletion(-) diff --git 
a/dcompose-stack/radar-cp-hadoop-stack/nginx.conf b/dcompose-stack/radar-cp-hadoop-stack/nginx.conf index c33eba2e7..7a53b0b70 100644 --- a/dcompose-stack/radar-cp-hadoop-stack/nginx.conf +++ b/dcompose-stack/radar-cp-hadoop-stack/nginx.conf @@ -41,7 +41,16 @@ http { rewrite /portainer/(.*) /$1 break; proxy_pass http://portainer:9000; proxy_redirect off; - proxy_set_header Host $host; + proxy_http_version 1.1; + proxy_set_header Connection ""; + } + location /portainer/api/websocket/ { + rewrite /portainer/api/websocket/(.*) /$1 break; + proxy_pass http://portainer:9000; + proxy_redirect off; + proxy_set_header Upgrade $http_upgrade; + proxy_set_header Connection "upgrade"; + proxy_http_version 1.1; } location /api/ { rewrite /api/(.*) /radar/api/$1 break; From 250277877e1bec9e5d3016c84276321e7551fd31 Mon Sep 17 00:00:00 2001 From: Joris Borgdorff Date: Wed, 26 Apr 2017 13:38:48 +0200 Subject: [PATCH 111/197] Show docker version --- .travis.yml | 1 + 1 file changed, 1 insertion(+) diff --git a/.travis.yml b/.travis.yml index 111e264cb..076b47623 100644 --- a/.travis.yml +++ b/.travis.yml @@ -8,6 +8,7 @@ env: before_install: - sudo apt-get update - sudo apt-get -y -o Dpkg::Options::="--force-confnew" install docker-engine + - docker --version - mkdir -p "$HOME/bin"; - export PATH="$PATH:$HOME/bin"; - curl -L https://github.com/docker/compose/releases/download/${DOCKER_COMPOSE_VERSION}/docker-compose-`uname -s`-`uname -m` > "$HOME/bin/docker-compose"; From feba631979663dc3a13d3e780159d6921c3537cf Mon Sep 17 00:00:00 2001 From: Joris Borgdorff Date: Wed, 26 Apr 2017 13:45:25 +0200 Subject: [PATCH 112/197] Print docker-compose version --- .travis.yml | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/.travis.yml b/.travis.yml index 076b47623..87bd6d2ae 100644 --- a/.travis.yml +++ b/.travis.yml @@ -10,13 +10,14 @@ before_install: - sudo apt-get -y -o Dpkg::Options::="--force-confnew" install docker-engine - docker --version - mkdir -p "$HOME/bin"; - - export PATH="$PATH:$HOME/bin"; + - export PATH="$HOME/bin:$PATH"; - curl -L https://github.com/docker/compose/releases/download/${DOCKER_COMPOSE_VERSION}/docker-compose-`uname -s`-`uname -m` > "$HOME/bin/docker-compose"; - chmod +x "$HOME/bin/docker-compose"; + - sudo docker-compose --version script: # Standard stack - cd dcompose-stack/radar-cp-stack - - sudo docker-compose up -d --build && sleep 15 && [ -z "$(docker-compose ps | tail -n +3 | grep " Exit ")" ] + - sudo docker-compose up -d --build && sleep 15 && [ -z "$(sudo docker-compose ps | tail -n +3 | grep " Exit ")" ] - sudo docker-compose down # With kerberos support From 32059e343d068df6349575acd4e191eb67d7336d Mon Sep 17 00:00:00 2001 From: Joris Borgdorff Date: Wed, 26 Apr 2017 13:51:33 +0200 Subject: [PATCH 113/197] Use the downloaded docker-compose everywhere --- .travis.yml | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) diff --git a/.travis.yml b/.travis.yml index 87bd6d2ae..f8bbfe419 100644 --- a/.travis.yml +++ b/.travis.yml @@ -13,18 +13,18 @@ before_install: - export PATH="$HOME/bin:$PATH"; - curl -L https://github.com/docker/compose/releases/download/${DOCKER_COMPOSE_VERSION}/docker-compose-`uname -s`-`uname -m` > "$HOME/bin/docker-compose"; - chmod +x "$HOME/bin/docker-compose"; - - sudo docker-compose --version + - sudo $HOME/bin/docker-compose --version script: # Standard stack - cd dcompose-stack/radar-cp-stack - - sudo docker-compose up -d --build && sleep 15 && [ -z "$(sudo docker-compose ps | tail -n +3 | grep " Exit ")" ] - 
- sudo docker-compose down + - sudo $HOME/bin/docker-compose up -d --build && sleep 15 && [ -z "$(sudo $HOME/bin/docker-compose ps | tail -n +3 | grep " Exit ")" ] + - sudo $HOME/bin/docker-compose down # With kerberos support # NOT SUPPORTED: kerberos image cannot be found #- cd ../radar-cp-sasl-stack - #- sudo docker-compose up -d --build && sleep 15 && [ -z "$(docker-compose ps | tail -n +3 | grep " Exit ")" ] - #- sudo docker-compose down + #- sudo $HOME/bin/docker-compose up -d --build && sleep 15 && [ -z "$($HOME/bin/docker-compose ps | tail -n +3 | grep " Exit ")" ] + #- sudo $HOME/bin/docker-compose down # With email and HDFS support - cd ../radar-cp-hadoop-stack @@ -35,13 +35,13 @@ script: - export HDFS_NAME_DIR_1=$PWD/hdfs-name1 - export HDFS_NAME_DIR_2=$PWD/hdfs-name2 - echo $"SMARTHOST_ADDRESS=mail.example.com\nSMARTHOST_PORT=587\nSMARTHOST_USER=user@example.com\nSMARTHOST_PASSWORD=XXXXXXXX" > smtp.env - - sudo docker-compose up -d --build && sleep 15 && [ -z "$(docker-compose ps | tail -n +3 | grep " Exit ")" ] - - sudo docker-compose down + - sudo $HOME/bin/docker-compose up -d --build && sleep 15 && [ -z "$($HOME/bin/docker-compose ps | tail -n +3 | grep " Exit ")" ] + - sudo $HOME/bin/docker-compose down - sudo docker network rm hadoop # With Docker Swarm support # NOT SUPPORTED: docker swarm and docker beta features are not available in Travis #- cd ../radar-cp-swarm-stack #- sudo docker network create --attachable hadoop - #- sudo docker-compose up -d --build && sleep 15 && [ -z "$(docker-compose ps | tail -n +3 | grep " Exit ")" ] - #- sudo docker-compose down + #- sudo $HOME/bin/docker-compose up -d --build && sleep 15 && [ -z "$($HOME/bin/docker-compose ps | tail -n +3 | grep " Exit ")" ] + #- sudo $HOME/bin/docker-compose down From 34ecd4a6338918fcafec677534666e5b116c0b2e Mon Sep 17 00:00:00 2001 From: Joris Borgdorff Date: Mon, 1 May 2017 10:38:11 +0200 Subject: [PATCH 114/197] Sudo only for linux --- .../radar-cp-hadoop-stack/install-radar-stack.sh | 6 +++--- .../radar-cp-hadoop-stack/reboot-radar-stack.sh | 2 +- .../radar-cp-hadoop-stack/stop-radar-stack.sh | 2 +- dcompose-stack/radar-cp-hadoop-stack/util.sh | 16 ++++------------ 4 files changed, 9 insertions(+), 17 deletions(-) diff --git a/dcompose-stack/radar-cp-hadoop-stack/install-radar-stack.sh b/dcompose-stack/radar-cp-hadoop-stack/install-radar-stack.sh index bf3e3e952..45d836f46 100755 --- a/dcompose-stack/radar-cp-hadoop-stack/install-radar-stack.sh +++ b/dcompose-stack/radar-cp-hadoop-stack/install-radar-stack.sh @@ -9,9 +9,9 @@ check_parent_exists HDFS_NAME_DIR_1 ${HDFS_NAME_DIR_1} check_parent_exists HDFS_NAME_DIR_2 ${HDFS_NAME_DIR_2} check_parent_exists MONGODB_DIR ${MONGODB_DIR} -if [ -z $(sudo-docker network ls --format '{{.Name}}' | grep "^hadoop$") ]; then +if [ -z $(sudo-linux docker network ls --format '{{.Name}}' | grep "^hadoop$") ]; then echo "==> Creating docker network - hadoop" - sudo-docker network create hadoop + sudo-linux docker network create hadoop else echo "==> Creating docker network - hadoop ALREADY EXISTS" fi @@ -31,4 +31,4 @@ echo "==> Setting HDFS Connector" inline_variable 'topics=' "${RADAR_RAW_TOPIC_LIST}" sink-hdfs.properties echo "==> Starting RADAR-CNS Platform" -sudo-docker-compose up --force-recreate -d +sudo-linux docker-compose up --force-recreate -d diff --git a/dcompose-stack/radar-cp-hadoop-stack/reboot-radar-stack.sh b/dcompose-stack/radar-cp-hadoop-stack/reboot-radar-stack.sh index 1a2c76ae8..ae5f1df49 100755 --- 
a/dcompose-stack/radar-cp-hadoop-stack/reboot-radar-stack.sh +++ b/dcompose-stack/radar-cp-hadoop-stack/reboot-radar-stack.sh @@ -3,4 +3,4 @@ . ./util.sh echo "==> Restarting RADAR-CNS Platform" -sudo-docker-compose restart +sudo-linux docker-compose restart diff --git a/dcompose-stack/radar-cp-hadoop-stack/stop-radar-stack.sh b/dcompose-stack/radar-cp-hadoop-stack/stop-radar-stack.sh index 8dd22a4d5..a11b6defa 100755 --- a/dcompose-stack/radar-cp-hadoop-stack/stop-radar-stack.sh +++ b/dcompose-stack/radar-cp-hadoop-stack/stop-radar-stack.sh @@ -3,4 +3,4 @@ . ./util.sh echo "==> Stopping RADAR-CNS Stack" -sudo-docker-compose stop +sudo-linux docker-compose stop diff --git a/dcompose-stack/radar-cp-hadoop-stack/util.sh b/dcompose-stack/radar-cp-hadoop-stack/util.sh index e820673ed..0614ab902 100644 --- a/dcompose-stack/radar-cp-hadoop-stack/util.sh +++ b/dcompose-stack/radar-cp-hadoop-stack/util.sh @@ -20,19 +20,11 @@ check_parent_exists() { fi } -sudo-docker() { +sudo-linux() { if [ $(uname) == "Darwin" ]; then - docker "$@" + "$@" else - sudo docker "$@" - fi -} - -sudo-docker-compose() { - if [ $(uname) == "Darwin" ]; then - docker-compose "$@" - else - sudo docker-compose "$@" + sudo "$@" fi } @@ -51,6 +43,6 @@ inline_variable() { fi } -echo "OS version: "$(uname -a) +echo "OS version: $(uname -a)" check_command_exists docker check_command_exists docker-compose From 095432e0a17d9895e8b9b0bf15a9e0654070eaa6 Mon Sep 17 00:00:00 2001 From: Joris Borgdorff Date: Mon, 1 May 2017 10:45:12 +0200 Subject: [PATCH 115/197] Moved configuration files to etc --- dcompose-stack/radar-cp-hadoop-stack/.gitignore | 2 +- dcompose-stack/radar-cp-hadoop-stack/README.md | 6 +++--- .../radar-cp-hadoop-stack/docker-compose.yml | 12 ++++++------ .../radar-cp-hadoop-stack/{ => etc}/env.template | 0 .../radar-cp-hadoop-stack/{ => etc}/nginx.conf | 0 .../radar-cp-hadoop-stack/{ => etc}/radar.yml | 0 .../{ => etc}/sink-hdfs-high.properties | 0 .../{ => etc}/sink-hdfs-low.properties | 0 .../{ => etc}/sink-hdfs-med.properties | 0 .../{ => etc}/sink-hdfs.properties | 0 .../{ => etc}/sink-mongo.properties | 0 .../{ => etc}/smtp.env.template | 0 12 files changed, 10 insertions(+), 10 deletions(-) rename dcompose-stack/radar-cp-hadoop-stack/{ => etc}/env.template (100%) rename dcompose-stack/radar-cp-hadoop-stack/{ => etc}/nginx.conf (100%) rename dcompose-stack/radar-cp-hadoop-stack/{ => etc}/radar.yml (100%) rename dcompose-stack/radar-cp-hadoop-stack/{ => etc}/sink-hdfs-high.properties (100%) rename dcompose-stack/radar-cp-hadoop-stack/{ => etc}/sink-hdfs-low.properties (100%) rename dcompose-stack/radar-cp-hadoop-stack/{ => etc}/sink-hdfs-med.properties (100%) rename dcompose-stack/radar-cp-hadoop-stack/{ => etc}/sink-hdfs.properties (100%) rename dcompose-stack/radar-cp-hadoop-stack/{ => etc}/sink-mongo.properties (100%) rename dcompose-stack/radar-cp-hadoop-stack/{ => etc}/smtp.env.template (100%) diff --git a/dcompose-stack/radar-cp-hadoop-stack/.gitignore b/dcompose-stack/radar-cp-hadoop-stack/.gitignore index 4be7db9b4..20ce7c905 100644 --- a/dcompose-stack/radar-cp-hadoop-stack/.gitignore +++ b/dcompose-stack/radar-cp-hadoop-stack/.gitignore @@ -1,3 +1,3 @@ -/smtp.env +/etc/smtp.env /.env /output/ diff --git a/dcompose-stack/radar-cp-hadoop-stack/README.md b/dcompose-stack/radar-cp-hadoop-stack/README.md index acb3422c6..15322ce77 100644 --- a/dcompose-stack/radar-cp-hadoop-stack/README.md +++ b/dcompose-stack/radar-cp-hadoop-stack/README.md @@ -2,11 +2,11 @@ ## Configuration -First move `env.template` file to 
.env and check and modify all its variables. +First move `etc/env.template` file to `./.env` and check and modify all its variables. -Modify `smtp.env.template` to set a SMTP host to send emails with, and move it to `smtp.env`. The configuration settings are passed to a [namshi/smtp](https://hub.docker.com/r/namshi/smtp/) Docker container. This container supports a.o. regular SMTP and GMail. +Modify `etc/smtp.env.template` to set a SMTP host to send emails with, and move it to `etc/smtp.env`. The configuration settings are passed to a [namshi/smtp](https://hub.docker.com/r/namshi/smtp/) Docker container. This container supports a.o. regular SMTP and GMail. -Finally, edit `radar.yml`, especially concerning the monitor email address configuration. +Finally, edit `etc/radar.yml`, especially concerning the monitor email address configuration. ## Usage diff --git a/dcompose-stack/radar-cp-hadoop-stack/docker-compose.yml b/dcompose-stack/radar-cp-hadoop-stack/docker-compose.yml index 13cd459de..d82297df0 100644 --- a/dcompose-stack/radar-cp-hadoop-stack/docker-compose.yml +++ b/dcompose-stack/radar-cp-hadoop-stack/docker-compose.yml @@ -260,7 +260,7 @@ services: - /var/spool/exim restart: always env_file: - - smtp.env + - ./etc/smtp.env #---------------------------------------------------------------------------# # RADAR mongo connector # @@ -269,7 +269,7 @@ services: image: radarcns/radar-mongodb-connector-auto:0.2 restart: on-failure volumes: - - ./sink-mongo.properties:/etc/kafka-connect/sink.properties + - ./etc/sink-mongo.properties:/etc/kafka-connect/sink.properties networks: - zookeeper - kafka @@ -313,7 +313,7 @@ services: image: radarcns/radar-hdfs-connector-auto:0.2 restart: on-failure volumes: - - ./sink-hdfs.properties:/etc/kafka-connect/sink-hdfs.properties + - ./etc/sink-hdfs.properties:/etc/kafka-connect/sink-hdfs.properties networks: - zookeeper - kafka @@ -368,7 +368,7 @@ services: - schema-registry-1 - kafka-init volumes: - - ./radar.yml:/etc/radar.yml + - ./etc/radar.yml:/etc/radar.yml environment: KAFKA_REST_PROXY: http://rest-proxy-1:8082 TOPIC_LIST: ${RADAR_TOPIC_LIST} @@ -393,7 +393,7 @@ services: - kafka-init - smtp volumes: - - ./radar.yml:/etc/radar.yml + - ./etc/radar.yml:/etc/radar.yml restart: always environment: KAFKA_REST_PROXY: http://rest-proxy-1:8082 @@ -442,4 +442,4 @@ services: ports: - "80:80" volumes: - - "./nginx.conf:/etc/nginx/nginx.conf:ro" + - "./etc/nginx.conf:/etc/nginx/nginx.conf:ro" diff --git a/dcompose-stack/radar-cp-hadoop-stack/env.template b/dcompose-stack/radar-cp-hadoop-stack/etc/env.template similarity index 100% rename from dcompose-stack/radar-cp-hadoop-stack/env.template rename to dcompose-stack/radar-cp-hadoop-stack/etc/env.template diff --git a/dcompose-stack/radar-cp-hadoop-stack/nginx.conf b/dcompose-stack/radar-cp-hadoop-stack/etc/nginx.conf similarity index 100% rename from dcompose-stack/radar-cp-hadoop-stack/nginx.conf rename to dcompose-stack/radar-cp-hadoop-stack/etc/nginx.conf diff --git a/dcompose-stack/radar-cp-hadoop-stack/radar.yml b/dcompose-stack/radar-cp-hadoop-stack/etc/radar.yml similarity index 100% rename from dcompose-stack/radar-cp-hadoop-stack/radar.yml rename to dcompose-stack/radar-cp-hadoop-stack/etc/radar.yml diff --git a/dcompose-stack/radar-cp-hadoop-stack/sink-hdfs-high.properties b/dcompose-stack/radar-cp-hadoop-stack/etc/sink-hdfs-high.properties similarity index 100% rename from dcompose-stack/radar-cp-hadoop-stack/sink-hdfs-high.properties rename to 
dcompose-stack/radar-cp-hadoop-stack/etc/sink-hdfs-high.properties diff --git a/dcompose-stack/radar-cp-hadoop-stack/sink-hdfs-low.properties b/dcompose-stack/radar-cp-hadoop-stack/etc/sink-hdfs-low.properties similarity index 100% rename from dcompose-stack/radar-cp-hadoop-stack/sink-hdfs-low.properties rename to dcompose-stack/radar-cp-hadoop-stack/etc/sink-hdfs-low.properties diff --git a/dcompose-stack/radar-cp-hadoop-stack/sink-hdfs-med.properties b/dcompose-stack/radar-cp-hadoop-stack/etc/sink-hdfs-med.properties similarity index 100% rename from dcompose-stack/radar-cp-hadoop-stack/sink-hdfs-med.properties rename to dcompose-stack/radar-cp-hadoop-stack/etc/sink-hdfs-med.properties diff --git a/dcompose-stack/radar-cp-hadoop-stack/sink-hdfs.properties b/dcompose-stack/radar-cp-hadoop-stack/etc/sink-hdfs.properties similarity index 100% rename from dcompose-stack/radar-cp-hadoop-stack/sink-hdfs.properties rename to dcompose-stack/radar-cp-hadoop-stack/etc/sink-hdfs.properties diff --git a/dcompose-stack/radar-cp-hadoop-stack/sink-mongo.properties b/dcompose-stack/radar-cp-hadoop-stack/etc/sink-mongo.properties similarity index 100% rename from dcompose-stack/radar-cp-hadoop-stack/sink-mongo.properties rename to dcompose-stack/radar-cp-hadoop-stack/etc/sink-mongo.properties diff --git a/dcompose-stack/radar-cp-hadoop-stack/smtp.env.template b/dcompose-stack/radar-cp-hadoop-stack/etc/smtp.env.template similarity index 100% rename from dcompose-stack/radar-cp-hadoop-stack/smtp.env.template rename to dcompose-stack/radar-cp-hadoop-stack/etc/smtp.env.template From 6005b9911604ab71a5c5d6cc8431539dadc1d4f2 Mon Sep 17 00:00:00 2001 From: Joris Borgdorff Date: Mon, 1 May 2017 11:10:40 +0200 Subject: [PATCH 116/197] Added HDFS restructure script --- .../{extract_from_hdfs.sh => hdfs_extract.sh} | 4 ++- .../radar-cp-hadoop-stack/hdfs_restructure.sh | 31 +++++++++++++++++++ .../radar-cp-hadoop-stack/lib/.gitignore | 1 + 3 files changed, 35 insertions(+), 1 deletion(-) rename dcompose-stack/radar-cp-hadoop-stack/{extract_from_hdfs.sh => hdfs_extract.sh} (72%) create mode 100755 dcompose-stack/radar-cp-hadoop-stack/hdfs_restructure.sh create mode 100644 dcompose-stack/radar-cp-hadoop-stack/lib/.gitignore diff --git a/dcompose-stack/radar-cp-hadoop-stack/extract_from_hdfs.sh b/dcompose-stack/radar-cp-hadoop-stack/hdfs_extract.sh similarity index 72% rename from dcompose-stack/radar-cp-hadoop-stack/extract_from_hdfs.sh rename to dcompose-stack/radar-cp-hadoop-stack/hdfs_extract.sh index 5f72edb3a..fd72c5926 100755 --- a/dcompose-stack/radar-cp-hadoop-stack/extract_from_hdfs.sh +++ b/dcompose-stack/radar-cp-hadoop-stack/hdfs_extract.sh @@ -5,6 +5,8 @@ if [[ $# -lt 1 || $1 = "-h" || $1 = "--help" ]]; then exit 1 fi +. 
./util.sh + # HDFS filename to get HDFS_FILE=$1 # Absolute directory to write output to @@ -16,4 +18,4 @@ HDFS_OUTPUT_DIR=/home/output HDFS_COMMAND="hdfs dfs -get $HDFS_FILE $HDFS_OUTPUT_DIR" mkdir -p $OUTPUT_DIR -docker run --rm --network hadoop -v "$OUTPUT_DIR:$HDFS_OUTPUT_DIR" -e CLUSTER_NAME=radar-cns -e CORE_CONF_fs_defaultFS=hdfs://hdfs-namenode:8020 uhopper/hadoop:2.7.2 $HDFS_COMMAND +sudo-linux docker run -i --rm --network hadoop -v "$OUTPUT_DIR:$HDFS_OUTPUT_DIR" -e CLUSTER_NAME=radar-cns -e CORE_CONF_fs_defaultFS=hdfs://hdfs-namenode:8020 uhopper/hadoop:2.7.2 $HDFS_COMMAND diff --git a/dcompose-stack/radar-cp-hadoop-stack/hdfs_restructure.sh b/dcompose-stack/radar-cp-hadoop-stack/hdfs_restructure.sh new file mode 100755 index 000000000..fc799a636 --- /dev/null +++ b/dcompose-stack/radar-cp-hadoop-stack/hdfs_restructure.sh @@ -0,0 +1,31 @@ +#!/bin/bash + +if [[ $# -lt 1 || $1 = "-h" || $1 = "--help" ]]; then + printf "Usage:\n$0 []\nThe destination directory defaults to ./output\n" + exit 1 +fi + +. ./util.sh + +# HDFS restructure version +JAR_VERSION=0.1 +# HDFS restructure JAR +JAR="restructurehdfs-all-${JAR_VERSION}.jar" + +if [ ! -e "lib/${JAR}" ]; then + echo "Downloading HDFS restructuring JAR" + sudo-linux curl -L -# -o lib/${JAR} "https://github.com/RADAR-CNS/Restructure-HDFS-topic/releases/download/v${JAR_VERSION}/${JAR}" +fi + +# HDFS filename to get +HDFS_FILE=$1 +# Absolute directory to write output to +OUTPUT_DIR=${2:-output} +OUTPUT_DIR="$(cd "$(dirname "$OUTPUT_DIR")"; pwd)/$(basename "$OUTPUT_DIR")" +# Internal docker directory to write output to +HDFS_OUTPUT_DIR=/output +# HDFS command to run +HDFS_COMMAND="java -jar /${JAR} hdfs://hdfs-namenode:8020 $HDFS_FILE $HDFS_OUTPUT_DIR" + +mkdir -p $OUTPUT_DIR +sudo-linux docker run -i --rm --network hadoop -v "$OUTPUT_DIR:$HDFS_OUTPUT_DIR" -v "$PWD/lib/${JAR}:/${JAR}" openjdk:8-jre-alpine $HDFS_COMMAND diff --git a/dcompose-stack/radar-cp-hadoop-stack/lib/.gitignore b/dcompose-stack/radar-cp-hadoop-stack/lib/.gitignore new file mode 100644 index 000000000..d392f0e82 --- /dev/null +++ b/dcompose-stack/radar-cp-hadoop-stack/lib/.gitignore @@ -0,0 +1 @@ +*.jar From 3a2aee49c1b4cc70a1e0b8a7e9913d79eb224697 Mon Sep 17 00:00:00 2001 From: Joris Borgdorff Date: Mon, 1 May 2017 11:13:56 +0200 Subject: [PATCH 117/197] Updated HDFS restructure script location --- dcompose-stack/radar-cp-hadoop-stack/README.md | 11 +++++++++-- 1 file changed, 9 insertions(+), 2 deletions(-) diff --git a/dcompose-stack/radar-cp-hadoop-stack/README.md b/dcompose-stack/radar-cp-hadoop-stack/README.md index 15322ce77..9b63c2988 100644 --- a/dcompose-stack/radar-cp-hadoop-stack/README.md +++ b/dcompose-stack/radar-cp-hadoop-stack/README.md @@ -16,9 +16,16 @@ Run ``` to start all the RADAR services. Use the `(start|stop|reboot)-radar-stack.sh` to start, stop or reboot it. -Data can be extracted from this setup by running: +Raw data can be extracted from this setup by running: ```shell -./extract_from_hdfs +./hdfs_extract.sh ``` This command will not overwrite data in the destination directory. + +CSV-structured data can be gotten from HDFS by running + +```shell +./hdfs_restructure.sh /topicAndroidNew +``` +This will put all CSV files in the destination directory, with subdirectory structure `PatientId/SensorType/Date_Hour.csv`. 
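For illustration only, a minimal sketch of how the extraction script introduced above might be driven and its output inspected once the stack is running; the output directory and the final `find` invocation are assumptions added here, not taken from the patches.

```shell
#!/bin/bash
# Hypothetical wrapper around hdfs_restructure.sh from this patch series.
# Assumes it is run from dcompose-stack/radar-cp-hadoop-stack with data
# already written under /topicAndroidNew; the output path is illustrative.
set -e

OUTPUT_DIR="$PWD/output"

# Restructure everything under /topicAndroidNew into CSV files.
./hdfs_restructure.sh /topicAndroidNew "$OUTPUT_DIR"

# The README describes the resulting layout as PatientId/SensorType/Date_Hour.csv;
# list a few of the generated files to confirm.
find "$OUTPUT_DIR" -name '*.csv' | head
```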
From e135e4be424bcface0cd3fcca49c0cc89335ae0e Mon Sep 17 00:00:00 2001 From: Joris Borgdorff Date: Mon, 1 May 2017 11:19:06 +0200 Subject: [PATCH 118/197] Updated build script for new smtp.env location --- .travis.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.travis.yml b/.travis.yml index f8bbfe419..d7f02fe94 100644 --- a/.travis.yml +++ b/.travis.yml @@ -34,7 +34,7 @@ script: - export HDFS_DATA_DIR_2=$PWD/hdfs-data2 - export HDFS_NAME_DIR_1=$PWD/hdfs-name1 - export HDFS_NAME_DIR_2=$PWD/hdfs-name2 - - echo $"SMARTHOST_ADDRESS=mail.example.com\nSMARTHOST_PORT=587\nSMARTHOST_USER=user@example.com\nSMARTHOST_PASSWORD=XXXXXXXX" > smtp.env + - echo $"SMARTHOST_ADDRESS=mail.example.com\nSMARTHOST_PORT=587\nSMARTHOST_USER=user@example.com\nSMARTHOST_PASSWORD=XXXXXXXX" > etc/smtp.env - sudo $HOME/bin/docker-compose up -d --build && sleep 15 && [ -z "$($HOME/bin/docker-compose ps | tail -n +3 | grep " Exit ")" ] - sudo $HOME/bin/docker-compose down - sudo docker network rm hadoop From 4af36ee27adcda0e30b8f4bb8553af051bd75a9f Mon Sep 17 00:00:00 2001 From: Joris Borgdorff Date: Mon, 1 May 2017 12:30:40 +0200 Subject: [PATCH 119/197] Fixed last shell script to use sudo-linux --- dcompose-stack/radar-cp-hadoop-stack/start-radar-stack.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/dcompose-stack/radar-cp-hadoop-stack/start-radar-stack.sh b/dcompose-stack/radar-cp-hadoop-stack/start-radar-stack.sh index b88ec68d3..a150c8e02 100755 --- a/dcompose-stack/radar-cp-hadoop-stack/start-radar-stack.sh +++ b/dcompose-stack/radar-cp-hadoop-stack/start-radar-stack.sh @@ -3,4 +3,4 @@ . ./util.sh echo "==> Starting radar-cns platform" -sudo-docker-compose start +sudo-linux docker-compose start From 057950240e0b2ff0e66e498a091d3b3e09cc5552 Mon Sep 17 00:00:00 2001 From: Joris Borgdorff Date: Tue, 2 May 2017 14:56:24 +0200 Subject: [PATCH 120/197] Restart zookeeper and kafka-3 on failure --- dcompose-stack/radar-cp-hadoop-stack/docker-compose.yml | 2 ++ 1 file changed, 2 insertions(+) diff --git a/dcompose-stack/radar-cp-hadoop-stack/docker-compose.yml b/dcompose-stack/radar-cp-hadoop-stack/docker-compose.yml index d82297df0..9143d6e00 100644 --- a/dcompose-stack/radar-cp-hadoop-stack/docker-compose.yml +++ b/dcompose-stack/radar-cp-hadoop-stack/docker-compose.yml @@ -34,6 +34,7 @@ services: volumes: - /var/lib/zookeeper/data - /var/lib/zookeeper/logs + restart: always environment: ZOOKEEPER_SERVER_ID: 1 ZOOKEEPER_CLIENT_PORT: 2181 @@ -86,6 +87,7 @@ services: - kafka-3-data:/var/lib/kafka/data depends_on: - kafka-2 + restart: always environment: KAFKA_BROKER_ID: 3 KAFKA_ZOOKEEPER_CONNECT: zookeeper-1:2181 From 4d688930fd10b5614598f4d9834245f6415406e6 Mon Sep 17 00:00:00 2001 From: Joris Borgdorff Date: Tue, 2 May 2017 15:47:13 +0200 Subject: [PATCH 121/197] Fixed etc/ paths in install script --- .../radar-cp-hadoop-stack/install-radar-stack.sh | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/dcompose-stack/radar-cp-hadoop-stack/install-radar-stack.sh b/dcompose-stack/radar-cp-hadoop-stack/install-radar-stack.sh index 45d836f46..6ca890b34 100755 --- a/dcompose-stack/radar-cp-hadoop-stack/install-radar-stack.sh +++ b/dcompose-stack/radar-cp-hadoop-stack/install-radar-stack.sh @@ -19,16 +19,16 @@ fi echo "==> Setting MongoDB Connector" # Update sink-mongo.properties -inline_variable 'mongo.username=' $HOTSTORAGE_USERNAME sink-mongo.properties -inline_variable 'mongo.password=' $HOTSTORAGE_PASSWORD sink-mongo.properties -inline_variable 
'mongo.database=' $HOTSTORAGE_NAME sink-mongo.properties -inline_variable 'server_name[[:space:]]*' $SERVER_NAME nginx.conf +inline_variable 'mongo.username=' $HOTSTORAGE_USERNAME etc/sink-mongo.properties +inline_variable 'mongo.password=' $HOTSTORAGE_PASSWORD etc/sink-mongo.properties +inline_variable 'mongo.database=' $HOTSTORAGE_NAME etc/sink-mongo.properties +inline_variable 'server_name[[:space:]]*' $SERVER_NAME etc/nginx.conf # Set topics -inline_variable 'topics=' "${RADAR_AGG_TOPIC_LIST}" sink-mongo.properties +inline_variable 'topics=' "${RADAR_AGG_TOPIC_LIST}" etc/sink-mongo.properties echo "==> Setting HDFS Connector" -inline_variable 'topics=' "${RADAR_RAW_TOPIC_LIST}" sink-hdfs.properties +inline_variable 'topics=' "${RADAR_RAW_TOPIC_LIST}" etc/sink-hdfs.properties echo "==> Starting RADAR-CNS Platform" sudo-linux docker-compose up --force-recreate -d From f052e56e5388c4dfea39a735fba0532cfac6f2d9 Mon Sep 17 00:00:00 2001 From: Joris Borgdorff Date: Tue, 2 May 2017 15:47:36 +0200 Subject: [PATCH 122/197] Explicitly not exposing docker-compose networks --- dcompose-stack/radar-cp-hadoop-stack/docker-compose.yml | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/dcompose-stack/radar-cp-hadoop-stack/docker-compose.yml b/dcompose-stack/radar-cp-hadoop-stack/docker-compose.yml index 9143d6e00..d44198901 100644 --- a/dcompose-stack/radar-cp-hadoop-stack/docker-compose.yml +++ b/dcompose-stack/radar-cp-hadoop-stack/docker-compose.yml @@ -4,18 +4,24 @@ version: '2.1' networks: zookeeper: driver: bridge + internal: true kafka: driver: bridge + internal: true api: driver: bridge + internal: true hadoop: external: true mail: driver: bridge + internal: true monitor: driver: bridge + internal: true hotstorage: driver: bridge + internal: true volumes: kafka-1-data: {} @@ -144,6 +150,7 @@ services: - kafka - zookeeper depends_on: + - zookeeper-1 - kafka-1 - kafka-2 - kafka-3 @@ -408,6 +415,7 @@ services: image: google/cadvisor:v0.24.1 networks: - monitor + - default ports: - "8080:8080" volumes: @@ -434,6 +442,7 @@ services: networks: - api - monitor + - default depends_on: - cadvisor - portainer From b38fb2b8622f036a86f77bd608f71881f583768f Mon Sep 17 00:00:00 2001 From: Joris Borgdorff Date: Tue, 2 May 2017 15:48:49 +0200 Subject: [PATCH 123/197] Simpler nginx rewrite syntax --- .../radar-cp-hadoop-stack/etc/nginx.conf | 24 +++++-------------- 1 file changed, 6 insertions(+), 18 deletions(-) diff --git a/dcompose-stack/radar-cp-hadoop-stack/etc/nginx.conf b/dcompose-stack/radar-cp-hadoop-stack/etc/nginx.conf index 7a53b0b70..4ea0eb56c 100644 --- a/dcompose-stack/radar-cp-hadoop-stack/etc/nginx.conf +++ b/dcompose-stack/radar-cp-hadoop-stack/etc/nginx.conf @@ -20,42 +20,30 @@ http { error_log /var/log/nginx/error.log; location /kafka/ { - rewrite /kafka/(.*) /$1 break; - proxy_pass http://rest-proxy-1:8082; - proxy_redirect off; + proxy_pass http://rest-proxy-1:8082/; proxy_set_header Host $host; } location /schema/ { - rewrite /schema/(.*) /$1 break; - proxy_pass http://schema-registry-1:8081; - proxy_redirect off; + proxy_pass http://schema-registry-1:8081/; proxy_set_header Host $host; } location /dashboard/ { - rewrite /dashboard(.*) /$1 break; - proxy_pass http://dashboard:3000; - proxy_redirect off; + proxy_pass http://dashboard:3000/; proxy_set_header Host $host; } location /portainer/ { - rewrite /portainer/(.*) /$1 break; - proxy_pass http://portainer:9000; - proxy_redirect off; + proxy_pass http://portainer:9000/; proxy_http_version 1.1; proxy_set_header Connection ""; } 
location /portainer/api/websocket/ { - rewrite /portainer/api/websocket/(.*) /$1 break; - proxy_pass http://portainer:9000; - proxy_redirect off; + proxy_pass http://portainer:9000/api/websocket/; proxy_set_header Upgrade $http_upgrade; proxy_set_header Connection "upgrade"; proxy_http_version 1.1; } location /api/ { - rewrite /api/(.*) /radar/api/$1 break; - proxy_pass http://rest-api:8080; - proxy_redirect off; + proxy_pass http://rest-api:8080/; proxy_set_header Host $host; } } From 0ba3f8d6625205e25375d46999091e9c49bfc59f Mon Sep 17 00:00:00 2001 From: Joris Borgdorff Date: Tue, 2 May 2017 15:49:46 +0200 Subject: [PATCH 124/197] Kafka init to start at root --- .../radar-cp-hadoop-stack/kafka-radarinit/Dockerfile | 6 +++--- .../radar-cp-hadoop-stack/kafka-radarinit/topic_init.sh | 6 +++--- 2 files changed, 6 insertions(+), 6 deletions(-) diff --git a/dcompose-stack/radar-cp-hadoop-stack/kafka-radarinit/Dockerfile b/dcompose-stack/radar-cp-hadoop-stack/kafka-radarinit/Dockerfile index 9ce900c4b..de2e65d87 100644 --- a/dcompose-stack/radar-cp-hadoop-stack/kafka-radarinit/Dockerfile +++ b/dcompose-stack/radar-cp-hadoop-stack/kafka-radarinit/Dockerfile @@ -1,7 +1,7 @@ FROM confluentinc/cp-kafka:3.1.2-1 # Copy bash file -COPY ./topic_init.sh /home/ -RUN chmod +x /home/topic_init.sh +COPY ./topic_init.sh / +RUN chmod +x /topic_init.sh -CMD ["./home/topic_init.sh", "shutdown -h now"] +CMD ["/topic_init.sh", "shutdown -h now"] diff --git a/dcompose-stack/radar-cp-hadoop-stack/kafka-radarinit/topic_init.sh b/dcompose-stack/radar-cp-hadoop-stack/kafka-radarinit/topic_init.sh index 83a813653..539ed0c01 100755 --- a/dcompose-stack/radar-cp-hadoop-stack/kafka-radarinit/topic_init.sh +++ b/dcompose-stack/radar-cp-hadoop-stack/kafka-radarinit/topic_init.sh @@ -1,14 +1,14 @@ #!/bin/bash # Check if first execution -if [ -f /home/.radar_topic_set ]; then +if [ -f /.radar_topic_set ]; then echo "*********************************************" echo "** RADAR-CNS topics are ready to be used **" echo "*********************************************" exit 0 fi -# Wait untill all brokers are up & running +# Wait until all brokers are up & running interval=1 while [ "$LENGTH" != "$KAFKA_BROKERS" ]; do BROKERS=$(curl -sS $KAFKA_REST_PROXY/brokers) @@ -58,7 +58,7 @@ do kafka-topics --zookeeper $KAFKA_ZOOKEEPER_CONNECT --create --topic $element --partitions $RADAR_PARTITIONS --replication-factor $RADAR_REPLICATION_FACTOR --if-not-exists done -touch /home/.radar_topic_set +touch /.radar_topic_set echo "Topics created!" 
From c806ae65eeccedd574c2bcad60016b7ec3998a8e Mon Sep 17 00:00:00 2001 From: Joris Borgdorff Date: Thu, 4 May 2017 12:45:51 +0200 Subject: [PATCH 125/197] Binding to all interfaces with Kafka REST proxy --- dcompose-stack/radar-cp-hadoop-stack/docker-compose.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/dcompose-stack/radar-cp-hadoop-stack/docker-compose.yml b/dcompose-stack/radar-cp-hadoop-stack/docker-compose.yml index d44198901..67b54d9be 100644 --- a/dcompose-stack/radar-cp-hadoop-stack/docker-compose.yml +++ b/dcompose-stack/radar-cp-hadoop-stack/docker-compose.yml @@ -117,7 +117,7 @@ services: environment: SCHEMA_REGISTRY_KAFKASTORE_CONNECTION_URL: zookeeper-1:2181 SCHEMA_REGISTRY_HOST_NAME: schema-registry-1 - SCHEMA_REGISTRY_LISTENERS: http://schema-registry-1:8081 + SCHEMA_REGISTRY_LISTENERS: http://0.0.0.0:8081 #---------------------------------------------------------------------------# # REST proxy # @@ -136,7 +136,7 @@ services: restart: always environment: KAFKA_REST_ZOOKEEPER_CONNECT: zookeeper-1:2181 - KAFKA_REST_LISTENERS: http://rest-proxy-1:8082 + KAFKA_REST_LISTENERS: http://0.0.0.0:8082 KAFKA_REST_SCHEMA_REGISTRY_URL: http://schema-registry-1:8081 KAFKA_REST_HOST_NAME: rest-proxy-1 From 9a5487ba65d84e1c1ffd7ad1db09831327f0c7a4 Mon Sep 17 00:00:00 2001 From: Joris Borgdorff Date: Thu, 4 May 2017 15:58:19 +0200 Subject: [PATCH 126/197] Remove git updates about generated code --- dcompose-stack/radar-cp-hadoop-stack/etc/.gitignore | 2 ++ ...dfs.properties => sink-hdfs.properties.template} | 0 ...go.properties => sink-mongo.properties.template} | 0 .../radar-cp-hadoop-stack/install-radar-stack.sh | 13 ++++++++++++- 4 files changed, 14 insertions(+), 1 deletion(-) create mode 100644 dcompose-stack/radar-cp-hadoop-stack/etc/.gitignore rename dcompose-stack/radar-cp-hadoop-stack/etc/{sink-hdfs.properties => sink-hdfs.properties.template} (100%) rename dcompose-stack/radar-cp-hadoop-stack/etc/{sink-mongo.properties => sink-mongo.properties.template} (100%) diff --git a/dcompose-stack/radar-cp-hadoop-stack/etc/.gitignore b/dcompose-stack/radar-cp-hadoop-stack/etc/.gitignore new file mode 100644 index 000000000..8ef584875 --- /dev/null +++ b/dcompose-stack/radar-cp-hadoop-stack/etc/.gitignore @@ -0,0 +1,2 @@ +/sink-mongo.properties +/sink-hdfs.properties diff --git a/dcompose-stack/radar-cp-hadoop-stack/etc/sink-hdfs.properties b/dcompose-stack/radar-cp-hadoop-stack/etc/sink-hdfs.properties.template similarity index 100% rename from dcompose-stack/radar-cp-hadoop-stack/etc/sink-hdfs.properties rename to dcompose-stack/radar-cp-hadoop-stack/etc/sink-hdfs.properties.template diff --git a/dcompose-stack/radar-cp-hadoop-stack/etc/sink-mongo.properties b/dcompose-stack/radar-cp-hadoop-stack/etc/sink-mongo.properties.template similarity index 100% rename from dcompose-stack/radar-cp-hadoop-stack/etc/sink-mongo.properties rename to dcompose-stack/radar-cp-hadoop-stack/etc/sink-mongo.properties.template diff --git a/dcompose-stack/radar-cp-hadoop-stack/install-radar-stack.sh b/dcompose-stack/radar-cp-hadoop-stack/install-radar-stack.sh index 6ca890b34..1e99cb244 100755 --- a/dcompose-stack/radar-cp-hadoop-stack/install-radar-stack.sh +++ b/dcompose-stack/radar-cp-hadoop-stack/install-radar-stack.sh @@ -19,16 +19,27 @@ fi echo "==> Setting MongoDB Connector" # Update sink-mongo.properties +if [ ! 
-e etc/sink-mongo.properties ]; then + cp etc/sink-mongo.properties.template etc/sink-mongo.properties +fi inline_variable 'mongo.username=' $HOTSTORAGE_USERNAME etc/sink-mongo.properties inline_variable 'mongo.password=' $HOTSTORAGE_PASSWORD etc/sink-mongo.properties inline_variable 'mongo.database=' $HOTSTORAGE_NAME etc/sink-mongo.properties -inline_variable 'server_name[[:space:]]*' $SERVER_NAME etc/nginx.conf # Set topics inline_variable 'topics=' "${RADAR_AGG_TOPIC_LIST}" etc/sink-mongo.properties echo "==> Setting HDFS Connector" +if [ ! -e etc/sink-hdfs.properties ]; then + cp etc/sink-hdfs.properties.template etc/sink-hdfs.properties +fi inline_variable 'topics=' "${RADAR_RAW_TOPIC_LIST}" etc/sink-hdfs.properties +echo "==> Setting nginx" +if [ ! -e etc/nginx.conf ]; then + cp etc/nginx.conf.template etc/nginx.conf +fi +inline_variable 'server_name[[:space:]]*' $SERVER_NAME etc/nginx.conf + echo "==> Starting RADAR-CNS Platform" sudo-linux docker-compose up --force-recreate -d From 3feba1cd2064a4c181bc0e5d30065ae49948a27e Mon Sep 17 00:00:00 2001 From: Joris Borgdorff Date: Mon, 8 May 2017 09:33:08 +0200 Subject: [PATCH 127/197] Proper nginx user name (fixes #33) --- dcompose-stack/radar-cp-hadoop-stack/install-radar-stack.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/dcompose-stack/radar-cp-hadoop-stack/install-radar-stack.sh b/dcompose-stack/radar-cp-hadoop-stack/install-radar-stack.sh index 1e99cb244..8325411f1 100755 --- a/dcompose-stack/radar-cp-hadoop-stack/install-radar-stack.sh +++ b/dcompose-stack/radar-cp-hadoop-stack/install-radar-stack.sh @@ -39,7 +39,7 @@ echo "==> Setting nginx" if [ ! -e etc/nginx.conf ]; then cp etc/nginx.conf.template etc/nginx.conf fi -inline_variable 'server_name[[:space:]]*' $SERVER_NAME etc/nginx.conf +inline_variable 'server_name[[:space:]]*' "${SERVER_NAME};" etc/nginx.conf echo "==> Starting RADAR-CNS Platform" sudo-linux docker-compose up --force-recreate -d From 26b8f1bce10ad6bd2588cbcaf7d5640c080366fa Mon Sep 17 00:00:00 2001 From: Joris Borgdorff Date: Mon, 8 May 2017 09:36:40 +0200 Subject: [PATCH 128/197] Fixed nginx template --- dcompose-stack/radar-cp-hadoop-stack/etc/.gitignore | 1 + .../etc/{nginx.conf => nginx.conf.template} | 2 +- 2 files changed, 2 insertions(+), 1 deletion(-) rename dcompose-stack/radar-cp-hadoop-stack/etc/{nginx.conf => nginx.conf.template} (98%) diff --git a/dcompose-stack/radar-cp-hadoop-stack/etc/.gitignore b/dcompose-stack/radar-cp-hadoop-stack/etc/.gitignore index 8ef584875..3d8ea5977 100644 --- a/dcompose-stack/radar-cp-hadoop-stack/etc/.gitignore +++ b/dcompose-stack/radar-cp-hadoop-stack/etc/.gitignore @@ -1,2 +1,3 @@ /sink-mongo.properties /sink-hdfs.properties +/nginx.conf diff --git a/dcompose-stack/radar-cp-hadoop-stack/etc/nginx.conf b/dcompose-stack/radar-cp-hadoop-stack/etc/nginx.conf.template similarity index 98% rename from dcompose-stack/radar-cp-hadoop-stack/etc/nginx.conf rename to dcompose-stack/radar-cp-hadoop-stack/etc/nginx.conf.template index 4ea0eb56c..11958917e 100644 --- a/dcompose-stack/radar-cp-hadoop-stack/etc/nginx.conf +++ b/dcompose-stack/radar-cp-hadoop-stack/etc/nginx.conf.template @@ -15,7 +15,7 @@ http { server { # simple reverse-proxy listen 80; - server_name localhost + server_name localhost; access_log /var/log/nginx/access.log; error_log /var/log/nginx/error.log; From fca1abc6be2be0c082ed6d6e98ca55117d502eff Mon Sep 17 00:00:00 2001 From: Joris Borgdorff Date: Mon, 8 May 2017 09:42:03 +0200 Subject: [PATCH 129/197] Made tempate copying a function 
--- .../radar-cp-hadoop-stack/install-radar-stack.sh | 12 +++--------- dcompose-stack/radar-cp-hadoop-stack/util.sh | 11 ++++++++++- 2 files changed, 13 insertions(+), 10 deletions(-) diff --git a/dcompose-stack/radar-cp-hadoop-stack/install-radar-stack.sh b/dcompose-stack/radar-cp-hadoop-stack/install-radar-stack.sh index 8325411f1..ea251d009 100755 --- a/dcompose-stack/radar-cp-hadoop-stack/install-radar-stack.sh +++ b/dcompose-stack/radar-cp-hadoop-stack/install-radar-stack.sh @@ -19,9 +19,7 @@ fi echo "==> Setting MongoDB Connector" # Update sink-mongo.properties -if [ ! -e etc/sink-mongo.properties ]; then - cp etc/sink-mongo.properties.template etc/sink-mongo.properties -fi +copy_template_if_absent etc/sink-mongo.properties inline_variable 'mongo.username=' $HOTSTORAGE_USERNAME etc/sink-mongo.properties inline_variable 'mongo.password=' $HOTSTORAGE_PASSWORD etc/sink-mongo.properties inline_variable 'mongo.database=' $HOTSTORAGE_NAME etc/sink-mongo.properties @@ -30,15 +28,11 @@ inline_variable 'mongo.database=' $HOTSTORAGE_NAME etc/sink-mongo.properties inline_variable 'topics=' "${RADAR_AGG_TOPIC_LIST}" etc/sink-mongo.properties echo "==> Setting HDFS Connector" -if [ ! -e etc/sink-hdfs.properties ]; then - cp etc/sink-hdfs.properties.template etc/sink-hdfs.properties -fi +copy_template_if_absent etc/sink-hdfs.properties inline_variable 'topics=' "${RADAR_RAW_TOPIC_LIST}" etc/sink-hdfs.properties echo "==> Setting nginx" -if [ ! -e etc/nginx.conf ]; then - cp etc/nginx.conf.template etc/nginx.conf -fi +copy_template_if_absent etc/nginx.conf inline_variable 'server_name[[:space:]]*' "${SERVER_NAME};" etc/nginx.conf echo "==> Starting RADAR-CNS Platform" diff --git a/dcompose-stack/radar-cp-hadoop-stack/util.sh b/dcompose-stack/radar-cp-hadoop-stack/util.sh index 0614ab902..c8b0312f2 100644 --- a/dcompose-stack/radar-cp-hadoop-stack/util.sh +++ b/dcompose-stack/radar-cp-hadoop-stack/util.sh @@ -39,7 +39,16 @@ inline_variable() { if [ $(uname) == "Darwin" ]; then sed -i '' 's/^\([[:space:]]*'$1'\).*$/\1'$2'/' $3 else - sed -i -- 's/^\([[:space:]]*'$1'\).*$/\1'$2'/' $3 + sudo sed -i -- 's/^\([[:space:]]*'$1'\).*$/\1'$2'/' $3 + fi +} + +# Copies the template (defined by the given config file with suffix +# ".template") to intended configuration file, if the file does not +# yet exist. +copy_template_if_absent() { + if [ ! 
-e $1 ]; then + sudo-linux cp -p "${1}.template" "$1" fi } From 8817143ebaad5a034b982277d5f7d85e880ce89f Mon Sep 17 00:00:00 2001 From: Joris Borgdorff Date: Mon, 8 May 2017 09:51:03 +0200 Subject: [PATCH 130/197] [Travis] move templates to working config files --- .travis.yml | 3 +++ 1 file changed, 3 insertions(+) diff --git a/.travis.yml b/.travis.yml index d7f02fe94..112a6f2df 100644 --- a/.travis.yml +++ b/.travis.yml @@ -35,6 +35,9 @@ script: - export HDFS_NAME_DIR_1=$PWD/hdfs-name1 - export HDFS_NAME_DIR_2=$PWD/hdfs-name2 - echo $"SMARTHOST_ADDRESS=mail.example.com\nSMARTHOST_PORT=587\nSMARTHOST_USER=user@example.com\nSMARTHOST_PASSWORD=XXXXXXXX" > etc/smtp.env + - cp etc/nginx.conf.template etc/nginx.template + - cp etc/sink-hdfs.properties.template etc/sink-hdfs.properties + - cp etc/sink-mongo.properties.template etc/sink-mongo.properties - sudo $HOME/bin/docker-compose up -d --build && sleep 15 && [ -z "$($HOME/bin/docker-compose ps | tail -n +3 | grep " Exit ")" ] - sudo $HOME/bin/docker-compose down - sudo docker network rm hadoop From 13e944ac243a1c06053a33b73e8e7ac23aa9c328 Mon Sep 17 00:00:00 2001 From: Joris Borgdorff Date: Mon, 8 May 2017 09:59:40 +0200 Subject: [PATCH 131/197] [Travis] nginx conf typo --- .travis.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.travis.yml b/.travis.yml index 112a6f2df..88ced17ea 100644 --- a/.travis.yml +++ b/.travis.yml @@ -35,7 +35,7 @@ script: - export HDFS_NAME_DIR_1=$PWD/hdfs-name1 - export HDFS_NAME_DIR_2=$PWD/hdfs-name2 - echo $"SMARTHOST_ADDRESS=mail.example.com\nSMARTHOST_PORT=587\nSMARTHOST_USER=user@example.com\nSMARTHOST_PASSWORD=XXXXXXXX" > etc/smtp.env - - cp etc/nginx.conf.template etc/nginx.template + - cp etc/nginx.conf.template etc/nginx.conf - cp etc/sink-hdfs.properties.template etc/sink-hdfs.properties - cp etc/sink-mongo.properties.template etc/sink-mongo.properties - sudo $HOME/bin/docker-compose up -d --build && sleep 15 && [ -z "$($HOME/bin/docker-compose ps | tail -n +3 | grep " Exit ")" ] From 474bc40572a5ebbe737887cff667a00fd244f2e6 Mon Sep 17 00:00:00 2001 From: Joris Borgdorff Date: Mon, 8 May 2017 14:43:27 +0200 Subject: [PATCH 132/197] Added SUCCESS/FAILURE text and exit code in scripts Also some utility documentation --- .../install-radar-stack.sh | 1 + dcompose-stack/radar-cp-hadoop-stack/util.sh | 30 +++++++++++++++---- 2 files changed, 26 insertions(+), 5 deletions(-) diff --git a/dcompose-stack/radar-cp-hadoop-stack/install-radar-stack.sh b/dcompose-stack/radar-cp-hadoop-stack/install-radar-stack.sh index ea251d009..d360fa2f7 100755 --- a/dcompose-stack/radar-cp-hadoop-stack/install-radar-stack.sh +++ b/dcompose-stack/radar-cp-hadoop-stack/install-radar-stack.sh @@ -37,3 +37,4 @@ inline_variable 'server_name[[:space:]]*' "${SERVER_NAME};" etc/nginx.conf echo "==> Starting RADAR-CNS Platform" sudo-linux docker-compose up --force-recreate -d +echo "### SUCCESS ###" diff --git a/dcompose-stack/radar-cp-hadoop-stack/util.sh b/dcompose-stack/radar-cp-hadoop-stack/util.sh index c8b0312f2..471d08613 100644 --- a/dcompose-stack/radar-cp-hadoop-stack/util.sh +++ b/dcompose-stack/radar-cp-hadoop-stack/util.sh @@ -1,5 +1,16 @@ #!/bin/bash +# this will trap any errors or commands with non-zero exit status +# by calling function catch_errors() +trap catch_errors ERR; + +function catch_errors() { + exit_code=$? + echo "### FAILURE ###"; + exit $exit_code; +} + +# Check whether given command exists and call it with the --version flag. 
check_command_exists() { if command -v "$1" > /dev/null 2>&1; then echo "$1 version: $($1 --version)" @@ -9,6 +20,8 @@ check_command_exists() { fi } +# Check if the parent directory of given variable is set. Usage: +# check_parent_exists MY_PATH_VAR $MY_PATH_VAR check_parent_exists() { if [ -z "$2" ]; then echo "Directory variable $1 is not set in .env" @@ -20,6 +33,8 @@ check_parent_exists() { fi } +# sudo if on Linux, not on OS X +# useful for docker, which doesn't need sudo on OS X sudo-linux() { if [ $(uname) == "Darwin" ]; then "$@" @@ -28,6 +43,15 @@ sudo-linux() { fi } +# OS X/linux portable sed -i +sed_i() { + if [ $(uname) == "Darwin" ]; then + sed -i '' "$@" + else + sudo sed -i -- "$@" + fi +} + # Inline variable into a file, keeping indentation. # Usage: # inline_variable VARIABLE_SET VALUE FILE @@ -36,11 +60,7 @@ sudo-linux() { # inline_variable 'a=' 123 test.txt # will replace a line ' a=232 ' with ' a=123' inline_variable() { - if [ $(uname) == "Darwin" ]; then - sed -i '' 's/^\([[:space:]]*'$1'\).*$/\1'$2'/' $3 - else - sudo sed -i -- 's/^\([[:space:]]*'$1'\).*$/\1'$2'/' $3 - fi + sed_i 's|^\([[:space:]]*'$1'\).*$|\(\1\)'$2'|' $3 } # Copies the template (defined by the given config file with suffix From c13827fd58685ff5672322b7b0acd91232c379c8 Mon Sep 17 00:00:00 2001 From: Joris Borgdorff Date: Mon, 8 May 2017 16:07:29 +0200 Subject: [PATCH 133/197] First implementation to use self-signed certificates or letsencrypt --- .../radar-cp-hadoop-stack/docker-compose.yml | 7 +++ .../etc/nginx.conf.template | 54 ++++++++++++++++--- .../install-radar-stack.sh | 6 +++ .../renew_ssl_certificate.sh | 10 ++++ dcompose-stack/radar-cp-hadoop-stack/util.sh | 42 ++++++++++++++- 5 files changed, 111 insertions(+), 8 deletions(-) create mode 100755 dcompose-stack/radar-cp-hadoop-stack/renew_ssl_certificate.sh diff --git a/dcompose-stack/radar-cp-hadoop-stack/docker-compose.yml b/dcompose-stack/radar-cp-hadoop-stack/docker-compose.yml index 67b54d9be..5bfc9222f 100644 --- a/dcompose-stack/radar-cp-hadoop-stack/docker-compose.yml +++ b/dcompose-stack/radar-cp-hadoop-stack/docker-compose.yml @@ -27,6 +27,10 @@ volumes: kafka-1-data: {} kafka-2-data: {} kafka-3-data: {} + certs: + external: true + certs-data: + external: true services: @@ -452,5 +456,8 @@ services: - dashboard ports: - "80:80" + - "443:443" volumes: + - certs:/etc/letsencrypt + - certs-data:/data/letsencrypt - "./etc/nginx.conf:/etc/nginx/nginx.conf:ro" diff --git a/dcompose-stack/radar-cp-hadoop-stack/etc/nginx.conf.template b/dcompose-stack/radar-cp-hadoop-stack/etc/nginx.conf.template index 11958917e..1e6a1fb91 100644 --- a/dcompose-stack/radar-cp-hadoop-stack/etc/nginx.conf.template +++ b/dcompose-stack/radar-cp-hadoop-stack/etc/nginx.conf.template @@ -13,11 +13,51 @@ http { '"$http_user_agent" "$http_x_forwarded_for"'; tcp_nodelay on; + # hide nginx version + server_tokens off; + + # add nosniff header (https://www.owasp.org/index.php/List_of_useful_HTTP_headers) + add_header X-Content-Type-Options nosniff; + + server { + listen 80 default_server; + listen [::]:80 default_server; + server_name localhost; + location / { + return 301 https://$server_name$request_uri; + } + location ^~ /.well-known { + allow all; + root /data/letsencrypt/; + } + } + server { # simple reverse-proxy - listen 80; - server_name localhost; - access_log /var/log/nginx/access.log; - error_log /var/log/nginx/error.log; + listen 443 ssl http2 default_server; + listen [::]:443 ssl http2 default_server; + server_name localhost; + + ssl on; + + add_header 
Strict-Transport-Security "max-age=31536000" always; + + ssl_session_cache shared:SSL:20m; + ssl_session_timeout 10m; + + ssl_protocols TLSv1 TLSv1.1 TLSv1.2; + ssl_prefer_server_ciphers on; + ssl_ciphers "ECDH+AESGCM:ECDH+AES256:ECDH+AES128:!ADH:!AECDH:!MD5;"; + + ssl_stapling on; + ssl_stapling_verify on; + resolver 8.8.8.8 8.8.4.4; + + ssl_certificate /etc/letsencrypt/live/localhost/fullchain.pem; + ssl_certificate_key /etc/letsencrypt/live/localhost/privkey.pem; + ssl_trusted_certificate /etc/letsencrypt/live/localhost/chain.pem; + + access_log /var/log/nginx/access.log; + error_log /var/log/nginx/error.log; location /kafka/ { proxy_pass http://rest-proxy-1:8082/; @@ -34,12 +74,12 @@ http { location /portainer/ { proxy_pass http://portainer:9000/; proxy_http_version 1.1; - proxy_set_header Connection ""; + proxy_set_header Connection ""; } location /portainer/api/websocket/ { proxy_pass http://portainer:9000/api/websocket/; - proxy_set_header Upgrade $http_upgrade; - proxy_set_header Connection "upgrade"; + proxy_set_header Upgrade $http_upgrade; + proxy_set_header Connection "upgrade"; proxy_http_version 1.1; } location /api/ { diff --git a/dcompose-stack/radar-cp-hadoop-stack/install-radar-stack.sh b/dcompose-stack/radar-cp-hadoop-stack/install-radar-stack.sh index d360fa2f7..77b120152 100755 --- a/dcompose-stack/radar-cp-hadoop-stack/install-radar-stack.sh +++ b/dcompose-stack/radar-cp-hadoop-stack/install-radar-stack.sh @@ -9,6 +9,10 @@ check_parent_exists HDFS_NAME_DIR_1 ${HDFS_NAME_DIR_1} check_parent_exists HDFS_NAME_DIR_2 ${HDFS_NAME_DIR_2} check_parent_exists MONGODB_DIR ${MONGODB_DIR} +if [ -z "$SERVER_NAME" ]; then + echo "Set SERVER_NAME variable in .env" +fi + if [ -z $(sudo-linux docker network ls --format '{{.Name}}' | grep "^hadoop$") ]; then echo "==> Creating docker network - hadoop" sudo-linux docker network create hadoop @@ -34,6 +38,8 @@ inline_variable 'topics=' "${RADAR_RAW_TOPIC_LIST}" etc/sink-hdfs.properties echo "==> Setting nginx" copy_template_if_absent etc/nginx.conf inline_variable 'server_name[[:space:]]*' "${SERVER_NAME};" etc/nginx.conf +sed_i 's|\(/etc/letsencrypt/live/\)[^/]*/\(.*\.pem\)|\(\1\)'$SERVER_NAME'\2|' etc/nginx.conf +request_certificate "$SERVER_NAME" echo "==> Starting RADAR-CNS Platform" sudo-linux docker-compose up --force-recreate -d diff --git a/dcompose-stack/radar-cp-hadoop-stack/renew_ssl_certificate.sh b/dcompose-stack/radar-cp-hadoop-stack/renew_ssl_certificate.sh new file mode 100755 index 000000000..b6bfe5892 --- /dev/null +++ b/dcompose-stack/radar-cp-hadoop-stack/renew_ssl_certificate.sh @@ -0,0 +1,10 @@ +#!/bin/bash + +. ./util.sh +. ./.env + +if [ -z "$SERVER_NAME" ]; then + echo "Set SERVER_NAME variable in .env" +fi + +request_certificate $SERVER_NAME force diff --git a/dcompose-stack/radar-cp-hadoop-stack/util.sh b/dcompose-stack/radar-cp-hadoop-stack/util.sh index 471d08613..00c49b909 100644 --- a/dcompose-stack/radar-cp-hadoop-stack/util.sh +++ b/dcompose-stack/radar-cp-hadoop-stack/util.sh @@ -67,8 +67,48 @@ inline_variable() { # ".template") to intended configuration file, if the file does not # yet exist. copy_template_if_absent() { - if [ ! -e $1 ]; then + if [ ! -e "$1" ]; then sudo-linux cp -p "${1}.template" "$1" + elif [ "$1" -ot "${1}.template" ]; then + echo "Configuration file ${1} is older than its template ${1}.template." + echo "Please edit ${1} to ensure it matches the template, remove it or" + echo "run touch on it." 
+ exit 1 + fi +} + +request_certificate() { + SERVER_NAME=$1 + SSL_PATH="/etc/openssl/live/${SERVER_NAME}" + if sudo-linux docker run --rm -v certs:/etc/openssl alpine:3.5 /bin/sh -c "[ -e '$SSL_PATH/chain.pem' ]"; then + KEY_EXISTS=1 + if [ "$2" = "force" ]; then + echo " WARN: SSL certificate already existed, renewing" + else + echo " SSL certificate already exists, not recreating" + return + fi + else + KEY_EXISTS=0 + fi + + if [ "$SERVER_NAME" = "localhost" ]; then + echo "==> Generating self-signed certificate" + sudo-linux docker run -i --rm -v certs:/etc/openssl -v certs-data:/var/lib/openssl alpine:3.5 \ + /bin/sh -c "mkdir -p $SSL_PATH && touch /var/lib/openssl/.well-known && apk update && apk add openssl && openssl req -x509 -newkey rsa:4086 -subj '/C=XX/ST=XXXX/L=XXXX/O=XXXX/CN=localhost' -keyout '$SSL_PATH/privkey.pem' -out '$SSL_PATH/chain.pem' -days 3650 -nodes -sha256 && cp $SSL_PATH/chain.pem $SSL_PATH/fullchain.pem" + else + echo "==> Requesting Let's Encrypt SSL certificate" + CERTBOT_DOCKER_OPTS=(-i --rm -v certs:/etc/letsencrypt -v certs-data:/data/letsencrypt deliverous/certbot) + CERTBOT_OPTS=(--webroot --webroot-path=/data/letsencrypt -d $SERVER_NAME) + if [ $KEY_EXISTS -eq 0 ]; then + # request key for the first time + sudo-linux docker run "${CERTBOT_DOCKER_OPTS[@]}" certonly "${CERTBOT_OPTS[@]}" + else + # renew key + sudo-linux docker run "${CERTBOT_DOCKER_OPTS[@]}" renew "${CERTBOT_OPTS[@]}" + # Reload webserver configuration + sudo-linux docker-compose kill -s HUP webserver + fi fi } From 38824718e44ede7ce473ae59e4fe7b1f4e9118c5 Mon Sep 17 00:00:00 2001 From: Joris Borgdorff Date: Mon, 8 May 2017 16:18:10 +0200 Subject: [PATCH 134/197] Moved radar.yml to radar.yml.template --- .travis.yml | 1 + dcompose-stack/radar-cp-hadoop-stack/README.md | 2 +- .../radar-cp-hadoop-stack/etc/{radar.yml => radar.yml.template} | 0 3 files changed, 2 insertions(+), 1 deletion(-) rename dcompose-stack/radar-cp-hadoop-stack/etc/{radar.yml => radar.yml.template} (100%) diff --git a/.travis.yml b/.travis.yml index 88ced17ea..1b85d2101 100644 --- a/.travis.yml +++ b/.travis.yml @@ -35,6 +35,7 @@ script: - export HDFS_NAME_DIR_1=$PWD/hdfs-name1 - export HDFS_NAME_DIR_2=$PWD/hdfs-name2 - echo $"SMARTHOST_ADDRESS=mail.example.com\nSMARTHOST_PORT=587\nSMARTHOST_USER=user@example.com\nSMARTHOST_PASSWORD=XXXXXXXX" > etc/smtp.env + - cp etc/radar.yml.template etc/radar.yml - cp etc/nginx.conf.template etc/nginx.conf - cp etc/sink-hdfs.properties.template etc/sink-hdfs.properties - cp etc/sink-mongo.properties.template etc/sink-mongo.properties diff --git a/dcompose-stack/radar-cp-hadoop-stack/README.md b/dcompose-stack/radar-cp-hadoop-stack/README.md index 9b63c2988..0090a2b09 100644 --- a/dcompose-stack/radar-cp-hadoop-stack/README.md +++ b/dcompose-stack/radar-cp-hadoop-stack/README.md @@ -6,7 +6,7 @@ First move `etc/env.template` file to `./.env` and check and modify all its vari Modify `etc/smtp.env.template` to set a SMTP host to send emails with, and move it to `etc/smtp.env`. The configuration settings are passed to a [namshi/smtp](https://hub.docker.com/r/namshi/smtp/) Docker container. This container supports a.o. regular SMTP and GMail. -Finally, edit `etc/radar.yml`, especially concerning the monitor email address configuration. +Finally, move `etc/radar.yml.template` to `etc/radar.yml` and edit it, especially concerning the monitor email address configuration. 
## Usage diff --git a/dcompose-stack/radar-cp-hadoop-stack/etc/radar.yml b/dcompose-stack/radar-cp-hadoop-stack/etc/radar.yml.template similarity index 100% rename from dcompose-stack/radar-cp-hadoop-stack/etc/radar.yml rename to dcompose-stack/radar-cp-hadoop-stack/etc/radar.yml.template From f6fdc3fd601b0fd670961b06f8b932cfca9e28ee Mon Sep 17 00:00:00 2001 From: Joris Borgdorff Date: Mon, 8 May 2017 16:07:29 +0200 Subject: [PATCH 135/197] First implementation to use self-signed certificates or letsencrypt --- .../radar-cp-hadoop-stack/docker-compose.yml | 7 +++ .../etc/nginx.conf.template | 54 ++++++++++++++++--- .../install-radar-stack.sh | 6 +++ .../renew_ssl_certificate.sh | 10 ++++ dcompose-stack/radar-cp-hadoop-stack/util.sh | 42 ++++++++++++++- 5 files changed, 111 insertions(+), 8 deletions(-) create mode 100755 dcompose-stack/radar-cp-hadoop-stack/renew_ssl_certificate.sh diff --git a/dcompose-stack/radar-cp-hadoop-stack/docker-compose.yml b/dcompose-stack/radar-cp-hadoop-stack/docker-compose.yml index 67b54d9be..5bfc9222f 100644 --- a/dcompose-stack/radar-cp-hadoop-stack/docker-compose.yml +++ b/dcompose-stack/radar-cp-hadoop-stack/docker-compose.yml @@ -27,6 +27,10 @@ volumes: kafka-1-data: {} kafka-2-data: {} kafka-3-data: {} + certs: + external: true + certs-data: + external: true services: @@ -452,5 +456,8 @@ services: - dashboard ports: - "80:80" + - "443:443" volumes: + - certs:/etc/letsencrypt + - certs-data:/data/letsencrypt - "./etc/nginx.conf:/etc/nginx/nginx.conf:ro" diff --git a/dcompose-stack/radar-cp-hadoop-stack/etc/nginx.conf.template b/dcompose-stack/radar-cp-hadoop-stack/etc/nginx.conf.template index 11958917e..1e6a1fb91 100644 --- a/dcompose-stack/radar-cp-hadoop-stack/etc/nginx.conf.template +++ b/dcompose-stack/radar-cp-hadoop-stack/etc/nginx.conf.template @@ -13,11 +13,51 @@ http { '"$http_user_agent" "$http_x_forwarded_for"'; tcp_nodelay on; + # hide nginx version + server_tokens off; + + # add nosniff header (https://www.owasp.org/index.php/List_of_useful_HTTP_headers) + add_header X-Content-Type-Options nosniff; + + server { + listen 80 default_server; + listen [::]:80 default_server; + server_name localhost; + location / { + return 301 https://$server_name$request_uri; + } + location ^~ /.well-known { + allow all; + root /data/letsencrypt/; + } + } + server { # simple reverse-proxy - listen 80; - server_name localhost; - access_log /var/log/nginx/access.log; - error_log /var/log/nginx/error.log; + listen 443 ssl http2 default_server; + listen [::]:443 ssl http2 default_server; + server_name localhost; + + ssl on; + + add_header Strict-Transport-Security "max-age=31536000" always; + + ssl_session_cache shared:SSL:20m; + ssl_session_timeout 10m; + + ssl_protocols TLSv1 TLSv1.1 TLSv1.2; + ssl_prefer_server_ciphers on; + ssl_ciphers "ECDH+AESGCM:ECDH+AES256:ECDH+AES128:!ADH:!AECDH:!MD5;"; + + ssl_stapling on; + ssl_stapling_verify on; + resolver 8.8.8.8 8.8.4.4; + + ssl_certificate /etc/letsencrypt/live/localhost/fullchain.pem; + ssl_certificate_key /etc/letsencrypt/live/localhost/privkey.pem; + ssl_trusted_certificate /etc/letsencrypt/live/localhost/chain.pem; + + access_log /var/log/nginx/access.log; + error_log /var/log/nginx/error.log; location /kafka/ { proxy_pass http://rest-proxy-1:8082/; @@ -34,12 +74,12 @@ http { location /portainer/ { proxy_pass http://portainer:9000/; proxy_http_version 1.1; - proxy_set_header Connection ""; + proxy_set_header Connection ""; } location /portainer/api/websocket/ { proxy_pass http://portainer:9000/api/websocket/; - 
proxy_set_header Upgrade $http_upgrade; - proxy_set_header Connection "upgrade"; + proxy_set_header Upgrade $http_upgrade; + proxy_set_header Connection "upgrade"; proxy_http_version 1.1; } location /api/ { diff --git a/dcompose-stack/radar-cp-hadoop-stack/install-radar-stack.sh b/dcompose-stack/radar-cp-hadoop-stack/install-radar-stack.sh index d360fa2f7..77b120152 100755 --- a/dcompose-stack/radar-cp-hadoop-stack/install-radar-stack.sh +++ b/dcompose-stack/radar-cp-hadoop-stack/install-radar-stack.sh @@ -9,6 +9,10 @@ check_parent_exists HDFS_NAME_DIR_1 ${HDFS_NAME_DIR_1} check_parent_exists HDFS_NAME_DIR_2 ${HDFS_NAME_DIR_2} check_parent_exists MONGODB_DIR ${MONGODB_DIR} +if [ -z "$SERVER_NAME" ]; then + echo "Set SERVER_NAME variable in .env" +fi + if [ -z $(sudo-linux docker network ls --format '{{.Name}}' | grep "^hadoop$") ]; then echo "==> Creating docker network - hadoop" sudo-linux docker network create hadoop @@ -34,6 +38,8 @@ inline_variable 'topics=' "${RADAR_RAW_TOPIC_LIST}" etc/sink-hdfs.properties echo "==> Setting nginx" copy_template_if_absent etc/nginx.conf inline_variable 'server_name[[:space:]]*' "${SERVER_NAME};" etc/nginx.conf +sed_i 's|\(/etc/letsencrypt/live/\)[^/]*/\(.*\.pem\)|\(\1\)'$SERVER_NAME'\2|' etc/nginx.conf +request_certificate "$SERVER_NAME" echo "==> Starting RADAR-CNS Platform" sudo-linux docker-compose up --force-recreate -d diff --git a/dcompose-stack/radar-cp-hadoop-stack/renew_ssl_certificate.sh b/dcompose-stack/radar-cp-hadoop-stack/renew_ssl_certificate.sh new file mode 100755 index 000000000..b6bfe5892 --- /dev/null +++ b/dcompose-stack/radar-cp-hadoop-stack/renew_ssl_certificate.sh @@ -0,0 +1,10 @@ +#!/bin/bash + +. ./util.sh +. ./.env + +if [ -z "$SERVER_NAME" ]; then + echo "Set SERVER_NAME variable in .env" +fi + +request_certificate $SERVER_NAME force diff --git a/dcompose-stack/radar-cp-hadoop-stack/util.sh b/dcompose-stack/radar-cp-hadoop-stack/util.sh index 471d08613..00c49b909 100644 --- a/dcompose-stack/radar-cp-hadoop-stack/util.sh +++ b/dcompose-stack/radar-cp-hadoop-stack/util.sh @@ -67,8 +67,48 @@ inline_variable() { # ".template") to intended configuration file, if the file does not # yet exist. copy_template_if_absent() { - if [ ! -e $1 ]; then + if [ ! -e "$1" ]; then sudo-linux cp -p "${1}.template" "$1" + elif [ "$1" -ot "${1}.template" ]; then + echo "Configuration file ${1} is older than its template ${1}.template." + echo "Please edit ${1} to ensure it matches the template, remove it or" + echo "run touch on it." 
+ exit 1 + fi +} + +request_certificate() { + SERVER_NAME=$1 + SSL_PATH="/etc/openssl/live/${SERVER_NAME}" + if sudo-linux docker run --rm -v certs:/etc/openssl alpine:3.5 /bin/sh -c "[ -e '$SSL_PATH/chain.pem' ]"; then + KEY_EXISTS=1 + if [ "$2" = "force" ]; then + echo " WARN: SSL certificate already existed, renewing" + else + echo " SSL certificate already exists, not recreating" + return + fi + else + KEY_EXISTS=0 + fi + + if [ "$SERVER_NAME" = "localhost" ]; then + echo "==> Generating self-signed certificate" + sudo-linux docker run -i --rm -v certs:/etc/openssl -v certs-data:/var/lib/openssl alpine:3.5 \ + /bin/sh -c "mkdir -p $SSL_PATH && touch /var/lib/openssl/.well-known && apk update && apk add openssl && openssl req -x509 -newkey rsa:4086 -subj '/C=XX/ST=XXXX/L=XXXX/O=XXXX/CN=localhost' -keyout '$SSL_PATH/privkey.pem' -out '$SSL_PATH/chain.pem' -days 3650 -nodes -sha256 && cp $SSL_PATH/chain.pem $SSL_PATH/fullchain.pem" + else + echo "==> Requesting Let's Encrypt SSL certificate" + CERTBOT_DOCKER_OPTS=(-i --rm -v certs:/etc/letsencrypt -v certs-data:/data/letsencrypt deliverous/certbot) + CERTBOT_OPTS=(--webroot --webroot-path=/data/letsencrypt -d $SERVER_NAME) + if [ $KEY_EXISTS -eq 0 ]; then + # request key for the first time + sudo-linux docker run "${CERTBOT_DOCKER_OPTS[@]}" certonly "${CERTBOT_OPTS[@]}" + else + # renew key + sudo-linux docker run "${CERTBOT_DOCKER_OPTS[@]}" renew "${CERTBOT_OPTS[@]}" + # Reload webserver configuration + sudo-linux docker-compose kill -s HUP webserver + fi fi } From 0f12c6d9cd1eec08ce2dbc8de3506e432f449f2b Mon Sep 17 00:00:00 2001 From: Joris Borgdorff Date: Tue, 9 May 2017 08:33:32 +0200 Subject: [PATCH 136/197] [Travis] Create external volumes before test --- .travis.yml | 2 ++ 1 file changed, 2 insertions(+) diff --git a/.travis.yml b/.travis.yml index 1b85d2101..baccb74c0 100644 --- a/.travis.yml +++ b/.travis.yml @@ -35,6 +35,8 @@ script: - export HDFS_NAME_DIR_1=$PWD/hdfs-name1 - export HDFS_NAME_DIR_2=$PWD/hdfs-name2 - echo $"SMARTHOST_ADDRESS=mail.example.com\nSMARTHOST_PORT=587\nSMARTHOST_USER=user@example.com\nSMARTHOST_PASSWORD=XXXXXXXX" > etc/smtp.env + - sudo docker volume create certs + - sudo docker volume create certs-data - cp etc/radar.yml.template etc/radar.yml - cp etc/nginx.conf.template etc/nginx.conf - cp etc/sink-hdfs.properties.template etc/sink-hdfs.properties From d265b029b7d96677741764ed95c75933163646f7 Mon Sep 17 00:00:00 2001 From: Joris Borgdorff Date: Tue, 9 May 2017 08:36:19 +0200 Subject: [PATCH 137/197] Ignore implementation of the radar.yml file --- dcompose-stack/radar-cp-hadoop-stack/etc/.gitignore | 1 + 1 file changed, 1 insertion(+) diff --git a/dcompose-stack/radar-cp-hadoop-stack/etc/.gitignore b/dcompose-stack/radar-cp-hadoop-stack/etc/.gitignore index 3d8ea5977..79f6c9f15 100644 --- a/dcompose-stack/radar-cp-hadoop-stack/etc/.gitignore +++ b/dcompose-stack/radar-cp-hadoop-stack/etc/.gitignore @@ -1,3 +1,4 @@ /sink-mongo.properties /sink-hdfs.properties /nginx.conf +/radar.yml From 012753bfb67e23a1b6bf5943234fa5ba2d87df80 Mon Sep 17 00:00:00 2001 From: Joris Borgdorff Date: Tue, 9 May 2017 08:42:58 +0200 Subject: [PATCH 138/197] Fixed sed replacement syntax --- dcompose-stack/radar-cp-hadoop-stack/install-radar-stack.sh | 5 +++-- dcompose-stack/radar-cp-hadoop-stack/util.sh | 2 +- 2 files changed, 4 insertions(+), 3 deletions(-) diff --git a/dcompose-stack/radar-cp-hadoop-stack/install-radar-stack.sh b/dcompose-stack/radar-cp-hadoop-stack/install-radar-stack.sh index 77b120152..870bbc0ef 100755 
--- a/dcompose-stack/radar-cp-hadoop-stack/install-radar-stack.sh +++ b/dcompose-stack/radar-cp-hadoop-stack/install-radar-stack.sh @@ -38,9 +38,10 @@ inline_variable 'topics=' "${RADAR_RAW_TOPIC_LIST}" etc/sink-hdfs.properties echo "==> Setting nginx" copy_template_if_absent etc/nginx.conf inline_variable 'server_name[[:space:]]*' "${SERVER_NAME};" etc/nginx.conf -sed_i 's|\(/etc/letsencrypt/live/\)[^/]*/\(.*\.pem\)|\(\1\)'$SERVER_NAME'\2|' etc/nginx.conf -request_certificate "$SERVER_NAME" +sed_i 's|\(/etc/letsencrypt/live/\)[^/]*/\(.*\.pem\)|\1'$SERVER_NAME'\2|' etc/nginx.conf echo "==> Starting RADAR-CNS Platform" sudo-linux docker-compose up --force-recreate -d + +request_certificate "$SERVER_NAME" echo "### SUCCESS ###" diff --git a/dcompose-stack/radar-cp-hadoop-stack/util.sh b/dcompose-stack/radar-cp-hadoop-stack/util.sh index 00c49b909..7c5ff8204 100644 --- a/dcompose-stack/radar-cp-hadoop-stack/util.sh +++ b/dcompose-stack/radar-cp-hadoop-stack/util.sh @@ -60,7 +60,7 @@ sed_i() { # inline_variable 'a=' 123 test.txt # will replace a line ' a=232 ' with ' a=123' inline_variable() { - sed_i 's|^\([[:space:]]*'$1'\).*$|\(\1\)'$2'|' $3 + sed_i 's|^\([[:space:]]*'$1'\).*$|\1'$2'|' $3 } # Copies the template (defined by the given config file with suffix From 0b7c5a1b37d1612bfde3ce92048a45d0241a32d4 Mon Sep 17 00:00:00 2001 From: Joris Borgdorff Date: Tue, 9 May 2017 08:47:46 +0200 Subject: [PATCH 139/197] Fixed sed typo --- .../radar-cp-hadoop-stack/install-radar-stack.sh | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) diff --git a/dcompose-stack/radar-cp-hadoop-stack/install-radar-stack.sh b/dcompose-stack/radar-cp-hadoop-stack/install-radar-stack.sh index 870bbc0ef..1a3221f2b 100755 --- a/dcompose-stack/radar-cp-hadoop-stack/install-radar-stack.sh +++ b/dcompose-stack/radar-cp-hadoop-stack/install-radar-stack.sh @@ -20,7 +20,7 @@ else echo "==> Creating docker network - hadoop ALREADY EXISTS" fi -echo "==> Setting MongoDB Connector" +echo "==> Configuring MongoDB Connector" # Update sink-mongo.properties copy_template_if_absent etc/sink-mongo.properties @@ -31,14 +31,16 @@ inline_variable 'mongo.database=' $HOTSTORAGE_NAME etc/sink-mongo.properties # Set topics inline_variable 'topics=' "${RADAR_AGG_TOPIC_LIST}" etc/sink-mongo.properties -echo "==> Setting HDFS Connector" +echo "==> Configuring HDFS Connector" copy_template_if_absent etc/sink-hdfs.properties inline_variable 'topics=' "${RADAR_RAW_TOPIC_LIST}" etc/sink-hdfs.properties -echo "==> Setting nginx" +echo "==> Configuring nginx" copy_template_if_absent etc/nginx.conf inline_variable 'server_name[[:space:]]*' "${SERVER_NAME};" etc/nginx.conf -sed_i 's|\(/etc/letsencrypt/live/\)[^/]*/\(.*\.pem\)|\1'$SERVER_NAME'\2|' etc/nginx.conf +sed_i 's|\(/etc/letsencrypt/live/\)[^/]*\(/.*\.pem\)|\1'$SERVER_NAME'\2|' etc/nginx.conf +sudo-linux docker volume create certs +sudo-linux docker volume create certs-data echo "==> Starting RADAR-CNS Platform" sudo-linux docker-compose up --force-recreate -d From 633af6027893e62fc24ef77f4cb96c6f1365840b Mon Sep 17 00:00:00 2001 From: Joris Borgdorff Date: Tue, 9 May 2017 10:47:17 +0200 Subject: [PATCH 140/197] Using proper bash -z notation --- .../radar-cp-hadoop-stack/install-radar-stack.sh | 3 ++- .../radar-cp-hadoop-stack/kafka-radarinit/topic_init.sh | 8 ++++---- .../radar-cp-hadoop-stack/renew_ssl_certificate.sh | 3 ++- 3 files changed, 8 insertions(+), 6 deletions(-) diff --git a/dcompose-stack/radar-cp-hadoop-stack/install-radar-stack.sh 
b/dcompose-stack/radar-cp-hadoop-stack/install-radar-stack.sh index 1a3221f2b..85b05a9e6 100755 --- a/dcompose-stack/radar-cp-hadoop-stack/install-radar-stack.sh +++ b/dcompose-stack/radar-cp-hadoop-stack/install-radar-stack.sh @@ -9,8 +9,9 @@ check_parent_exists HDFS_NAME_DIR_1 ${HDFS_NAME_DIR_1} check_parent_exists HDFS_NAME_DIR_2 ${HDFS_NAME_DIR_2} check_parent_exists MONGODB_DIR ${MONGODB_DIR} -if [ -z "$SERVER_NAME" ]; then +if [ -z ${SERVER_NAME} ]; then echo "Set SERVER_NAME variable in .env" + exit 1 fi if [ -z $(sudo-linux docker network ls --format '{{.Name}}' | grep "^hadoop$") ]; then diff --git a/dcompose-stack/radar-cp-hadoop-stack/kafka-radarinit/topic_init.sh b/dcompose-stack/radar-cp-hadoop-stack/kafka-radarinit/topic_init.sh index 539ed0c01..8b14e3eb5 100755 --- a/dcompose-stack/radar-cp-hadoop-stack/kafka-radarinit/topic_init.sh +++ b/dcompose-stack/radar-cp-hadoop-stack/kafka-radarinit/topic_init.sh @@ -28,22 +28,22 @@ done # Check if variables exist -if [ -z "$RADAR_TOPICS" ]; then +if [ -z ${RADAR_TOPICS} ]; then echo "RADAR_TOPICS is not defined" exit 2 fi -if [ -z "$KAFKA_ZOOKEEPER_CONNECT" ]; then +if [ -z ${KAFKA_ZOOKEEPER_CONNECT} ]; then echo "KAFKA_ZOOKEEPER_CONNECT is not defined" exit 2 fi -if [ -z "$RADAR_PARTITIONS" ]; then +if [ -z ${RADAR_PARTITIONS} ]; then echo "RADAR_PARTITIONS is not defined" exit 2 fi -if [ -z "$RADAR_REPLICATION_FACTOR" ]; then +if [ -z ${RADAR_REPLICATION_FACTOR} ]; then echo "RADAR_REPLICATION_FACTOR is not defined" exit 2 fi diff --git a/dcompose-stack/radar-cp-hadoop-stack/renew_ssl_certificate.sh b/dcompose-stack/radar-cp-hadoop-stack/renew_ssl_certificate.sh index b6bfe5892..3072fee29 100755 --- a/dcompose-stack/radar-cp-hadoop-stack/renew_ssl_certificate.sh +++ b/dcompose-stack/radar-cp-hadoop-stack/renew_ssl_certificate.sh @@ -3,8 +3,9 @@ . ./util.sh . 
./.env -if [ -z "$SERVER_NAME" ]; then +if [ -z ${SERVER_NAME} ]; then echo "Set SERVER_NAME variable in .env" + exit 1 fi request_certificate $SERVER_NAME force From 7436f21956aeb571521265ddfe2dc3ef6f2840ee Mon Sep 17 00:00:00 2001 From: Joris Borgdorff Date: Tue, 9 May 2017 10:48:48 +0200 Subject: [PATCH 141/197] Splitting initialization and request of certificate --- .../radar-cp-hadoop-stack/etc/env.template | 1 + .../install-radar-stack.sh | 5 +- .../renew_ssl_certificate.sh | 2 +- dcompose-stack/radar-cp-hadoop-stack/util.sh | 85 +++++++++++++------ 4 files changed, 65 insertions(+), 28 deletions(-) diff --git a/dcompose-stack/radar-cp-hadoop-stack/etc/env.template b/dcompose-stack/radar-cp-hadoop-stack/etc/env.template index 9d093f8b2..270c8f438 100644 --- a/dcompose-stack/radar-cp-hadoop-stack/etc/env.template +++ b/dcompose-stack/radar-cp-hadoop-stack/etc/env.template @@ -1,4 +1,5 @@ SERVER_NAME=localhost +SELF_SIGNED_CERT=yes RADAR_TOPIC_LIST=android_empatica_e4_acceleration,android_empatica_e4_acceleration_output,android_empatica_e4_battery_level,android_empatica_e4_battery_level_output,android_empatica_e4_blood_volume_pulse,android_empatica_e4_blood_volume_pulse_output,android_empatica_e4_electrodermal_activity,android_empatica_e4_electrodermal_activity_output,android_empatica_e4_heartrate,android_empatica_e4_inter_beat_interval,android_empatica_e4_inter_beat_interval_output,android_empatica_e4_sensor_status,android_empatica_e4_sensor_status_output,android_empatica_e4_temperature,android_empatica_e4_temperature_output,application_server_status,application_record_counts,application_uptime,application_external_time,android_phone_battery_level,android_phone_acceleration,android_phone_light,android_pebble2_acceleration,android_pebble2_battery_level,android_pebble2_heart_rate,android_pebble2_heart_rate_filtered RADAR_RAW_TOPIC_LIST=android_empatica_e4_acceleration,android_empatica_e4_battery_level,android_empatica_e4_blood_volume_pulse,android_empatica_e4_electrodermal_activity,android_empatica_e4_inter_beat_interval,android_empatica_e4_sensor_status,android_empatica_e4_temperature,application_server_status,application_record_counts,application_uptime,application_external_time,android_phone_battery_level,android_phone_acceleration,android_phone_light,android_pebble2_acceleration,android_pebble2_battery_level,android_pebble2_heart_rate,android_pebble2_heart_rate_filtered RADAR_AGG_TOPIC_LIST=android_empatica_e4_acceleration_output,android_empatica_e4_battery_level_output,android_empatica_e4_blood_volume_pulse_output,android_empatica_e4_electrodermal_activity_output,android_empatica_e4_heartrate,android_empatica_e4_inter_beat_interval_output,android_empatica_e4_sensor_status_output,android_empatica_e4_temperature_output,application_server_status,application_record_counts,application_uptime,application_external_time diff --git a/dcompose-stack/radar-cp-hadoop-stack/install-radar-stack.sh b/dcompose-stack/radar-cp-hadoop-stack/install-radar-stack.sh index 85b05a9e6..802ac9ac6 100755 --- a/dcompose-stack/radar-cp-hadoop-stack/install-radar-stack.sh +++ b/dcompose-stack/radar-cp-hadoop-stack/install-radar-stack.sh @@ -40,11 +40,10 @@ echo "==> Configuring nginx" copy_template_if_absent etc/nginx.conf inline_variable 'server_name[[:space:]]*' "${SERVER_NAME};" etc/nginx.conf sed_i 's|\(/etc/letsencrypt/live/\)[^/]*\(/.*\.pem\)|\1'$SERVER_NAME'\2|' etc/nginx.conf -sudo-linux docker volume create certs -sudo-linux docker volume create certs-data +init_certificate "${SERVER_NAME}" echo "==> 
Starting RADAR-CNS Platform" sudo-linux docker-compose up --force-recreate -d -request_certificate "$SERVER_NAME" +request_certificate "${SERVER_NAME}" "${SELF_SIGNED_CERT:-yes}" echo "### SUCCESS ###" diff --git a/dcompose-stack/radar-cp-hadoop-stack/renew_ssl_certificate.sh b/dcompose-stack/radar-cp-hadoop-stack/renew_ssl_certificate.sh index 3072fee29..052f97a83 100755 --- a/dcompose-stack/radar-cp-hadoop-stack/renew_ssl_certificate.sh +++ b/dcompose-stack/radar-cp-hadoop-stack/renew_ssl_certificate.sh @@ -8,4 +8,4 @@ if [ -z ${SERVER_NAME} ]; then exit 1 fi -request_certificate $SERVER_NAME force +request_certificate "${SERVER_NAME}" "${SELF_SIGNED_CERTIFICATE:-yes}" force diff --git a/dcompose-stack/radar-cp-hadoop-stack/util.sh b/dcompose-stack/radar-cp-hadoop-stack/util.sh index 7c5ff8204..e3726d74a 100644 --- a/dcompose-stack/radar-cp-hadoop-stack/util.sh +++ b/dcompose-stack/radar-cp-hadoop-stack/util.sh @@ -77,39 +77,76 @@ copy_template_if_absent() { fi } +self_signed_certificate() { + SERVER_NAME=$1 + SSL_PATH="/etc/openssl/live/${SERVER_NAME}" + echo "==> Generating self-signed certificate" + sudo-linux docker run -i --rm -v certs:/etc/openssl -v certs-data:/var/lib/openssl alpine:3.5 \ + /bin/sh -c "mkdir -p '${SSL_PATH}' && touch /var/lib/openssl/.well-known && apk update && apk add openssl && openssl req -x509 -newkey rsa:4086 -subj '/C=XX/ST=XXXX/L=XXXX/O=XXXX/CN=localhost' -keyout '${SSL_PATH}/privkey.pem' -out '${SSL_PATH}/chain.pem' -days 3650 -nodes -sha256 && cp '${SSL_PATH}/chain.pem' '${SSL_PATH}/fullchain.pem' && rm -f '${SSL_PATH}/.letsencrypt'" +} + +letsencrypt_certonly() { + SERVER_NAME=$1 + echo "==> Requesting Let's Encrypt SSL certificate for ${SERVER_NAME}" + CERTBOT_DOCKER_OPTS=(-i --rm -v certs:/etc/letsencrypt -v certs-data:/data/letsencrypt deliverous/certbot) + CERTBOT_OPTS=(--webroot --webroot-path=/data/letsencrypt -d "${SERVER_NAME}") + sudo-linux docker run "${CERTBOT_DOCKER_OPTS[@]}" certonly "${CERTBOT_OPTS[@]}" + SSL_PATH="/etc/openssl/live/${SERVER_NAME}" + sudo-linux docker run -i --rm -v certs:/etc/openssl alpine:3.5 \ + /bin/sh -c "mkdir -p '${SSL_PATH}' && touch '${SSL_PATH}/.letsencrypt'" +} + +letsencrypt_renew() { + SERVER_NAME=$1 + echo "==> Renewing Let's Encrypt SSL certificate for ${SERVER_NAME}" + CERTBOT_DOCKER_OPTS=(-i --rm -v certs:/etc/letsencrypt -v certs-data:/data/letsencrypt deliverous/certbot) + CERTBOT_OPTS=(--webroot --webroot-path=/data/letsencrypt -d "${SERVER_NAME}") + sudo-linux docker run "${CERTBOT_DOCKER_OPTS[@]}" certonly "${CERTBOT_OPTS[@]}" +} + +init_certificate() { + SERVER_NAME=$1 + SSL_PATH="/etc/openssl/live/${SERVER_NAME}" + if sudo-linux docker run --rm -v certs:/etc/openssl alpine:3.5 /bin/sh -c "[ ! 
-e '${SSL_PATH}/chain.pem' ]"; then + self_signed_certificate "${SERVER_NAME}" + fi +} + request_certificate() { SERVER_NAME=$1 + SELF_SIGNED=$2 SSL_PATH="/etc/openssl/live/${SERVER_NAME}" - if sudo-linux docker run --rm -v certs:/etc/openssl alpine:3.5 /bin/sh -c "[ -e '$SSL_PATH/chain.pem' ]"; then - KEY_EXISTS=1 - if [ "$2" = "force" ]; then - echo " WARN: SSL certificate already existed, renewing" + + init_certificate "${SERVER_NAME}" + CURRENT_CERT=$(sudo-linux docker run --rm -v certs:/etc/openssl alpine:3.5 /bin/sh -c "[ -e '${SSL_PATH}/.letsencrypt ] && echo letsencrypt || echo self-signed") + + if [ $CURRENT_CERT = "self-signed" ]; then + if [ "${SELF_SIGNED}" = "yes" ]; then + if [ "$3" = "force" ]; then + echo "WARN: Self-signed SSL certificate already existed, recreating" + self_signed_certificate "${SERVER_NAME}" + else + echo "Self-signed SSL certificate exists, not recreating" + return + fi else - echo " SSL certificate already exists, not recreating" - return + letsencrypt_certonly "${SERVER_NAME}" fi else - KEY_EXISTS=0 - fi + if [ "$3" != "force" ]; then + echo "Let's Encrypt SSL certificate already exists, not renewing" + return + fi - if [ "$SERVER_NAME" = "localhost" ]; then - echo "==> Generating self-signed certificate" - sudo-linux docker run -i --rm -v certs:/etc/openssl -v certs-data:/var/lib/openssl alpine:3.5 \ - /bin/sh -c "mkdir -p $SSL_PATH && touch /var/lib/openssl/.well-known && apk update && apk add openssl && openssl req -x509 -newkey rsa:4086 -subj '/C=XX/ST=XXXX/L=XXXX/O=XXXX/CN=localhost' -keyout '$SSL_PATH/privkey.pem' -out '$SSL_PATH/chain.pem' -days 3650 -nodes -sha256 && cp $SSL_PATH/chain.pem $SSL_PATH/fullchain.pem" - else - echo "==> Requesting Let's Encrypt SSL certificate" - CERTBOT_DOCKER_OPTS=(-i --rm -v certs:/etc/letsencrypt -v certs-data:/data/letsencrypt deliverous/certbot) - CERTBOT_OPTS=(--webroot --webroot-path=/data/letsencrypt -d $SERVER_NAME) - if [ $KEY_EXISTS -eq 0 ]; then - # request key for the first time - sudo-linux docker run "${CERTBOT_DOCKER_OPTS[@]}" certonly "${CERTBOT_OPTS[@]}" - else - # renew key - sudo-linux docker run "${CERTBOT_DOCKER_OPTS[@]}" renew "${CERTBOT_OPTS[@]}" - # Reload webserver configuration - sudo-linux docker-compose kill -s HUP webserver + if [ "${SELF_SIGNED}" = "yes" ]; then + echo "Converting Let's Encrypt SSL certificate to a self-signed SSL" + self_signed_certificate "${SERVER_NAME}" + fi + if [ "$3" = "force"]; then + letsencrypt_renew "${SERVER_NAME}" fi fi + sudo-linux docker-compose kill -s HUP webserver } echo "OS version: $(uname -a)" From 703965d04ee95f14c8a4eb84c6719a229320079d Mon Sep 17 00:00:00 2001 From: Joris Borgdorff Date: Tue, 9 May 2017 10:49:44 +0200 Subject: [PATCH 142/197] Quiet hadoop network creation --- dcompose-stack/radar-cp-hadoop-stack/install-radar-stack.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/dcompose-stack/radar-cp-hadoop-stack/install-radar-stack.sh b/dcompose-stack/radar-cp-hadoop-stack/install-radar-stack.sh index 802ac9ac6..fb8f74214 100755 --- a/dcompose-stack/radar-cp-hadoop-stack/install-radar-stack.sh +++ b/dcompose-stack/radar-cp-hadoop-stack/install-radar-stack.sh @@ -16,7 +16,7 @@ fi if [ -z $(sudo-linux docker network ls --format '{{.Name}}' | grep "^hadoop$") ]; then echo "==> Creating docker network - hadoop" - sudo-linux docker network create hadoop + sudo-linux docker network create hadoop > /dev/null else echo "==> Creating docker network - hadoop ALREADY EXISTS" fi From 1c5b8dcf0805f2dabc758e19e9a659ad045c6b3c Mon Sep 
17 00:00:00 2001 From: Joris Borgdorff Date: Tue, 9 May 2017 10:51:34 +0200 Subject: [PATCH 143/197] Quoting variables in sed regex --- dcompose-stack/radar-cp-hadoop-stack/install-radar-stack.sh | 2 +- dcompose-stack/radar-cp-hadoop-stack/util.sh | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/dcompose-stack/radar-cp-hadoop-stack/install-radar-stack.sh b/dcompose-stack/radar-cp-hadoop-stack/install-radar-stack.sh index fb8f74214..c164286e6 100755 --- a/dcompose-stack/radar-cp-hadoop-stack/install-radar-stack.sh +++ b/dcompose-stack/radar-cp-hadoop-stack/install-radar-stack.sh @@ -39,7 +39,7 @@ inline_variable 'topics=' "${RADAR_RAW_TOPIC_LIST}" etc/sink-hdfs.properties echo "==> Configuring nginx" copy_template_if_absent etc/nginx.conf inline_variable 'server_name[[:space:]]*' "${SERVER_NAME};" etc/nginx.conf -sed_i 's|\(/etc/letsencrypt/live/\)[^/]*\(/.*\.pem\)|\1'$SERVER_NAME'\2|' etc/nginx.conf +sed_i 's|\(/etc/letsencrypt/live/\)[^/]*\(/.*\.pem\)|\1'"${SERVER_NAME}"'\2|' etc/nginx.conf init_certificate "${SERVER_NAME}" echo "==> Starting RADAR-CNS Platform" diff --git a/dcompose-stack/radar-cp-hadoop-stack/util.sh b/dcompose-stack/radar-cp-hadoop-stack/util.sh index e3726d74a..4d36f27c2 100644 --- a/dcompose-stack/radar-cp-hadoop-stack/util.sh +++ b/dcompose-stack/radar-cp-hadoop-stack/util.sh @@ -60,7 +60,7 @@ sed_i() { # inline_variable 'a=' 123 test.txt # will replace a line ' a=232 ' with ' a=123' inline_variable() { - sed_i 's|^\([[:space:]]*'$1'\).*$|\1'$2'|' $3 + sed_i 's|^\([[:space:]]*'"$1"'\).*$|\1'"$2"'|' "$3" } # Copies the template (defined by the given config file with suffix From 93573a1f488fed2b610246fe4c62b573d16a7869 Mon Sep 17 00:00:00 2001 From: Joris Borgdorff Date: Tue, 9 May 2017 10:57:10 +0200 Subject: [PATCH 144/197] Fixed bash quote and reordered request_certificate --- dcompose-stack/radar-cp-hadoop-stack/util.sh | 28 ++++++++++---------- 1 file changed, 14 insertions(+), 14 deletions(-) diff --git a/dcompose-stack/radar-cp-hadoop-stack/util.sh b/dcompose-stack/radar-cp-hadoop-stack/util.sh index 4d36f27c2..b26c22eaf 100644 --- a/dcompose-stack/radar-cp-hadoop-stack/util.sh +++ b/dcompose-stack/radar-cp-hadoop-stack/util.sh @@ -118,21 +118,9 @@ request_certificate() { SSL_PATH="/etc/openssl/live/${SERVER_NAME}" init_certificate "${SERVER_NAME}" - CURRENT_CERT=$(sudo-linux docker run --rm -v certs:/etc/openssl alpine:3.5 /bin/sh -c "[ -e '${SSL_PATH}/.letsencrypt ] && echo letsencrypt || echo self-signed") + CURRENT_CERT=$(sudo-linux docker run --rm -v certs:/etc/openssl alpine:3.5 /bin/sh -c "[ -e '${SSL_PATH}/.letsencrypt' ] && echo letsencrypt || echo self-signed") - if [ $CURRENT_CERT = "self-signed" ]; then - if [ "${SELF_SIGNED}" = "yes" ]; then - if [ "$3" = "force" ]; then - echo "WARN: Self-signed SSL certificate already existed, recreating" - self_signed_certificate "${SERVER_NAME}" - else - echo "Self-signed SSL certificate exists, not recreating" - return - fi - else - letsencrypt_certonly "${SERVER_NAME}" - fi - else + if [ "${CURRENT_CERT}" = "letsencrypt" ]; then if [ "$3" != "force" ]; then echo "Let's Encrypt SSL certificate already exists, not renewing" return @@ -145,6 +133,18 @@ request_certificate() { if [ "$3" = "force"]; then letsencrypt_renew "${SERVER_NAME}" fi + else + if [ "${SELF_SIGNED}" = "yes" ]; then + if [ "$3" = "force" ]; then + echo "WARN: Self-signed SSL certificate already existed, recreating" + self_signed_certificate "${SERVER_NAME}" + else + echo "Self-signed SSL certificate exists, not recreating" 
+ return + fi + else + letsencrypt_certonly "${SERVER_NAME}" + fi fi sudo-linux docker-compose kill -s HUP webserver } From c1d118dbca09934218c8fcd1a2e92df0bd9de975 Mon Sep 17 00:00:00 2001 From: Joris Borgdorff Date: Tue, 9 May 2017 11:13:22 +0200 Subject: [PATCH 145/197] Start letsencrypt from empty chain --- dcompose-stack/radar-cp-hadoop-stack/util.sh | 10 +++++++--- 1 file changed, 7 insertions(+), 3 deletions(-) diff --git a/dcompose-stack/radar-cp-hadoop-stack/util.sh b/dcompose-stack/radar-cp-hadoop-stack/util.sh index b26c22eaf..3c953217f 100644 --- a/dcompose-stack/radar-cp-hadoop-stack/util.sh +++ b/dcompose-stack/radar-cp-hadoop-stack/util.sh @@ -87,13 +87,17 @@ self_signed_certificate() { letsencrypt_certonly() { SERVER_NAME=$1 + SSL_PATH="/etc/openssl/live/${SERVER_NAME}" echo "==> Requesting Let's Encrypt SSL certificate for ${SERVER_NAME}" + + # start from a clean slate + sudo-linux docker run --rm -v certs:/etc/openssl alpine:3.5 /bin/rm -rf "${SSL_PATH}" CERTBOT_DOCKER_OPTS=(-i --rm -v certs:/etc/letsencrypt -v certs-data:/data/letsencrypt deliverous/certbot) CERTBOT_OPTS=(--webroot --webroot-path=/data/letsencrypt -d "${SERVER_NAME}") sudo-linux docker run "${CERTBOT_DOCKER_OPTS[@]}" certonly "${CERTBOT_OPTS[@]}" - SSL_PATH="/etc/openssl/live/${SERVER_NAME}" - sudo-linux docker run -i --rm -v certs:/etc/openssl alpine:3.5 \ - /bin/sh -c "mkdir -p '${SSL_PATH}' && touch '${SSL_PATH}/.letsencrypt'" + + # mark the directory as letsencrypt dir + sudo-linux docker run -i --rm -v certs:/etc/openssl alpine:3.5 /usr/bin/touch "${SSL_PATH}/.letsencrypt" } letsencrypt_renew() { From c0969a1f18cb25f2f0b65a1182fb64999f0324bc Mon Sep 17 00:00:00 2001 From: Joris Borgdorff Date: Tue, 9 May 2017 11:34:43 +0200 Subject: [PATCH 146/197] Remove all related letsencrypt folders if they exist --- dcompose-stack/radar-cp-hadoop-stack/util.sh | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/dcompose-stack/radar-cp-hadoop-stack/util.sh b/dcompose-stack/radar-cp-hadoop-stack/util.sh index 3c953217f..4be21d815 100644 --- a/dcompose-stack/radar-cp-hadoop-stack/util.sh +++ b/dcompose-stack/radar-cp-hadoop-stack/util.sh @@ -91,7 +91,10 @@ letsencrypt_certonly() { echo "==> Requesting Let's Encrypt SSL certificate for ${SERVER_NAME}" # start from a clean slate - sudo-linux docker run --rm -v certs:/etc/openssl alpine:3.5 /bin/rm -rf "${SSL_PATH}" + sudo-linux docker-compose stop webserver + sudo-linux docker run --rm -v certs:/etc/openssl alpine:3.5 /bin/sh -c "find /etc/openssl -name '${SERVER_NAME}*' -exec rm -rf '{}' ';'" + sudo-linux docker-compose start webserver + CERTBOT_DOCKER_OPTS=(-i --rm -v certs:/etc/letsencrypt -v certs-data:/data/letsencrypt deliverous/certbot) CERTBOT_OPTS=(--webroot --webroot-path=/data/letsencrypt -d "${SERVER_NAME}") sudo-linux docker run "${CERTBOT_DOCKER_OPTS[@]}" certonly "${CERTBOT_OPTS[@]}" From 6867e29cbe1be044e1f0409d26f6abd9828bcb41 Mon Sep 17 00:00:00 2001 From: Joris Borgdorff Date: Tue, 9 May 2017 11:39:53 +0200 Subject: [PATCH 147/197] Slight find command optimization --- dcompose-stack/radar-cp-hadoop-stack/util.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/dcompose-stack/radar-cp-hadoop-stack/util.sh b/dcompose-stack/radar-cp-hadoop-stack/util.sh index 4be21d815..caaf2be60 100644 --- a/dcompose-stack/radar-cp-hadoop-stack/util.sh +++ b/dcompose-stack/radar-cp-hadoop-stack/util.sh @@ -92,7 +92,7 @@ letsencrypt_certonly() { # start from a clean slate sudo-linux docker-compose stop webserver - sudo-linux docker 
run --rm -v certs:/etc/openssl alpine:3.5 /bin/sh -c "find /etc/openssl -name '${SERVER_NAME}*' -exec rm -rf '{}' ';'" + sudo-linux docker run --rm -v certs:/etc/openssl alpine:3.5 /bin/sh -c "find /etc/openssl -name '${SERVER_NAME}*' -prune -exec rm -rf '{}' +" sudo-linux docker-compose start webserver CERTBOT_DOCKER_OPTS=(-i --rm -v certs:/etc/letsencrypt -v certs-data:/data/letsencrypt deliverous/certbot) From 5013470db36fb7154396e9e13aa8d51f7d319a79 Mon Sep 17 00:00:00 2001 From: Joris Borgdorff Date: Tue, 9 May 2017 11:43:19 +0200 Subject: [PATCH 148/197] Fixed touch path in alpine --- dcompose-stack/radar-cp-hadoop-stack/util.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/dcompose-stack/radar-cp-hadoop-stack/util.sh b/dcompose-stack/radar-cp-hadoop-stack/util.sh index caaf2be60..dff0a151e 100644 --- a/dcompose-stack/radar-cp-hadoop-stack/util.sh +++ b/dcompose-stack/radar-cp-hadoop-stack/util.sh @@ -100,7 +100,7 @@ letsencrypt_certonly() { sudo-linux docker run "${CERTBOT_DOCKER_OPTS[@]}" certonly "${CERTBOT_OPTS[@]}" # mark the directory as letsencrypt dir - sudo-linux docker run -i --rm -v certs:/etc/openssl alpine:3.5 /usr/bin/touch "${SSL_PATH}/.letsencrypt" + sudo-linux docker run -i --rm -v certs:/etc/openssl alpine:3.5 /bin/touch "${SSL_PATH}/.letsencrypt" } letsencrypt_renew() { From d4c165d939da503768120e85f394bb2f97f9cb48 Mon Sep 17 00:00:00 2001 From: Joris Borgdorff Date: Tue, 9 May 2017 11:58:32 +0200 Subject: [PATCH 149/197] Fixed SELF_SIGNED_CERT env name in renew_ssl_certificate.sh --- dcompose-stack/radar-cp-hadoop-stack/renew_ssl_certificate.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/dcompose-stack/radar-cp-hadoop-stack/renew_ssl_certificate.sh b/dcompose-stack/radar-cp-hadoop-stack/renew_ssl_certificate.sh index 052f97a83..fa18ead9a 100755 --- a/dcompose-stack/radar-cp-hadoop-stack/renew_ssl_certificate.sh +++ b/dcompose-stack/radar-cp-hadoop-stack/renew_ssl_certificate.sh @@ -8,4 +8,4 @@ if [ -z ${SERVER_NAME} ]; then exit 1 fi -request_certificate "${SERVER_NAME}" "${SELF_SIGNED_CERTIFICATE:-yes}" force +request_certificate "${SERVER_NAME}" "${SELF_SIGNED_CERT:-yes}" force From c2aeca97b443b9aa9b9482ca1d1011b165e9bcf6 Mon Sep 17 00:00:00 2001 From: Joris Borgdorff Date: Tue, 9 May 2017 12:01:02 +0200 Subject: [PATCH 150/197] Renew update --- dcompose-stack/radar-cp-hadoop-stack/README.md | 4 +++- dcompose-stack/radar-cp-hadoop-stack/util.sh | 5 ++--- 2 files changed, 5 insertions(+), 4 deletions(-) diff --git a/dcompose-stack/radar-cp-hadoop-stack/README.md b/dcompose-stack/radar-cp-hadoop-stack/README.md index 0090a2b09..54a101c95 100644 --- a/dcompose-stack/radar-cp-hadoop-stack/README.md +++ b/dcompose-stack/radar-cp-hadoop-stack/README.md @@ -2,7 +2,7 @@ ## Configuration -First move `etc/env.template` file to `./.env` and check and modify all its variables. +First move `etc/env.template` file to `./.env` and check and modify all its variables. To have a valid HTTPS connection for a public host, set `SELF_SIGNED_CERT=no`. Modify `etc/smtp.env.template` to set a SMTP host to send emails with, and move it to `etc/smtp.env`. The configuration settings are passed to a [namshi/smtp](https://hub.docker.com/r/namshi/smtp/) Docker container. This container supports a.o. regular SMTP and GMail. 
@@ -29,3 +29,5 @@ CSV-structured data can be gotten from HDFS by running ./hdfs_restructure.sh /topicAndroidNew ``` This will put all CSV files in the destination directory, with subdirectory structure `PatientId/SensorType/Date_Hour.csv`. + +If `SELF_SIGNED_CERT=no` in `./.env`, be sure to run `./renew_ssl_certificate.sh` every day to ensure that your certificate does not expire. diff --git a/dcompose-stack/radar-cp-hadoop-stack/util.sh b/dcompose-stack/radar-cp-hadoop-stack/util.sh index dff0a151e..3f3683c34 100644 --- a/dcompose-stack/radar-cp-hadoop-stack/util.sh +++ b/dcompose-stack/radar-cp-hadoop-stack/util.sh @@ -82,7 +82,7 @@ self_signed_certificate() { SSL_PATH="/etc/openssl/live/${SERVER_NAME}" echo "==> Generating self-signed certificate" sudo-linux docker run -i --rm -v certs:/etc/openssl -v certs-data:/var/lib/openssl alpine:3.5 \ - /bin/sh -c "mkdir -p '${SSL_PATH}' && touch /var/lib/openssl/.well-known && apk update && apk add openssl && openssl req -x509 -newkey rsa:4086 -subj '/C=XX/ST=XXXX/L=XXXX/O=XXXX/CN=localhost' -keyout '${SSL_PATH}/privkey.pem' -out '${SSL_PATH}/chain.pem' -days 3650 -nodes -sha256 && cp '${SSL_PATH}/chain.pem' '${SSL_PATH}/fullchain.pem' && rm -f '${SSL_PATH}/.letsencrypt'" + /bin/sh -c "mkdir -p '${SSL_PATH}' && touch /var/lib/openssl/.well-known && apk update && apk add openssl && openssl req -x509 -newkey rsa:4086 -subj '/C=XX/ST=XXXX/L=XXXX/O=XXXX/CN=localhost' -keyout '${SSL_PATH}/privkey.pem' -out '${SSL_PATH}/cert.pem' -days 3650 -nodes -sha256 && cp '${SSL_PATH}/cert.pem' '${SSL_PATH}/chain.pem' && cp '${SSL_PATH}/cert.pem' '${SSL_PATH}/fullchain.pem' && rm -f '${SSL_PATH}/.letsencrypt'" } letsencrypt_certonly() { @@ -136,8 +136,7 @@ request_certificate() { if [ "${SELF_SIGNED}" = "yes" ]; then echo "Converting Let's Encrypt SSL certificate to a self-signed SSL" self_signed_certificate "${SERVER_NAME}" - fi - if [ "$3" = "force"]; then + else letsencrypt_renew "${SERVER_NAME}" fi else From 311886eeadd64292198a4ca94bf0d8ee0ee64feb Mon Sep 17 00:00:00 2001 From: Joris Borgdorff Date: Tue, 9 May 2017 12:00:02 +0200 Subject: [PATCH 151/197] Renew only if needed --- dcompose-stack/radar-cp-hadoop-stack/util.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/dcompose-stack/radar-cp-hadoop-stack/util.sh b/dcompose-stack/radar-cp-hadoop-stack/util.sh index 3f3683c34..b28bbabdf 100644 --- a/dcompose-stack/radar-cp-hadoop-stack/util.sh +++ b/dcompose-stack/radar-cp-hadoop-stack/util.sh @@ -107,7 +107,7 @@ letsencrypt_renew() { SERVER_NAME=$1 echo "==> Renewing Let's Encrypt SSL certificate for ${SERVER_NAME}" CERTBOT_DOCKER_OPTS=(-i --rm -v certs:/etc/letsencrypt -v certs-data:/data/letsencrypt deliverous/certbot) - CERTBOT_OPTS=(--webroot --webroot-path=/data/letsencrypt -d "${SERVER_NAME}") + CERTBOT_OPTS=(-n --webroot --webroot-path=/data/letsencrypt -d "${SERVER_NAME}") sudo-linux docker run "${CERTBOT_DOCKER_OPTS[@]}" certonly "${CERTBOT_OPTS[@]}" } From 1ce61b9edc7e4d8609f65c57de12adb14a19ebc8 Mon Sep 17 00:00:00 2001 From: Joris Borgdorff Date: Tue, 9 May 2017 12:10:23 +0200 Subject: [PATCH 152/197] No longer restart the webserver when removing stale requests --- dcompose-stack/radar-cp-hadoop-stack/util.sh | 2 -- 1 file changed, 2 deletions(-) diff --git a/dcompose-stack/radar-cp-hadoop-stack/util.sh b/dcompose-stack/radar-cp-hadoop-stack/util.sh index b28bbabdf..72f70060b 100644 --- a/dcompose-stack/radar-cp-hadoop-stack/util.sh +++ b/dcompose-stack/radar-cp-hadoop-stack/util.sh @@ -91,9 +91,7 @@ letsencrypt_certonly() { 
echo "==> Requesting Let's Encrypt SSL certificate for ${SERVER_NAME}" # start from a clean slate - sudo-linux docker-compose stop webserver sudo-linux docker run --rm -v certs:/etc/openssl alpine:3.5 /bin/sh -c "find /etc/openssl -name '${SERVER_NAME}*' -prune -exec rm -rf '{}' +" - sudo-linux docker-compose start webserver CERTBOT_DOCKER_OPTS=(-i --rm -v certs:/etc/letsencrypt -v certs-data:/data/letsencrypt deliverous/certbot) CERTBOT_OPTS=(--webroot --webroot-path=/data/letsencrypt -d "${SERVER_NAME}") From fa8f0d34c8f61757310435d6395845b660e29669 Mon Sep 17 00:00:00 2001 From: Joris Borgdorff Date: Tue, 9 May 2017 12:12:00 +0200 Subject: [PATCH 153/197] Added comment about changes to the server --- dcompose-stack/radar-cp-hadoop-stack/README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/dcompose-stack/radar-cp-hadoop-stack/README.md b/dcompose-stack/radar-cp-hadoop-stack/README.md index 54a101c95..237865376 100644 --- a/dcompose-stack/radar-cp-hadoop-stack/README.md +++ b/dcompose-stack/radar-cp-hadoop-stack/README.md @@ -14,7 +14,7 @@ Run ```shell ./install-radar-stack.sh ``` -to start all the RADAR services. Use the `(start|stop|reboot)-radar-stack.sh` to start, stop or reboot it. +to start all the RADAR services. Use the `(start|stop|reboot)-radar-stack.sh` to start, stop or reboot it. Note: whenever `.env` or `docker-compose.yml` are modified, this script needs to be called again. Raw data can be extracted from this setup by running: From 7706e83c9a1bcb32faa4651e947ebc00e0c4f405 Mon Sep 17 00:00:00 2001 From: Joris Borgdorff Date: Wed, 10 May 2017 10:23:33 +0200 Subject: [PATCH 154/197] Use bash array for docker command --- dcompose-stack/radar-cp-hadoop-stack/hdfs_restructure.sh | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/dcompose-stack/radar-cp-hadoop-stack/hdfs_restructure.sh b/dcompose-stack/radar-cp-hadoop-stack/hdfs_restructure.sh index fc799a636..0ee0d80f1 100755 --- a/dcompose-stack/radar-cp-hadoop-stack/hdfs_restructure.sh +++ b/dcompose-stack/radar-cp-hadoop-stack/hdfs_restructure.sh @@ -25,7 +25,7 @@ OUTPUT_DIR="$(cd "$(dirname "$OUTPUT_DIR")"; pwd)/$(basename "$OUTPUT_DIR")" # Internal docker directory to write output to HDFS_OUTPUT_DIR=/output # HDFS command to run -HDFS_COMMAND="java -jar /${JAR} hdfs://hdfs-namenode:8020 $HDFS_FILE $HDFS_OUTPUT_DIR" +HDFS_COMMAND=(/usr/bin/java -jar "/${JAR}" hdfs://hdfs-namenode:8020 "$HDFS_FILE" "$HDFS_OUTPUT_DIR") mkdir -p $OUTPUT_DIR -sudo-linux docker run -i --rm --network hadoop -v "$OUTPUT_DIR:$HDFS_OUTPUT_DIR" -v "$PWD/lib/${JAR}:/${JAR}" openjdk:8-jre-alpine $HDFS_COMMAND +sudo-linux docker run -i --rm --network hadoop -v "$OUTPUT_DIR:$HDFS_OUTPUT_DIR" -v "$PWD/lib/${JAR}:/${JAR}" openjdk:8-jre-alpine "${HDFS_COMMAND[@]}" From 637868206e8fd79522f170c568e27c0d3f310243 Mon Sep 17 00:00:00 2001 From: Joris Borgdorff Date: Wed, 10 May 2017 11:18:15 +0200 Subject: [PATCH 155/197] Use new hdfsrestructure release --- dcompose-stack/radar-cp-hadoop-stack/hdfs_restructure.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/dcompose-stack/radar-cp-hadoop-stack/hdfs_restructure.sh b/dcompose-stack/radar-cp-hadoop-stack/hdfs_restructure.sh index fc799a636..971932f97 100755 --- a/dcompose-stack/radar-cp-hadoop-stack/hdfs_restructure.sh +++ b/dcompose-stack/radar-cp-hadoop-stack/hdfs_restructure.sh @@ -8,7 +8,7 @@ fi . 
./util.sh # HDFS restructure version -JAR_VERSION=0.1 +JAR_VERSION=0.1.1 # HDFS restructure JAR JAR="restructurehdfs-all-${JAR_VERSION}.jar" From 142fdacd37dfd4fdb2ce0b109631c34587b2d8bf Mon Sep 17 00:00:00 2001 From: Francesco Nobilia Date: Wed, 10 May 2017 18:57:15 +0100 Subject: [PATCH 156/197] Updated REST-API and Broker health check based on Zookeeper --- dashboard/README.md | 3 - dashboard/cadvisor.png | Bin 25022 -> 0 bytes dashboard/kafka.png | Bin 22340 -> 0 bytes dashboard/portainer.png | Bin 7071 -> 0 bytes dashboard/radaradmin.html | 212 ------------------ dashboard/tomcat.png | Bin 18593 -> 0 bytes .../radar-cp-hadoop-stack/docker-compose.yml | 8 +- .../etc/nginx.conf.template | 6 +- .../etc/rest-api/device-catalog.yml.template | 116 ++++++++++ .../etc/rest-api/radar.yml.template | 27 +++ .../install-radar-stack.sh | 12 + .../kafka-radarinit/topic_init.sh | 7 +- images/radar-restapi/Dockerfile | 7 +- images/radar-restapi/README.md | 18 +- 14 files changed, 175 insertions(+), 241 deletions(-) delete mode 100644 dashboard/README.md delete mode 100644 dashboard/cadvisor.png delete mode 100644 dashboard/kafka.png delete mode 100644 dashboard/portainer.png delete mode 100644 dashboard/radaradmin.html delete mode 100644 dashboard/tomcat.png create mode 100644 dcompose-stack/radar-cp-hadoop-stack/etc/rest-api/device-catalog.yml.template create mode 100644 dcompose-stack/radar-cp-hadoop-stack/etc/rest-api/radar.yml.template diff --git a/dashboard/README.md b/dashboard/README.md deleted file mode 100644 index b3eaabbda..000000000 --- a/dashboard/README.md +++ /dev/null @@ -1,3 +0,0 @@ -# RADAR-Admin Dashboard - -First basic example of RADAR-CNS administrative dashboard. The vast majority of information available on this dashboard is hardcoded. At present, only the IP address of the machine hosting the RADAR-CNS Platform can be parametrised. 
\ No newline at end of file
diff --git a/dashboard/cadvisor.png b/dashboard/cadvisor.png
deleted file mode 100644
index e70ce37bde92aaa5fa3c9d094412af256a3703f1..0000000000000000000000000000000000000000
GIT binary patch
[25022 bytes of binary image data omitted]
diff --git a/dashboard/kafka.png b/dashboard/kafka.png
deleted file mode 100644
GIT binary patch
[22340 bytes of binary image data omitted]
diff --git a/dashboard/portainer.png b/dashboard/portainer.png
deleted file mode 100644
index f599f98dacfdd5938639a24dc94f673dc937b91f..0000000000000000000000000000000000000000
GIT binary patch
[7071 bytes of binary image data omitted]
[Deleted dashboard HTML overview page; the markup was lost in extraction. Recoverable content of the RADAR-CNS overview tables:
Zookeeper: zookeeper-1:2888:3888 | Kafka Brokers: PLAINTEXT://kafka-1:9092, PLAINTEXT://kafka-2:9092, PLAINTEXT://kafka-3:9092 | Schema Registry: http://schema-registry-1:8081 | REST Proxy: http://rest-proxy-1:8082 | HDFS: hdfs://hdfs-namenode:8020 | MongoDB-Connector: radar-mongodb-connector:8083 | HDFS-Connector: radar-hdfs-connector:8083
MongoDB: hotstorage:27017:28017 | REST API: http://rest-api:8080 | Dashboard: http://dashboard:3000 | SMTP: smtp:25 | cAdvisor: cadvisor:8181 | Portainer: portainer:8182
UDOO 110: IP 192.168.1.110, User KCLTest110, Empatica-A002E5 (00:07:80:1F:53:1E), Empatica-A0027E (00:07:80:1F:52:D7)
UDOO 111: IP 192.168.1.111, User KCLTest111, Empatica-A004FA (00:07:80:1F:52:F3), Empatica-A003D7 (00:07:80:1F:17:6B)
UDOO 112: IP 192.168.1.112, User KCLTest112, Empatica-A00505 (00:07:80:1F:53:51), Empatica-A00B1E (00:07:80:A7:BF:7B)
UDOO 113: IP 192.168.1.113, User KCLTest113, Empatica-A00285 (00:07:80:1F:53:1C), Empatica-A002EC (00:07:80:1F:17:68)]
\ No newline at end of file
diff --git a/dashboard/tomcat.png b/dashboard/tomcat.png
deleted file mode 100644
index 50003ff9b63d73321ecf5772b58ebe9e2aae1df0..0000000000000000000000000000000000000000
GIT binary patch
[binary image data omitted]
diff --git a/dcompose-stack/radar-cp-hadoop-stack/docker-compose.yml b/dcompose-stack/radar-cp-hadoop-stack/docker-compose.yml index 5bfc9222f..d28329f49 100644 --- a/dcompose-stack/radar-cp-hadoop-stack/docker-compose.yml +++ b/dcompose-stack/radar-cp-hadoop-stack/docker-compose.yml @@ -161,7 +161,6 @@ services: - rest-proxy-1 environment: KAFKA_ZOOKEEPER_CONNECT: zookeeper-1:2181 - KAFKA_REST_PROXY: http://rest-proxy-1:8082 KAFKA_BROKERS: 3 RADAR_TOPICS: ${RADAR_TOPIC_LIST} RADAR_PARTITIONS: 3 @@ -195,11 +194,8 @@ services: depends_on: - hotstorage restart: always - environment: - MONGODB_USER: ${HOTSTORAGE_USERNAME} - MONGODB_PASS: ${HOTSTORAGE_PASSWORD} - MONGODB_DATABASE: ${HOTSTORAGE_NAME} - MONGODB_HOST: hotstorage:27017 + volumes: + - "./etc/rest-api:/usr/local/tomcat/conf/radar" healthcheck: test: ["CMD", "curl", "-IX", "HEAD", "http://localhost:8080/radar/api/"] interval: 1m
diff --git a/dcompose-stack/radar-cp-hadoop-stack/etc/nginx.conf.template b/dcompose-stack/radar-cp-hadoop-stack/etc/nginx.conf.template index 1e6a1fb91..3b34424e1 100644 --- a/dcompose-stack/radar-cp-hadoop-stack/etc/nginx.conf.template +++ b/dcompose-stack/radar-cp-hadoop-stack/etc/nginx.conf.template @@ -83,7 +83,11 @@ http { proxy_http_version 1.1; } location /api/ { - proxy_pass http://rest-api:8080/; + proxy_pass http://rest-api:8080/radar/api/; + proxy_set_header Host $host; + } + location /frontend/config/ { + proxy_pass http://rest-api:8080/radar/frontend/; proxy_set_header Host $host; } }
diff --git a/dcompose-stack/radar-cp-hadoop-stack/etc/rest-api/device-catalog.yml.template b/dcompose-stack/radar-cp-hadoop-stack/etc/rest-api/device-catalog.yml.template new file mode 100644 index 000000000..00ff3031b --- /dev/null +++ b/dcompose-stack/radar-cp-hadoop-stack/etc/rest-api/device-catalog.yml.template @@ -0,0 +1,116 @@ +#=========================== Device Catalog ===========================# +#List of integrated sources with relative configuration +device_catalog: + #====================== Empatica E4 Wristband =======================# + #A device item states the specification and the configurations required to integrate a wearable
+ # device into the data access interface. The used value must be contained in + # RADAR-Schemas/restapi/source/sources_type.avsc + EMPATICA: + #List of on-board sensors + sensors: + #Sensor name. Possible values are listed by RADAR-Schemas/restapi/data/sensor_type.avsc + - name: ACCELEROMETER + #Default data frequency + frequency: 32.0 + #Measurement unit. Possible values are listed in RADAR-Schemas/restapi/data/unit.avsc + unit: G + #Define if the samples are RAW data or instead they the result of some computation. Possible + # values are listed by RADAR-Schemas/restapi/data/data_type.avsc + data_type: RAW + #The associated Data Class: actual generalised reader implantation that extract data from + # the storage + data_class: ACCELERATION_FORMAT + #List of supported collections + collections: + #Time-frame of 10 seconds + 10sec: android_empatica_e4_acceleration_output + #Time-frame of 30 seconds + 30sec: android_empatica_e4_acceleration_output_30sec + #Time-frame of 1 minute + 1min: android_empatica_e4_acceleration_output_1min + #Time-frame of 10 minutes + 10min: android_empatica_e4_acceleration_output_10min + #Time-frame of 1 hour + 1h: android_empatica_e4_acceleration_output_1h + #Time-frame of 1 day + 1d: android_empatica_e4_acceleration_output_1d + #Time-frame of 1 week + 1w: android_empatica_e4_acceleration_output_1w + - name: BATTERY + frequency: 1.0 + unit: PERCENTAGE + data_type: RAW + data_class: DOUBLE_FORMAT + collections: + 10sec: android_empatica_e4_battery_level_output + 30sec: android_empatica_e4_battery_level_output_30sec + 1min: android_empatica_e4_battery_level_output_1min + 10min: android_empatica_e4_battery_level_output_10min + 1h: android_empatica_e4_battery_level_output_1h + 1d: android_empatica_e4_battery_level_output_1d + 1w: android_empatica_e4_battery_level_output_1w + - name: BLOOD_VOLUME_PULSE + frequency: 64.0 + unit: NANOWATT + data_type: RAW + data_class: DOUBLE_FORMAT + collections: + 10sec: android_empatica_e4_blood_volume_pulse_output + 30sec: android_empatica_e4_blood_volume_pulse_output_30sec + 1min: android_empatica_e4_blood_volume_pulse_output_1min + 10min: android_empatica_e4_blood_volume_pulse_output_10min + 1h: android_empatica_e4_blood_volume_pulse_output_1h + 1d: android_empatica_e4_blood_volume_pulse_output_1d + 1w: android_empatica_e4_blood_volume_pulse_output_1w + - name: ELECTRODERMAL_ACTIVITY + frequency: 4.0 + unit: MICROSIEMENS + data_type: RAW + data_class: DOUBLE_FORMAT + collections: + 10sec: android_empatica_e4_electrodermal_activity_output + 30sec: android_empatica_e4_electrodermal_activity_output_30sec + 1min: android_empatica_e4_electrodermal_activity_output_1min + 10min: android_empatica_e4_electrodermal_activity_output_10min + 1h: android_empatica_e4_electrodermal_activity_output_1h + 1d: android_empatica_e4_electrodermal_activity_output_1d + 1w: android_empatica_e4_electrodermal_activity_output_1w + - name: HEART_RATE + frequency: 1.0 + unit: BEATS_PER_MIN + data_type: RADAR + data_class: DOUBLE_FORMAT + collections: + 10sec: android_empatica_e4_heartrate + 30sec: android_empatica_e4_heartrate_30sec + 1min: android_empatica_e4_heartrate_1min + 10min: android_empatica_e4_heartrate_10min + 1h: android_empatica_e4_heartrate_1h + 1d: android_empatica_e4_heartrate_1d + 1w: android_empatica_e4_heartrate_1w + - name: INTER_BEAT_INTERVAL + frequency: 1.0 + unit: BEATS_PER_MIN + data_type: RAW + data_class: DOUBLE_FORMAT + collections: + 10sec: android_empatica_e4_inter_beat_interval_output + 30sec: 
android_empatica_e4_inter_beat_interval_output_30sec + 1min: android_empatica_e4_inter_beat_interval_output_1min + 10min: android_empatica_e4_inter_beat_interval_output_10min + 1h: android_empatica_e4_inter_beat_interval_output_1h + 1d: android_empatica_e4_inter_beat_interval_output_1d + 1w: android_empatica_e4_inter_beat_interval_output_1w + - name: THERMOMETER + frequency: 4.0 + unit: CELSIUS + data_type: RAW + data_class: DOUBLE_FORMAT + collections: + 10sec: android_empatica_e4_temperature_output + 30sec: android_empatica_e4_temperature_output_30sec + 1min: android_empatica_e4_temperature_output_1min + 10min: android_empatica_e4_temperature_output_10min + 1h: android_empatica_e4_temperature_output_1h + 1d: android_empatica_e4_temperature_output_1d + 1w: android_empatica_e4_temperature_output_1w \ No newline at end of file diff --git a/dcompose-stack/radar-cp-hadoop-stack/etc/rest-api/radar.yml.template b/dcompose-stack/radar-cp-hadoop-stack/etc/rest-api/radar.yml.template new file mode 100644 index 000000000..70aa68423 --- /dev/null +++ b/dcompose-stack/radar-cp-hadoop-stack/etc/rest-api/radar.yml.template @@ -0,0 +1,27 @@ +version: 0.1 +released: 2017-05-10 + +# Swagger documentation version +swagger_version: 1.0.0 + +# Comma separated list stating supported Application Protocols applicable to query APIs +application_protocol: https + +# Machine address hosting the APIs +host: localhost + +# Base or Root path for the RESTFul interface +api_base_path: api + +# Mongodb hosts +mongo_hosts: + hotstorage: 27017 + +# Mongodb User +mongo_user: + usr: + pwd: + db: + +# Path to the Device Catalog +device_catalog: device-catalog.yml \ No newline at end of file diff --git a/dcompose-stack/radar-cp-hadoop-stack/install-radar-stack.sh b/dcompose-stack/radar-cp-hadoop-stack/install-radar-stack.sh index c164286e6..9e4b991b8 100755 --- a/dcompose-stack/radar-cp-hadoop-stack/install-radar-stack.sh +++ b/dcompose-stack/radar-cp-hadoop-stack/install-radar-stack.sh @@ -36,6 +36,18 @@ echo "==> Configuring HDFS Connector" copy_template_if_absent etc/sink-hdfs.properties inline_variable 'topics=' "${RADAR_RAW_TOPIC_LIST}" etc/sink-hdfs.properties +echo "==> Configuring REST-API" +copy_template_if_absent etc/rest-api/radar.yml +copy_template_if_absent etc/rest-api/device-catalog.yml + +# Set MongoDb credential +inline_variable 'usr:[[:space:]]' $HOTSTORAGE_USERNAME etc/rest-api/radar.yml +inline_variable 'pwd:[[:space:]]' $HOTSTORAGE_PASSWORD etc/rest-api/radar.yml +inline_variable 'db:[[:space:]]' $HOTSTORAGE_NAME etc/rest-api/radar.yml + +# Set variable for Swagger +inline_variable 'host:[[:space:]]*' "${SERVER_NAME};" etc/nginx.conf + echo "==> Configuring nginx" copy_template_if_absent etc/nginx.conf inline_variable 'server_name[[:space:]]*' "${SERVER_NAME};" etc/nginx.conf diff --git a/dcompose-stack/radar-cp-hadoop-stack/kafka-radarinit/topic_init.sh b/dcompose-stack/radar-cp-hadoop-stack/kafka-radarinit/topic_init.sh index 8b14e3eb5..af2642ace 100755 --- a/dcompose-stack/radar-cp-hadoop-stack/kafka-radarinit/topic_init.sh +++ b/dcompose-stack/radar-cp-hadoop-stack/kafka-radarinit/topic_init.sh @@ -11,10 +11,11 @@ fi # Wait until all brokers are up & running interval=1 while [ "$LENGTH" != "$KAFKA_BROKERS" ]; do - BROKERS=$(curl -sS $KAFKA_REST_PROXY/brokers) - BROKERS="$(echo -e "${BROKERS:12}" | tr -d '[:space:]' | tr -d '}' | tr -d ']')" + ZOOKEEPER_CHECK=$(zookeeper-shell $KAFKA_ZOOKEEPER_CONNECT <<< "ls /brokers/ids") + ZOOKEEPER_CHECK="${ZOOKEEPER_CHECK##*$'\n'}" + ZOOKEEPER_CHECK="$(echo -e 
"${ZOOKEEPER_CHECK}" | tr -d '[:space:]' | tr -d '[' | tr -d ']')" - IFS=',' read -r -a array <<< $BROKERS + IFS=',' read -r -a array <<< $ZOOKEEPER_CHECK LENGTH=${#array[@]} if [ "$LENGTH" != "$KAFKA_BROKERS" ]; then diff --git a/images/radar-restapi/Dockerfile b/images/radar-restapi/Dockerfile index 7e607ff12..21c133f35 100644 --- a/images/radar-restapi/Dockerfile +++ b/images/radar-restapi/Dockerfile @@ -17,6 +17,8 @@ MAINTAINER @fnobilia, @afolarin LABEL description="RADAR-CNS Rest Api docker container" +RUN mkdir /usr/local/tomcat/conf/radar + # Install Rest API RUN echo && echo "==> Installing Components" \ # Download Git RestApi release @@ -29,7 +31,10 @@ RUN echo && echo "==> Installing Components" \ # Remove repository && echo "==> Cleaning up" \ && cd /usr/local && rm -R /usr/local/RADAR-RestApi \ - && echo + # Create config folder + && echo "==> Creating config folder" \ + && mkdir /usr/local/tomcat/conf/radar \ + && echo EXPOSE 8080 diff --git a/images/radar-restapi/README.md b/images/radar-restapi/README.md index e0988b5af..d0a8006a0 100644 --- a/images/radar-restapi/README.md +++ b/images/radar-restapi/README.md @@ -10,25 +10,13 @@ Or pull from dockerhub: $ docker pull radarcns/radar-restapi:latest ``` +Edit `radar.yml` and `device-catalog.yml`, and place them under `/path/to/config` + Run the docker image locally: ``` -$ docker run -d -p 8080:8080 --name radar-restapi radarcns/radar-restapi:0.1 +$ docker run -d -p 8080:8080 -v /path/to/config:/usr/local/tomcat/conf/radar --name radar-restapi radarcns/radar-restapi:0.1 ``` The RestApi will be running at http://localhost:8080. To test them, access the [Swagger Documentation](http://localhost:8080/radar/api/swagger.json) ## Runtime environment variables - -Environment variables used by the RestApi - -```bash -# mongoDb user and password -MONGODB_USER='restapi' -MONGODB_PASS='radar' - -# mongoDb database -MONGODB_DATABASE='hotstorage' - -# mongoDb instance -MONGODB_HOST='localhost:27017' -``` From 073caed0c16aabc31887910379a118522beee03b Mon Sep 17 00:00:00 2001 From: Francesco Nobilia Date: Wed, 10 May 2017 20:27:46 +0100 Subject: [PATCH 157/197] Fixed error in installation script --- dcompose-stack/radar-cp-hadoop-stack/.gitignore | 1 - dcompose-stack/radar-cp-hadoop-stack/etc/.gitignore | 3 +++ dcompose-stack/radar-cp-hadoop-stack/install-radar-stack.sh | 2 +- 3 files changed, 4 insertions(+), 2 deletions(-) diff --git a/dcompose-stack/radar-cp-hadoop-stack/.gitignore b/dcompose-stack/radar-cp-hadoop-stack/.gitignore index 20ce7c905..4668dd37a 100644 --- a/dcompose-stack/radar-cp-hadoop-stack/.gitignore +++ b/dcompose-stack/radar-cp-hadoop-stack/.gitignore @@ -1,3 +1,2 @@ -/etc/smtp.env /.env /output/ diff --git a/dcompose-stack/radar-cp-hadoop-stack/etc/.gitignore b/dcompose-stack/radar-cp-hadoop-stack/etc/.gitignore index 79f6c9f15..9b6b65b55 100644 --- a/dcompose-stack/radar-cp-hadoop-stack/etc/.gitignore +++ b/dcompose-stack/radar-cp-hadoop-stack/etc/.gitignore @@ -2,3 +2,6 @@ /sink-hdfs.properties /nginx.conf /radar.yml +/smtp.env +/rest-api/device-catalog.yml +/rest-api/radar.yml \ No newline at end of file diff --git a/dcompose-stack/radar-cp-hadoop-stack/install-radar-stack.sh b/dcompose-stack/radar-cp-hadoop-stack/install-radar-stack.sh index 9e4b991b8..d0b2aaf2b 100755 --- a/dcompose-stack/radar-cp-hadoop-stack/install-radar-stack.sh +++ b/dcompose-stack/radar-cp-hadoop-stack/install-radar-stack.sh @@ -46,7 +46,7 @@ inline_variable 'pwd:[[:space:]]' $HOTSTORAGE_PASSWORD etc/rest-api/radar.yml inline_variable 
'db:[[:space:]]' $HOTSTORAGE_NAME etc/rest-api/radar.yml # Set variable for Swagger -inline_variable 'host:[[:space:]]*' "${SERVER_NAME};" etc/nginx.conf +inline_variable 'host:[[:space:]]*' "${SERVER_NAME}" etc/rest-api/radar.yml echo "==> Configuring nginx" copy_template_if_absent etc/nginx.conf From 4be7364cd5353d19081bccaccf17537954aea8e9 Mon Sep 17 00:00:00 2001 From: Francesco Nobilia Date: Mon, 15 May 2017 11:41:00 +0100 Subject: [PATCH 158/197] Fixed .well-known creation --- dcompose-stack/radar-cp-hadoop-stack/util.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/dcompose-stack/radar-cp-hadoop-stack/util.sh b/dcompose-stack/radar-cp-hadoop-stack/util.sh index 72f70060b..99c1d198e 100644 --- a/dcompose-stack/radar-cp-hadoop-stack/util.sh +++ b/dcompose-stack/radar-cp-hadoop-stack/util.sh @@ -82,7 +82,7 @@ self_signed_certificate() { SSL_PATH="/etc/openssl/live/${SERVER_NAME}" echo "==> Generating self-signed certificate" sudo-linux docker run -i --rm -v certs:/etc/openssl -v certs-data:/var/lib/openssl alpine:3.5 \ - /bin/sh -c "mkdir -p '${SSL_PATH}' && touch /var/lib/openssl/.well-known && apk update && apk add openssl && openssl req -x509 -newkey rsa:4086 -subj '/C=XX/ST=XXXX/L=XXXX/O=XXXX/CN=localhost' -keyout '${SSL_PATH}/privkey.pem' -out '${SSL_PATH}/cert.pem' -days 3650 -nodes -sha256 && cp '${SSL_PATH}/cert.pem' '${SSL_PATH}/chain.pem' && cp '${SSL_PATH}/cert.pem' '${SSL_PATH}/fullchain.pem' && rm -f '${SSL_PATH}/.letsencrypt'" + /bin/sh -c "mkdir -p '${SSL_PATH}' && mkdir /var/lib/openssl/.well-known && apk update && apk add openssl && openssl req -x509 -newkey rsa:4086 -subj '/C=XX/ST=XXXX/L=XXXX/O=XXXX/CN=localhost' -keyout '${SSL_PATH}/privkey.pem' -out '${SSL_PATH}/cert.pem' -days 3650 -nodes -sha256 && cp '${SSL_PATH}/cert.pem' '${SSL_PATH}/chain.pem' && cp '${SSL_PATH}/cert.pem' '${SSL_PATH}/fullchain.pem' && rm -f '${SSL_PATH}/.letsencrypt'" } letsencrypt_certonly() { From 88654dfe0360443ff0e528c3054fbc8273ac408c Mon Sep 17 00:00:00 2001 From: Joris Borgdorff Date: Mon, 15 May 2017 13:17:12 +0200 Subject: [PATCH 159/197] Fixed cert well-known directory creation and moved to script --- .../lib/self-sign-certificate.sh | 25 +++++++++++++++++++ dcompose-stack/radar-cp-hadoop-stack/util.sh | 8 +++--- 2 files changed, 29 insertions(+), 4 deletions(-) create mode 100755 dcompose-stack/radar-cp-hadoop-stack/lib/self-sign-certificate.sh diff --git a/dcompose-stack/radar-cp-hadoop-stack/lib/self-sign-certificate.sh b/dcompose-stack/radar-cp-hadoop-stack/lib/self-sign-certificate.sh new file mode 100755 index 000000000..5a151982a --- /dev/null +++ b/dcompose-stack/radar-cp-hadoop-stack/lib/self-sign-certificate.sh @@ -0,0 +1,25 @@ +#!/bin/sh + +set -e + +if [ $# -ne 1 ]; then + echo "Need SSL path parameter" + exit 1 +fi + +SSL_PATH="$1" + +if [ ! -e "${SSL_PATH}" ]; then + mkdir -p "${SSL_PATH}" +fi +if [ ! -e "/var/lib/openssl/.well-known" ]; then + mkdir -p /var/lib/openssl/.well-known +fi +apk update +apk add openssl + +cd "${SSL_PATH}" +find . 
-type f -delete +openssl req -x509 -newkey rsa:4086 -subj '/C=XX/ST=XXXX/L=XXXX/O=XXXX/CN=localhost' -keyout privkey.pem -out cert.pem -days 3650 -nodes -sha256 +cp cert.pem chain.pem +cp cert.pem fullchain.pem diff --git a/dcompose-stack/radar-cp-hadoop-stack/util.sh b/dcompose-stack/radar-cp-hadoop-stack/util.sh index 72f70060b..d66a4ed34 100644 --- a/dcompose-stack/radar-cp-hadoop-stack/util.sh +++ b/dcompose-stack/radar-cp-hadoop-stack/util.sh @@ -79,10 +79,9 @@ copy_template_if_absent() { self_signed_certificate() { SERVER_NAME=$1 - SSL_PATH="/etc/openssl/live/${SERVER_NAME}" echo "==> Generating self-signed certificate" - sudo-linux docker run -i --rm -v certs:/etc/openssl -v certs-data:/var/lib/openssl alpine:3.5 \ - /bin/sh -c "mkdir -p '${SSL_PATH}' && touch /var/lib/openssl/.well-known && apk update && apk add openssl && openssl req -x509 -newkey rsa:4086 -subj '/C=XX/ST=XXXX/L=XXXX/O=XXXX/CN=localhost' -keyout '${SSL_PATH}/privkey.pem' -out '${SSL_PATH}/cert.pem' -days 3650 -nodes -sha256 && cp '${SSL_PATH}/cert.pem' '${SSL_PATH}/chain.pem' && cp '${SSL_PATH}/cert.pem' '${SSL_PATH}/fullchain.pem' && rm -f '${SSL_PATH}/.letsencrypt'" + sudo-linux docker run -i --rm -v certs:/etc/openssl -v certs-data:/var/lib/openssl -v "${PWD}/lib/self-sign-certificate.sh:/self-sign-certificate.sh" alpine:3.5 \ + /self-sign-certificate.sh "/etc/openssl/live/${SERVER_NAME}" } letsencrypt_certonly() { @@ -150,7 +149,8 @@ request_certificate() { letsencrypt_certonly "${SERVER_NAME}" fi fi - sudo-linux docker-compose kill -s HUP webserver + echo "Reloading webserver configuration" + sudo-linux docker-compose kill -s HUP webserver 1>/dev/null 2>&1 } echo "OS version: $(uname -a)" From 82d6a03a519971bc31ec9a1091e8c22ceaee9a17 Mon Sep 17 00:00:00 2001 From: Joris Borgdorff Date: Mon, 15 May 2017 13:18:15 +0200 Subject: [PATCH 160/197] Update README.md --- images/radar-restapi/README.md | 2 -- 1 file changed, 2 deletions(-) diff --git a/images/radar-restapi/README.md b/images/radar-restapi/README.md index d0a8006a0..105f5e1f0 100644 --- a/images/radar-restapi/README.md +++ b/images/radar-restapi/README.md @@ -18,5 +18,3 @@ $ docker run -d -p 8080:8080 -v /path/to/config:/usr/local/tomcat/conf/radar --n ``` The RestApi will be running at http://localhost:8080. 
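A quick smoke test from the host (a sketch, assuming the container started above is running with the default port mapping) is to issue a HEAD request against the API root, mirroring the healthcheck used in the compose file:

```
$ curl -IX HEAD http://localhost:8080/radar/api/
```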
To test them, access the [Swagger Documentation](http://localhost:8080/radar/api/swagger.json) - -## Runtime environment variables From 836823cce2cd8a0dde498dd9f4b79f1ff9f22ad8 Mon Sep 17 00:00:00 2001 From: Joris Borgdorff Date: Mon, 15 May 2017 14:46:30 +0200 Subject: [PATCH 161/197] Also restart backend streams if needed --- dcompose-stack/radar-cp-hadoop-stack/docker-compose.yml | 1 + 1 file changed, 1 insertion(+) diff --git a/dcompose-stack/radar-cp-hadoop-stack/docker-compose.yml b/dcompose-stack/radar-cp-hadoop-stack/docker-compose.yml index d28329f49..d88db666d 100644 --- a/dcompose-stack/radar-cp-hadoop-stack/docker-compose.yml +++ b/dcompose-stack/radar-cp-hadoop-stack/docker-compose.yml @@ -378,6 +378,7 @@ services: - kafka-init volumes: - ./etc/radar.yml:/etc/radar.yml + restart: always environment: KAFKA_REST_PROXY: http://rest-proxy-1:8082 TOPIC_LIST: ${RADAR_TOPIC_LIST} From 9af57cec80a56c96d2976465fda83dbf05e12807 Mon Sep 17 00:00:00 2001 From: Joris Borgdorff Date: Mon, 15 May 2017 15:55:48 +0200 Subject: [PATCH 162/197] Use HTTP/1.1 with Confluent webservices<->nginx --- dcompose-stack/radar-cp-hadoop-stack/etc/nginx.conf.template | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/dcompose-stack/radar-cp-hadoop-stack/etc/nginx.conf.template b/dcompose-stack/radar-cp-hadoop-stack/etc/nginx.conf.template index 1e6a1fb91..8edd6fda1 100644 --- a/dcompose-stack/radar-cp-hadoop-stack/etc/nginx.conf.template +++ b/dcompose-stack/radar-cp-hadoop-stack/etc/nginx.conf.template @@ -62,10 +62,14 @@ http { location /kafka/ { proxy_pass http://rest-proxy-1:8082/; proxy_set_header Host $host; + proxy_http_version 1.1; + proxy_set_header Connection ""; } location /schema/ { proxy_pass http://schema-registry-1:8081/; proxy_set_header Host $host; + proxy_http_version 1.1; + proxy_set_header Connection ""; } location /dashboard/ { proxy_pass http://dashboard:3000/; From 1694df1a3ddd397da6530d859778c754113329fa Mon Sep 17 00:00:00 2001 From: Francesco Nobilia Date: Mon, 15 May 2017 15:18:57 +0100 Subject: [PATCH 163/197] Added schemaless key and value topics --- dcompose-stack/radar-cp-hadoop-stack/etc/env.template | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/dcompose-stack/radar-cp-hadoop-stack/etc/env.template b/dcompose-stack/radar-cp-hadoop-stack/etc/env.template index 270c8f438..3a84320d3 100644 --- a/dcompose-stack/radar-cp-hadoop-stack/etc/env.template +++ b/dcompose-stack/radar-cp-hadoop-stack/etc/env.template @@ -1,6 +1,6 @@ SERVER_NAME=localhost SELF_SIGNED_CERT=yes -RADAR_TOPIC_LIST=android_empatica_e4_acceleration,android_empatica_e4_acceleration_output,android_empatica_e4_battery_level,android_empatica_e4_battery_level_output,android_empatica_e4_blood_volume_pulse,android_empatica_e4_blood_volume_pulse_output,android_empatica_e4_electrodermal_activity,android_empatica_e4_electrodermal_activity_output,android_empatica_e4_heartrate,android_empatica_e4_inter_beat_interval,android_empatica_e4_inter_beat_interval_output,android_empatica_e4_sensor_status,android_empatica_e4_sensor_status_output,android_empatica_e4_temperature,android_empatica_e4_temperature_output,application_server_status,application_record_counts,application_uptime,application_external_time,android_phone_battery_level,android_phone_acceleration,android_phone_light,android_pebble2_acceleration,android_pebble2_battery_level,android_pebble2_heart_rate,android_pebble2_heart_rate_filtered 
+RADAR_TOPIC_LIST=android_empatica_e4_acceleration,android_empatica_e4_acceleration_output,android_empatica_e4_battery_level,android_empatica_e4_battery_level_output,android_empatica_e4_blood_volume_pulse,android_empatica_e4_blood_volume_pulse_output,android_empatica_e4_electrodermal_activity,android_empatica_e4_electrodermal_activity_output,android_empatica_e4_heartrate,android_empatica_e4_inter_beat_interval,android_empatica_e4_inter_beat_interval_output,android_empatica_e4_sensor_status,android_empatica_e4_sensor_status_output,android_empatica_e4_temperature,android_empatica_e4_temperature_output,application_server_status,application_record_counts,application_uptime,application_external_time,android_phone_battery_level,android_phone_acceleration,android_phone_light,android_pebble2_acceleration,android_pebble2_battery_level,android_pebble2_heart_rate,android_pebble2_heart_rate_filtered,schemaless-key,schemaless-value RADAR_RAW_TOPIC_LIST=android_empatica_e4_acceleration,android_empatica_e4_battery_level,android_empatica_e4_blood_volume_pulse,android_empatica_e4_electrodermal_activity,android_empatica_e4_inter_beat_interval,android_empatica_e4_sensor_status,android_empatica_e4_temperature,application_server_status,application_record_counts,application_uptime,application_external_time,android_phone_battery_level,android_phone_acceleration,android_phone_light,android_pebble2_acceleration,android_pebble2_battery_level,android_pebble2_heart_rate,android_pebble2_heart_rate_filtered RADAR_AGG_TOPIC_LIST=android_empatica_e4_acceleration_output,android_empatica_e4_battery_level_output,android_empatica_e4_blood_volume_pulse_output,android_empatica_e4_electrodermal_activity_output,android_empatica_e4_heartrate,android_empatica_e4_inter_beat_interval_output,android_empatica_e4_sensor_status_output,android_empatica_e4_temperature_output,application_server_status,application_record_counts,application_uptime,application_external_time HOTSTORAGE_USERNAME= From 403e5b6d25e91525b0fd58c0e1a4ec717a49fb6f Mon Sep 17 00:00:00 2001 From: Francesco Nobilia Date: Mon, 15 May 2017 16:56:48 +0100 Subject: [PATCH 164/197] Fixed folder creation in Rest Api image --- images/radar-restapi/Dockerfile | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/images/radar-restapi/Dockerfile b/images/radar-restapi/Dockerfile index 21c133f35..f32dc62a9 100644 --- a/images/radar-restapi/Dockerfile +++ b/images/radar-restapi/Dockerfile @@ -17,8 +17,6 @@ MAINTAINER @fnobilia, @afolarin LABEL description="RADAR-CNS Rest Api docker container" -RUN mkdir /usr/local/tomcat/conf/radar - # Install Rest API RUN echo && echo "==> Installing Components" \ # Download Git RestApi release @@ -38,4 +36,4 @@ RUN echo && echo "==> Installing Components" \ EXPOSE 8080 -CMD ["catalina.sh", "run"] \ No newline at end of file +CMD ["catalina.sh", "run"] From 9470b842adba624c55e0893892d3ebc90198c47b Mon Sep 17 00:00:00 2001 From: Joris Borgdorff Date: Tue, 16 May 2017 09:44:47 +0200 Subject: [PATCH 165/197] Simplified REST API Dockerfile --- images/radar-restapi/Dockerfile | 16 +--------------- 1 file changed, 1 insertion(+), 15 deletions(-) diff --git a/images/radar-restapi/Dockerfile b/images/radar-restapi/Dockerfile index f32dc62a9..20b80ce0b 100644 --- a/images/radar-restapi/Dockerfile +++ b/images/radar-restapi/Dockerfile @@ -18,21 +18,7 @@ MAINTAINER @fnobilia, @afolarin LABEL description="RADAR-CNS Rest Api docker container" # Install Rest API -RUN echo && echo "==> Installing Components" \ - # Download Git RestApi release - && 
echo "==> Downloading RADAR-CNS/RADAR-RestApi v0.1-beta.1 release from GitHub" \ - && cd /usr/local && mkdir RADAR-RestApi && cd /usr/local/RADAR-RestApi \ - && wget https://github.com/RADAR-CNS/RADAR-RestApi/releases/download/v0.1-beta.1/radar.war \ - # Deploy the war - && echo "==> Deploying the WAR" \ - && cp /usr/local/RADAR-RestApi/radar.war /usr/local/tomcat/webapps/ \ - # Remove repository - && echo "==> Cleaning up" \ - && cd /usr/local && rm -R /usr/local/RADAR-RestApi \ - # Create config folder - && echo "==> Creating config folder" \ - && mkdir /usr/local/tomcat/conf/radar \ - && echo +ADD https://github.com/RADAR-CNS/RADAR-RestApi/releases/download/v0.1-beta.1/radar.war /usr/local/tomcat/webapps EXPOSE 8080 From e8f1f99ff50c4a233cbf2e9ebfeca088832f67b2 Mon Sep 17 00:00:00 2001 From: Joris Borgdorff Date: Thu, 18 May 2017 16:19:42 +0200 Subject: [PATCH 166/197] Use Java 8 for the Tomcat image --- images/radar-restapi/Dockerfile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/images/radar-restapi/Dockerfile b/images/radar-restapi/Dockerfile index 20b80ce0b..c1327f5c9 100644 --- a/images/radar-restapi/Dockerfile +++ b/images/radar-restapi/Dockerfile @@ -11,7 +11,7 @@ # limitations under the License. -FROM tomcat:8.0.37 +FROM tomcat:8.0.44-jre8 MAINTAINER @fnobilia, @afolarin From a9be20615aa9118525860ca9b87a191a215f1a38 Mon Sep 17 00:00:00 2001 From: Francesco Nobilia Date: Mon, 22 May 2017 14:17:35 +0200 Subject: [PATCH 167/197] Kafka-shell for checking topics availability The same check should be done for the RADAR-Backend container too, but it has to be built from a different docker image. --- .../radar-cp-hadoop-stack/docker-compose.yml | 2 - .../kafka-radarinit/topic_init.sh | 10 ++--- images/radar-hdfs-connector/kafka_status.sh | 41 +++++++++---------- .../radar-mongodb-connector/kafka_status.sh | 37 +++++++++-------- 4 files changed, 43 insertions(+), 47 deletions(-) diff --git a/dcompose-stack/radar-cp-hadoop-stack/docker-compose.yml b/dcompose-stack/radar-cp-hadoop-stack/docker-compose.yml index d88db666d..035b71344 100644 --- a/dcompose-stack/radar-cp-hadoop-stack/docker-compose.yml +++ b/dcompose-stack/radar-cp-hadoop-stack/docker-compose.yml @@ -312,7 +312,6 @@ services: CONNECT_CONSUMER_MAX_POLL_INTERVAL_MS: 300000 CONNECT_CONSUMER_SESSION_TIMEOUT_MS: 10000 CONNECT_CONSUMER_HEARTBEAT_INTERVAL_MS: 3000 - KAFKA_REST_PROXY: http://rest-proxy-1:8082 TOPIC_LIST: ${RADAR_TOPIC_LIST} #---------------------------------------------------------------------------# @@ -355,7 +354,6 @@ services: CONNECT_OFFSET_STORAGE_FILE_FILENAME: "/tmp/connect2.offset" CONNECT_REST_ADVERTISED_HOST_NAME: "radar-hdfs-connector" CONNECT_ZOOKEEPER_CONNECT: zookeeper-1:2181 - KAFKA_REST_PROXY: http://rest-proxy-1:8082 TOPIC_LIST: ${RADAR_TOPIC_LIST} CONNECTOR_PROPERTY_FILE_PREFIX: "sink-hdfs" diff --git a/dcompose-stack/radar-cp-hadoop-stack/kafka-radarinit/topic_init.sh b/dcompose-stack/radar-cp-hadoop-stack/kafka-radarinit/topic_init.sh index af2642ace..965769552 100755 --- a/dcompose-stack/radar-cp-hadoop-stack/kafka-radarinit/topic_init.sh +++ b/dcompose-stack/radar-cp-hadoop-stack/kafka-radarinit/topic_init.sh @@ -9,6 +9,11 @@ if [ -f /.radar_topic_set ]; then fi # Wait until all brokers are up & running +if [ -z ${KAFKA_ZOOKEEPER_CONNECT} ]; then + echo "KAFKA_ZOOKEEPER_CONNECT is not defined" + exit 2 +fi + interval=1 while [ "$LENGTH" != "$KAFKA_BROKERS" ]; do ZOOKEEPER_CHECK=$(zookeeper-shell $KAFKA_ZOOKEEPER_CONNECT <<< "ls /brokers/ids") @@ -34,11 +39,6 @@ if [ -z 
${RADAR_TOPICS} ]; then exit 2 fi -if [ -z ${KAFKA_ZOOKEEPER_CONNECT} ]; then - echo "KAFKA_ZOOKEEPER_CONNECT is not defined" - exit 2 -fi - if [ -z ${RADAR_PARTITIONS} ]; then echo "RADAR_PARTITIONS is not defined" exit 2 diff --git a/images/radar-hdfs-connector/kafka_status.sh b/images/radar-hdfs-connector/kafka_status.sh index a3264ea49..645f8e47f 100755 --- a/images/radar-hdfs-connector/kafka_status.sh +++ b/images/radar-hdfs-connector/kafka_status.sh @@ -1,8 +1,8 @@ #!/bin/bash # Check if variables exist -if [ -z "$KAFKA_REST_PROXY" ]; then - echo "KAFKA_REST_PROXY is not defined" +if [ -z "$CONNECT_ZOOKEEPER_CONNECT" ]; then + echo "CONNECT_ZOOKEEPER_CONNECT is not defined" exit 2 fi @@ -11,38 +11,30 @@ if [ -z "$TOPIC_LIST" ]; then exit 2 fi -if [ -z "$CONNECTOR_PROPERTY_FILE_PREFIX" ]; then - echo "CONNECTOR_PROPERTY_FILE_PREFIX is not defined" - exit 2 -fi +# Save current IFS +SAVEIFS=$IFS + # Fetch env topic list IFS=', ' read -r -a needed <<< $TOPIC_LIST # Fetch env topic list +IFS=$'\n' count=0 interval=1 -max_retryes=5 +max_retryes=10 while [ "$count" != "${#needed[@]}" ] ; do if [ "$max_retryes" -eq "0" ] ; then - echo "Error connecting to Rest-Proxy ... " - echo "Rebooting ... " + IFS=$SAVEIFS + echo "Force rebooting ... " exit 2 fi - echo "Waiting $interval second before retrying ..." - sleep $interval - if (( interval < 30 )); then - ((interval=interval*2)) - fi - count=0 - TOPICS=$(curl -sSX GET -H "Content-Type: application/json" "$KAFKA_REST_PROXY/topics") - curl_result=$? - TOPICS="$(echo -e "${TOPICS}" | tr -d '"' | tr -d '[' | tr -d ']' | tr -d '[:space:]' )" + topics=$(kafka-topics --list --zookeeper $CONNECT_ZOOKEEPER_CONNECT) + topics=($topics) - IFS=',' read -r -a array <<< $TOPICS - for topic in "${array[@]}" + for topic in "${topics[@]}" do for need in "${needed[@]}" do @@ -52,8 +44,13 @@ while [ "$count" != "${#needed[@]}" ] ; do done done - if [ "$curl_result" -ne "0" ] ; then - ((max_retryes--)) + if [ "$count" != "${#needed[@]}" ] ; then + echo "Waiting $interval second before retrying ..." + sleep $interval + if (( interval < 30 )); then + ((interval=interval*2)) + fi + ((max_retryes--)) fi done diff --git a/images/radar-mongodb-connector/kafka_status.sh b/images/radar-mongodb-connector/kafka_status.sh index 132d0a06a..b3e50697b 100755 --- a/images/radar-mongodb-connector/kafka_status.sh +++ b/images/radar-mongodb-connector/kafka_status.sh @@ -1,8 +1,8 @@ #!/bin/bash # Check if variables exist -if [ -z "$KAFKA_REST_PROXY" ]; then - echo "KAFKA_REST_PROXY is not defined" +if [ -z "$CONNECT_ZOOKEEPER_CONNECT" ]; then + echo "CONNECT_ZOOKEEPER_CONNECT is not defined" exit 2 fi @@ -11,34 +11,30 @@ if [ -z "$TOPIC_LIST" ]; then exit 2 fi +# Save current IFS +SAVEIFS=$IFS + # Fetch env topic list IFS=', ' read -r -a needed <<< $TOPIC_LIST # Fetch env topic list +IFS=$'\n' count=0 interval=1 -max_retryes=5 +max_retryes=15 while [ "$count" != "${#needed[@]}" ] ; do if [ "$max_retryes" -eq "0" ] ; then - echo "Error connecting to Rest-Proxy ... " - echo "Rebooting ... " + IFS=$SAVEIFS + echo "Force rebooting ... " exit 2 fi - echo "Waiting $interval second before retrying ..." - sleep $interval - if (( interval < 30 )); then - ((interval=interval*2)) - fi - count=0 - TOPICS=$(curl -sSX GET -H "Content-Type: application/json" "$KAFKA_REST_PROXY/topics") - curl_result=$? 
- TOPICS="$(echo -e "${TOPICS}" | tr -d '"' | tr -d '[' | tr -d ']' | tr -d '[:space:]' )" + topics=$(kafka-topics --list --zookeeper $CONNECT_ZOOKEEPER_CONNECT) + topics=($topics) - IFS=',' read -r -a array <<< $TOPICS - for topic in "${array[@]}" + for topic in "${topics[@]}" do for need in "${needed[@]}" do @@ -48,8 +44,13 @@ while [ "$count" != "${#needed[@]}" ] ; do done done - if [ "$curl_result" -ne "0" ] ; then - ((max_retryes--)) + if [ "$count" != "${#needed[@]}" ] ; then + echo "Waiting $interval second before retrying ..." + sleep $interval + if (( interval < 30 )); then + ((interval=interval*2)) + fi + ((max_retryes--)) fi done From d6f6dfafc270ac9b0e04970ea9dcea919df367d7 Mon Sep 17 00:00:00 2001 From: Francesco Nobilia Date: Mon, 22 May 2017 18:56:51 +0200 Subject: [PATCH 168/197] Connectors version 0.2.1 --- dcompose-stack/radar-cp-hadoop-stack/docker-compose.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/dcompose-stack/radar-cp-hadoop-stack/docker-compose.yml b/dcompose-stack/radar-cp-hadoop-stack/docker-compose.yml index 035b71344..a930d2432 100644 --- a/dcompose-stack/radar-cp-hadoop-stack/docker-compose.yml +++ b/dcompose-stack/radar-cp-hadoop-stack/docker-compose.yml @@ -275,7 +275,7 @@ services: # RADAR mongo connector # #---------------------------------------------------------------------------# radar-mongodb-connector: - image: radarcns/radar-mongodb-connector-auto:0.2 + image: radarcns/radar-mongodb-connector-auto:0.2.1 restart: on-failure volumes: - ./etc/sink-mongo.properties:/etc/kafka-connect/sink.properties @@ -318,7 +318,7 @@ services: # RADAR HDFS connector # #---------------------------------------------------------------------------# radar-hdfs-connector: - image: radarcns/radar-hdfs-connector-auto:0.2 + image: radarcns/radar-hdfs-connector-auto:0.2.1 restart: on-failure volumes: - ./etc/sink-hdfs.properties:/etc/kafka-connect/sink-hdfs.properties From 0847ab0c211d1b279dcfc4f286bdab661bcaa3a3 Mon Sep 17 00:00:00 2001 From: Francesco Nobilia Date: Fri, 26 May 2017 15:11:53 +0100 Subject: [PATCH 169/197] Use Network Manager --- scripts/check-radar-network.sh | 16 +++++++++------- 1 file changed, 9 insertions(+), 7 deletions(-) diff --git a/scripts/check-radar-network.sh b/scripts/check-radar-network.sh index 15713f09c..0b9c186b3 100644 --- a/scripts/check-radar-network.sh +++ b/scripts/check-radar-network.sh @@ -1,5 +1,7 @@ #!/bin/bash +# network interface +network=eduroam # network interface nic=wlp5s1 # lock file @@ -32,18 +34,18 @@ esac # force connection connect() { log_info "Forcing reconnection" - sudo ifdown --force $nic >> $logfile 2>&1 + sudo nmcli conn down $network >> $logfile 2>&1 log_info "Turning wifi NIC off" - sleep 10 - sudo ifup $nic >> $logfile 2>&1 + sleep 30 + sudo nmcli conn up $network >> $logfile 2>&1 log_info "Turning wifi NIC on" log_info "Double checking ..." if ! isConnected; then log_info "Forcing reconnection with a sleep time of 30 sec ..." 
- sudo ifdown --force $nic >> $logfile 2>&1 + sudo nmcli conn down $network >> $logfile 2>&1 log_info "Turning wifi NIC off" - sleep 30 - sudo ifup $nic >> $logfile 2>&1 + sleep 60 + sudo nmcli conn up $network >> $logfile 2>&1 log_info "Turning wifi NIC on" fi log_info "Completed" @@ -82,4 +84,4 @@ if [ $actualsize -ge $minimumsize ]; then timestamp=$(date '+%d-%m-%Y_%H-%M-%S'); cp $logfile $logfile"_"$timestamp > $logfile -fi \ No newline at end of file +fi From 4f62c57339ab60aff55d46792c376d9672e2464f Mon Sep 17 00:00:00 2001 From: Francesco Nobilia Date: Wed, 31 May 2017 12:50:12 +0100 Subject: [PATCH 170/197] Force HDFS commit in case no more data comes in Example scenario: data produced by the last user is not enough to invoke HDFS file commit (i.e. after extracting all records, the number of extracted records is less than flush.size) and no more data is streamed into the platform, than HDFS will not commit last temp files. To prevent data loss, a file commit is invoked every 15 minutes (900000 milliseconds) . --- .../radar-cp-hadoop-stack/etc/sink-hdfs.properties.template | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/dcompose-stack/radar-cp-hadoop-stack/etc/sink-hdfs.properties.template b/dcompose-stack/radar-cp-hadoop-stack/etc/sink-hdfs.properties.template index d5d598c5f..5feb5bd1f 100644 --- a/dcompose-stack/radar-cp-hadoop-stack/etc/sink-hdfs.properties.template +++ b/dcompose-stack/radar-cp-hadoop-stack/etc/sink-hdfs.properties.template @@ -3,6 +3,7 @@ connector.class=io.confluent.connect.hdfs.HdfsSinkConnector tasks.max=4 topics=android_empatica_e4_electrodermal_activity,android_empatica_e4_blood_volume_pulse,android_empatica_e4_temperature flush.size=150 +rotate.interval.ms=900000 hdfs.url=hdfs://hdfs-namenode:8020 format.class=org.radarcns.sink.hdfs.AvroFormatRadar -topics.dir=topicAndroidNew \ No newline at end of file +topics.dir=topicAndroidNew From d9eb1a1ecad381e018d672205ce71090545eb349 Mon Sep 17 00:00:00 2001 From: Francesco Nobilia Date: Thu, 1 Jun 2017 15:18:42 +0100 Subject: [PATCH 171/197] Script to run hdfs_restructure.sh by CRONTAB --- scripts/README.md | 8 +++- scripts/restracture-backup-hdfs.sh | 66 ++++++++++++++++++++++++++++++ 2 files changed, 73 insertions(+), 1 deletion(-) create mode 100644 scripts/restracture-backup-hdfs.sh diff --git a/scripts/README.md b/scripts/README.md index c0ef0a24a..108d3d951 100644 --- a/scripts/README.md +++ b/scripts/README.md @@ -8,4 +8,10 @@ This folder contains useful scripts to manage the server where the RADAR-CNS Pla - `logfile` is the log file where the script logs each operation - `url` is the web site used to check the connectivity -To add a script to `CRON` as `root`, run on the command-line `sudo crontab -e -u root` and add `*/2 * * * * /path/to/script-name.sh` at the end of the file. In this way, the script will be fired every `2` minutes. Before deploying the task, check that all paths used by the script are absolute. +- `restracture-backup-hdfs.sh` + - `logfile` is the log file where the script logs each operation + - `working_directory` is the directory where the `hdfs_restructure.sh` script is located. + - `storage_directory` is the directory where the extracted data will be stored + - `lockfile` lock usefull to check whether there is a previous instance still running + +To add a script to `CRON` as `root`, run on the command-line `sudo crontab -e -u root` and add `*/2 * * * * /absolute/path/to/script-name.sh` at the end of the file. 
In this way, the script will be fired every `2` minutes. Before deploying the task, check that all paths used by the script are absolute. diff --git a/scripts/restracture-backup-hdfs.sh b/scripts/restracture-backup-hdfs.sh new file mode 100644 index 000000000..0e05ffaaa --- /dev/null +++ b/scripts/restracture-backup-hdfs.sh @@ -0,0 +1,66 @@ +#!/bin/bash + +PATH=/usr/sbin:/usr/bin:/sbin:/bin:/usr/local/bin + +# log file +logfile= + +# working directory +working_directory= + +# landing folder +storage_directory= + +# lock file +lockfile= + +# involved HDFS directory +hdfs_directory=/topicAndroidNew + +# maximum file size in byte to rotate log +minimumsize=10000000 + +# current time +timestamp=$(date '+%Y-%m-%d %H:%M:%S'); + +# write message in the log file +log_info() { + echo "$timestamp - $@" >> $logfile 2>&1 +} + +# remove old lock +checkLock() { + uptime=$(> $logfile 2>&1) + log_info "Removing lock ..." + rm $lockfile +else + log_info "Another instance is already running ... " +fi +log_info "### DONE ###" + +# check if log size exceeds the limit. If so, it rotates the log file +actualsize=$(wc -c <"$logfile") + +if [ $actualsize -ge $minimumsize ]; then + timestamp=$(date '+%d-%m-%Y_%H-%M-%S'); + cp $logfile $logfile"_"$timestamp + > $logfile +fi \ No newline at end of file From cc909aef553e8ca3ae551ec9d296de39dfcc4914 Mon Sep 17 00:00:00 2001 From: Francesco Nobilia Date: Thu, 1 Jun 2017 15:28:58 +0100 Subject: [PATCH 172/197] Updated script README --- scripts/README.md | 16 +++++++++++++++- 1 file changed, 15 insertions(+), 1 deletion(-) diff --git a/scripts/README.md b/scripts/README.md index 108d3d951..107efce6e 100644 --- a/scripts/README.md +++ b/scripts/README.md @@ -14,4 +14,18 @@ This folder contains useful scripts to manage the server where the RADAR-CNS Pla - `storage_directory` is the directory where the extracted data will be stored - `lockfile` lock usefull to check whether there is a previous instance still running -To add a script to `CRON` as `root`, run on the command-line `sudo crontab -e -u root` and add `*/2 * * * * /absolute/path/to/script-name.sh` at the end of the file. In this way, the script will be fired every `2` minutes. Before deploying the task, check that all paths used by the script are absolute. +To add a script to `CRON` as `root`, run on the command-line `sudo crontab -e -u root` and add your task at the end of the file. The syntax is +```shell +* * * * * command to be executed +- - - - - +| | | | | +| | | | +----- day of week (0 - 6) (Sunday=0) +| | | +------- month (1 - 12) +| | +--------- day of month (1 - 31) +| +----------- hour (0 - 23) ++------------- min (0 - 59) +``` + +For example, `*/2 * * * * /absolute/path/to/script-name.sh` will execute `script-name.sh` every `2` minutes. + +Before deploying the task, make sure that all paths used by the script are absolute. 
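As a concrete illustration of the crontab setup described above (the install paths and schedules here are hypothetical, not part of the patches), a root crontab combining both maintenance scripts might look like this:

```shell
# Edit with: sudo crontab -e -u root
# Check internet connectivity every 2 minutes (path is a placeholder).
*/2 * * * * /home/radar/scripts/check-radar-network.sh
# Restructure HDFS data to the backup storage once a day at 03:00 (path is a placeholder).
0 3 * * * /home/radar/scripts/restracture-backup-hdfs.sh
```

Both scripts use a lock file to detect an instance that is still running, so overlapping runs are skipped rather than duplicated.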
From fa9b9eb9284e8e6152823e80683ba0795c576af8 Mon Sep 17 00:00:00 2001 From: Francesco Nobilia Date: Mon, 5 Jun 2017 11:18:56 +0100 Subject: [PATCH 173/197] General entry point for scripts --- scripts/README.md | 6 +- scripts/check_radar_network.sh | 29 ++++++++ scripts/restracture-backup-hdfs.sh | 66 ----------------- scripts/restracture_backup_hdfs.sh | 33 +++++++++ scripts/{check-radar-network.sh => util.sh} | 79 +++++++++------------ 5 files changed, 97 insertions(+), 116 deletions(-) create mode 100755 scripts/check_radar_network.sh delete mode 100644 scripts/restracture-backup-hdfs.sh create mode 100755 scripts/restracture_backup_hdfs.sh rename scripts/{check-radar-network.sh => util.sh} (60%) mode change 100644 => 100755 diff --git a/scripts/README.md b/scripts/README.md index 107efce6e..daf362a38 100644 --- a/scripts/README.md +++ b/scripts/README.md @@ -2,13 +2,13 @@ This folder contains useful scripts to manage the server where the RADAR-CNS Platform is running. -- `check-radar-network.sh` checks if the machine is connected to internet. The script can be parametrised with +- `check_radar_network.sh` checks if the machine is connected to internet. The script can be parametrised with - `nic` is the internet gateway - `lockfile` lock usefull to check whether there is a previous instance still running - `logfile` is the log file where the script logs each operation - `url` is the web site used to check the connectivity -- `restracture-backup-hdfs.sh` +- `restracture_backup_hdfs.sh` - `logfile` is the log file where the script logs each operation - `working_directory` is the directory where the `hdfs_restructure.sh` script is located. - `storage_directory` is the directory where the extracted data will be stored @@ -28,4 +28,4 @@ To add a script to `CRON` as `root`, run on the command-line `sudo crontab -e -u For example, `*/2 * * * * /absolute/path/to/script-name.sh` will execute `script-name.sh` every `2` minutes. -Before deploying the task, make sure that all paths used by the script are absolute. +Before deploying the task, make sure that all paths used by the script are absolute. Replace the relative path to `util.sh` with the absolute one. diff --git a/scripts/check_radar_network.sh b/scripts/check_radar_network.sh new file mode 100755 index 000000000..6e9de68eb --- /dev/null +++ b/scripts/check_radar_network.sh @@ -0,0 +1,29 @@ +#!/bin/bash + +# network interface +network=eduroam +# network interface +nic=wlp5s1 +# lock file +lockfile=/home/radar/RADAR-Network/LOCK_RETRY +# log file +logfile=/home/radar/RADAR-Network/radar-network.log +# url to check against +url=https://www.empatica.com + +. ./util.sh + +# check connection and force reconnection if needed +if [ ! -f $lockfile ]; then + touch $lockfile + if ! isConnected; then + connect + fi + rm $lockfile +else + log_info "Another instance is already running ... " +fi +log_info "### DONE ###" + +# check if log size exceeds the limit. 
If so, it rotates the log file +rolloverLog diff --git a/scripts/restracture-backup-hdfs.sh b/scripts/restracture-backup-hdfs.sh deleted file mode 100644 index 0e05ffaaa..000000000 --- a/scripts/restracture-backup-hdfs.sh +++ /dev/null @@ -1,66 +0,0 @@ -#!/bin/bash - -PATH=/usr/sbin:/usr/bin:/sbin:/bin:/usr/local/bin - -# log file -logfile= - -# working directory -working_directory= - -# landing folder -storage_directory= - -# lock file -lockfile= - -# involved HDFS directory -hdfs_directory=/topicAndroidNew - -# maximum file size in byte to rotate log -minimumsize=10000000 - -# current time -timestamp=$(date '+%Y-%m-%d %H:%M:%S'); - -# write message in the log file -log_info() { - echo "$timestamp - $@" >> $logfile 2>&1 -} - -# remove old lock -checkLock() { - uptime=$(> $logfile 2>&1) - log_info "Removing lock ..." - rm $lockfile -else - log_info "Another instance is already running ... " -fi -log_info "### DONE ###" - -# check if log size exceeds the limit. If so, it rotates the log file -actualsize=$(wc -c <"$logfile") - -if [ $actualsize -ge $minimumsize ]; then - timestamp=$(date '+%d-%m-%Y_%H-%M-%S'); - cp $logfile $logfile"_"$timestamp - > $logfile -fi \ No newline at end of file diff --git a/scripts/restracture_backup_hdfs.sh b/scripts/restracture_backup_hdfs.sh new file mode 100755 index 000000000..369e1bcff --- /dev/null +++ b/scripts/restracture_backup_hdfs.sh @@ -0,0 +1,33 @@ +#!/bin/bash + +# log file +logfile= + +# working directory +working_directory= + +# landing folder +storage_directory= + +# lock file +lockfile= + +# involved HDFS directory +hdfs_directory=/topicAndroidNew + +. ./util.sh + +# extract file from hdfs to backup storage if no other instance is running +if [ ! -f $lockfile ]; then + log_info "Creating lock ..." + touch $lockfile + (cd $working_directory && ./hdfs_restructure.sh $hdfs_directory $storage_directory >> $logfile 2>&1) + log_info "Removing lock ..." + rm $lockfile +else + log_info "Another instance is already running ... " +fi +log_info "### DONE ###" + +# check if log size exceeds the limit. 
If so, it rotates the log file +rolloverLog \ No newline at end of file diff --git a/scripts/check-radar-network.sh b/scripts/util.sh old mode 100644 new mode 100755 similarity index 60% rename from scripts/check-radar-network.sh rename to scripts/util.sh index 0b9c186b3..e90e5a800 --- a/scripts/check-radar-network.sh +++ b/scripts/util.sh @@ -1,28 +1,42 @@ #!/bin/bash -# network interface -network=eduroam -# network interface -nic=wlp5s1 -# lock file -lockfile=/home/radar/RADAR-Network/LOCK_RETRY -# log file -logfile=/home/radar/RADAR-Network/radar-network.log -# url to check against -url=https://www.empatica.com +PATH=/usr/sbin:/usr/bin:/sbin:/bin:/usr/local/bin # maximum file size in byte to rotate log minimumsize=10000000 # current time -timestamp=$(date '+%d/%m/%Y %H:%M:%S'); +timestamp=$(date '+%Y-%m-%d %H:%M:%S'); -# write message in the log file +# Write message in the log file log_info() { echo "$timestamp - $@" >> $logfile 2>&1 } -# check connection +# Remove old lock +checkLock() { + uptime=$( $logfile + fi +} + +# Check connection isConnected() { case "$(curl -s --max-time 10 --retry 5 -I $url | sed 's/^[^ ]* *\([0-9]\).*/\1/; 1q')" in [23]) log_info "HTTP connectivity is up" && return 0;; @@ -31,7 +45,7 @@ isConnected() { esac } -# force connection +# Force connection connect() { log_info "Forcing reconnection" sudo nmcli conn down $network >> $logfile 2>&1 @@ -51,37 +65,8 @@ connect() { log_info "Completed" } -# remove old lock -checkLock() { - uptime=$( $logfile -fi +log_info "### $timestamp ###" +log_info "Checking lock ..." +checkLock \ No newline at end of file From 3441ad9032d02768fdfef1a53d1d9aa768f87fc8 Mon Sep 17 00:00:00 2001 From: Francesco Nobilia Date: Thu, 8 Jun 2017 10:03:17 +0100 Subject: [PATCH 174/197] Switched Rest-Api version --- images/radar-restapi/Dockerfile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/images/radar-restapi/Dockerfile b/images/radar-restapi/Dockerfile index c1327f5c9..1ff4f40c5 100644 --- a/images/radar-restapi/Dockerfile +++ b/images/radar-restapi/Dockerfile @@ -18,7 +18,7 @@ MAINTAINER @fnobilia, @afolarin LABEL description="RADAR-CNS Rest Api docker container" # Install Rest API -ADD https://github.com/RADAR-CNS/RADAR-RestApi/releases/download/v0.1-beta.1/radar.war /usr/local/tomcat/webapps +ADD https://github.com/RADAR-CNS/RADAR-RestApi/releases/download/v0.1.1/radar-0.1.1.war /usr/local/tomcat/webapps EXPOSE 8080 From 5696201ebe543afc01bf4a528ace08d671050e45 Mon Sep 17 00:00:00 2001 From: Francesco Nobilia Date: Thu, 8 Jun 2017 10:52:22 +0100 Subject: [PATCH 175/197] Rest API 0.1.1 --- dcompose-stack/radar-cp-hadoop-stack/docker-compose.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/dcompose-stack/radar-cp-hadoop-stack/docker-compose.yml b/dcompose-stack/radar-cp-hadoop-stack/docker-compose.yml index a930d2432..ed4e8d8bc 100644 --- a/dcompose-stack/radar-cp-hadoop-stack/docker-compose.yml +++ b/dcompose-stack/radar-cp-hadoop-stack/docker-compose.yml @@ -187,7 +187,7 @@ services: # RADAR REST API # #---------------------------------------------------------------------------# rest-api: - image: radarcns/radar-restapi:0.1 + image: radarcns/radar-restapi:0.1.1 networks: - hotstorage - api From 36bda3cf122b43d6816b106812222016b6c82079 Mon Sep 17 00:00:00 2001 From: Francesco Nobilia Date: Thu, 8 Jun 2017 11:24:24 +0100 Subject: [PATCH 176/197] Renaming war while coping --- images/radar-restapi/Dockerfile | 2 +- images/radar-restapi/README.md | 2 +- 2 files changed, 2 insertions(+), 2 
deletions(-) diff --git a/images/radar-restapi/Dockerfile b/images/radar-restapi/Dockerfile index 1ff4f40c5..c511f6282 100644 --- a/images/radar-restapi/Dockerfile +++ b/images/radar-restapi/Dockerfile @@ -18,7 +18,7 @@ MAINTAINER @fnobilia, @afolarin LABEL description="RADAR-CNS Rest Api docker container" # Install Rest API -ADD https://github.com/RADAR-CNS/RADAR-RestApi/releases/download/v0.1.1/radar-0.1.1.war /usr/local/tomcat/webapps +ADD https://github.com/RADAR-CNS/RADAR-RestApi/releases/download/v0.1.1/radar-0.1.1.war /usr/local/tomcat/webapps/radar.war EXPOSE 8080 diff --git a/images/radar-restapi/README.md b/images/radar-restapi/README.md index 105f5e1f0..0792e25c6 100644 --- a/images/radar-restapi/README.md +++ b/images/radar-restapi/README.md @@ -14,7 +14,7 @@ Edit `radar.yml` and `device-catalog.yml`, and place them under `/path/to/config Run the docker image locally: ``` -$ docker run -d -p 8080:8080 -v /path/to/config:/usr/local/tomcat/conf/radar --name radar-restapi radarcns/radar-restapi:0.1 +$ docker run -d -p 8080:8080 -v /path/to/config:/usr/local/tomcat/conf/radar --name radar-restapi radarcns/radar-restapi:0.1.1 ``` The RestApi will be running at http://localhost:8080. To test them, access the [Swagger Documentation](http://localhost:8080/radar/api/swagger.json) From 2d774db9e83c781445b0c42ebb12e9b0af821d40 Mon Sep 17 00:00:00 2001 From: Joris Borgdorff Date: Mon, 12 Jun 2017 11:05:28 +0200 Subject: [PATCH 177/197] Possible to install radar stack with limited number of containers --- dcompose-stack/radar-cp-hadoop-stack/README.md | 2 +- dcompose-stack/radar-cp-hadoop-stack/install-radar-stack.sh | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/dcompose-stack/radar-cp-hadoop-stack/README.md b/dcompose-stack/radar-cp-hadoop-stack/README.md index 237865376..8a94e97d1 100644 --- a/dcompose-stack/radar-cp-hadoop-stack/README.md +++ b/dcompose-stack/radar-cp-hadoop-stack/README.md @@ -14,7 +14,7 @@ Run ```shell ./install-radar-stack.sh ``` -to start all the RADAR services. Use the `(start|stop|reboot)-radar-stack.sh` to start, stop or reboot it. Note: whenever `.env` or `docker-compose.yml` are modified, this script needs to be called again. +to start all the RADAR services. Use the `(start|stop|reboot)-radar-stack.sh` to start, stop or reboot it. Note: whenever `.env` or `docker-compose.yml` are modified, this script needs to be called again. To start a reduced set of containers, call `install-radar-stack.sh` with the intended containers as arguments. 
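For example, assuming the service names defined in the stack's `docker-compose.yml` (e.g. `zookeeper-1`, `kafka-1`, `schema-registry-1` — an illustrative sketch, not part of this patch), a reduced start-up might look like:

```shell
# Start only the ZooKeeper/Kafka core and the schema registry;
# docker-compose also brings up any services these depend on.
./install-radar-stack.sh zookeeper-1 kafka-1 kafka-2 kafka-3 schema-registry-1
```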
Raw data can be extracted from this setup by running: diff --git a/dcompose-stack/radar-cp-hadoop-stack/install-radar-stack.sh b/dcompose-stack/radar-cp-hadoop-stack/install-radar-stack.sh index d0b2aaf2b..a7e76f5ec 100755 --- a/dcompose-stack/radar-cp-hadoop-stack/install-radar-stack.sh +++ b/dcompose-stack/radar-cp-hadoop-stack/install-radar-stack.sh @@ -55,7 +55,7 @@ sed_i 's|\(/etc/letsencrypt/live/\)[^/]*\(/.*\.pem\)|\1'"${SERVER_NAME}"'\2|' et init_certificate "${SERVER_NAME}" echo "==> Starting RADAR-CNS Platform" -sudo-linux docker-compose up --force-recreate -d +sudo-linux docker-compose up --force-recreate -d "$@" request_certificate "${SERVER_NAME}" "${SELF_SIGNED_CERT:-yes}" echo "### SUCCESS ###" From c3d4f24d8a7552fa225096d32527df2f77286b44 Mon Sep 17 00:00:00 2001 From: Joris Borgdorff Date: Mon, 12 Jun 2017 11:08:53 +0200 Subject: [PATCH 178/197] Small readability change --- dcompose-stack/radar-cp-hadoop-stack/README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/dcompose-stack/radar-cp-hadoop-stack/README.md b/dcompose-stack/radar-cp-hadoop-stack/README.md index 8a94e97d1..c28a88ff9 100644 --- a/dcompose-stack/radar-cp-hadoop-stack/README.md +++ b/dcompose-stack/radar-cp-hadoop-stack/README.md @@ -30,4 +30,4 @@ CSV-structured data can be gotten from HDFS by running ``` This will put all CSV files in the destination directory, with subdirectory structure `PatientId/SensorType/Date_Hour.csv`. -If `SELF_SIGNED_CERT=no` in `./.env`, be sure to run `./renew_ssl_certificate.sh` every day to ensure that your certificate does not expire. +If `SELF_SIGNED_CERT=no` in `./.env`, be sure to run `./renew_ssl_certificate.sh` daily to ensure that your certificate does not expire. From 47bae377be810559ad861019178fd9bbecd56144 Mon Sep 17 00:00:00 2001 From: Joris Borgdorff Date: Tue, 4 Jul 2017 11:20:51 +0200 Subject: [PATCH 179/197] Docker version is up to date now --- .travis.yml | 2 -- 1 file changed, 2 deletions(-) diff --git a/.travis.yml b/.travis.yml index baccb74c0..fd97af7d2 100644 --- a/.travis.yml +++ b/.travis.yml @@ -6,8 +6,6 @@ env: DOCKER_COMPOSE_VERSION: 1.11.2 before_install: - - sudo apt-get update - - sudo apt-get -y -o Dpkg::Options::="--force-confnew" install docker-engine - docker --version - mkdir -p "$HOME/bin"; - export PATH="$HOME/bin:$PATH"; From b62f574f63c9f33440d34604e23f601fb75146f1 Mon Sep 17 00:00:00 2001 From: Francesco Nobilia Date: Wed, 5 Jul 2017 17:47:57 +0100 Subject: [PATCH 180/197] Added PHQ8 topic --- dcompose-stack/radar-cp-hadoop-stack/etc/env.template | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/dcompose-stack/radar-cp-hadoop-stack/etc/env.template b/dcompose-stack/radar-cp-hadoop-stack/etc/env.template index 3a84320d3..f1ac76956 100644 --- a/dcompose-stack/radar-cp-hadoop-stack/etc/env.template +++ b/dcompose-stack/radar-cp-hadoop-stack/etc/env.template @@ -1,8 +1,8 @@ SERVER_NAME=localhost SELF_SIGNED_CERT=yes 
-RADAR_TOPIC_LIST=android_empatica_e4_acceleration,android_empatica_e4_acceleration_output,android_empatica_e4_battery_level,android_empatica_e4_battery_level_output,android_empatica_e4_blood_volume_pulse,android_empatica_e4_blood_volume_pulse_output,android_empatica_e4_electrodermal_activity,android_empatica_e4_electrodermal_activity_output,android_empatica_e4_heartrate,android_empatica_e4_inter_beat_interval,android_empatica_e4_inter_beat_interval_output,android_empatica_e4_sensor_status,android_empatica_e4_sensor_status_output,android_empatica_e4_temperature,android_empatica_e4_temperature_output,application_server_status,application_record_counts,application_uptime,application_external_time,android_phone_battery_level,android_phone_acceleration,android_phone_light,android_pebble2_acceleration,android_pebble2_battery_level,android_pebble2_heart_rate,android_pebble2_heart_rate_filtered,schemaless-key,schemaless-value -RADAR_RAW_TOPIC_LIST=android_empatica_e4_acceleration,android_empatica_e4_battery_level,android_empatica_e4_blood_volume_pulse,android_empatica_e4_electrodermal_activity,android_empatica_e4_inter_beat_interval,android_empatica_e4_sensor_status,android_empatica_e4_temperature,application_server_status,application_record_counts,application_uptime,application_external_time,android_phone_battery_level,android_phone_acceleration,android_phone_light,android_pebble2_acceleration,android_pebble2_battery_level,android_pebble2_heart_rate,android_pebble2_heart_rate_filtered -RADAR_AGG_TOPIC_LIST=android_empatica_e4_acceleration_output,android_empatica_e4_battery_level_output,android_empatica_e4_blood_volume_pulse_output,android_empatica_e4_electrodermal_activity_output,android_empatica_e4_heartrate,android_empatica_e4_inter_beat_interval_output,android_empatica_e4_sensor_status_output,android_empatica_e4_temperature_output,application_server_status,application_record_counts,application_uptime,application_external_time +RADAR_TOPIC_LIST=android_empatica_e4_acceleration,android_empatica_e4_acceleration_output,android_empatica_e4_battery_level,android_empatica_e4_battery_level_output,android_empatica_e4_blood_volume_pulse,android_empatica_e4_blood_volume_pulse_output,android_empatica_e4_electrodermal_activity,android_empatica_e4_electrodermal_activity_output,android_empatica_e4_heartrate,android_empatica_e4_inter_beat_interval,android_empatica_e4_inter_beat_interval_output,android_empatica_e4_sensor_status,android_empatica_e4_sensor_status_output,android_empatica_e4_temperature,android_empatica_e4_temperature_output,application_server_status,application_record_counts,application_uptime,application_external_time,android_phone_battery_level,android_phone_acceleration,android_phone_light,android_pebble2_acceleration,android_pebble2_battery_level,android_pebble2_heart_rate,android_pebble2_heart_rate_filtered,active_questionnaire_phq8,schemaless-key,schemaless-value +RADAR_RAW_TOPIC_LIST=android_empatica_e4_acceleration,android_empatica_e4_battery_level,android_empatica_e4_blood_volume_pulse,android_empatica_e4_electrodermal_activity,android_empatica_e4_inter_beat_interval,android_empatica_e4_sensor_status,android_empatica_e4_temperature,application_server_status,application_record_counts,application_uptime,application_external_time,android_phone_battery_level,android_phone_acceleration,android_phone_light,android_pebble2_acceleration,android_pebble2_battery_level,android_pebble2_heart_rate,android_pebble2_heart_rate_filtered,active_questionnaire_phq8 
+RADAR_AGG_TOPIC_LIST=android_empatica_e4_acceleration_output,android_empatica_e4_battery_level_output,android_empatica_e4_blood_volume_pulse_output,android_empatica_e4_electrodermal_activity_output,android_empatica_e4_heartrate,android_empatica_e4_inter_beat_interval_output,android_empatica_e4_sensor_status_output,android_empatica_e4_temperature_output,application_server_status,application_record_counts,application_uptime,application_external_time,active_questionnaire_phq8 HOTSTORAGE_USERNAME= HOTSTORAGE_PASSWORD=XXXXXXXX HOTSTORAGE_NAME= From 2a32d58f211e0e84a08336e3cf192a2b6b609aa6 Mon Sep 17 00:00:00 2001 From: Sebastian Boettcher Date: Wed, 12 Jul 2017 17:20:43 +0200 Subject: [PATCH 181/197] fixing dashboard integration --- dcompose-stack/radar-cp-hadoop-stack/docker-compose.yml | 4 ++-- dcompose-stack/radar-cp-hadoop-stack/etc/nginx.conf.template | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/dcompose-stack/radar-cp-hadoop-stack/docker-compose.yml b/dcompose-stack/radar-cp-hadoop-stack/docker-compose.yml index ed4e8d8bc..74ae75342 100644 --- a/dcompose-stack/radar-cp-hadoop-stack/docker-compose.yml +++ b/dcompose-stack/radar-cp-hadoop-stack/docker-compose.yml @@ -213,9 +213,9 @@ services: - rest-api restart: always environment: - API_URI: http://${SERVER_NAME}:8080/api + API_URI: https://${SERVER_NAME}/api healthcheck: - test: ["CMD", "curl", "-IX", "HEAD", "http://localhost:3000/"] + test: ["CMD", "curl", "-IX", "HEAD", "http://localhost:80/"] interval: 1m timeout: 5s retries: 3 diff --git a/dcompose-stack/radar-cp-hadoop-stack/etc/nginx.conf.template b/dcompose-stack/radar-cp-hadoop-stack/etc/nginx.conf.template index 9037eb1d0..047f1f82f 100644 --- a/dcompose-stack/radar-cp-hadoop-stack/etc/nginx.conf.template +++ b/dcompose-stack/radar-cp-hadoop-stack/etc/nginx.conf.template @@ -72,7 +72,7 @@ http { proxy_set_header Connection ""; } location /dashboard/ { - proxy_pass http://dashboard:3000/; + proxy_pass http://dashboard:80/; proxy_set_header Host $host; } location /portainer/ { From dc0df091cf478d2db73e4a74e4fca860be911adc Mon Sep 17 00:00:00 2001 From: Sebastian Boettcher Date: Wed, 12 Jul 2017 18:33:43 +0200 Subject: [PATCH 182/197] account for additional dashboard fix --- dcompose-stack/radar-cp-hadoop-stack/docker-compose.yml | 1 + 1 file changed, 1 insertion(+) diff --git a/dcompose-stack/radar-cp-hadoop-stack/docker-compose.yml b/dcompose-stack/radar-cp-hadoop-stack/docker-compose.yml index 74ae75342..77d1b77a7 100644 --- a/dcompose-stack/radar-cp-hadoop-stack/docker-compose.yml +++ b/dcompose-stack/radar-cp-hadoop-stack/docker-compose.yml @@ -214,6 +214,7 @@ services: restart: always environment: API_URI: https://${SERVER_NAME}/api + BASE_HREF: /dashboard/ healthcheck: test: ["CMD", "curl", "-IX", "HEAD", "http://localhost:80/"] interval: 1m From a8d6cc563c946f4318f82a79b953c03f63e3e55f Mon Sep 17 00:00:00 2001 From: Joris Borgdorff Date: Thu, 13 Jul 2017 11:14:18 +0200 Subject: [PATCH 183/197] Fix dashboard docker image tag --- dcompose-stack/radar-cp-hadoop-stack/docker-compose.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/dcompose-stack/radar-cp-hadoop-stack/docker-compose.yml b/dcompose-stack/radar-cp-hadoop-stack/docker-compose.yml index 77d1b77a7..d52a1f165 100644 --- a/dcompose-stack/radar-cp-hadoop-stack/docker-compose.yml +++ b/dcompose-stack/radar-cp-hadoop-stack/docker-compose.yml @@ -206,7 +206,7 @@ services: # RADAR Dashboard # #---------------------------------------------------------------------------# 
dashboard: - image: radarcns/radar-dashboard:latest + image: radarcns/radar-dashboard:2.0.5-beta-base-href networks: - api depends_on: From b04a150fd3bc3cff03dccc790bd88ba7edca4019 Mon Sep 17 00:00:00 2001 From: Joris Borgdorff Date: Thu, 13 Jul 2017 15:24:12 +0200 Subject: [PATCH 184/197] Small util fixes --- dcompose-stack/radar-cp-hadoop-stack/util.sh | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/dcompose-stack/radar-cp-hadoop-stack/util.sh b/dcompose-stack/radar-cp-hadoop-stack/util.sh index d66a4ed34..36d2c20b7 100644 --- a/dcompose-stack/radar-cp-hadoop-stack/util.sh +++ b/dcompose-stack/radar-cp-hadoop-stack/util.sh @@ -12,8 +12,8 @@ function catch_errors() { # Check whether given command exists and call it with the --version flag. check_command_exists() { - if command -v "$1" > /dev/null 2>&1; then - echo "$1 version: $($1 --version)" + if sudo-linux /bin/bash -c "command -v "$1" > /dev/null 2>&1"; then + echo "$1 version: $(sudo-linux "$1" --version)" else echo "RADAR-CNS cannot start without $1. Please, install $1 and then try again" exit 1 @@ -93,7 +93,7 @@ letsencrypt_certonly() { sudo-linux docker run --rm -v certs:/etc/openssl alpine:3.5 /bin/sh -c "find /etc/openssl -name '${SERVER_NAME}*' -prune -exec rm -rf '{}' +" CERTBOT_DOCKER_OPTS=(-i --rm -v certs:/etc/letsencrypt -v certs-data:/data/letsencrypt deliverous/certbot) - CERTBOT_OPTS=(--webroot --webroot-path=/data/letsencrypt -d "${SERVER_NAME}") + CERTBOT_OPTS=(--webroot --webroot-path=/data/letsencrypt --agree-tos -m "${MAINTAINER_EMAIL}" -d "${SERVER_NAME}" --non-interactive) sudo-linux docker run "${CERTBOT_DOCKER_OPTS[@]}" certonly "${CERTBOT_OPTS[@]}" # mark the directory as letsencrypt dir @@ -104,7 +104,7 @@ letsencrypt_renew() { SERVER_NAME=$1 echo "==> Renewing Let's Encrypt SSL certificate for ${SERVER_NAME}" CERTBOT_DOCKER_OPTS=(-i --rm -v certs:/etc/letsencrypt -v certs-data:/data/letsencrypt deliverous/certbot) - CERTBOT_OPTS=(-n --webroot --webroot-path=/data/letsencrypt -d "${SERVER_NAME}") + CERTBOT_OPTS=(-n --webroot --webroot-path=/data/letsencrypt -d "${SERVER_NAME}" --non-interactive) sudo-linux docker run "${CERTBOT_DOCKER_OPTS[@]}" certonly "${CERTBOT_OPTS[@]}" } From 6974334761478e2276ac9f1f01caddf8a4f9ec15 Mon Sep 17 00:00:00 2001 From: Joris Borgdorff Date: Tue, 1 Aug 2017 10:48:05 +0200 Subject: [PATCH 185/197] Also add MAINTAINER_EMAIL to env.template --- dcompose-stack/radar-cp-hadoop-stack/etc/env.template | 1 + 1 file changed, 1 insertion(+) diff --git a/dcompose-stack/radar-cp-hadoop-stack/etc/env.template b/dcompose-stack/radar-cp-hadoop-stack/etc/env.template index f1ac76956..77eb4046e 100644 --- a/dcompose-stack/radar-cp-hadoop-stack/etc/env.template +++ b/dcompose-stack/radar-cp-hadoop-stack/etc/env.template @@ -1,5 +1,6 @@ SERVER_NAME=localhost SELF_SIGNED_CERT=yes +MAINTAINER_EMAIL=me@example.com 
RADAR_TOPIC_LIST=android_empatica_e4_acceleration,android_empatica_e4_acceleration_output,android_empatica_e4_battery_level,android_empatica_e4_battery_level_output,android_empatica_e4_blood_volume_pulse,android_empatica_e4_blood_volume_pulse_output,android_empatica_e4_electrodermal_activity,android_empatica_e4_electrodermal_activity_output,android_empatica_e4_heartrate,android_empatica_e4_inter_beat_interval,android_empatica_e4_inter_beat_interval_output,android_empatica_e4_sensor_status,android_empatica_e4_sensor_status_output,android_empatica_e4_temperature,android_empatica_e4_temperature_output,application_server_status,application_record_counts,application_uptime,application_external_time,android_phone_battery_level,android_phone_acceleration,android_phone_light,android_pebble2_acceleration,android_pebble2_battery_level,android_pebble2_heart_rate,android_pebble2_heart_rate_filtered,active_questionnaire_phq8,schemaless-key,schemaless-value RADAR_RAW_TOPIC_LIST=android_empatica_e4_acceleration,android_empatica_e4_battery_level,android_empatica_e4_blood_volume_pulse,android_empatica_e4_electrodermal_activity,android_empatica_e4_inter_beat_interval,android_empatica_e4_sensor_status,android_empatica_e4_temperature,application_server_status,application_record_counts,application_uptime,application_external_time,android_phone_battery_level,android_phone_acceleration,android_phone_light,android_pebble2_acceleration,android_pebble2_battery_level,android_pebble2_heart_rate,android_pebble2_heart_rate_filtered,active_questionnaire_phq8 RADAR_AGG_TOPIC_LIST=android_empatica_e4_acceleration_output,android_empatica_e4_battery_level_output,android_empatica_e4_blood_volume_pulse_output,android_empatica_e4_electrodermal_activity_output,android_empatica_e4_heartrate,android_empatica_e4_inter_beat_interval_output,android_empatica_e4_sensor_status_output,android_empatica_e4_temperature_output,application_server_status,application_record_counts,application_uptime,application_external_time,active_questionnaire_phq8 From 84075380c240314d3b3325015bde5d6199f3ea48 Mon Sep 17 00:00:00 2001 From: Joris Borgdorff Date: Wed, 9 Aug 2017 08:55:50 +0200 Subject: [PATCH 186/197] Increased memory for the HDFS sink connector (it sometimes needs it) --- dcompose-stack/radar-cp-hadoop-stack/docker-compose.yml | 1 + 1 file changed, 1 insertion(+) diff --git a/dcompose-stack/radar-cp-hadoop-stack/docker-compose.yml b/dcompose-stack/radar-cp-hadoop-stack/docker-compose.yml index d52a1f165..a83d927da 100644 --- a/dcompose-stack/radar-cp-hadoop-stack/docker-compose.yml +++ b/dcompose-stack/radar-cp-hadoop-stack/docker-compose.yml @@ -357,6 +357,7 @@ services: CONNECT_ZOOKEEPER_CONNECT: zookeeper-1:2181 TOPIC_LIST: ${RADAR_TOPIC_LIST} CONNECTOR_PROPERTY_FILE_PREFIX: "sink-hdfs" + KAFKA_HEAP_OPTS: "-Xms256m -Xmx768m" #---------------------------------------------------------------------------# # RADAR backend streams # From 38e4bc769445672265ecbd6eded940d96c7e1665 Mon Sep 17 00:00:00 2001 From: Joris Borgdorff Date: Mon, 14 Aug 2017 09:22:43 +0200 Subject: [PATCH 187/197] Added log retention Kafka of 1 week --- dcompose-stack/radar-cp-hadoop-stack/docker-compose.yml | 3 +++ 1 file changed, 3 insertions(+) diff --git a/dcompose-stack/radar-cp-hadoop-stack/docker-compose.yml b/dcompose-stack/radar-cp-hadoop-stack/docker-compose.yml index a83d927da..7be33f4b1 100644 --- a/dcompose-stack/radar-cp-hadoop-stack/docker-compose.yml +++ b/dcompose-stack/radar-cp-hadoop-stack/docker-compose.yml @@ -71,6 +71,7 @@ services: 
KAFKA_ZOOKEEPER_CONNECT: zookeeper-1:2181 KAFKA_ADVERTISED_LISTENERS: PLAINTEXT://kafka-1:9092 KAFKA_AUTO_CREATE_TOPICS_ENABLE: "false" + KAFKA_LOG_RETENTION_HOURS: 730 kafka-2: image: confluentinc/cp-kafka:3.1.2-1 @@ -87,6 +88,7 @@ services: KAFKA_ZOOKEEPER_CONNECT: zookeeper-1:2181 KAFKA_ADVERTISED_LISTENERS: PLAINTEXT://kafka-2:9092 KAFKA_AUTO_CREATE_TOPICS_ENABLE: "false" + KAFKA_LOG_RETENTION_HOURS: 730 kafka-3: image: confluentinc/cp-kafka:3.1.2-1 @@ -103,6 +105,7 @@ services: KAFKA_ZOOKEEPER_CONNECT: zookeeper-1:2181 KAFKA_ADVERTISED_LISTENERS: PLAINTEXT://kafka-3:9092 KAFKA_AUTO_CREATE_TOPICS_ENABLE: "false" + KAFKA_LOG_RETENTION_HOURS: 730 #---------------------------------------------------------------------------# # Schema Registry # From 61029a5ee27b7791cb447df40a2c667b434c0dd1 Mon Sep 17 00:00:00 2001 From: yatharthranjan Date: Thu, 14 Sep 2017 16:17:05 +0100 Subject: [PATCH 188/197] Integrated the Redcap integration web app into the stack --- .../radar-cp-hadoop-stack/docker-compose.yml | 27 ++++++++++++++++++- .../etc/nginx.conf.template | 14 ++++++---- .../etc/redcap-integration/radar.yml.template | 21 +++++++++++++++ .../install-radar-stack.sh | 3 +++ 4 files changed, 59 insertions(+), 6 deletions(-) create mode 100644 dcompose-stack/radar-cp-hadoop-stack/etc/redcap-integration/radar.yml.template diff --git a/dcompose-stack/radar-cp-hadoop-stack/docker-compose.yml b/dcompose-stack/radar-cp-hadoop-stack/docker-compose.yml index 7be33f4b1..031a2f8ed 100644 --- a/dcompose-stack/radar-cp-hadoop-stack/docker-compose.yml +++ b/dcompose-stack/radar-cp-hadoop-stack/docker-compose.yml @@ -22,6 +22,9 @@ networks: hotstorage: driver: bridge internal: true + redcap: + driver: bridge + internal: true volumes: kafka-1-data: {} @@ -205,6 +208,26 @@ services: timeout: 5s retries: 3 + + #---------------------------------------------------------------------------# + # RADAR REDCap Integration # + #---------------------------------------------------------------------------# + radar-integration: + image: radarcns/radar-redcapintegration:latest + networks: + - redcap + - default + #depends_on: + #- hotstorage + restart: always + volumes: + - "./etc/redcap-integration:/usr/local/tomcat/conf/radar" + healthcheck: + test: ["CMD", "curl", "-IX", "POST", "http://localhost:8080/redcap/trigger"] + interval: 1m + timeout: 5s + retries: 3 + #---------------------------------------------------------------------------# # RADAR Dashboard # #---------------------------------------------------------------------------# @@ -410,7 +433,7 @@ services: restart: always environment: KAFKA_REST_PROXY: http://rest-proxy-1:8082 - TOPIC_LIST: ${RADAR_TOPIC_LIST} + TOPIC_LIST: ${RADAR_TOPIC_LIST} #---------------------------------------------------------------------------# # Docker Monitoring # @@ -445,6 +468,7 @@ services: restart: always networks: - api + - redcap - monitor - default depends_on: @@ -454,6 +478,7 @@ services: - schema-registry-1 - rest-proxy-1 - dashboard + - radar-integration ports: - "80:80" - "443:443" diff --git a/dcompose-stack/radar-cp-hadoop-stack/etc/nginx.conf.template b/dcompose-stack/radar-cp-hadoop-stack/etc/nginx.conf.template index 047f1f82f..0204e3042 100644 --- a/dcompose-stack/radar-cp-hadoop-stack/etc/nginx.conf.template +++ b/dcompose-stack/radar-cp-hadoop-stack/etc/nginx.conf.template @@ -22,7 +22,7 @@ http { server { listen 80 default_server; listen [::]:80 default_server; - server_name localhost; + server_name radar-backend.ddns.net; location / { return 301 
https://$server_name$request_uri; } @@ -35,7 +35,7 @@ http { server { # simple reverse-proxy listen 443 ssl http2 default_server; listen [::]:443 ssl http2 default_server; - server_name localhost; + server_name radar-backend.ddns.net; ssl on; @@ -52,9 +52,9 @@ http { ssl_stapling_verify on; resolver 8.8.8.8 8.8.4.4; - ssl_certificate /etc/letsencrypt/live/localhost/fullchain.pem; - ssl_certificate_key /etc/letsencrypt/live/localhost/privkey.pem; - ssl_trusted_certificate /etc/letsencrypt/live/localhost/chain.pem; + ssl_certificate /etc/letsencrypt/live/radar-backend.ddns.net/fullchain.pem; + ssl_certificate_key /etc/letsencrypt/live/radar-backend.ddns.net/privkey.pem; + ssl_trusted_certificate /etc/letsencrypt/live/radar-backend.ddns.net/chain.pem; access_log /var/log/nginx/access.log; error_log /var/log/nginx/error.log; @@ -90,6 +90,10 @@ http { proxy_pass http://rest-api:8080/radar/api/; proxy_set_header Host $host; } + location /redcapint/ { + proxy_pass http://radar-integration:8080/redcap/; + proxy_set_header Host $host; + } location /frontend/config/ { proxy_pass http://rest-api:8080/radar/frontend/; proxy_set_header Host $host; diff --git a/dcompose-stack/radar-cp-hadoop-stack/etc/redcap-integration/radar.yml.template b/dcompose-stack/radar-cp-hadoop-stack/etc/redcap-integration/radar.yml.template new file mode 100644 index 000000000..78e083ffe --- /dev/null +++ b/dcompose-stack/radar-cp-hadoop-stack/etc/redcap-integration/radar.yml.template @@ -0,0 +1,21 @@ +version: 0.1-alpha +released: 2017-08-29 + +# ManagementPortal configuration +oauth_client_id: #OAuth2 clientId used by the webApp for making requests +oauth_client_secret: #OAuth2 client secrete +management_portal_url: #URL pointing Management Portal +token_endpoint: #URL managing tokens +project_endpoint: #URL managing project function +subject_endpoint: #URL managing subject functions + +# Set of supported projects +projects: + - redcap_info: + url: #URL pointing REDCap instance + project_id: #REDCap project identifier + enrolment_event: #Unique identifier for the enrolment event + integration_form: #Name of integration REDCap form + token: #REDCap API Token used to identify the REDCap user against the REDCap instance + mp_info: + project_id: #Management Portal project identifier diff --git a/dcompose-stack/radar-cp-hadoop-stack/install-radar-stack.sh b/dcompose-stack/radar-cp-hadoop-stack/install-radar-stack.sh index a7e76f5ec..10636bc54 100755 --- a/dcompose-stack/radar-cp-hadoop-stack/install-radar-stack.sh +++ b/dcompose-stack/radar-cp-hadoop-stack/install-radar-stack.sh @@ -40,6 +40,9 @@ echo "==> Configuring REST-API" copy_template_if_absent etc/rest-api/radar.yml copy_template_if_absent etc/rest-api/device-catalog.yml +echo "==> Configuring REDCap-Integration" +copy_template_if_absent etc/redcap-integration/radar.yml + # Set MongoDb credential inline_variable 'usr:[[:space:]]' $HOTSTORAGE_USERNAME etc/rest-api/radar.yml inline_variable 'pwd:[[:space:]]' $HOTSTORAGE_PASSWORD etc/rest-api/radar.yml From 929c004ec9053d10f83d6c007e47de80a852f1ec Mon Sep 17 00:00:00 2001 From: Yatharth Ranjan Date: Thu, 14 Sep 2017 17:05:10 +0100 Subject: [PATCH 189/197] Added info about redcap integration --- dcompose-stack/radar-cp-hadoop-stack/README.md | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/dcompose-stack/radar-cp-hadoop-stack/README.md b/dcompose-stack/radar-cp-hadoop-stack/README.md index c28a88ff9..ef79a74b9 100644 --- a/dcompose-stack/radar-cp-hadoop-stack/README.md +++ 
b/dcompose-stack/radar-cp-hadoop-stack/README.md @@ -2,10 +2,13 @@ ## Configuration -First move `etc/env.template` file to `./.env` and check and modify all its variables. To have a valid HTTPS connection for a public host, set `SELF_SIGNED_CERT=no`. +First move `etc/env.template` file to `./.env` and check and modify all its variables. To have a valid HTTPS connection for a public host, set `SELF_SIGNED_CERT=no`. You need to provide a public valid DNS name as `SERVER_NAME` for SSL certificate to work. IP addresses will not work. Modify `etc/smtp.env.template` to set a SMTP host to send emails with, and move it to `etc/smtp.env`. The configuration settings are passed to a [namshi/smtp](https://hub.docker.com/r/namshi/smtp/) Docker container. This container supports a.o. regular SMTP and GMail. +Modify the `etc/redcap-integration/radar.yml.template` to configure the properties of Redcap instance and the management portal, and move it to `etc/redcap-integration/radar.yml`. For reference on cofiguration of this file look at the Readme file here - https://github.com/RADAR-CNS/RADAR-RedcapIntegration#configuration +In the REDcap portal under Project Setup, define the Data Trigger as `https:///redcapint/trigger` + Finally, move `etc/radar.yml.template` to `etc/radar.yml` and edit it, especially concerning the monitor email address configuration. ## Usage From 9d25c6a0508520d45118c23d45a5495049014ee2 Mon Sep 17 00:00:00 2001 From: Yatharth Ranjan Date: Thu, 14 Sep 2017 18:29:21 +0100 Subject: [PATCH 190/197] Update nginx.conf.template Some minor changes --- .../radar-cp-hadoop-stack/etc/nginx.conf.template | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/dcompose-stack/radar-cp-hadoop-stack/etc/nginx.conf.template b/dcompose-stack/radar-cp-hadoop-stack/etc/nginx.conf.template index 0204e3042..cca6a8a98 100644 --- a/dcompose-stack/radar-cp-hadoop-stack/etc/nginx.conf.template +++ b/dcompose-stack/radar-cp-hadoop-stack/etc/nginx.conf.template @@ -22,7 +22,7 @@ http { server { listen 80 default_server; listen [::]:80 default_server; - server_name radar-backend.ddns.net; + server_name localhost; location / { return 301 https://$server_name$request_uri; } @@ -35,7 +35,7 @@ http { server { # simple reverse-proxy listen 443 ssl http2 default_server; listen [::]:443 ssl http2 default_server; - server_name radar-backend.ddns.net; + server_name localhost; ssl on; @@ -52,9 +52,9 @@ http { ssl_stapling_verify on; resolver 8.8.8.8 8.8.4.4; - ssl_certificate /etc/letsencrypt/live/radar-backend.ddns.net/fullchain.pem; - ssl_certificate_key /etc/letsencrypt/live/radar-backend.ddns.net/privkey.pem; - ssl_trusted_certificate /etc/letsencrypt/live/radar-backend.ddns.net/chain.pem; + ssl_certificate /etc/letsencrypt/live/localhost/fullchain.pem; + ssl_certificate_key /etc/letsencrypt/live/localhost/privkey.pem; + ssl_trusted_certificate /etc/letsencrypt/live/localhost/chain.pem; access_log /var/log/nginx/access.log; error_log /var/log/nginx/error.log; From 8428c22df78f90c7dfcbaa7b2345ee2d61695a11 Mon Sep 17 00:00:00 2001 From: nivethika Date: Wed, 4 Oct 2017 16:29:50 +0200 Subject: [PATCH 191/197] adds MP into docker-stack, required MP to be build from source though --- .../managementportal/Dockerfile | 13 +++++++++++++ .../radar-cp-hadoop-stack/postgresql.yml | 14 ++++++++++++++ 2 files changed, 27 insertions(+) create mode 100644 dcompose-stack/radar-cp-hadoop-stack/managementportal/Dockerfile create mode 100644 dcompose-stack/radar-cp-hadoop-stack/postgresql.yml diff --git 
a/dcompose-stack/radar-cp-hadoop-stack/managementportal/Dockerfile b/dcompose-stack/radar-cp-hadoop-stack/managementportal/Dockerfile new file mode 100644 index 000000000..3eb1bb213 --- /dev/null +++ b/dcompose-stack/radar-cp-hadoop-stack/managementportal/Dockerfile @@ -0,0 +1,13 @@ +FROM openjdk:8-jre-alpine + +ENV SPRING_OUTPUT_ANSI_ENABLED=ALWAYS \ + JHIPSTER_SLEEP=0 + +# add directly the war +ADD *.war /app.war + +VOLUME /tmp +EXPOSE 8080 5701/udp +CMD echo "The application will start in ${JHIPSTER_SLEEP}s..." && \ + sleep ${JHIPSTER_SLEEP} && \ + java $JAVA_OPTS -Djava.security.egd=file:/dev/./urandom -jar /app.war diff --git a/dcompose-stack/radar-cp-hadoop-stack/postgresql.yml b/dcompose-stack/radar-cp-hadoop-stack/postgresql.yml new file mode 100644 index 000000000..c18cf4f67 --- /dev/null +++ b/dcompose-stack/radar-cp-hadoop-stack/postgresql.yml @@ -0,0 +1,14 @@ +version: '2' +services: + managementportal-postgresql: + image: postgres:9.6.2 + # volumes: + # - ~/volumes/jhipster/managementportal/postgresql/:/var/lib/postgresql/ + environment: + - POSTGRES_USER= + - POSTGRES_PASSWORD= + - POSTGRES_DB=managementportal + ports: + - 5432:5432 + + From e89c7ef3cb833776e779e7c9e88a309dd90f9cd6 Mon Sep 17 00:00:00 2001 From: nivethika Date: Wed, 4 Oct 2017 16:33:50 +0200 Subject: [PATCH 192/197] adds MP into docker-stack --- .../radar-cp-hadoop-stack/.gitignore | 2 ++ .../radar-cp-hadoop-stack/README.md | 24 +++++++++++--- .../radar-cp-hadoop-stack/docker-compose.yml | 31 +++++++++++++++++++ .../radar-cp-hadoop-stack/etc/env.template | 3 ++ .../etc/nginx.conf.template | 5 +++ .../etc/smtp.env.template | 2 ++ .../radar-cp-hadoop-stack/postgresql.yml | 22 ++++++------- 7 files changed, 74 insertions(+), 15 deletions(-) diff --git a/dcompose-stack/radar-cp-hadoop-stack/.gitignore b/dcompose-stack/radar-cp-hadoop-stack/.gitignore index 4668dd37a..7723e45d4 100644 --- a/dcompose-stack/radar-cp-hadoop-stack/.gitignore +++ b/dcompose-stack/radar-cp-hadoop-stack/.gitignore @@ -1,2 +1,4 @@ /.env +/etc/smtp.env +/radar.yml /output/ diff --git a/dcompose-stack/radar-cp-hadoop-stack/README.md b/dcompose-stack/radar-cp-hadoop-stack/README.md index ef79a74b9..87975fe2e 100644 --- a/dcompose-stack/radar-cp-hadoop-stack/README.md +++ b/dcompose-stack/radar-cp-hadoop-stack/README.md @@ -2,14 +2,30 @@ ## Configuration -First move `etc/env.template` file to `./.env` and check and modify all its variables. To have a valid HTTPS connection for a public host, set `SELF_SIGNED_CERT=no`. You need to provide a public valid DNS name as `SERVER_NAME` for SSL certificate to work. IP addresses will not work. +1. First move `etc/env.template` file to `./.env` and check and modify all its variables. To have a valid HTTPS connection for a public host, set `SELF_SIGNED_CERT=no`. You need to provide a public valid DNS name as `SERVER_NAME` for SSL certificate to work. IP addresses will not work. -Modify `etc/smtp.env.template` to set a SMTP host to send emails with, and move it to `etc/smtp.env`. The configuration settings are passed to a [namshi/smtp](https://hub.docker.com/r/namshi/smtp/) Docker container. This container supports a.o. regular SMTP and GMail. +2. Modify `etc/smtp.env.template` to set a SMTP host to send emails with, and move it to `etc/smtp.env`. The configuration settings are passed to a [namshi/smtp](https://hub.docker.com/r/namshi/smtp/) Docker container. This container supports a.o. regular SMTP and GMail. 
-Modify the `etc/redcap-integration/radar.yml.template` to configure the properties of Redcap instance and the management portal, and move it to `etc/redcap-integration/radar.yml`. For reference on cofiguration of this file look at the Readme file here - https://github.com/RADAR-CNS/RADAR-RedcapIntegration#configuration +3. Modify the `etc/redcap-integration/radar.yml.template` to configure the properties of Redcap instance and the management portal, and move it to `etc/redcap-integration/radar.yml`. For reference on configuration of this file look at the Readme file here - https://github.com/RADAR-CNS/RADAR-RedcapIntegration#configuration In the REDcap portal under Project Setup, define the Data Trigger as `https:///redcapint/trigger` -Finally, move `etc/radar.yml.template` to `etc/radar.yml` and edit it, especially concerning the monitor email address configuration. +4. ### Build ManagementPortal from source ( Required to build it from source for secured deployment at the moment) + +4.1. Clone ManagementPortal +```shell +git clone https://github.com/RADAR-CNS/ManagementPortal.git +``` +4.2. Change OAuth2 client credentials for production environment at `src/main/resources/config/liquibase/oauth_client_details.csv` + +4.3. Change the base href url to `` at `src/main/webapp/index.html`. + +4.4. Build ManagementPortal for production +```shell +./gradlew bootRepackage -Pprod buildDocker +``` +4.5. Copy built `.war` file to `/managementportal/` + +5. Finally, move `etc/radar.yml.template` to `etc/radar.yml` and edit it, especially concerning the monitor email address configuration. ## Usage diff --git a/dcompose-stack/radar-cp-hadoop-stack/docker-compose.yml b/dcompose-stack/radar-cp-hadoop-stack/docker-compose.yml index 031a2f8ed..495313b2a 100644 --- a/dcompose-stack/radar-cp-hadoop-stack/docker-compose.yml +++ b/dcompose-stack/radar-cp-hadoop-stack/docker-compose.yml @@ -25,6 +25,9 @@ networks: redcap: driver: bridge internal: true + management: + driver: bridge + internal: true volumes: kafka-1-data: {} @@ -217,6 +220,7 @@ services: networks: - redcap - default + - management #depends_on: #- hotstorage restart: always @@ -291,6 +295,7 @@ services: smtp: image: namshi/smtp:latest networks: + - default - mail volumes: - /var/spool/exim @@ -486,3 +491,29 @@ services: - certs:/etc/letsencrypt - certs-data:/data/letsencrypt - "./etc/nginx.conf:/etc/nginx/nginx.conf:ro" + + managementportal-app: + image: managementportal + build: managementportal + networks: + - default + - management + - mail + environment: + SPRING_PROFILES_ACTIVE: prod,swagger + SPRING_DATASOURCE_URL: jdbc:postgresql://managementportal-postgresql:5432/managementportal + SPRING_DATASOURCE_USERNAME: ${POSTGRES_USER} + SPRING_DATASOURCE_PASSWORD: ${POSTGRES_PASSWORD} + MANAGEMENTPORTAL_MAIL_FROM: ${FROM_EMAIL} + MANAGEMENTPORTAL_MAIL_BASEURL: https://${SERVER_NAME}/managementportal + JHIPSTER_SLEEP: 10 # gives time for the database to boot before the application + JAVA_OPTS: "-Xmx256m" # maximum heap size for the JVM running ManagementPortal, increase this as necessary +# ports: +# - "8081:8080" + + managementportal-postgresql: + networks: + - management + extends: + file: postgresql.yml + service: managementportal-postgresql \ No newline at end of file diff --git a/dcompose-stack/radar-cp-hadoop-stack/etc/env.template b/dcompose-stack/radar-cp-hadoop-stack/etc/env.template index 77eb4046e..f047daf46 100644 --- a/dcompose-stack/radar-cp-hadoop-stack/etc/env.template +++ b/dcompose-stack/radar-cp-hadoop-stack/etc/env.template @@ 
-12,3 +12,6 @@ HDFS_DATA_DIR_2=/usr/local/var/lib/docker/hdfs-data-2 HDFS_NAME_DIR_1=/usr/local/var/lib/docker/hdfs-name-1 HDFS_NAME_DIR_2=/usr/local/var/lib/docker/hdfs-name-2 MONGODB_DIR=/usr/local/var/lib/docker/mongodb +POSTGRES_USER= +POSTGRES_PASSWORD= +FROM_EMAIL= \ No newline at end of file diff --git a/dcompose-stack/radar-cp-hadoop-stack/etc/nginx.conf.template b/dcompose-stack/radar-cp-hadoop-stack/etc/nginx.conf.template index cca6a8a98..1ff380f0d 100644 --- a/dcompose-stack/radar-cp-hadoop-stack/etc/nginx.conf.template +++ b/dcompose-stack/radar-cp-hadoop-stack/etc/nginx.conf.template @@ -98,5 +98,10 @@ http { proxy_pass http://rest-api:8080/radar/frontend/; proxy_set_header Host $host; } + + location /managementportal/{ + proxy_pass http://managementportal-app:8080/; + proxy_set_header Host $host; + } } } diff --git a/dcompose-stack/radar-cp-hadoop-stack/etc/smtp.env.template b/dcompose-stack/radar-cp-hadoop-stack/etc/smtp.env.template index ab7fe491d..3100006bc 100644 --- a/dcompose-stack/radar-cp-hadoop-stack/etc/smtp.env.template +++ b/dcompose-stack/radar-cp-hadoop-stack/etc/smtp.env.template @@ -2,3 +2,5 @@ SMARTHOST_ADDRESS=mail.example.com SMARTHOST_PORT=587 SMARTHOST_USER=user@example.com SMARTHOST_PASSWORD=XXXXXXXX +SMARTHOST_ALIASES=*.amazonaws.com +RELAY_NETWORKS=:172.0.0.0/8:192.168.0.0/16 \ No newline at end of file diff --git a/dcompose-stack/radar-cp-hadoop-stack/postgresql.yml b/dcompose-stack/radar-cp-hadoop-stack/postgresql.yml index c18cf4f67..e64e80d7a 100644 --- a/dcompose-stack/radar-cp-hadoop-stack/postgresql.yml +++ b/dcompose-stack/radar-cp-hadoop-stack/postgresql.yml @@ -1,14 +1,14 @@ -version: '2' +version: '2.1' services: - managementportal-postgresql: - image: postgres:9.6.2 - # volumes: - # - ~/volumes/jhipster/managementportal/postgresql/:/var/lib/postgresql/ - environment: - - POSTGRES_USER= - - POSTGRES_PASSWORD= - - POSTGRES_DB=managementportal - ports: - - 5432:5432 + managementportal-postgresql: + image: postgres:9.6.2 + # volumes: + # - ~/volumes/jhipster/managementportal/postgresql/:/var/lib/postgresql/ + environment: + POSTGRES_USER : ${POSTGRES_USER} + POSTGRES_PASSWORD: ${POSTGRES_PASSWORD} + POSTGRES_DB: managementportal + ports: + - 5432:5432 From b23b5871eefe7605a76b6dd9dfad20be8bc62a3f Mon Sep 17 00:00:00 2001 From: nivethika Date: Fri, 6 Oct 2017 16:47:26 +0200 Subject: [PATCH 193/197] configurable MP using volumes and env varibles --- dcompose-stack/radar-cp-hadoop-stack/README.md | 16 +--------------- .../radar-cp-hadoop-stack/docker-compose.yml | 9 ++++----- .../liquibase/oauth_client_details.csv.template | 6 ++++++ .../managementportal/Dockerfile | 13 ------------- images/managementportal/Dockerfile | 14 ++++++++++++++ .../config/liquibase/oauth_client_details.csv | 6 ++++++ 6 files changed, 31 insertions(+), 33 deletions(-) create mode 100644 dcompose-stack/radar-cp-hadoop-stack/etc/managementportal/changelogs/config/liquibase/oauth_client_details.csv.template delete mode 100644 dcompose-stack/radar-cp-hadoop-stack/managementportal/Dockerfile create mode 100644 images/managementportal/Dockerfile create mode 100644 images/managementportal/etc/changelogs/config/liquibase/oauth_client_details.csv diff --git a/dcompose-stack/radar-cp-hadoop-stack/README.md b/dcompose-stack/radar-cp-hadoop-stack/README.md index 87975fe2e..a0c72ffab 100644 --- a/dcompose-stack/radar-cp-hadoop-stack/README.md +++ b/dcompose-stack/radar-cp-hadoop-stack/README.md @@ -9,21 +9,7 @@ 3. 
Modify the `etc/redcap-integration/radar.yml.template` to configure the properties of Redcap instance and the management portal, and move it to `etc/redcap-integration/radar.yml`. For reference on configuration of this file look at the Readme file here - https://github.com/RADAR-CNS/RADAR-RedcapIntegration#configuration In the REDcap portal under Project Setup, define the Data Trigger as `https:///redcapint/trigger` -4. ### Build ManagementPortal from source ( Required to build it from source for secured deployment at the moment) - -4.1. Clone ManagementPortal -```shell -git clone https://github.com/RADAR-CNS/ManagementPortal.git -``` -4.2. Change OAuth2 client credentials for production environment at `src/main/resources/config/liquibase/oauth_client_details.csv` - -4.3. Change the base href url to `` at `src/main/webapp/index.html`. - -4.4. Build ManagementPortal for production -```shell -./gradlew bootRepackage -Pprod buildDocker -``` -4.5. Copy built `.war` file to `/managementportal/` +4. Move `etc/managementportal/oauth_client_details.csv.template` to `etc/managementportal/oauth_client_details.csv` and change OAuth client credentials for production MP. (Except ManagementPortalapp) 5. Finally, move `etc/radar.yml.template` to `etc/radar.yml` and edit it, especially concerning the monitor email address configuration. diff --git a/dcompose-stack/radar-cp-hadoop-stack/docker-compose.yml b/dcompose-stack/radar-cp-hadoop-stack/docker-compose.yml index 495313b2a..c986ac195 100644 --- a/dcompose-stack/radar-cp-hadoop-stack/docker-compose.yml +++ b/dcompose-stack/radar-cp-hadoop-stack/docker-compose.yml @@ -493,8 +493,7 @@ services: - "./etc/nginx.conf:/etc/nginx/nginx.conf:ro" managementportal-app: - image: managementportal - build: managementportal + image: radarcns/management-portal:latest networks: - default - management @@ -507,9 +506,9 @@ services: MANAGEMENTPORTAL_MAIL_FROM: ${FROM_EMAIL} MANAGEMENTPORTAL_MAIL_BASEURL: https://${SERVER_NAME}/managementportal JHIPSTER_SLEEP: 10 # gives time for the database to boot before the application - JAVA_OPTS: "-Xmx256m" # maximum heap size for the JVM running ManagementPortal, increase this as necessary -# ports: -# - "8081:8080" + JAVA_OPTS: -Xmx256m # maximum heap size for the JVM running ManagementPortal, increase this as necessary + volumes: + - ./etc/managementportal/changelogs:/changelogs managementportal-postgresql: networks: diff --git a/dcompose-stack/radar-cp-hadoop-stack/etc/managementportal/changelogs/config/liquibase/oauth_client_details.csv.template b/dcompose-stack/radar-cp-hadoop-stack/etc/managementportal/changelogs/config/liquibase/oauth_client_details.csv.template new file mode 100644 index 000000000..b31608f7e --- /dev/null +++ b/dcompose-stack/radar-cp-hadoop-stack/etc/managementportal/changelogs/config/liquibase/oauth_client_details.csv.template @@ -0,0 +1,6 @@ +client_id;resource_ids;client_secret;scope;authorized_grant_types;web_server_redirect_uri;authorities;access_token_validity;refresh_token_validity;additional_information;autoapprove +ManagementPortalapp;res_ManagementPortal;my-secret-token-to-change-in-production;read,write;password,refresh_token,authorization_code,implicit;;ROLE_PROJECT_ADMIN,ROLE_USER,ROLE_SYS_ADMIN;1800;3600;{};true +pRMT;res_ManagementPortal;;read,write;refresh_token,authorization_code;http://localhost:8080;ROLE_USER;43200;5184000;{};true +radar_restapi;res_ManagementPortal,res_gateway;my-secret-token-to-change-in-production;read;client_credentials;;ROLE_USER;1800;3600;{};true 
+THINC-IT;res_gateway,res_ManagementPortal;secret;read,write;password,refresh_token,authorization_code,implicit;;ROLE_PROJECT_ADMIN,ROLE_USER,ROLE_SYS_ADMIN;43200;5184000;{};true +radar_redcap_integrator;res_ManagementPortal;my-secrect_token;read,write;client_credentials;;ROLE_EXTERNAL_ERF_INTEGRATOR;1800;2000;{};true diff --git a/dcompose-stack/radar-cp-hadoop-stack/managementportal/Dockerfile b/dcompose-stack/radar-cp-hadoop-stack/managementportal/Dockerfile deleted file mode 100644 index 3eb1bb213..000000000 --- a/dcompose-stack/radar-cp-hadoop-stack/managementportal/Dockerfile +++ /dev/null @@ -1,13 +0,0 @@ -FROM openjdk:8-jre-alpine - -ENV SPRING_OUTPUT_ANSI_ENABLED=ALWAYS \ - JHIPSTER_SLEEP=0 - -# add directly the war -ADD *.war /app.war - -VOLUME /tmp -EXPOSE 8080 5701/udp -CMD echo "The application will start in ${JHIPSTER_SLEEP}s..." && \ - sleep ${JHIPSTER_SLEEP} && \ - java $JAVA_OPTS -Djava.security.egd=file:/dev/./urandom -jar /app.war diff --git a/images/managementportal/Dockerfile b/images/managementportal/Dockerfile new file mode 100644 index 000000000..d1abb3c15 --- /dev/null +++ b/images/managementportal/Dockerfile @@ -0,0 +1,14 @@ +FROM openjdk:8-jre-alpine + +ENV SPRING_OUTPUT_ANSI_ENABLED=ALWAYS \ + JHIPSTER_SLEEP=0 + +# add directly the war +ADD https://github.com/RADAR-CNS/ManagementPortal/releases/download/v0.1-alpha/management-portal-0.0.1-SNAPSHOT.war /app.war + +VOLUME /tmp +EXPOSE 8080 5701/udp +VOLUME /changelogs +CMD echo "The application will start in ${JHIPSTER_SLEEP}s..." && \ + sleep ${JHIPSTER_SLEEP} && \ + java $JAVA_OPTS -Djava.security.egd=file:/dev/./urandom -cp /changelogs:/app.war org.springframework.boot.loader.WarLauncher diff --git a/images/managementportal/etc/changelogs/config/liquibase/oauth_client_details.csv b/images/managementportal/etc/changelogs/config/liquibase/oauth_client_details.csv new file mode 100644 index 000000000..b31608f7e --- /dev/null +++ b/images/managementportal/etc/changelogs/config/liquibase/oauth_client_details.csv @@ -0,0 +1,6 @@ +client_id;resource_ids;client_secret;scope;authorized_grant_types;web_server_redirect_uri;authorities;access_token_validity;refresh_token_validity;additional_information;autoapprove +ManagementPortalapp;res_ManagementPortal;my-secret-token-to-change-in-production;read,write;password,refresh_token,authorization_code,implicit;;ROLE_PROJECT_ADMIN,ROLE_USER,ROLE_SYS_ADMIN;1800;3600;{};true +pRMT;res_ManagementPortal;;read,write;refresh_token,authorization_code;http://localhost:8080;ROLE_USER;43200;5184000;{};true +radar_restapi;res_ManagementPortal,res_gateway;my-secret-token-to-change-in-production;read;client_credentials;;ROLE_USER;1800;3600;{};true +THINC-IT;res_gateway,res_ManagementPortal;secret;read,write;password,refresh_token,authorization_code,implicit;;ROLE_PROJECT_ADMIN,ROLE_USER,ROLE_SYS_ADMIN;43200;5184000;{};true +radar_redcap_integrator;res_ManagementPortal;my-secrect_token;read,write;client_credentials;;ROLE_EXTERNAL_ERF_INTEGRATOR;1800;2000;{};true From 01f1bc35fe05a9c5c93a1da43cc3343f00325c6c Mon Sep 17 00:00:00 2001 From: nivethika Date: Fri, 6 Oct 2017 16:58:13 +0200 Subject: [PATCH 194/197] correct path --- dcompose-stack/radar-cp-hadoop-stack/README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/dcompose-stack/radar-cp-hadoop-stack/README.md b/dcompose-stack/radar-cp-hadoop-stack/README.md index a0c72ffab..ea25d5396 100644 --- a/dcompose-stack/radar-cp-hadoop-stack/README.md +++ b/dcompose-stack/radar-cp-hadoop-stack/README.md @@ -9,7 +9,7 @@ 3. 
Modify the `etc/redcap-integration/radar.yml.template` to configure the properties of Redcap instance and the management portal, and move it to `etc/redcap-integration/radar.yml`. For reference on configuration of this file look at the Readme file here - https://github.com/RADAR-CNS/RADAR-RedcapIntegration#configuration In the REDcap portal under Project Setup, define the Data Trigger as `https:///redcapint/trigger` -4. Move `etc/managementportal/oauth_client_details.csv.template` to `etc/managementportal/oauth_client_details.csv` and change OAuth client credentials for production MP. (Except ManagementPortalapp) +4. Move `etc/managementportal/changelogs/config/liquibase/oauth_client_details.csv.template` to `etc/managementportal/changelogs/config/liquibase/oauth_client_details.csv` and change OAuth client credentials for production MP. (Except ManagementPortalapp) 5. Finally, move `etc/radar.yml.template` to `etc/radar.yml` and edit it, especially concerning the monitor email address configuration. From b8a38a5756eae6bcb480c5a06e8e887ffc23f529 Mon Sep 17 00:00:00 2001 From: nivethika Date: Mon, 9 Oct 2017 10:57:27 +0200 Subject: [PATCH 195/197] correct path --- dcompose-stack/radar-cp-hadoop-stack/docker-compose.yml | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/dcompose-stack/radar-cp-hadoop-stack/docker-compose.yml b/dcompose-stack/radar-cp-hadoop-stack/docker-compose.yml index c986ac195..f33bb4268 100644 --- a/dcompose-stack/radar-cp-hadoop-stack/docker-compose.yml +++ b/dcompose-stack/radar-cp-hadoop-stack/docker-compose.yml @@ -484,6 +484,7 @@ services: - rest-proxy-1 - dashboard - radar-integration + - managementportal-app ports: - "80:80" - "443:443" @@ -498,6 +499,9 @@ services: - default - management - mail + depends_on: + - managementportal-postgresql + - smtp environment: SPRING_PROFILES_ACTIVE: prod,swagger SPRING_DATASOURCE_URL: jdbc:postgresql://managementportal-postgresql:5432/managementportal From f6ac2450f4e3a398c6d3058392811015425e1fdd Mon Sep 17 00:00:00 2001 From: nivethika Date: Mon, 9 Oct 2017 17:38:48 +0200 Subject: [PATCH 196/197] tweaks based on review --- .../radar-cp-hadoop-stack/docker-compose.yml | 16 ++++++++++------ .../radar-cp-hadoop-stack/etc/env.template | 1 + .../liquibase/oauth_client_details.csv.template | 6 +++--- .../radar-cp-hadoop-stack/install-radar-stack.sh | 1 + .../radar-cp-hadoop-stack/postgresql.yml | 14 -------------- ...ils.csv => oauth_client_details.csv.template} | 6 +++--- 6 files changed, 18 insertions(+), 26 deletions(-) delete mode 100644 dcompose-stack/radar-cp-hadoop-stack/postgresql.yml rename images/managementportal/etc/changelogs/config/liquibase/{oauth_client_details.csv => oauth_client_details.csv.template} (52%) diff --git a/dcompose-stack/radar-cp-hadoop-stack/docker-compose.yml b/dcompose-stack/radar-cp-hadoop-stack/docker-compose.yml index f33bb4268..0810b538d 100644 --- a/dcompose-stack/radar-cp-hadoop-stack/docker-compose.yml +++ b/dcompose-stack/radar-cp-hadoop-stack/docker-compose.yml @@ -221,8 +221,8 @@ services: - redcap - default - management - #depends_on: - #- hotstorage + depends_on: + - managementportal-app restart: always volumes: - "./etc/redcap-integration:/usr/local/tomcat/conf/radar" @@ -515,8 +515,12 @@ services: - ./etc/managementportal/changelogs:/changelogs managementportal-postgresql: + image: postgres:9.6.2 + volumes: + - "${MP_POSTGRES_DIR}/:/var/lib/postgresql/" + environment: + POSTGRES_USER : ${POSTGRES_USER} + POSTGRES_PASSWORD: ${POSTGRES_PASSWORD} + POSTGRES_DB: managementportal 
networks: - - management - extends: - file: postgresql.yml - service: managementportal-postgresql \ No newline at end of file + - management \ No newline at end of file diff --git a/dcompose-stack/radar-cp-hadoop-stack/etc/env.template b/dcompose-stack/radar-cp-hadoop-stack/etc/env.template index f047daf46..7817b43e7 100644 --- a/dcompose-stack/radar-cp-hadoop-stack/etc/env.template +++ b/dcompose-stack/radar-cp-hadoop-stack/etc/env.template @@ -12,6 +12,7 @@ HDFS_DATA_DIR_2=/usr/local/var/lib/docker/hdfs-data-2 HDFS_NAME_DIR_1=/usr/local/var/lib/docker/hdfs-name-1 HDFS_NAME_DIR_2=/usr/local/var/lib/docker/hdfs-name-2 MONGODB_DIR=/usr/local/var/lib/docker/mongodb +MP_POSTGRES_DIR=/usr/local/var/lib/docker/postgres POSTGRES_USER= POSTGRES_PASSWORD= FROM_EMAIL= \ No newline at end of file diff --git a/dcompose-stack/radar-cp-hadoop-stack/etc/managementportal/changelogs/config/liquibase/oauth_client_details.csv.template b/dcompose-stack/radar-cp-hadoop-stack/etc/managementportal/changelogs/config/liquibase/oauth_client_details.csv.template index b31608f7e..17801e241 100644 --- a/dcompose-stack/radar-cp-hadoop-stack/etc/managementportal/changelogs/config/liquibase/oauth_client_details.csv.template +++ b/dcompose-stack/radar-cp-hadoop-stack/etc/managementportal/changelogs/config/liquibase/oauth_client_details.csv.template @@ -1,6 +1,6 @@ client_id;resource_ids;client_secret;scope;authorized_grant_types;web_server_redirect_uri;authorities;access_token_validity;refresh_token_validity;additional_information;autoapprove ManagementPortalapp;res_ManagementPortal;my-secret-token-to-change-in-production;read,write;password,refresh_token,authorization_code,implicit;;ROLE_PROJECT_ADMIN,ROLE_USER,ROLE_SYS_ADMIN;1800;3600;{};true pRMT;res_ManagementPortal;;read,write;refresh_token,authorization_code;http://localhost:8080;ROLE_USER;43200;5184000;{};true -radar_restapi;res_ManagementPortal,res_gateway;my-secret-token-to-change-in-production;read;client_credentials;;ROLE_USER;1800;3600;{};true -THINC-IT;res_gateway,res_ManagementPortal;secret;read,write;password,refresh_token,authorization_code,implicit;;ROLE_PROJECT_ADMIN,ROLE_USER,ROLE_SYS_ADMIN;43200;5184000;{};true -radar_redcap_integrator;res_ManagementPortal;my-secrect_token;read,write;client_credentials;;ROLE_EXTERNAL_ERF_INTEGRATOR;1800;2000;{};true +radar_restapi;res_ManagementPortal,res_gateway;;read;client_credentials;;ROLE_USER;1800;3600;{};true +THINC-IT;res_gateway,res_ManagementPortal;;read,write;password,refresh_token,authorization_code,implicit;;ROLE_PROJECT_ADMIN,ROLE_USER,ROLE_SYS_ADMIN;43200;5184000;{};true +radar_redcap_integrator;res_ManagementPortal;;read,write;client_credentials;;ROLE_EXTERNAL_ERF_INTEGRATOR;1800;2000;{};true diff --git a/dcompose-stack/radar-cp-hadoop-stack/install-radar-stack.sh b/dcompose-stack/radar-cp-hadoop-stack/install-radar-stack.sh index 10636bc54..2ef389f74 100755 --- a/dcompose-stack/radar-cp-hadoop-stack/install-radar-stack.sh +++ b/dcompose-stack/radar-cp-hadoop-stack/install-radar-stack.sh @@ -8,6 +8,7 @@ check_parent_exists HDFS_DATA_DIR_2 ${HDFS_DATA_DIR_2} check_parent_exists HDFS_NAME_DIR_1 ${HDFS_NAME_DIR_1} check_parent_exists HDFS_NAME_DIR_2 ${HDFS_NAME_DIR_2} check_parent_exists MONGODB_DIR ${MONGODB_DIR} +check_parent_exists MP_POSTGRES_DIR ${MP_POSTGRES_DIR} if [ -z ${SERVER_NAME} ]; then echo "Set SERVER_NAME variable in .env" diff --git a/dcompose-stack/radar-cp-hadoop-stack/postgresql.yml b/dcompose-stack/radar-cp-hadoop-stack/postgresql.yml deleted file mode 100644 index 
e64e80d7a..000000000 --- a/dcompose-stack/radar-cp-hadoop-stack/postgresql.yml +++ /dev/null @@ -1,14 +0,0 @@ -version: '2.1' -services: - managementportal-postgresql: - image: postgres:9.6.2 - # volumes: - # - ~/volumes/jhipster/managementportal/postgresql/:/var/lib/postgresql/ - environment: - POSTGRES_USER : ${POSTGRES_USER} - POSTGRES_PASSWORD: ${POSTGRES_PASSWORD} - POSTGRES_DB: managementportal - ports: - - 5432:5432 - - diff --git a/images/managementportal/etc/changelogs/config/liquibase/oauth_client_details.csv b/images/managementportal/etc/changelogs/config/liquibase/oauth_client_details.csv.template similarity index 52% rename from images/managementportal/etc/changelogs/config/liquibase/oauth_client_details.csv rename to images/managementportal/etc/changelogs/config/liquibase/oauth_client_details.csv.template index b31608f7e..17801e241 100644 --- a/images/managementportal/etc/changelogs/config/liquibase/oauth_client_details.csv +++ b/images/managementportal/etc/changelogs/config/liquibase/oauth_client_details.csv.template @@ -1,6 +1,6 @@ client_id;resource_ids;client_secret;scope;authorized_grant_types;web_server_redirect_uri;authorities;access_token_validity;refresh_token_validity;additional_information;autoapprove ManagementPortalapp;res_ManagementPortal;my-secret-token-to-change-in-production;read,write;password,refresh_token,authorization_code,implicit;;ROLE_PROJECT_ADMIN,ROLE_USER,ROLE_SYS_ADMIN;1800;3600;{};true pRMT;res_ManagementPortal;;read,write;refresh_token,authorization_code;http://localhost:8080;ROLE_USER;43200;5184000;{};true -radar_restapi;res_ManagementPortal,res_gateway;my-secret-token-to-change-in-production;read;client_credentials;;ROLE_USER;1800;3600;{};true -THINC-IT;res_gateway,res_ManagementPortal;secret;read,write;password,refresh_token,authorization_code,implicit;;ROLE_PROJECT_ADMIN,ROLE_USER,ROLE_SYS_ADMIN;43200;5184000;{};true -radar_redcap_integrator;res_ManagementPortal;my-secrect_token;read,write;client_credentials;;ROLE_EXTERNAL_ERF_INTEGRATOR;1800;2000;{};true +radar_restapi;res_ManagementPortal,res_gateway;;read;client_credentials;;ROLE_USER;1800;3600;{};true +THINC-IT;res_gateway,res_ManagementPortal;;read,write;password,refresh_token,authorization_code,implicit;;ROLE_PROJECT_ADMIN,ROLE_USER,ROLE_SYS_ADMIN;43200;5184000;{};true +radar_redcap_integrator;res_ManagementPortal;;read,write;client_credentials;;ROLE_EXTERNAL_ERF_INTEGRATOR;1800;2000;{};true From b5b970dc68cddba8f7a969e4d8a299bc81741b93 Mon Sep 17 00:00:00 2001 From: nivethika Date: Mon, 9 Oct 2017 18:31:11 +0200 Subject: [PATCH 197/197] added thinc-it topics --- dcompose-stack/radar-cp-hadoop-stack/etc/env.template | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/dcompose-stack/radar-cp-hadoop-stack/etc/env.template b/dcompose-stack/radar-cp-hadoop-stack/etc/env.template index 7817b43e7..107117b0c 100644 --- a/dcompose-stack/radar-cp-hadoop-stack/etc/env.template +++ b/dcompose-stack/radar-cp-hadoop-stack/etc/env.template @@ -1,7 +1,7 @@ SERVER_NAME=localhost SELF_SIGNED_CERT=yes MAINTAINER_EMAIL=me@example.com 
-RADAR_TOPIC_LIST=android_empatica_e4_acceleration,android_empatica_e4_acceleration_output,android_empatica_e4_battery_level,android_empatica_e4_battery_level_output,android_empatica_e4_blood_volume_pulse,android_empatica_e4_blood_volume_pulse_output,android_empatica_e4_electrodermal_activity,android_empatica_e4_electrodermal_activity_output,android_empatica_e4_heartrate,android_empatica_e4_inter_beat_interval,android_empatica_e4_inter_beat_interval_output,android_empatica_e4_sensor_status,android_empatica_e4_sensor_status_output,android_empatica_e4_temperature,android_empatica_e4_temperature_output,application_server_status,application_record_counts,application_uptime,application_external_time,android_phone_battery_level,android_phone_acceleration,android_phone_light,android_pebble2_acceleration,android_pebble2_battery_level,android_pebble2_heart_rate,android_pebble2_heart_rate_filtered,active_questionnaire_phq8,schemaless-key,schemaless-value +RADAR_TOPIC_LIST=android_empatica_e4_acceleration,android_empatica_e4_acceleration_output,android_empatica_e4_battery_level,android_empatica_e4_battery_level_output,android_empatica_e4_blood_volume_pulse,android_empatica_e4_blood_volume_pulse_output,android_empatica_e4_electrodermal_activity,android_empatica_e4_electrodermal_activity_output,android_empatica_e4_heartrate,android_empatica_e4_inter_beat_interval,android_empatica_e4_inter_beat_interval_output,android_empatica_e4_sensor_status,android_empatica_e4_sensor_status_output,android_empatica_e4_temperature,android_empatica_e4_temperature_output,application_server_status,application_record_counts,application_uptime,application_external_time,android_phone_battery_level,android_phone_acceleration,android_phone_light,android_pebble2_acceleration,android_pebble2_battery_level,android_pebble2_heart_rate,android_pebble2_heart_rate_filtered,active_questionnaire_phq8,schemaless-key,schemaless-value,thincit_code_breaker,thincit_spotter,thincit_symbol_check,thincit_trails RADAR_RAW_TOPIC_LIST=android_empatica_e4_acceleration,android_empatica_e4_battery_level,android_empatica_e4_blood_volume_pulse,android_empatica_e4_electrodermal_activity,android_empatica_e4_inter_beat_interval,android_empatica_e4_sensor_status,android_empatica_e4_temperature,application_server_status,application_record_counts,application_uptime,application_external_time,android_phone_battery_level,android_phone_acceleration,android_phone_light,android_pebble2_acceleration,android_pebble2_battery_level,android_pebble2_heart_rate,android_pebble2_heart_rate_filtered,active_questionnaire_phq8 RADAR_AGG_TOPIC_LIST=android_empatica_e4_acceleration_output,android_empatica_e4_battery_level_output,android_empatica_e4_blood_volume_pulse_output,android_empatica_e4_electrodermal_activity_output,android_empatica_e4_heartrate,android_empatica_e4_inter_beat_interval_output,android_empatica_e4_sensor_status_output,android_empatica_e4_temperature_output,application_server_status,application_record_counts,application_uptime,application_external_time,active_questionnaire_phq8 HOTSTORAGE_USERNAME=
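Note on the final patch above: assuming the stack provisions its Kafka topics from RADAR_TOPIC_LIST when it is next brought up, the four new THINC-IT topics can be sanity-checked by listing topics through one of the brokers. This is a hedged sketch, not part of the patch series; the kafka-1 service name and the zookeeper-1:2181 address are assumptions about this stack and may need adjusting.

    # run from dcompose-stack/radar-cp-hadoop-stack after `docker-compose up -d`
    # list all topics via the kafka-1 container and keep only the THINC-IT ones
    docker-compose exec kafka-1 kafka-topics --zookeeper zookeeper-1:2181 --list | grep '^thincit_'

If all four topics (thincit_code_breaker, thincit_spotter, thincit_symbol_check, thincit_trails) appear in the output, the updated env template has taken effect.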