.env.example
# Training configuration (LADT_*)
LADT_MODEL_DIR="models" # Name of the persistent storage directory that models will be saved to
LADT_ELASTICSEARCH_ENDPOINT='' # Address of the Elasticsearch endpoint
LADT_MODEL='map.sav' # Path to the SOM map to update
LADT_INDEX='logstash-' # Name of the Elasticsearch index to pull logs from
LADT_TIME_SPAN=900 # Number of seconds back from now to query Elasticsearch for training logs
LADT_MAX_ENTRIES=450000 # Limits the number of entries returned from Elasticsearch for the training query
LADT_ITERS=45000 # Number of training iterations of the SOM
LADT_SERVICE='journal' # Name of the Elasticsearch service to be monitored
LADT_TRAIN_LAG=15 # Number of inference iterations between retrainings of the model
LADT_UPDATE_MODEL=True # Whether to update the existing SOM map instead of training a new one
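# Example endpoint value (hypothetical host, shown for format only):
#   LADT_ELASTICSEARCH_ENDPOINT='https://elasticsearch.example.com:9200'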

# Inference configuration (LADI_*)
LADI_ELASTICSEARCH_ENDPOINT='' # Address of the Elasticsearch endpoint for inference
LADI_MODEL='map.sav' # Path to the SOM map to test against
LADI_INDEX='logstash-' # Name of the Elasticsearch index to pull logs from
LADI_TARGET_INDEX='mcliffor_test_ingest-' # Name of the Elasticsearch index the anomaly data will be pushed back to
LADI_TIME_SPAN=60 # Number of seconds back from now to query Elasticsearch for inference logs
LADI_MAX_ENTRIES=35000 # Limits the number of entries returned from Elasticsearch for the inference query
LADI_SERVICE='journal' # Name of the Elasticsearch service to be monitored
LADI_THRESHOLD=1.5 # Float that tunes the anomaly threshold relative to the max value of the training set
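# Illustrative example (assuming the threshold scales the training max by
# multiplication, per the LADI_THRESHOLD comment above): if the largest
# anomaly score seen during training was 4.0, then LADI_THRESHOLD=1.5 flags
# logs scoring above 4.0 * 1.5 = 6.0 as anomalous.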

# Local run configuration
LOCAL_DATA_FILE=validation_data/Hadoop_2k.json # Local JSON log file to use instead of Elasticsearch
LOCAL_RESULTS_FILE=/tmp/local-results.txt # File the local run writes its results to

# Storage and mode (LAD_*)
LAD_S3_KEY= # The S3 key <REPLACE_WITH_S3_KEY>
LAD_S3_SECRET= # The S3 secret <REPLACE_WITH_S3_SECRET>
LAD_S3_HOST=http://localhost:8080 # The S3 endpoint URL
LAD_S3_BUCKET= # The bucket name for S3 <REPLACE_WITH_BUCKET_NAME>
LAD_MODEL_STORE= # Model storage backend; if S3 is not configured, model files are written to local disk only and not synced with S3
LAD_MODE=train # Set to 'train' to create the models or 'inference' to serve them (defaults to train, then serve for inference)
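
# Usage (a minimal sketch; assumes a POSIX shell and that the application
# reads its settings from the environment):
#   cp .env.example .env      # copy, then fill in endpoints and S3 credentials
#   set -a; . ./.env; set +a  # export every variable defined in the file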