-
Notifications
You must be signed in to change notification settings - Fork 0
/
autoencoder.py
56 lines (43 loc) · 1.57 KB
/
autoencoder.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
import mlflow
import argparse
import sys
import pandas as pd
import mlflow.tensorflow
from pprint import pprint
from fetch_data import fetch_logged_data
from log_transformer import LogTransformer
from autoencoder_estimator import AutoEncoderEstimator
from sklearn.pipeline import Pipeline
DATA_URL = "datasets/unprocessed.csv"  # path to the raw training CSV, relative to the working dir
# Enable auto-logging to MLflow to capture TensorBoard metrics.
# Both are enabled because the pipeline mixes sklearn (Pipeline) and a
# TensorFlow-backed estimator; order of these two calls is preserved as-is.
mlflow.sklearn.autolog()
mlflow.tensorflow.autolog()
# CLI options; parser lives at module level so main() can reference it.
# NOTE(review): --epochs/--batch_size are parsed in main() but not visibly
# forwarded to the estimator in this file — confirm they are consumed elsewhere.
parser = argparse.ArgumentParser()
parser.add_argument("--epochs", default=1000, type=int, help="number of epochs")
parser.add_argument("--batch_size", default=100, type=int, help="batch size")
def main(argv):
    """Train the log-transform + autoencoder pipeline inside an MLflow run.

    Parses CLI options, loads and cleans the training CSV, fits the
    sklearn Pipeline (autologged by MLflow), then prints every piece of
    data MLflow recorded for the run.

    Args:
        argv: full process argv; argv[0] (the program name) is stripped
            before argparse sees it.
    """
    args = parser.parse_args(argv[1:])
    # NOTE(review): args.epochs / args.batch_size are never used below —
    # confirm whether AutoEncoderEstimator should receive them.

    # Prepare training data: drop rows whose 'ip' column holds the
    # placeholder '-' and reindex so row labels are contiguous again.
    data = pd.read_csv(DATA_URL)
    data = data.drop(data.loc[data['ip'] == '-'].index)
    data = data.reset_index(drop=True)

    # Train: log-transform the features, then fit the autoencoder.
    pipe = Pipeline(
        [
            ("transform", LogTransformer()),
            ("autoencoder", AutoEncoderEstimator()),
        ],
        verbose=True,
    )
    with mlflow.start_run() as run:
        pipe.fit_transform(data)
        print(f"Logged data and model in run: {run.info.run_id}")
        # # For loading model in the old way (load from pickle)
        # mlflow.sklearn.log_model(pipe, 'model')
        # # Save training data
        # mlflow.log_artifact('datasets/unprocessed.csv')

    # Show what autologging recorded. `logged` is a distinct name so the
    # training DataFrame above is not shadowed inside the loop.
    for key, logged in fetch_logged_data(run.info.run_id).items():
        print(f"\n---------- logged {key} ----------")
        pprint(logged)
if __name__ == "__main__":
    # Script entry point: pass the full argv so main() can strip argv[0].
    main(sys.argv)