# evaluation.py — prediction-quality evaluation utilities
# (web-scrape page chrome and gutter line numbers removed)
from __future__ import division
from sklearn.metrics import mean_squared_error
import numpy as np
def evaluate(predicted_probabilities, actual_probabilities):
    """Print an evaluation report comparing predictions to ground truth.

    For each sample, the mean absolute error between the predicted and
    actual values is computed and tallied into one of four quality
    buckets (error < 1, < 2, < 5, or >= 5). The per-sample errors, the
    overall MSE, the bucket counts, and the bucket fractions are printed.

    Parameters
    ----------
    predicted_probabilities : sequence of array-like
        Model predictions, one entry per sample.
    actual_probabilities : sequence of array-like
        Ground-truth values, aligned index-for-index with
        ``predicted_probabilities``.

    Returns
    -------
    dict or None
        Summary statistics (``mse`` plus the four bucket counts), or
        ``None`` when there is nothing to evaluate.
    """
    print('\n*** BEGINNING EVALUATION ***\n')
    total = len(predicted_probabilities)
    # Guard: the fraction report at the end divides by total, which
    # raised ZeroDivisionError on empty input in the original code.
    if total == 0:
        print('No predictions to evaluate.')
        return None
    almost_exact = 0
    extremely_good = 0
    average = 0
    bad = 0
    # zip() instead of index-based iteration; np.abs/np.asarray accept
    # both plain sequences and numpy arrays per sample.
    for actual, predicted in zip(actual_probabilities, predicted_probabilities):
        avg_err = np.mean(np.abs(np.asarray(actual) - np.asarray(predicted)))
        if avg_err < 1:
            almost_exact += 1
        elif avg_err < 2:
            extremely_good += 1
        elif avg_err < 5:
            average += 1
        else:
            bad += 1
        print(str(actual)+'\t'+str(predicted)+'\t'+str(avg_err))
    # Equivalent to sklearn.metrics.mean_squared_error (default
    # multioutput='uniform_average') for rectangular inputs, but keeps
    # this function dependent on numpy only.
    mse = np.mean((np.asarray(actual_probabilities, dtype=float)
                   - np.asarray(predicted_probabilities, dtype=float)) ** 2)
    print('MSE: '+str(mse))
    print('almost_exact', 'extremely_good', 'average', 'bad')
    print(almost_exact, extremely_good, average, bad)
    print(almost_exact/total, extremely_good/total, average/total, bad/total)
    return {'mse': float(mse), 'almost_exact': almost_exact,
            'extremely_good': extremely_good, 'average': average, 'bad': bad}