-
Notifications
You must be signed in to change notification settings - Fork 0
/
LinearRegression.py
57 lines (38 loc) · 1.05 KB
/
LinearRegression.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
import numpy as np
from matplotlib import pyplot as plt
def hypothesis(X, theta):
return X.dot(theta)
def cost_function(loss):
m = len(loss)
return np.sum(loss ** 2) / (2 * m)
def gradient_decent(x, y, theta, alpha, m, numIterations):
J_hist = []
for i in range(0, numIterations):
loss = hypothesis(x, theta) - y
J_hist.append(cost_function(loss))
# update theta
theta -= (alpha/m) * np.dot(x.T, loss)
return theta, J_hist
data = np.genfromtxt('data/data1.csv', delimiter=",")
X, y = data[:, 0], data[:, 1]
m, n = data.shape
XMat = np.ones([m, 2])
yMat = y
for i in range(m):
XMat[i, 1] = X[i]
numIterations= 10000
alpha = 0.0005
theta = np.ones(n)
print("Cost for init params:")
print(cost_function(hypothesis(XMat, theta) - yMat))
theta, Jhist = gradient_decent(XMat, yMat, theta, alpha, XMat.shape[0], numIterations)
print(theta)
plt.figure(1)
plt.scatter(XMat[:,1], yMat, marker="+", c="r")
plt.plot(
range(100),
theta[1]*range(100) + theta[0]
)
plt.figure(2)
plt.plot(Jhist)
plt.show()