GPU support + Linear SVC/Linear SVR + Gradient Check #67

Status: Open. Wants to merge 33 commits into base: master.

Changes from all commits (33 commits):
bd70060  Initial Implementation (210057zzh, Oct 17, 2022)
252d641  add tutorials for KNN (210057zzh, Oct 18, 2022)
fd13047  Add link for sources (210057zzh, Oct 18, 2022)
d293083  remove doc (210057zzh, Oct 18, 2022)
dcd2a1a  vectorized LinearSVC (210057zzh, Oct 18, 2022)
7117e9f  support hinge loss (210057zzh, Oct 18, 2022)
c0c5283  fix format (210057zzh, Oct 19, 2022)
b4db3fd  Merge branch 'master' into LinearSVC (210057zzh, Oct 21, 2022)
65bbe31  add support for multiclass (210057zzh, Oct 22, 2022)
97872c4  change n_informative (210057zzh, Oct 25, 2022)
e1ba7d2  implemented predict and decision function (210057zzh, Oct 25, 2022)
8193a94  add skeleton (210057zzh, Oct 28, 2022)
cbd5d01  implemented svr (210057zzh, Oct 28, 2022)
9eefcdb  implemented svr (210057zzh, Oct 28, 2022)
4abcbc9  black the repo (210057zzh, Oct 28, 2022)
7638bed  add docs for linearSVC (210057zzh, Oct 31, 2022)
9610f73  fix doc (210057zzh, Oct 31, 2022)
63cdd9a  add docs for linearSVR (210057zzh, Oct 31, 2022)
943224a  add dpp formulation (210057zzh, Nov 5, 2022)
22afc05  add gradient support (210057zzh, Nov 5, 2022)
4dd0f57  Merge branch 'LinearSVC' into LinearSVR (210057zzh, Nov 5, 2022)
9364f72  make format (210057zzh, Nov 5, 2022)
3c0d3ce  fix tests (210057zzh, Nov 5, 2022)
60f670c  add pylayers to linear svr (210057zzh, Nov 11, 2022)
902e13f  make format (210057zzh, Nov 11, 2022)
28840a4  add knnclassifier gradcheck (210057zzh, Nov 11, 2022)
899e6f1  add gradcheck to neighbors (210057zzh, Nov 11, 2022)
688c439  add gradcheck to all (210057zzh, Nov 11, 2022)
2a0f810  add lasso gpu support (210057zzh, Nov 14, 2022)
a5d4336  add gpu support for ridge and linear regression (210057zzh, Nov 14, 2022)
38b4400  add gpu for neighbors (210057zzh, Nov 14, 2022)
f027972  add gpu support (210057zzh, Nov 14, 2022)
c629c32  add gpu support (210057zzh, Nov 14, 2022)
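
For reference on the LinearSVC/LinearSVR commits above ("support hinge loss", "add dpp formulation", "add pylayers to linear svr"): the textbook objectives for these two estimators are the L2-regularized hinge loss and the epsilon-insensitive loss written below. This is stated only as the standard formulation these commits presumably target; the PR's actual DPP encoding is not visible in this diff excerpt and may differ.

    % Standard formulations (assumption, not extracted from the PR):
    % LinearSVC, hinge loss:
    \min_{w,b}\; \tfrac{1}{2}\lVert w\rVert_2^2 + C \sum_{i=1}^{n} \max\bigl(0,\; 1 - y_i (w^\top x_i + b)\bigr)
    % LinearSVR, \varepsilon-insensitive loss:
    \min_{w,b}\; \tfrac{1}{2}\lVert w\rVert_2^2 + C \sum_{i=1}^{n} \max\bigl(0,\; \lvert y_i - (w^\top x_i + b)\rvert - \varepsilon\bigr)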
5 changes: 5 additions & 0 deletions  tests/unit/gaussian_naive_bayes/gaussian_nb_test.py

@@ -3,6 +3,7 @@
 import torch
 import torchml as ml
 from sklearn.naive_bayes import GaussianNB
+from torch.autograd import gradcheck


 BSZ = 128
@@ -25,6 +26,10 @@ def test_fit(self):

         self.assertTrue(np.allclose(ref_preds, model_preds.numpy()))
         self.assertTrue(np.allclose(ref_preds, model_forward.numpy()))
+        inputX = torch.from_numpy(X)
+        inputX.requires_grad = True
+        self.assertTrue(gradcheck(model.predict, inputX, eps=1e-6, atol=1e-3))
+        self.assertTrue(gradcheck(model, inputX, eps=1e-6, atol=1e-3))


 if __name__ == "__main__":
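
The gradcheck assertions added above follow the standard torch.autograd.gradcheck recipe: the input tensor must have requires_grad=True, and double precision is needed for the finite-difference comparison to be reliable (these tests get float64 for free, since np.random.randn returns float64 and torch.from_numpy preserves the dtype). A minimal standalone sketch of the same recipe, using a toy function rather than a torchml model:

    import torch
    from torch.autograd import gradcheck

    # Toy differentiable function standing in for model.predict / the module's forward.
    def toy_predict(x: torch.Tensor) -> torch.Tensor:
        return (x * 2.0).sum(dim=1, keepdim=True)

    x = torch.randn(8, 5, dtype=torch.float64, requires_grad=True)
    # gradcheck perturbs x by eps and compares numerical vs. analytical Jacobians.
    assert gradcheck(toy_predict, (x,), eps=1e-6, atol=1e-3)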
155 changes: 98 additions & 57 deletions  tests/unit/linear_model/lasso_test.py

@@ -3,6 +3,7 @@
 import torch
 import torchml as ml
 import sklearn.linear_model as linear_model
+from torch.autograd import gradcheck


 BSZ = 128
@@ -11,70 +12,110 @@

 class TestLasso(unittest.TestCase):
     def test_fit(self):
-        X = np.random.randn(BSZ, DIM)
-        y = np.random.randn(BSZ, 1)
-
-        ref = linear_model.Lasso(fit_intercept=False)
-        ref.fit(X, y)
-        ref_preds = ref.predict(X)
-
-        model = ml.linear_model.Lasso()
-        model.fit(torch.from_numpy(X), torch.from_numpy(y))
-        model_preds = model.predict(torch.from_numpy(X))
-        model_forward = model(torch.from_numpy(X))
-
-        self.assertTrue(
-            np.allclose(ref_preds, model_preds[0].detach().numpy().flatten(), atol=1e-3)
-        )
-        self.assertTrue(
-            np.allclose(
-                ref_preds, model_forward[0].detach().numpy().flatten(), atol=1e-3
-            )
-        )
+        for i in range(2):
+            device = torch.device("cuda" if torch.cuda.is_available() and i else "cpu")
+
+            X = np.random.randn(BSZ, DIM)
+            y = np.random.randn(BSZ, 1)
+
+            ref = linear_model.Lasso(fit_intercept=False)
+            ref.fit(X, y)
+            ref_preds = ref.predict(X)
+
+            model = ml.linear_model.Lasso()
+            model.fit(torch.from_numpy(X).to(device), torch.from_numpy(y).to(device))
+            model_preds = model.predict(torch.from_numpy(X).to(device))
+            model_forward = model(torch.from_numpy(X).to(device))
+
+            self.assertTrue(
+                np.allclose(
+                    ref_preds,
+                    model_preds[0].detach().cpu().numpy().flatten(),
+                    atol=1e-3,
+                )
+            )
+            self.assertTrue(
+                np.allclose(
+                    ref_preds,
+                    model_forward[0].detach().cpu().numpy().flatten(),
+                    atol=1e-3,
+                )
+            )
+
+            inputX = torch.from_numpy(X).to(device)
+            inputX.requires_grad = True
+            self.assertTrue(gradcheck(model.predict, inputX, eps=1e-6, atol=1e-3))
+            self.assertTrue(gradcheck(model, inputX, eps=1e-6, atol=1e-3))

     def test_fit_intercept(self):
-        X = np.random.randn(BSZ, DIM)
-        y = np.random.randn(BSZ, 1)
-
-        ref = linear_model.Lasso(fit_intercept=True)
-        ref.fit(X, y)
-        ref_preds = ref.predict(X)
-
-        model = ml.linear_model.Lasso(fit_intercept=True)
-        model.fit(torch.from_numpy(X), torch.from_numpy(y))
-        model_preds = model.predict(torch.from_numpy(X))
-        model_forward = model(torch.from_numpy(X))
-
-        self.assertTrue(
-            np.allclose(ref_preds, model_preds[0].detach().numpy().flatten(), atol=1e-3)
-        )
-        self.assertTrue(
-            np.allclose(
-                ref_preds, model_forward[0].detach().numpy().flatten(), atol=1e-3
-            )
-        )
+        for i in range(2):
+            device = torch.device("cuda" if torch.cuda.is_available() and i else "cpu")
+            X = np.random.randn(BSZ, DIM)
+            y = np.random.randn(BSZ, 1)
+
+            ref = linear_model.Lasso(fit_intercept=True)
+            ref.fit(X, y)
+            ref_preds = ref.predict(X)
+
+            model = ml.linear_model.Lasso(fit_intercept=True)
+            model.fit(torch.from_numpy(X).to(device), torch.from_numpy(y).to(device))
+            model_preds = model.predict(torch.from_numpy(X).to(device))
+            model_forward = model(torch.from_numpy(X).to(device))
+
+            self.assertTrue(
+                np.allclose(
+                    ref_preds,
+                    model_preds[0].detach().cpu().numpy().flatten(),
+                    atol=1e-3,
+                )
+            )
+            self.assertTrue(
+                np.allclose(
+                    ref_preds,
+                    model_forward[0].detach().cpu().numpy().flatten(),
+                    atol=1e-3,
+                )
+            )
+
+            inputX = torch.from_numpy(X).to(device)
+            inputX.requires_grad = True
+            self.assertTrue(gradcheck(model.predict, inputX, eps=1e-6, atol=1e-3))
+            self.assertTrue(gradcheck(model, inputX, eps=1e-6, atol=1e-3))

     def test_fit_positive(self):
-        X = np.random.randn(BSZ, DIM)
-        y = np.random.randn(BSZ, 1)
-
-        ref = linear_model.Lasso(fit_intercept=False, positive=True)
-        ref.fit(X, y)
-        ref_preds = ref.predict(X)
-
-        model = ml.linear_model.Lasso(fit_intercept=False, positive=True)
-        model.fit(torch.from_numpy(X), torch.from_numpy(y))
-        model_preds = model.predict(torch.from_numpy(X))
-        model_forward = model(torch.from_numpy(X))
-
-        self.assertTrue(
-            np.allclose(ref_preds, model_preds[0].detach().numpy().flatten(), atol=1e-3)
-        )
-        self.assertTrue(
-            np.allclose(
-                ref_preds, model_forward[0].detach().numpy().flatten(), atol=1e-3
-            )
-        )
+        for i in range(2):
+            device = torch.device("cuda" if torch.cuda.is_available() and i else "cpu")
+            X = np.random.randn(BSZ, DIM)
+            y = np.random.randn(BSZ, 1)
+
+            ref = linear_model.Lasso(fit_intercept=False, positive=True)
+            ref.fit(X, y)
+            ref_preds = ref.predict(X)
+
+            model = ml.linear_model.Lasso(fit_intercept=False, positive=True)
+            model.fit(torch.from_numpy(X).to(device), torch.from_numpy(y).to(device))
+            model_preds = model.predict(torch.from_numpy(X).to(device))
+            model_forward = model(torch.from_numpy(X).to(device))
+
+            self.assertTrue(
+                np.allclose(
+                    ref_preds,
+                    model_preds[0].detach().cpu().numpy().flatten(),
+                    atol=1e-3,
+                )
+            )
+            self.assertTrue(
+                np.allclose(
+                    ref_preds,
+                    model_forward[0].detach().cpu().numpy().flatten(),
+                    atol=1e-3,
+                )
+            )
+
+            inputX = torch.from_numpy(X).to(device)
+            inputX.requires_grad = True
+            self.assertTrue(gradcheck(model.predict, inputX, eps=1e-6, atol=1e-3))
+            self.assertTrue(gradcheck(model, inputX, eps=1e-6, atol=1e-3))


 if __name__ == "__main__":
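
The `for i in range(2)` pattern above runs each test body twice: iteration 0 forces CPU, and iteration 1 selects CUDA only when torch.cuda.is_available() is true (otherwise it silently repeats the CPU run). An equivalent, slightly more explicit sketch of the same idea; the helper name is illustrative and not part of the PR:

    import torch

    def devices_under_test():
        # Illustrative helper, not from the PR: always yield CPU, add CUDA if present.
        yield torch.device("cpu")
        if torch.cuda.is_available():
            yield torch.device("cuda")

    for device in devices_under_test():
        x = torch.randn(4, 3, dtype=torch.float64).to(device)
        # ... fit/predict on `device`, then bring results back for NumPy comparison:
        x_np = x.detach().cpu().numpy()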
36 changes: 22 additions & 14 deletions  tests/unit/linear_model/linear_regression_test.py

@@ -3,6 +3,7 @@
 import torch
 import torchml as ml
 import sklearn.linear_model as linear_model
+from torch.autograd import gradcheck


 BSZ = 128
@@ -11,20 +12,27 @@

 class TestLinearRegression(unittest.TestCase):
     def test_fit(self):
-        X = np.random.randn(BSZ, DIM)
-        y = np.random.randn(BSZ, 1)
-
-        ref = linear_model.LinearRegression(fit_intercept=False)
-        ref.fit(X, y)
-        ref_preds = ref.predict(X)
-
-        model = ml.linear_model.LinearRegression(fit_intercept=False)
-        model.fit(torch.from_numpy(X), torch.from_numpy(y))
-        model_preds = model.predict(torch.from_numpy(X))
-        model_forward = model(torch.from_numpy(X))
-
-        self.assertTrue(np.allclose(ref_preds, model_preds.numpy()))
-        self.assertTrue(np.allclose(ref_preds, model_forward.numpy()))
+        for i in range(2):
+            device = torch.device("cuda" if torch.cuda.is_available() and i else "cpu")
+            X = np.random.randn(BSZ, DIM)
+            y = np.random.randn(BSZ, 1)
+
+            ref = linear_model.LinearRegression(fit_intercept=False)
+            ref.fit(X, y)
+            ref_preds = ref.predict(X)
+
+            model = ml.linear_model.LinearRegression(fit_intercept=False)
+            model.fit(torch.from_numpy(X).to(device), torch.from_numpy(y).to(device))
+            model_preds = model.predict(torch.from_numpy(X).to(device))
+            model_forward = model(torch.from_numpy(X).to(device))
+
+            self.assertTrue(np.allclose(ref_preds, model_preds.cpu().numpy()))
+            self.assertTrue(np.allclose(ref_preds, model_forward.cpu().numpy()))
+
+            inputX = torch.from_numpy(X).to(device)
+            inputX.requires_grad = True
+            self.assertTrue(gradcheck(model.predict, inputX, eps=1e-6, atol=1e-3))
+            self.assertTrue(gradcheck(model, inputX, eps=1e-6, atol=1e-3))


 if __name__ == "__main__":
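
One detail worth noting in the updated assertions: the scikit-learn reference predictions are plain NumPy arrays on the host, so the torchml outputs are detached and moved back with .cpu() before np.allclose can compare them. A tiny standalone illustration of that round trip (toy tensors, not the torchml API):

    import numpy as np
    import torch

    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    preds = torch.arange(6, dtype=torch.float64, device=device).reshape(3, 2)
    ref_preds = np.arange(6, dtype=np.float64).reshape(3, 2)

    # Device tensors must be copied back to host memory before NumPy can read them.
    assert np.allclose(ref_preds, preds.detach().cpu().numpy())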
65 changes: 40 additions & 25 deletions  tests/unit/linear_model/ridge_test.py

@@ -3,6 +3,7 @@
 import torch
 import torchml as ml
 import sklearn.linear_model as linear_model
+from torch.autograd import gradcheck


 BSZ = 128
@@ -11,36 +12,50 @@

 class TestRidge(unittest.TestCase):
     def test_fit(self):
-        X = np.random.randn(BSZ, DIM)
-        y = np.random.randn(BSZ, 1)
-
-        ref = linear_model.Ridge(fit_intercept=False)
-        ref.fit(X, y)
-        ref_preds = ref.predict(X)
-
-        model = ml.linear_model.Ridge(fit_intercept=False)
-        model.fit(torch.from_numpy(X), torch.from_numpy(y))
-        model_preds = model.predict(torch.from_numpy(X))
-        model_forward = model(torch.from_numpy(X))
-
-        self.assertTrue(np.allclose(ref_preds, model_preds.numpy()))
-        self.assertTrue(np.allclose(ref_preds, model_forward.numpy()))
+        for i in range(2):
+            device = torch.device("cuda" if torch.cuda.is_available() and i else "cpu")
+            X = np.random.randn(BSZ, DIM)
+            y = np.random.randn(BSZ, 1)
+
+            ref = linear_model.Ridge(fit_intercept=False)
+            ref.fit(X, y)
+            ref_preds = ref.predict(X)
+
+            model = ml.linear_model.Ridge(fit_intercept=False)
+            model.fit(torch.from_numpy(X).to(device), torch.from_numpy(y).to(device))
+            model_preds = model.predict(torch.from_numpy(X).to(device))
+            model_forward = model(torch.from_numpy(X).to(device))
+
+            self.assertTrue(np.allclose(ref_preds, model_preds.cpu().numpy()))
+            self.assertTrue(np.allclose(ref_preds, model_forward.cpu().numpy()))
+
+            inputX = torch.from_numpy(X).to(device)
+            inputX.requires_grad = True
+            self.assertTrue(gradcheck(model.predict, inputX, eps=1e-6, atol=1e-3))
+            self.assertTrue(gradcheck(model, inputX, eps=1e-6, atol=1e-3))

     def test_fit_intercept(self):
-        X = np.random.randn(BSZ, DIM)
-        y = np.random.randn(BSZ, 1)
-
-        ref = linear_model.Ridge(fit_intercept=True)
-        ref.fit(X, y)
-        ref_preds = ref.predict(X)
-
-        model = ml.linear_model.Ridge(fit_intercept=True)
-        model.fit(torch.from_numpy(X), torch.from_numpy(y))
-        model_preds = model.predict(torch.from_numpy(X))
-        model_forward = model(torch.from_numpy(X))
-
-        self.assertTrue(np.allclose(ref_preds, model_preds.numpy()))
-        self.assertTrue(np.allclose(ref_preds, model_forward.numpy()))
+        for i in range(2):
+            device = torch.device("cuda" if torch.cuda.is_available() and i else "cpu")
+            X = np.random.randn(BSZ, DIM)
+            y = np.random.randn(BSZ, 1)
+
+            ref = linear_model.Ridge(fit_intercept=True)
+            ref.fit(X, y)
+            ref_preds = ref.predict(X)
+
+            model = ml.linear_model.Ridge(fit_intercept=True)
+            model.fit(torch.from_numpy(X).to(device), torch.from_numpy(y).to(device))
+            model_preds = model.predict(torch.from_numpy(X).to(device))
+            model_forward = model(torch.from_numpy(X).to(device))
+
+            self.assertTrue(np.allclose(ref_preds, model_preds.cpu().numpy()))
+            self.assertTrue(np.allclose(ref_preds, model_forward.cpu().numpy()))
+
+            inputX = torch.from_numpy(X).to(device)
+            inputX.requires_grad = True
+            self.assertTrue(gradcheck(model.predict, inputX, eps=1e-6, atol=1e-3))
+            self.assertTrue(gradcheck(model, inputX, eps=1e-6, atol=1e-3))


 if __name__ == "__main__":