
Dev/bach #13

Merged
merged 15 commits on Jul 18, 2024
1 change: 1 addition & 0 deletions .github/workflows/test-flow.yml
@@ -20,6 +20,7 @@ jobs:
          pip install -e .

      - name: Run test_flows.py
        working-directory: ./tests
        run: python test_flows.py
        env:
          S3_ACCESS_KEY: ${{ secrets.MINIO_ACCESS_KEY_ID }}
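Note: the new working-directory: ./tests key makes this step run test_flows.py from its new location under tests/. A rough local equivalent is sketched below; it assumes, which the diff itself does not show, that S3Helper reads credentials such as S3_ACCESS_KEY from the environment, and the placeholder key is purely illustrative.

# Minimal local sketch of the CI step (assumption: the helper reads
# S3_ACCESS_KEY from the environment; the value below is a placeholder).
import os
import subprocess

os.environ["S3_ACCESS_KEY"] = "<your-minio-access-key>"  # hypothetical credential
# Mirror the workflow's working-directory: ./tests by running from tests/.
subprocess.run(["python", "test_flows.py"], cwd="tests", check=True)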
5 changes: 3 additions & 2 deletions setup.py
@@ -3,7 +3,7 @@

setup(
    name='research-utils',
    version='0.2.1',  # Increment the version number
    version='0.2.2',  # Increment the version number
    description='A helper library for working with S3/Minio, Hugging Face models, and datasets',
    long_description='This library provides utilities for downloading and managing machine learning models and datasets from S3-compatible storage services, and loading them using the Hugging Face libraries.',
    author='Alan',
@@ -15,7 +15,8 @@
        # tokenizers >=0.13.3
        'tokenizers==0.13.3',
        'transformers',
        'datasets==2.20.0',  # Add the datasets library
        'datasets==2.20.0',
        'torch',  # Add the torch library
    ],
    classifiers=[
        'Programming Language :: Python :: 3',
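For context, the description strings above summarize what research-utils provides. A minimal usage sketch, based only on the calls exercised in tests/test_flows.py in this PR (the repository names are the test fixtures, and S3Helper's exact configuration options are not shown in the diff, so treat this as an illustration rather than documented API):

# Illustrative sketch mirroring tests/test_flows.py; credentials are assumed to
# come from the environment, as the CI workflow suggests.
from s3helper import S3Helper, S3HelperAutoTokenizer, s3_load_dataset

S3Helper()  # initialize the S3/MinIO-backed helper
tokenizer = S3HelperAutoTokenizer.from_pretrained("jan-hq-test/tokenizer-tinyllama")
dataset = s3_load_dataset("jan-hq-test/test-dataset", file_format='parquet', split='train')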
20 changes: 0 additions & 20 deletions test_flows.py

This file was deleted.

130 changes: 130 additions & 0 deletions tests/test_flows.py
@@ -0,0 +1,130 @@
import time
import unittest
from io import StringIO
from unittest.mock import MagicMock, patch

from s3helper import (
    S3Helper,
    S3HelperAutoConfig,
    S3HelperAutoModelForCausalLM,
    S3HelperAutoTokenizer,
    s3_load_dataset,
)


class CustomTestResult(unittest.TestResult):
    """TestResult that also records the tests that passed."""

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.successes = []

    def addSuccess(self, test):
        super().addSuccess(test)
        self.successes.append(test)


class CustomTestRunner(unittest.TextTestRunner):
    """TextTestRunner that collects per-run results and prints a summary."""

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.stream = StringIO()
        self.results = []

    def run(self, test):
        result = CustomTestResult()
        start_time = time.time()
        test(result)
        time_taken = time.time() - start_time
        self.results.append((result, time_taken))
        return result

    def print_results(self):
        print("\n=== Test Results ===")
        total_tests = 0
        total_successes = 0
        total_failures = 0
        total_errors = 0
        total_time = 0

        for result, time_taken in self.results:
            total_tests += result.testsRun
            total_successes += len(result.successes)
            total_failures += len(result.failures)
            total_errors += len(result.errors)
            total_time += time_taken

        print(f"Ran {total_tests} tests in {total_time:.3f} seconds")
        print(f"Successes: {total_successes}")
        print(f"Failures: {total_failures}")
        print(f"Errors: {total_errors}")

        print("\nDetailed Results:")
        for result, time_taken in self.results:
            for test in result.successes:
                print(f"PASS: {test._testMethodName}")
            for test, _ in result.failures:
                print(f"FAIL: {test._testMethodName}")
            for test, _ in result.errors:
                test_name = getattr(test, '_testMethodName', str(test))
                print(f"ERROR: {test_name}")

        if total_failures > 0 or total_errors > 0:
            print("\nFailure and Error Details:")
            for result, _ in self.results:
                for test, traceback in result.failures:
                    print(f"\nFAILURE: {test._testMethodName}")
                    print(traceback)
                for test, traceback in result.errors:
                    test_name = getattr(test, '_testMethodName', str(test))
                    print(f"\nERROR: {test_name}")
                    print(traceback)
        else:
            print("\nAll tests passed successfully!")


def test_name(name):
    """Decorator that gives a test function a human-readable __name__."""
    def decorator(func):
        func.__name__ = name
        return func
    return decorator


class TestS3Helper(unittest.TestCase):
    @classmethod
    def setUpClass(cls):
        cls.model_name = "jan-hq-test/tokenizer-tinyllama"
        cls.dataset_name = "jan-hq-test/test-dataset"
        S3Helper()

    @test_name("Tokenizer Loading Test")
    # @patch('s3helper.S3HelperAutoTokenizer.from_pretrained')
    def test_tokenizer_loading(self):
        # mock_tokenizer = MagicMock()
        # mock_from_pretrained.return_value = mock_tokenizer

        tokenizer = S3HelperAutoTokenizer.from_pretrained(self.model_name)

        # mock_from_pretrained.assert_called_once_with(self.model_name)
        self.assertIsNotNone(tokenizer)
        # self.assertEqual(tokenizer, mock_tokenizer)

    @test_name("Dataset Loading Test")
    # @patch('s3helper.s3_load_dataset')
    def test_dataset_loading(self):
        try:
            dataset = s3_load_dataset(self.dataset_name, file_format='parquet', split='train')
            print(dataset)
            self.assertIsNotNone(dataset)
        except Exception as e:
            self.fail(f"s3_load_dataset raised an exception: {e}")

    @test_name("Config Loading Test")
    def test_config_loading(self):
        config = S3HelperAutoConfig.from_pretrained(self.model_name)
        self.assertIsNotNone(config)

    @test_name("Model Loading Test")
    # @patch('s3helper.S3HelperAutoModelForCausalLM.from_pretrained')
    def test_model_loading(self):
        # mock_model = MagicMock()
        # mock_from_pretrained.return_value = mock_model

        model = S3HelperAutoModelForCausalLM.from_pretrained(self.model_name)

        # mock_from_pretrained.assert_called_once_with(self.model_name)
        self.assertIsNotNone(model)
        # self.assertEqual(model, mock_model)


if __name__ == '__main__':
    runner = CustomTestRunner()
    test_suite = unittest.TestLoader().loadTestsFromTestCase(TestS3Helper)
    result = runner.run(test_suite)
    runner.print_results()