Skip to content

Commit

Permalink
refactor(python): add timestamp for logger (#22176)
Browse files Browse the repository at this point in the history
  • Loading branch information
hongbo-miao authored Jan 1, 2025
1 parent 44b7b52 commit dc66aff
Show file tree
Hide file tree
Showing 50 changed files with 388 additions and 202 deletions.
10 changes: 7 additions & 3 deletions aerospace/hm-aerosandbox/src/main.py
Original file line number Diff line number Diff line change
Expand Up @@ -7,6 +7,8 @@
import aerosandbox.tools.pretty_plots as p
import matplotlib.pyplot as plt

logger = logging.getLogger(__name__)


def main() -> None:
wing_airfoil = asb.Airfoil("sd7037")
Expand Down Expand Up @@ -110,7 +112,7 @@ def main() -> None:
)
aero = vlm.run()
for k, v in aero.items():
logging.info(f"{k.rjust(4)} : {v}")
logger.info(f"{k.rjust(4)} : {v}")
vlm.draw(show_kwargs=dict(jupyter_backend="static"))

# Operating Point Optimization
Expand All @@ -126,7 +128,7 @@ def main() -> None:
opti.minimize(-L_over_D)
sol = opti.solve()
best_alpha = sol(alpha)
logging.info(f"Alpha for max L/D: {best_alpha:.3f} deg")
logger.info(f"Alpha for max L/D: {best_alpha:.3f} deg")

# Aerodynamic Shape Optimization
opti = asb.Opti()
Expand Down Expand Up @@ -183,5 +185,7 @@ def main() -> None:


if __name__ == "__main__":
    # Configure logging once at the entry point; timestamped format makes
    # long optimization runs easier to trace. (The diff artifact that left a
    # second, stale basicConfig(level=...) call here is removed — only the
    # first basicConfig call has any effect.)
    logging.basicConfig(
        level=logging.INFO, format="%(asctime)s - %(levelname)s - %(message)s"
    )
    main()
6 changes: 5 additions & 1 deletion aerospace/hm-openaerostruct/src/main.py
Original file line number Diff line number Diff line change
Expand Up @@ -7,6 +7,8 @@
from openaerostruct.geometry.utils import generate_mesh
from openmdao.api import n2

logger = logging.getLogger(__name__)


def main() -> None:
# Create a dictionary to store options about the mesh
Expand Down Expand Up @@ -102,5 +104,7 @@ def main() -> None:


if __name__ == "__main__":
    # Entry-point logging setup with a timestamped format. The stale
    # duplicate basicConfig(level=logging.INFO) left by the diff rendering is
    # dropped: logging.basicConfig is a no-op after the first effective call.
    logging.basicConfig(
        level=logging.INFO, format="%(asctime)s - %(levelname)s - %(message)s"
    )
    main()
18 changes: 11 additions & 7 deletions api-rust/scripts/download_model.py
Original file line number Diff line number Diff line change
Expand Up @@ -5,6 +5,8 @@
import torch
import torchvision.models as models

logger = logging.getLogger(__name__)


def download_labels():
labels_url = (
Expand All @@ -13,30 +15,32 @@ def download_labels():
labels_path = Path("models/labels.txt")

if not labels_path.exists():
logging.info("Downloading labels...")
logger.info("Downloading labels...")
with httpx.Client() as client:
response = client.get(labels_url)
labels_path.write_bytes(response.content)
logging.info("Labels downloaded successfully")
logger.info("Labels downloaded successfully")
else:
logging.info("Labels file already exists")
logger.info("Labels file already exists")


def download_resnet18():
    """Download ResNet18, trace it with TorchScript, and save models/resnet18.ot.

    The download is skipped when the traced model file already exists.
    Stale duplicated ``logging.info`` lines (diff-rendering artifacts next to
    their ``logger.info`` replacements) are removed.
    """
    model_path = Path("models/resnet18.ot")

    if not model_path.exists():
        logger.info("Downloading ResNet18...")
        model = models.resnet18(weights=models.ResNet18_Weights.IMAGENET1K_V1)
        # Switch to eval mode so tracing captures inference-time behavior
        # (e.g. batch-norm running statistics, no dropout).
        model.eval()
        # Trace with a dummy ImageNet-shaped (1, 3, 224, 224) input so the
        # model is serialized as TorchScript and loadable outside Python.
        traced_model = torch.jit.trace(model, torch.randn(1, 3, 224, 224))
        traced_model.save(model_path)
        logger.info("Model downloaded and saved successfully")
    else:
        logger.info("Model file already exists")


if __name__ == "__main__":
    # Timestamped log format for the download steps below. The duplicated
    # plain basicConfig call (a diff-rendering leftover) is removed; only the
    # first basicConfig call takes effect anyway.
    logging.basicConfig(
        level=logging.INFO, format="%(asctime)s - %(levelname)s - %(message)s"
    )
    download_resnet18()
    download_labels()
12 changes: 8 additions & 4 deletions cloud-computing/hm-ray/applications/calculate/src/main.py
Original file line number Diff line number Diff line change
Expand Up @@ -3,6 +3,8 @@

import ray

logger = logging.getLogger(__name__)


@ray.remote
def square(n: int) -> int:
Expand All @@ -16,17 +18,19 @@ def sum_list(numbers: list[int]) -> int:


if __name__ == "__main__":
    # Timestamped logging for the Ray demo; the stale duplicated
    # basicConfig/logging.info lines left by the diff rendering are removed
    # so each statement executes exactly once.
    logging.basicConfig(
        level=logging.INFO, format="%(asctime)s - %(levelname)s - %(message)s"
    )
    ray.init()
    logger.info(ray.cluster_resources())

    # Fan out one remote square() task per number, then gather the results.
    numbers = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
    squared_tasks = [square.remote(n) for n in numbers]
    squared_results: list[int] = ray.get(squared_tasks)
    logger.info(f"{squared_results = }")

    # Reduce the squared values with a single remote sum_list() task.
    sum_task = sum_list.remote(squared_results)
    total_sum = ray.get(sum_task)
    logger.info(f"{total_sum = }")

    ray.shutdown()
Original file line number Diff line number Diff line change
Expand Up @@ -7,6 +7,8 @@
import pandas as pd
import ray

logger = logging.getLogger(__name__)


@ray.remote
def process_flight_data(
Expand Down Expand Up @@ -36,10 +38,12 @@ def process_flight_data(


if __name__ == "__main__":
logging.basicConfig(level=logging.INFO)
logging.basicConfig(
level=logging.INFO, format="%(asctime)s - %(levelname)s - %(message)s"
)

ray.init()
logging.info(ray.cluster_resources())
logger.info(ray.cluster_resources())

mlflow_tracking_server_host = config.MLFLOW_TRACKING_SERVER_HOST
mlflow_tracking_server_user_name = config.MLFLOW_TRACKING_USERNAME
Expand Down Expand Up @@ -88,7 +92,7 @@ def process_flight_data(

results = ray.get(tasks)
for i, df_head in enumerate(results):
logging.info(f"Experiment {i}")
logging.info(f"{df_head = }")
logger.info(f"Experiment {i}")
logger.info(f"{df_head = }")

ray.shutdown()
Original file line number Diff line number Diff line change
Expand Up @@ -9,7 +9,10 @@
from pyspark.sql import DataFrame
from pyspark.sql.functions import col, concat, date_format, from_unixtime, lit, when

logging.basicConfig(level=logging.INFO)
logging.basicConfig(
level=logging.INFO, format="%(asctime)s - %(levelname)s - %(message)s"
)
logger = logging.getLogger(__name__)

raw_parquet_paths = [
"s3://hm-production-bucket/data/raw-parquet/adsb_2x_flight_trace_data/"
Expand Down Expand Up @@ -142,7 +145,7 @@ def add_coordinate_column(
df = s3_node.toDF()

if df.isEmpty():
logging.info("DataFrame is empty.")
logger.info("DataFrame is empty.")
job.commit()
os._exit(os.EX_OK)

Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -7,7 +7,10 @@
from awsglue.utils import getResolvedOptions
from pyspark.context import SparkContext

logging.basicConfig(level=logging.INFO)
logging.basicConfig(
level=logging.INFO, format="%(asctime)s - %(levelname)s - %(message)s"
)
logger = logging.getLogger(__name__)

raw_parquet_paths = ["s3://hm-production-bucket/data/parquet/motor/"]
delta_table_path = "s3://hm-production-bucket/data/delta-tables/motor_data/"
Expand All @@ -34,7 +37,7 @@
df = s3_node.toDF()

if df.isEmpty():
logging.info("DataFrame is empty.")
logger.info("DataFrame is empty.")
job.commit()
os._exit(os.EX_OK)

Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -5,9 +5,11 @@
import torch.utils.data.distributed
from torchvision import datasets, transforms

logger = logging.getLogger(__name__)


def get_test_data_loader(test_batch_size, training_dir, **kwargs):
logging.info("Get test data loader")
logger.info("Get test data loader")
return torch.utils.data.DataLoader(
datasets.MNIST(
training_dir,
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -5,9 +5,11 @@
import torch.utils.data.distributed
from torchvision import datasets, transforms

logger = logging.getLogger(__name__)


def get_train_data_loader(batch_size, training_dir, is_distributed, **kwargs):
logging.info("Get train data loader")
logger.info("Get train data loader")
dataset = datasets.MNIST(
training_dir,
train=True,
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -5,8 +5,10 @@
import torch.utils.data
import torch.utils.data.distributed

logger = logging.getLogger(__name__)


def save_model(model, model_dir):
    """Serialize *model*'s state dict to ``<model_dir>/model.pth``.

    The model is moved to CPU first so the checkpoint loads on machines
    without a GPU. The stale duplicated ``logging.info`` line (diff-rendering
    artifact next to its ``logger.info`` replacement) is removed.
    """
    logger.info("Save the model.")
    path = os.path.join(model_dir, "model.pth")
    torch.save(model.cpu().state_dict(), path)
Original file line number Diff line number Diff line change
Expand Up @@ -5,6 +5,8 @@
import torch.utils.data
import torch.utils.data.distributed

logger = logging.getLogger(__name__)


def test(model, test_loader, device):
model.eval()
Expand All @@ -21,7 +23,7 @@ def test(model, test_loader, device):
correct += pred.eq(target.view_as(pred)).sum().item()

test_loss /= len(test_loader.dataset)
logging.info(
logger.info(
"Test set: Average loss: {:.4f}, Accuracy: {}/{} ({:.0f}%)\n".format(
test_loss,
correct,
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -14,13 +14,15 @@
from utils.save_model import save_model
from utils.test import test

logger = logging.getLogger(__name__)


def train(args):
is_distributed = len(args.hosts) > 1 and args.backend is not None
logging.info("Distributed training:", is_distributed)
logger.info("Distributed training:", is_distributed)

use_cuda = args.num_gpus > 0
logging.info("Number of gpus available:", args.num_gpus)
logger.info("Number of gpus available:", args.num_gpus)
kwargs = {"num_workers": 1, "pin_memory": True} if use_cuda else {}
device = torch.device("cuda" if use_cuda else "cpu")

Expand All @@ -33,7 +35,7 @@ def train(args):
dist.init_process_group(
backend=args.backend, rank=host_rank, world_size=world_size
)
logging.info(
logger.info(
"Initialized the distributed environment: '{}' backend on {} nodes. ".format(
args.backend, dist.get_world_size()
)
Expand All @@ -52,15 +54,15 @@ def train(args):
)
test_loader = get_test_data_loader(args.test_batch_size, args.data_dir, **kwargs)

logging.info(
logger.info(
"Processes {}/{} ({:.0f}%) of train data".format(
len(train_loader.sampler),
len(train_loader.dataset),
100.0 * len(train_loader.sampler) / len(train_loader.dataset),
)
)

logging.info(
logger.info(
"Processes {}/{} ({:.0f}%) of test data".format(
len(test_loader.sampler),
len(test_loader.dataset),
Expand Down Expand Up @@ -91,7 +93,7 @@ def train(args):
average_gradients(model)
optimizer.step()
if batch_idx % args.log_interval == 0:
logging.info(
logger.info(
"Train Epoch: {} [{}/{} ({:.0f}%)] Loss: {:.6f}".format(
epoch,
batch_idx * len(data),
Expand Down
7 changes: 5 additions & 2 deletions computer-vision/hm-imagebind/src/main.py
Original file line number Diff line number Diff line change
Expand Up @@ -10,6 +10,7 @@
from lancedb.pydantic import LanceModel, Vector
from lancedb.table import Table

logger = logging.getLogger(__name__)
EMBEDDINGS = get_registry().get("imagebind").create()
DATA_DIR = Path("data")

Expand Down Expand Up @@ -54,7 +55,7 @@ def download_file(client: httpx.Client, url: str, is_audio: bool = True) -> Path
if response.status_code == 200:
with open(local_file_path, "wb") as file:
file.write(response.content)
logging.info(f"Downloaded file: {local_file_path}")
logger.info(f"Downloaded file: {local_file_path}")
return local_file_path
else:
raise RuntimeError(f"Download failed: {response}")
Expand Down Expand Up @@ -164,5 +165,7 @@ def main() -> None:


if __name__ == "__main__":
    # Timestamped logging for the ImageBind demo; the duplicated stale
    # basicConfig(level=...) line from the diff rendering is dropped since
    # only the first basicConfig call configures the root logger.
    logging.basicConfig(
        level=logging.INFO, format="%(asctime)s - %(levelname)s - %(message)s"
    )
    main()
Loading

0 comments on commit dc66aff

Please sign in to comment.