diff --git a/.github/workflows/test-examples.yml b/.github/workflows/test-examples.yml index c99e5c27..2b0857f0 100644 --- a/.github/workflows/test-examples.yml +++ b/.github/workflows/test-examples.yml @@ -21,6 +21,7 @@ jobs: - integrations/model-evaluation/gradio/notebooks/Gradio_and_Comet.ipynb - integrations/model-evaluation/gradio/notebooks/Logging_Model_Inferences_with_Comet_and_Gradio.ipynb - integrations/model-optimization/ray-tune/notebooks/Comet_and_Ray.ipynb + - integrations/model-training/fastai/notebooks/fastai_hello_world.ipynb - integrations/model-training/hugging_face/notebooks/Comet_with_Hugging_Face_Trainer.ipynb - integrations/model-training/keras/notebooks/Comet_with_Keras.ipynb - integrations/model-training/lightgbm/notebooks/Comet_and_LightGBM.ipynb @@ -99,6 +100,8 @@ jobs: os: [ubuntu-latest] python-version: ["3.10"] example: + - {script: "integrations/model-optimization/optuna/optuna-hello-world/optuna-hello-world.py", arg: ""} + - {script: "integrations/model-training/fastai/fastai-hello-world/fastai_hello_world.py", arg: ""} - {script: "integrations/model-training/hugging_face/transformers-distilbert-fine-tuning/transformers-distilbert-fine-tuning.py", arg: ""} - {script: "integrations/model-training/keras/keras-mnist-dnn/keras-mnist-dnn.py", arg: ""} - {script: "integrations/model-training/mlflow/mlflow-hello-world/mlflow-hello-world.py", arg: "run"} @@ -112,7 +115,6 @@ jobs: - {script: "integrations/workflow-orchestration/metaflow/metaflow-hello-world/helloworld.py", arg: "run"} - {script: "integrations/workflow-orchestration/metaflow/metaflow-model-evaluation/metaflow-model-evaluation.py", arg: "run --max-workers 1 --n_samples 100"} - {script: "integrations/workflow-orchestration/metaflow/metaflow-regression/metaflow-regression-example.py", arg: "run"} - - {script: "integrations/model-optimization/optuna/optuna-hello-world/optuna-hello-world.py", arg: ""} env: SCRIPT_TO_TEST: ${{ matrix.example.script }} steps: diff --git a/fastai/README.md b/fastai/README.md new file mode 100644 index 00000000..769e20eb --- /dev/null +++ b/fastai/README.md @@ -0,0 +1 @@ +All of the fastai examples has been moved here: https://github.com/comet-ml/comet-examples/tree/master/integrations/model-training/fastai/. diff --git a/fastai/train-example.py b/fastai/train-example.py deleted file mode 100644 index 532d6146..00000000 --- a/fastai/train-example.py +++ /dev/null @@ -1,44 +0,0 @@ -## MNIST Example in fastai - -## Note: this uses fastai version 1.0.38 -## pip install fastai==1.0.38 - -from comet_ml import Experiment - -import fastai -import fastai.vision -import glob -import os - -# The model, also known as wrn_22: -model = fastai.vision.models.WideResNet(num_groups=3, - N=3, - num_classes=10, - k=6, - drop_p=0.) 
- -## Get the MNIST_TINY dataset: -path = fastai.datasets.untar_data(fastai.datasets.URLs.MNIST_TINY) -print("data path:", path) - -## Still too many for a CPU, so we trim it down to 10 in each category: -dirname = os.path.dirname(path) -for group in ["mnist_tiny/train/3/*.png", - "mnist_tiny/train/7/*.png", - "mnist_tiny/valid/3/*.png", - "mnist_tiny/valid/7/*.png"]: - for filename in glob.glob(os.path.join(dirname, group))[10:]: - os.remove(filename) - -experiment = Experiment(project_name="fastai") - -## Now we get the image data from the folder: -data = fastai.vision.ImageDataBunch.from_folder(path, bs=10) # bs: batch size - -if data.device.type == 'cpu': - learn = fastai.basic_train.Learner(data, model, metrics=fastai.metrics.accuracy) -else: # GPU: - learn = fastai.basic_train.Learner(data, model, metrics=fastai.metrics.accuracy).to_fp16() - -with experiment.train(): - learn.fit_one_cycle(10, 3e-3, wd=0.4, div_factor=10, pct_start=0.5) diff --git a/integrations/llm/langchain/notebooks/Comet_with_Langchain.ipynb b/integrations/llm/langchain/notebooks/Comet_with_Langchain.ipynb index 75870564..783a2607 100644 --- a/integrations/llm/langchain/notebooks/Comet_with_Langchain.ipynb +++ b/integrations/llm/langchain/notebooks/Comet_with_Langchain.ipynb @@ -47,7 +47,7 @@ "metadata": {}, "outputs": [], "source": [ - "%pip install -U comet_llm \"langchain>=0.0.346\" openai google-search-results numexpr" + "%pip install -U comet_llm \"langchain>=0.0.346\" openai numexpr" ] }, { @@ -86,7 +86,7 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "You will need an [OpenAI API Key](https://platform.openai.com/account/api-keys) and a [SerpAPI API Key](https://serpapi.com/dashboard) to run the following examples" + "You will need an [OpenAI API Key](https://platform.openai.com/account/api-keys) to run the following examples" ] }, { @@ -98,8 +98,7 @@ "import os\n", "\n", "os.environ[\"OPENAI_API_KEY\"] = \"...\"\n", - "# os.environ[\"OPENAI_ORGANIZATION\"] = \"...\"\n", - "os.environ[\"SERPAPI_API_KEY\"] = \"...\"" + "# os.environ[\"OPENAI_ORGANIZATION\"] = \"...\"" ] }, { @@ -121,9 +120,11 @@ "from langchain.llms import OpenAI\n", "\n", "comet_callback = CometTracer()\n", - "llm = OpenAI(temperature=0.9, callbacks=[comet_callback], verbose=True)\n", - "\n", - "llm_result = llm.generate([\"Tell me a joke\", \"Tell me a poem\", \"Tell me a fact\"] * 3)\n", + "llm = OpenAI(temperature=0.9, verbose=True)\n", + "llm_result = llm.generate(\n", + " [\"Tell me a joke\", \"Tell me a poem\", \"Tell me a fact\"] * 3,\n", + " callbacks=[comet_callback],\n", + ")\n", "print(\"LLM result\", llm_result)\n", "comet_callback.flush()" ] @@ -149,18 +150,16 @@ "comet_callback = CometTracer()\n", "callbacks = [comet_callback]\n", "\n", - "llm = OpenAI(temperature=0.9, callbacks=callbacks, verbose=True)\n", + "llm = OpenAI(temperature=0.9, verbose=True)\n", "\n", "template = \"\"\"You are a playwright. 
Given the title of play, it is your job to write a synopsis for that title.\n", "Title: {title}\n", "Playwright: This is a synopsis for the above play:\"\"\"\n", "prompt_template = PromptTemplate(input_variables=[\"title\"], template=template)\n", - "synopsis_chain = LLMChain(\n", - " llm=llm, prompt=prompt_template, callbacks=callbacks, verbose=True\n", - ")\n", + "synopsis_chain = LLMChain(llm=llm, prompt=prompt_template, verbose=True)\n", "\n", "test_prompts = [{\"title\": \"Documentary about Bigfoot in Paris\"}]\n", - "print(synopsis_chain.apply(test_prompts))\n", + "print(synopsis_chain.apply(test_prompts, callbacks=callbacks))\n", "comet_callback.flush()" ] }, @@ -177,25 +176,24 @@ "metadata": {}, "outputs": [], "source": [ - "from langchain.agents import initialize_agent, load_tools\n", + "from langchain.agents import AgentType, initialize_agent, load_tools\n", "from langchain.callbacks.tracers.comet import CometTracer\n", "from langchain.llms import OpenAI\n", "\n", "comet_callback = CometTracer()\n", "callbacks = [comet_callback]\n", "\n", - "llm = OpenAI(temperature=0.9, callbacks=callbacks, verbose=True)\n", + "llm = OpenAI(temperature=0.9, verbose=True)\n", "\n", - "tools = load_tools([\"serpapi\", \"llm-math\"], llm=llm, callbacks=callbacks, verbose=True)\n", + "tools = load_tools([\"llm-math\"], llm=llm, verbose=True)\n", "agent = initialize_agent(\n", " tools,\n", " llm,\n", - " agent=\"zero-shot-react-description\",\n", - " callbacks=callbacks,\n", + " AgentType.ZERO_SHOT_REACT_DESCRIPTION,\n", " verbose=True,\n", ")\n", "agent.run(\n", - " \"Who is Leo DiCaprio's girlfriend? What is her current age raised to the 0.43 power?\",\n", + " \"What is 2 raised to .123243 power?\",\n", " callbacks=callbacks,\n", ")\n", "comet_callback.flush()" @@ -218,7 +216,7 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": "3.10.12" + "version": "3.11.3" } }, "nbformat": 4, diff --git a/integrations/model-evaluation/gradio/notebooks/Logging_Model_Inferences_with_Comet_and_Gradio.ipynb b/integrations/model-evaluation/gradio/notebooks/Logging_Model_Inferences_with_Comet_and_Gradio.ipynb index a5c4763a..9bb32ffa 100644 --- a/integrations/model-evaluation/gradio/notebooks/Logging_Model_Inferences_with_Comet_and_Gradio.ipynb +++ b/integrations/model-evaluation/gradio/notebooks/Logging_Model_Inferences_with_Comet_and_Gradio.ipynb @@ -37,11 +37,12 @@ "cell_type": "code", "execution_count": null, "metadata": { - "id": "c-Dq42vt_Vbr" + "id": "c-Dq42vt_Vbr", + "scrolled": true }, "outputs": [], "source": [ - "%pip install comet_ml torch transformers gradio shap" + "%pip install comet_ml torch transformers \"gradio>=4.0\" shap" ] }, { @@ -134,9 +135,9 @@ " workspace=workspace, project_name=project_name\n", " )\n", " experiment.log_other(\"Created from\", \"gradio-inference\")\n", - " \n", + "\n", " message = f\"Started Experiment: [{experiment.name}]({experiment.url})\"\n", - " \n", + "\n", " return (experiment, message)\n", "\n", " except Exception as e:\n", @@ -167,7 +168,7 @@ " input_text = gr.Textbox(label=\"Input Text\", lines=5, interactive=True)\n", " submit_btn = gr.Button(\"Submit\")\n", "\n", - " output = gr.HTML(interactive=True)\n", + " output = gr.HTML()\n", "\n", " start_experiment_btn.click(\n", " start_experiment, outputs=[experiment, experiment_status]\n", @@ -220,9 +221,9 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": "3.9.1" + "version": "3.10.12" } }, "nbformat": 4, - "nbformat_minor": 1 + 
"nbformat_minor": 4 } diff --git a/integrations/model-optimization/ray-tune/notebooks/Comet_and_Ray.ipynb b/integrations/model-optimization/ray-tune/notebooks/Comet_and_Ray.ipynb index f95019c5..f8a4369d 100644 --- a/integrations/model-optimization/ray-tune/notebooks/Comet_and_Ray.ipynb +++ b/integrations/model-optimization/ray-tune/notebooks/Comet_and_Ray.ipynb @@ -103,7 +103,7 @@ "import numpy as np\n", "\n", "import ray\n", - "from ray import tune, air\n", + "from ray import tune, air, train\n", "from ray.air.integrations.comet import CometLoggerCallback\n", "from ray.air.config import ScalingConfig\n", "\n", @@ -111,7 +111,7 @@ "def train_function(config):\n", " for i in range(30):\n", " loss = config[\"mean\"] + config[\"sd\"] * np.random.randn()\n", - " tune.report(loss=loss)\n", + " train.report({\"loss\": loss})\n", "\n", "\n", "def tune_function():\n", @@ -240,7 +240,7 @@ "}\n", "\n", "\n", - "stop = {\"training_iteration\": 5}\n", + "stop = {\"training_iteration\": 10}\n", "\n", "tuner = tune.Tuner(\n", " \"PPO\",\n", @@ -260,13 +260,6 @@ "\n", "check_learning_achieved(results, 100)" ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [] } ], "metadata": { @@ -289,7 +282,7 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": "3.11.3" + "version": "3.10.12" } }, "nbformat": 4, diff --git a/integrations/model-training/fastai/fastai-hello-world/README.md b/integrations/model-training/fastai/fastai-hello-world/README.md new file mode 100644 index 00000000..d9a4c15a --- /dev/null +++ b/integrations/model-training/fastai/fastai-hello-world/README.md @@ -0,0 +1,30 @@ +# FastAI integration with Comet.ml + +[fastai](https://github.com/fastai/fastai) is a deep learning library which provides practitioners with high-level components that can quickly and easily provide state-of-the-art results in standard deep learning domains, and provides researchers with low-level components that can be mixed and matched to build new approaches. + +Instrument fastai with Comet to start managing experiments, create dataset versions and track hyperparameters for faster and easier reproducibility and collaboration. + +## Documentation + +For more information on using and configuring the fastai integration, see: [https://www.comet.com/docs/v2/integrations/ml-frameworks/fastai/](https://www.comet.com/docs/v2/integrations/ml-frameworks/fastai/?utm_source=comet-examples&utm_medium=referral&utm_campaign=github_repo_2023&utm_content=fastai) + +## See it + +Take a look at this [public Comet Project](https://www.comet.com/examples/comet-examples-fastai-hello-world/view/new/panels?utm_source=comet-examples&utm_medium=referral&utm_campaign=github_repo_2023&utm_content=fastai). 
+
+## Setup
+
+Install dependencies
+
+```bash
+python -m pip install -r requirements.txt
+```
+
+## Run the example
+
+This example fine-tunes a pre-trained resnet18 model on the MNIST_TINY dataset for 5 epochs:
+
+
+```bash
+python fastai_hello_world.py
+```
diff --git a/integrations/model-training/fastai/fastai-hello-world/fastai_hello_world.py b/integrations/model-training/fastai/fastai-hello-world/fastai_hello_world.py
new file mode 100644
index 00000000..0cf64ed4
--- /dev/null
+++ b/integrations/model-training/fastai/fastai-hello-world/fastai_hello_world.py
@@ -0,0 +1,39 @@
+# coding: utf-8
+import comet_ml
+
+from fastai.vision.all import (
+    Categorize,
+    Datasets,
+    GrandparentSplitter,
+    IntToFloatTensor,
+    PILImageBW,
+    ToTensor,
+    URLs,
+    error_rate,
+    get_image_files,
+    parent_label,
+    resnet18,
+    untar_data,
+    vision_learner,
+)
+
+EPOCHS = 5
+
+comet_ml.init(project_name="comet-examples-fastai-hello-world")
+experiment = comet_ml.Experiment()
+
+path = untar_data(URLs.MNIST_TINY)
+
+items = get_image_files(path)
+tds = Datasets(
+    items,
+    [PILImageBW.create, [parent_label, Categorize()]],
+    splits=GrandparentSplitter()(items),
+)
+dls = tds.dataloaders(after_item=[ToTensor(), IntToFloatTensor()])
+
+learn = vision_learner(dls, resnet18, pretrained=True, metrics=error_rate)
+
+learn.fit_one_cycle(EPOCHS)
+
+experiment.end()
diff --git a/integrations/model-training/fastai/fastai-hello-world/requirements.txt b/integrations/model-training/fastai/fastai-hello-world/requirements.txt
new file mode 100644
index 00000000..10f5d232
--- /dev/null
+++ b/integrations/model-training/fastai/fastai-hello-world/requirements.txt
@@ -0,0 +1,2 @@
+comet_ml
+fastai
diff --git a/integrations/model-training/fastai/notebooks/fastai_hello_world.ipynb b/integrations/model-training/fastai/notebooks/fastai_hello_world.ipynb
new file mode 100644
index 00000000..47619542
--- /dev/null
+++ b/integrations/model-training/fastai/notebooks/fastai_hello_world.ipynb
@@ -0,0 +1,304 @@
+{
+ "cells": [
+  {
+   "cell_type": "markdown",
+   "metadata": {
+    "id": "8LE43szDmKdp"
+   },
+   "source": [
+    ""
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "[Comet](https://www.comet.com/site/products/ml-experiment-tracking/?utm_source=comet-examples&utm_medium=referral&utm_campaign=github_repo_2023&utm_content=fastai) is an MLOps Platform that is designed to help Data Scientists and Teams build better models faster! Comet provides tooling to Track, Explain, Manage, and Monitor your models in a single place! It works with Jupyter Notebooks and Scripts and most importantly it's 100% free to get started!\n",
+    "\n",
+    "[fastai](https://github.com/fastai/fastai) is a deep learning library which provides practitioners with high-level components that can quickly and easily provide state-of-the-art results in standard deep learning domains, and provides researchers with low-level components that can be mixed and matched to build new approaches.\n",
+    "\n",
+    "[Find more information about our integration with FastAI](https://www.comet.ml/docs/v2/integrations/ml-frameworks/fastai/?utm_source=comet-examples&utm_medium=referral&utm_campaign=github_repo_2023&utm_content=fastai)\n",
+    "\n",
+    "Curious about how Comet can help you build better models, faster? 
Find out more about [Comet](https://www.comet.com/site/products/ml-experiment-tracking/?utm_source=comet-examples&utm_medium=referral&utm_campaign=github_repo_2023&utm_content=fastai) and our [other integrations](https://www.comet.com/docs/v2/integrations/overview/?utm_source=comet-examples&utm_medium=referral&utm_campaign=github_repo_2023&utm_content=fastai)\n",
+    "\n",
+    "Get a preview for what's to come. Check out a completed experiment created from this notebook [here](https://www.comet.com/examples/comet-examples-fastai-hello-world-notebook/95862aa394984c748750ac491e02b83a?utm_source=comet-examples&utm_medium=referral&utm_campaign=github_repo_2023&utm_content=fastai)."
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "# 0. Installation"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "colab": {
+     "base_uri": "https://localhost:8080/"
+    },
+    "id": "xiNzBttOmKdr",
+    "outputId": "7929b2f0-5961-4f11-92cd-35fc6f8a5acd"
+   },
+   "outputs": [],
+   "source": [
+    "%pip install -U fastai comet_ml"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {
+    "id": "CMD8itjWmKdt"
+   },
+   "source": [
+    "# 1. Imports\n",
+    "\n",
+    "First, we import the comet_ml library, followed by the fastai library, and others if needed. The only requirement here is that **comet_ml be imported first**. If you forget, just restart the kernel, and import them in the proper order."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "id": "62Tk_v45mKdu"
+   },
+   "outputs": [],
+   "source": [
+    "## Import this first:\n",
+    "import comet_ml\n",
+    "\n",
+    "from fastai.vision.all import *"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "id": "vnNnawcJmKdv"
+   },
+   "outputs": [],
+   "source": [
+    "EPOCHS = 5"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {
+    "id": "rBpIQV3hmKdv"
+   },
+   "source": [
+    "# 2. Comet Experiment"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "colab": {
+     "base_uri": "https://localhost:8080/"
+    },
+    "id": "ucE282OimKdw",
+    "outputId": "be595e74-99b0-4068-e043-259e5aef96a8"
+   },
+   "outputs": [],
+   "source": [
+    "# Create Comet Experiment\n",
+    "comet_ml.init(project_name=\"comet-examples-fastai-hello-world-notebook\")\n",
+    "experiment = comet_ml.Experiment()"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {
+    "id": "9dDHu5wkmKdw"
+   },
+   "source": [
+    "# 3. Dataset"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {
+    "id": "ZII9-P-NmKdx"
+   },
+   "source": [
+    "As a simple demo, we'll start with the MNIST_TINY dataset. In fastai, we use the `untar_data` function to download and uncompress the data:"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "colab": {
+     "base_uri": "https://localhost:8080/",
+     "height": 38
+    },
+    "id": "sUpCpfH7mKdx",
+    "outputId": "cabd445c-4102-4872-c16e-c9af435a83db"
+   },
+   "outputs": [],
+   "source": [
+    "path = untar_data(URLs.MNIST_TINY)"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {
+    "id": "Giiv4pnymKdy"
+   },
+   "source": [
+    "The path returned by the `untar_data` function shows where the data was saved.\n",
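+    "\n",
+    "The next cell builds the `DataLoaders` by hand: `get_image_files` collects the image paths, `parent_label` and `Categorize` turn each image's parent folder name (`3` or `7`) into its label, `GrandparentSplitter` uses the grandparent folder (`train` or `valid`) to create the train/validation split, and `ToTensor` plus `IntToFloatTensor` convert the images into float tensors:"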
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "colab": {
+     "base_uri": "https://localhost:8080/"
+    },
+    "id": "h9Qa5t3fmKdz",
+    "outputId": "a9d364d4-ad84-4f25-82c3-ec585d884ea6"
+   },
+   "outputs": [],
+   "source": [
+    "items = get_image_files(path)\n",
+    "tds = Datasets(\n",
+    "    items,\n",
+    "    [PILImageBW.create, [parent_label, Categorize()]],\n",
+    "    splits=GrandparentSplitter()(items),\n",
+    ")\n",
+    "dls = tds.dataloaders(after_item=[ToTensor(), IntToFloatTensor()])"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {
+    "id": "8eEbaCh6mKd0"
+   },
+   "source": [
+    "# 4. Model\n",
+    "\n",
+    "In this example, we will use a pre-trained resnet18 from fastai."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "colab": {
+     "base_uri": "https://localhost:8080/"
+    },
+    "id": "R8JJhlrumKd0",
+    "outputId": "f7b63acc-c55c-4367-8dd7-3d934739215c"
+   },
+   "outputs": [],
+   "source": [
+    "learn = vision_learner(dls, resnet18, pretrained=True, metrics=error_rate)"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {
+    "id": "q-LxaKnwmKd2"
+   },
+   "source": [
+    "# 5. Training\n",
+    "\n",
+    "With the `Learner` created, everything is in place to train the model."
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {
+    "id": "GyIBFMOZmKd3"
+   },
+   "source": [
+    "Comet automatically logs the training details when we call `fit` or `fit_one_cycle`:"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "colab": {
+     "base_uri": "https://localhost:8080/",
+     "height": 206
+    },
+    "id": "uNzBOFIUmKd3",
+    "outputId": "174f0f33-1dcc-42e4-e610-d449545d8080"
+   },
+   "outputs": [],
+   "source": [
+    "learn.fit_one_cycle(EPOCHS)"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {
+    "id": "hKIrsCOGmKd3"
+   },
+   "source": [
+    "# 6. Finish"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {
+    "id": "do2WvU2AmKd3"
+   },
+   "source": [
+    "Finally, we are ready to tell Comet that our experiment is complete. You don't need to do this in a script that ends, but in Jupyter we need to indicate that the experiment is finished. We do that with the `experiment.end()` method:"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "colab": {
+     "base_uri": "https://localhost:8080/"
+    },
+    "id": "HDteaKMtmKd3",
+    "outputId": "3b9c0829-97d0-4521-ca34-b99fedd90866"
+   },
+   "outputs": [],
+   "source": [
+    "experiment.end()"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {
+    "id": "hx2bmd3dmKd4"
+   },
+   "source": [
+    "That's it! 
If you have any questions, please visit us on https://cometml.slack.com" + ] + } + ], + "metadata": { + "colab": { + "provenance": [] + }, + "kernelspec": { + "display_name": "Python 3 (ipykernel)", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.10.12" + } + }, + "nbformat": 4, + "nbformat_minor": 4 +} diff --git a/notebooks/fastai.ipynb b/notebooks/fastai.ipynb deleted file mode 100644 index b68e479d..00000000 --- a/notebooks/fastai.ipynb +++ /dev/null @@ -1,732 +0,0 @@ -{ - "cells": [ - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "\n", - "\n", - "# MNIST Example in fastai\n", - "\n", - "This Jupyter notebook demonstrates using the **fastai** (version 1.0.38) deep learning framework with [comet.ml](https://www.comet.ml).\n", - "\n", - "In this example, we load a fastai model, called WideResNet, and train it on a small part of the MNIST_TINY dataset.\n", - "\n", - "fastai is a framework built on top of the torch Python library. \n", - "\n", - "To find out more, you might find these links helpful:\n", - "\n", - "* http://www.fast.ai/\n", - "* http://docs.fast.ai/\n", - "* http://www.fast.ai/2018/08/10/fastai-diu-imagenet/\n", - "* https://en.wikipedia.org/wiki/MNIST_database\n", - "* http://jupyter.org/\n", - "\n", - "Let's get started!\n", - "\n", - "# 0. Installation\n", - "\n", - "This example uses fastai version 1.0.38. You can install a specific version of fastai (which should also install the correct version of torch) with this command at the terminal:\n", - "\n", - "```\n", - "python -m pip install fastai==1.0.38\n", - "```\n", - "\n", - "Once you have fastai and torch installed, we are ready to import them." - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## 1. Imports\n", - "\n", - "First, we import the comet_ml library, followed by the fastai library, and others if needed. The only requirement here is that **comet_ml be imported first**. If you forget, just restart the kernel, and import them in the proper order." - ] - }, - { - "cell_type": "code", - "execution_count": 1, - "metadata": {}, - "outputs": [], - "source": [ - "## Import this first:\n", - "from comet_ml import Experiment\n", - "\n", - "## Import the deep learning framework:\n", - "import fastai\n", - "import fastai.vision\n", - "\n", - "## Additional libraries for this example:\n", - "import glob\n", - "import os" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## 2. Dataset" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "As a simple demo, we'll start with the the MNIST_TINY dataset, In fastai, we use the `datasets.untar_data` function to download and uncompress the data:" - ] - }, - { - "cell_type": "code", - "execution_count": 2, - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "data path: /home/dblank/.fastai/data/mnist_tiny\n" - ] - } - ], - "source": [ - "path = fastai.datasets.untar_data(fastai.datasets.URLs.MNIST_TINY)\n", - "print(\"data path:\", path)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "The path returned by the untar_data function shows where the data was saved. 
Using the shell `!` magic, we can explore the dataset in more detail:" - ] - }, - { - "cell_type": "code", - "execution_count": 3, - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "715.png 7463.png 8360.png 9141.png\t9303.png\r\n", - "7288.png 7626.png 8957.png 9192.png\t9637.png\r\n" - ] - } - ], - "source": [ - "! ls ~/.fastai/data/mnist_tiny/train/3/" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "By poking around here, we can see that there are two categories `3` and `7` and each category has about 2,000 examples." - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "That is still too many for a CPU, so we trim it down to 10 in each category:" - ] - }, - { - "cell_type": "code", - "execution_count": 4, - "metadata": {}, - "outputs": [], - "source": [ - "dirname = os.path.dirname(path)\n", - "for group in [\"mnist_tiny/train/3/*.png\",\n", - " \"mnist_tiny/train/7/*.png\",\n", - " \"mnist_tiny/valid/3/*.png\",\n", - " \"mnist_tiny/valid/7/*.png\"]:\n", - " for filename in glob.glob(os.path.join(dirname, group))[10:]:\n", - " os.remove(filename)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Now, we check again to see if we have just 10 images:" - ] - }, - { - "cell_type": "code", - "execution_count": 5, - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "715.png 7463.png 8360.png 9141.png\t9303.png\r\n", - "7288.png 7626.png 8957.png 9192.png\t9637.png\r\n" - ] - } - ], - "source": [ - "! ls ~/.fastai/data/mnist_tiny/train/3/" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "To see these images, we can use some tools from the IPython library:" - ] - }, - { - "cell_type": "code", - "execution_count": 6, - "metadata": {}, - "outputs": [], - "source": [ - "from IPython.display import Image, display" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Let's see what the training set looks like for the 3 and 7 categories:" - ] - }, - { - "cell_type": "code", - "execution_count": 7, - "metadata": {}, - "outputs": [ - { - "data": { - "image/png": "iVBORw0KGgoAAAANSUhEUgAAABwAAAAcCAAAAABXZoBIAAAA2klEQVR4nGNgGLRAeN7n///+/9utgU3y6L9//x7ve/fvjQimXNff9/IcbEzS7/45YMhpfHtnAzb8BRbJ+CNiYDrjHxZJNk4wpfni32I2HC7m3PzvjjRWGT3Xrmv//jlhk+KZ8OsfCOxVwCLZ9u/r0lrTgCf/yrEZGqgEoiL/vebC4SAGBoHH/3xwSjLM/7cPt6Tuvy/yOCWl/v1zxSlZ9++jELoYC9SNPHf+daPLMe85zMjAwMRptObfZ010SZe//3Zs3LgPGEJ/QjGt8v0ACrvLj5Y74nQN1QAAP0VaWbYzqAQAAAAASUVORK5CYII=\n", - "text/plain": [ - "" - ] - }, - "metadata": {}, - "output_type": "display_data" - }, - { - "data": { - "image/png": "iVBORw0KGgoAAAANSUhEUgAAABwAAAAcCAAAAABXZoBIAAAA8UlEQVR4nGNgGJzA3Nra6+/fv/8XWVuroMsFffwNBn+B+LgumtyT3wjJ32ekUMwE6Xt0ZbqQkP2VK7d+/1ZFllQ5vv3LVWsopxhNkkFXNhIml/sFXRIOWPPe//6cz4ZFRtLPr/z37/fZ2HRxrwa79mUIppTPoeNQrzw9JIsumQwMn7uHjmS2H7r59621NKpk0u/pbXZglum537+LUSXlfHhgTIOHvy9qYPcNEFz7/dsSRUBNFY9k0n5xGDPxLbpk1OfbM0VAwOgaUK6NE9WinPewKAPGUAC6M5LmwiQ/YMgxMAhZWqYvOHDH0tIUpz+oAwC9BpCwP7H6swAAAABJRU5ErkJggg==\n", - "text/plain": [ - "" - ] - }, - "metadata": {}, - "output_type": "display_data" - }, - { - "data": { - "image/png": 
"iVBORw0KGgoAAAANSUhEUgAAABwAAAAcCAAAAABXZoBIAAAA6klEQVR4nGNgGLzAuLDw/N//f++lokvIlj58+O4PEPz98+fnQzTJUIg4lMhHlfT59ufPh/tFISEh3UDJOjSt5X/W2sPNwOWqDX//fujBKiMcuhhoZzI2KcttF8EOqpTE1JUNd+1ZAXTJlUheOc2PJrkKGDh/N/XY9zz9+/f/CjRJpWt//+yyADJEN/75+9kIVVItwsKcB25BK7qdUAb7JqCkObqkP4TRDnTVGVTfWP79ewBkKjCEgA7KQtXI1vTn79bQ1Idgr3gzo7lWFu7Pr2Xy6IHAtRAi+el+JboUCBS8BUru88ImRV0AABG0pfAQeL/bAAAAAElFTkSuQmCC\n", - "text/plain": [ - "" - ] - }, - "metadata": {}, - "output_type": "display_data" - }, - { - "data": { - "image/png": "iVBORw0KGgoAAAANSUhEUgAAABwAAAAcCAAAAABXZoBIAAAA4ElEQVR4nGNgGHDQNvHf/393lJmwSNlP/PcXDFSwSMb9+ffr5kcckgyBfT7sDbgkgaAGKLeYC7uc2fO/f4/xYJfjevn37wF7rFJaSXf//n2ti0VGIe/qh79ArzySx5STmw/yIMif82QxJDf++/fv547qA8AQcgJyHVAkpfr25NoxMEgCtW4Bck2wuQkmiRVkASVTUETspGAs5tV//24VRJH81wdjdf/9+x7Nun8/94RJOIWFdZ779/ejP5pFIN89+Qr257E2dFdEnIfE8d+TE+0w3cge3gIEMtxsuDxBPwAAMXt8orqc7JQAAAAASUVORK5CYII=\n", - "text/plain": [ - "" - ] - }, - "metadata": {}, - "output_type": "display_data" - }, - { - "data": { - "image/png": "iVBORw0KGgoAAAANSUhEUgAAABwAAAAcCAAAAABXZoBIAAAAuUlEQVR4nGNgGKqAqfDfv///WjmwyTEW/AWDLGySy4ASG4Ku/t2JRS7g999PKfwMhX+/OWFKuv06oA6kCv8+FcCiVYYXSGg8/luO08VJf/9m4JLjuPl3CS45roV/r7NgERddsrHA//Df3ezYNEVCQgC7hfwT583rdp23mROnWxkYrvrgkzzOjEfyLytOOc/3L7B5BQS4Y9/+zcchZ3Xp799urKaqTLv97u0MOzas2o7/7XHlw+1QEgEAuwlLnI31qloAAAAASUVORK5CYII=\n", - "text/plain": [ - "" - ] - }, - "metadata": {}, - "output_type": "display_data" - }, - { - "data": { - "image/png": "iVBORw0KGgoAAAANSUhEUgAAABwAAAAcCAAAAABXZoBIAAAA00lEQVR4nGNgGJRAqnD//79/3xphlbT88+fvnz9/3i62xCKpc/jwEbcUoIqXS42xmy0csvDtnz+fonHZbQSU/eOP02nH/v79L41VxiCs+RvQYiySG44efQxy8p9WNkzJh3/A/vk5EZuZkyCS93G5xv/A379/1+GS5dkK1L6YE4es8EbsroUA409YJP1h4bLyz98sdMleWLjI/v2/Al0y7u+TRDDD58/f5RiWWTx/f7S84ujRV3/+WmE6peoZJMb/fJDA4lDJgr37CvYWFNjj8gm1AAB9dnOy21nUyQAAAABJRU5ErkJggg==\n", - "text/plain": [ - "" - ] - }, - "metadata": {}, - "output_type": "display_data" - }, - { - "data": { - "image/png": "iVBORw0KGgoAAAANSUhEUgAAABwAAAAcCAAAAABXZoBIAAAA50lEQVR4nGNgGJwgraNDPLSj4/Hxf/8/duihynEc+4sEnhiiSLb+RQGPkWVtXoKErl7ftq1rjfM6IHMGkuSsFfpAICoG5kwBSloiSQpxIdhLP/79e0sEq6uZlwH1/bHAKme+Ayj3swSbFHvtC6Dc0zIsUkx1+0Bu/qqARU7xAMSPv7faY0pmw4PgWwZWyce9vfuB1A8DDCvLawxZgH6p//H37xomrH5hgISQIS5Jd6BkEw45lj6gpDKqWN5JqCsmAOUeSaDI5X7/+yoGSKtdAHnGFVVjGjDg/rx9+/YzSG65DC73UAcAAI7mnZmOJtcJAAAAAElFTkSuQmCC\n", - "text/plain": [ - "" - ] - }, - "metadata": {}, - "output_type": "display_data" - }, - { - "data": { - "image/png": "iVBORw0KGgoAAAANSUhEUgAAABwAAAAcCAAAAABXZoBIAAAA6klEQVR4nGNgGGjAKlH2+f/fZmxS2pab/rw+d/7NYyNMuZRv37en6DAwNP7ZyY4mpXb+yyUtMEvu8R8xNMnpfy7BhJoxJBd914Ix3f52oklaO8OZbn/QJZEAXsmCPwm4JXdgOAgC1LrCJFb9Pc0NF5guiGzdn79/LlghBP7GINjCi7b//ZOCZNT8U5LIJpf+ua2K4IX9OSuKJMlc+kcXweM++PcyL5Js3l89JJ7gnj+lCMdLbULWCXTG7T/nLKFsh1V/ViGbw8CgeuDvl/OFEy6cP3/+x981qHJAexXn/QF58c+Jyd4cDAMJALQqXDBHGWEfAAAAAElFTkSuQmCC\n", - "text/plain": [ - "" - ] - }, - "metadata": {}, - "output_type": "display_data" - }, - { - "data": { - "image/png": "iVBORw0KGgoAAAANSUhEUgAAABwAAAAcCAAAAABXZoBIAAAAwUlEQVR4nGNgGEAgFhw8f+XVqw2C2CTP/YOA59hk5/17t3Xr1k///h3CIslSJQwkJU79+xuI02rNf/8m4ZRs+/cvEZec+a9/z9mwynCbFnz4968Nu7Y1IK884cAuuQwk+asduyRn5urzQNlYXA7icH/z7yYuSQYGv3+/7HBKyv/7Nw2f5BYsolLMIMrp37+JGHIGj/5NluTjc77575sGhmT8v3///4FxB6apWl8gkn+xyAEjqyKo/8OKObo4nUofAACkGGFaoVnXtwAAAABJRU5ErkJggg==\n", - "text/plain": [ - "" - ] - }, - "metadata": {}, - "output_type": "display_data" - }, - { - "data": { - "image/png": 
"iVBORw0KGgoAAAANSUhEUgAAABwAAAAcCAAAAABXZoBIAAAAz0lEQVR4nM2RIReCMBSF+RFkrVCH2f/gqp2qVI2arawTxUyFTLVjBOqMbLiHyg7vHZrBu7Bzds/HfffhOP8pASq00qoVPvK46uyRG+S2hnnKRwm3VuflxFwwxjzPNfcW6JqTYH8VRALIXpHcoxxz75hb34Y8OJmLvKT6cs0Oc1yPXBuQafaxUT6wOTE/H6iApUXeclPDVnggC5stpXOmIwyLnuxOoC8y5SGEJD/MyIaiole2aTkZaPI/1RUNm0hLXkgRj7E4TrumPrHZGr/SC2Yysve3/OfsAAAAAElFTkSuQmCC\n", - "text/plain": [ - "" - ] - }, - "metadata": {}, - "output_type": "display_data" - }, - { - "data": { - "image/png": "iVBORw0KGgoAAAANSUhEUgAAABwAAAAcCAAAAABXZoBIAAAAxElEQVR4nGNgGJJA4l8GO4TlNoEXXZJ5379tsSCG4JW/hRhaU7/+/Xl+asn+B/9eSGIarHP6799/f4HYE5u1grH1O4/++5eG012T/77BKWf58zema2Ag4++72FjrWGslTCmFjv///v0D4Z2caFK8RR+Brr1+rrjYUxxdjmHi3y9//y3BEIaAheUNf5/w43CMEu/dfztxOjXx798QnJKT/27EKcfw7O8CnHK6X/+54pSs+LuVDafk6b8JuK3891cPtySJAACJW1LwQ3vd7gAAAABJRU5ErkJggg==\n", - "text/plain": [ - "" - ] - }, - "metadata": {}, - "output_type": "display_data" - }, - { - "data": { - "image/png": "iVBORw0KGgoAAAANSUhEUgAAABwAAAAcCAAAAABXZoBIAAAA2klEQVR4nGNgGIZA7tltRQiLPyHBFE0y6t/fJAXLgkn79v389/cOF4qcwrl/f4EASmxBlYwEi7+8tLdOU/7f3zRUG1/8XSIgIMDNCmQf/3efA0XS798/byhT4sv/ThQ5iXt/D0KZbLv+vhBAdc7fj85Q5oy/f81R/aGeD/UjS+v3v/0MOIDty79X2XDIKT/799sch5zQqn9/O3DIcS7++2+bBA7J2L9/D3PgkAv4+veJMg45lWv//kXhkOPa+/dvOwsOybZ/f9tw+bDy9792XHLmN/5+weUJ7AAAEiNg+iSCKdoAAAAASUVORK5CYII=\n", - "text/plain": [ - "" - ] - }, - "metadata": {}, - "output_type": "display_data" - }, - { - "data": { - "image/png": "iVBORw0KGgoAAAANSUhEUgAAABwAAAAcCAAAAABXZoBIAAAAuElEQVR4nGNgGK5g3n91BgY9PSwyTEarvvyzn3zty1dFTEmjv3///vsLItTRpQTsnsAkdzCjybncBIq/nd3qdP/vX3F0jZ3//v9cysHAoPzg30EMC70urzMC0Yv+vsLmWDBw//h3Ei45hot/Lwngkiv697cTlxzXxf/rcBq6/O/PCFxykh/+LsWpsenv32BccgKv/17HqTHg378EXHIyN//+MsMlGfn3bwlOGy/+/YAzcJLwBA42AAAadFU7QTVI2gAAAABJRU5ErkJggg==\n", - "text/plain": [ - "" - ] - }, - "metadata": {}, - "output_type": "display_data" - }, - { - "data": { - "image/png": "iVBORw0KGgoAAAANSUhEUgAAABwAAAAcCAAAAABXZoBIAAAAhklEQVR4nGNgGH6AidUjMtIjYfX3l+poMox+Gy7/A4K/v//+e8aNJun7/++nDeXe3jaaAe93o5tpW+EAZVn/w5BEAOt/M3FLVv/TxC15/rsQbslHB3DLkS/J9RyPpPk/vJKT8ElG4pPUwy2Z+S8Xt2QXPsnF/0JwS957I4hTTvDlEdwa0QAA+hI1MRkxgA8AAAAASUVORK5CYII=\n", - "text/plain": [ - "" - ] - }, - "metadata": {}, - "output_type": "display_data" - }, - { - "data": { - "image/png": "iVBORw0KGgoAAAANSUhEUgAAABwAAAAcCAAAAABXZoBIAAAAuklEQVR4nGNgGH7g8/4dO2rttbW1q3pZ0eVYzv8Dgv8g4p8MumT4vySLjGXH1k5a8O+rGLrkyneCEIbFvx0YVl55wAFhlP/rxZA8eAfqjL5/B9HlXH/+kQYzDH78g1kAB0Yv06Gspn/T2NG18rBAaLVvj1gw7ISBxH+LccoxRP4rxCnHuO5fJE5Jo38PRHFKVv5rwG3l7n+ZOOU4nl/F7RHvf6twm/rvOUZcwoHBv724NTb9W4lbEh0AACiJRD8v4XXFAAAAAElFTkSuQmCC\n", - "text/plain": [ - "" - ] - }, - "metadata": {}, - "output_type": "display_data" - }, - { - "data": { - "image/png": "iVBORw0KGgoAAAANSUhEUgAAABwAAAAcCAAAAABXZoBIAAAAoUlEQVR4nGNgGIaAQ7TTTkhIqP7vGnZMyZp/MBCJIRf2FSj86eTJh9gkLwDl2owYGIr+fXPAIvknWYgBJPkQ08p5/36C6aJ/6zElWZY1g+nH/7Jxeojz1Q99nJKr/n3GKcdw7d91nHI2X//V4JT0+/9PAqfkrn/TGXHJWf/914vbyn//PPFJiuOUjP63E0tkQsGmf/NxyuGXrPlniFsSDQAAc7JKpClSdBQAAAAASUVORK5CYII=\n", - "text/plain": [ - "" - ] - }, - "metadata": {}, - "output_type": "display_data" - }, - { - "data": { - "image/png": "iVBORw0KGgoAAAANSUhEUgAAABwAAAAcCAAAAABXZoBIAAAA1ElEQVR4nGNgGNqAIyQap5zP4n9v5FCFMr4uB5KK9v1f//77dx9N/fR/X/fs2fP939/VEb//TUKT7PkHBG82J4swGP/56ogmyV+yNdNPAsRa828Zbsee/jcfj+QXfTAthSrMzMDMxBb97blz9baF11Y/uwaXKJ4zZ87ROUe3/IOA86eAAC55/d//fyAEBMdmp5qwoRjp6G7AGqVswyr9750hTufk/avA7dYL/0xwyul+xi3JMfffeTZckgL//p3CJQeSLMQpyXb8twpOSQYLO9xyJAEAjgNg8t+T8AgAAAAASUVORK5CYII=\n", - "text/plain": [ - "" - ] - }, - "metadata": {}, - 
"output_type": "display_data" - }, - { - "data": { - "image/png": "iVBORw0KGgoAAAANSUhEUgAAABwAAAAcCAAAAABXZoBIAAAAo0lEQVR4nGNgGH7AcPX/DTgl5/39++fHj1tlKljk5v75CwFf7xZoo0u+BorXV09dC5JPRJfc8ffvRlYGBjbJ1r9/r6FLZv79uwLMYNvy9zG6pMALqCTDyr9vlNFloz/bwiT/ZmC4V5cDTHm9+ftCFIt3wGDDX0w7kSUbcclxH8DiTxgw/fv3mRweyRM4rdT7/DcapyTbjp+BOCVNMUOW5pIoAAA77VVfXHcKRQAAAABJRU5ErkJggg==\n", - "text/plain": [ - "" - ] - }, - "metadata": {}, - "output_type": "display_data" - }, - { - "data": { - "image/png": "iVBORw0KGgoAAAANSUhEUgAAABwAAAAcCAAAAABXZoBIAAAAn0lEQVR4nGNgGJrA4/HeUHlXaSasktpX/4HAhVXx2GQ5xDUTdBs3/1uE23SBf3m4JS3wScb908AtufwBJ27JXauxCHLvW54fETFh+79ZEeH66JIc9ff/fX7X825deLiNOqZeXjVpSe+fEbhtrPjNh1OO7c4s3BrN/8Xhllx+ghennNz3Htwa7X774ZRjWXkVt0blf8txS0oemYBbkjQAABnLM4hoOje3AAAAAElFTkSuQmCC\n", - "text/plain": [ - "" - ] - }, - "metadata": {}, - "output_type": "display_data" - }, - { - "data": { - "image/png": "iVBORw0KGgoAAAANSUhEUgAAABwAAAAcCAAAAABXZoBIAAAAvElEQVR4nGNgGImATbi1dc6/f9eCMaW42o7++wsGc9ClZPwO/f3779fNm48wJLnaX4G0fPNh4Er6+7cVRY5zNlDmwbE2VwYGv79/D3ChSvZsibBRYAOy7F/9XayBw72ce/8u5sEhx33372UuHHIMjn/fWeOSs/75yQKXnMPPnya45Lhe/F2IS8700N9NuOQYlv19YYhLLujDM5wWCl1+aoZLTrDubwNOCw3/TmDHJSdw+LESTo0znmljEQUA6ZBPtyGAQIYAAAAASUVORK5CYII=\n", - "text/plain": [ - "" - ] - }, - "metadata": {}, - "output_type": "display_data" - } - ], - "source": [ - "dirname = os.path.dirname(path)\n", - "for group in [\"mnist_tiny/train/3/*.png\", \n", - " \"mnist_tiny/train/7/*.png\"]:\n", - " for filename in glob.glob(os.path.join(dirname, group)):\n", - " display(Image(filename))" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "So, all of the files under `mnist_tiny/train/3/` are pictures of 3's, and `mnist_tiny/train/7/` are pictures of 7's.\n", - "\n", - "Now we get the image data from the folder. To train a dataset in fastai, we must create a `DataBunch`. In this case, we can use `ImageDataBunch` in the `fastai.vision` library. We pick 10 as the batch size (`bs`) because there are only 10 images in each category." - ] - }, - { - "cell_type": "code", - "execution_count": 8, - "metadata": {}, - "outputs": [], - "source": [ - "data = fastai.vision.ImageDataBunch.from_folder(path, bs=10) # bs: batch size" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "That's better for this simple test. Now we can create a model, and train the network:" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## 3. Model\n", - "\n", - "In this example, we will use the pre-designed WideresNet from fastai. The model is also known as wrn_22." - ] - }, - { - "cell_type": "code", - "execution_count": 9, - "metadata": {}, - "outputs": [], - "source": [ - "model = fastai.vision.models.WideResNet(num_groups=3,\n", - " N=3,\n", - " num_classes=10,\n", - " k=6,\n", - " drop_p=0.)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "That's it! Often, you would probably build your own model, or adjust a default model. To see more on model building in fastai, see:\n", - "\n", - "* http://files.fast.ai/models/\n", - "* http://course.fast.ai/" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## 4. Experiment" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "In order for comet.ml to log your experiment and results, you need to create an Experiment instance. 
To do this, you'll need two items:\n", - "\n", - "* a Comet `api_key`\n", - "* a `project_name`\n", - "\n", - "You can find your Comet api_key when you log in to https://www.comet.ml and click on your project. You should see a screen that looks similar to:\n", - "\n", - "![comet login screen](comet-key.png)\n", - "\n", - "Click on the API key to copy the key to your clipboard. \n", - "\n", - "It is recommended that you put your COMET_API_KEY in a `.env` key in the current directory. You can do that using the following code. Put it in a cell, replace the `...` with your key, and then delete the cell. That way your key stays private.\n", - "\n", - "```ipython\n", - "%%writefile .env\n", - "\n", - "COMET_API_KEY=...\n", - "```" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "It is also recommended that you use your project_name in the cell, so you can match the results with this code. You can make up a new name, or add this experiment to a project that already exists." - ] - }, - { - "cell_type": "code", - "execution_count": 10, - "metadata": {}, - "outputs": [ - { - "name": "stderr", - "output_type": "stream", - "text": [ - "COMET INFO: Experiment is live on comet.ml https://www.comet.ml/cometpublic/comet-notebooks/d21f94a1c71841d2961da1e6ddb5ab20\n", - "\n" - ] - } - ], - "source": [ - "experiment = Experiment(project_name=\"comet_notebooks\")" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "If you get the error that ends with:\n", - "\n", - "
\n",
-    "ValueError: Comet.ml requires an API key. Please provide as the first argument to Experiment(api_key) or as an environment variable named COMET_API_KEY \n",
-    "
\n", - "\n", - "then that means that either you don't have an `.env` file in this directory, or the key is invalid.\n", - "\n", - "Otherwise, you should see the message:\n", - "\n", - "
\n",
-    "COMET INFO: Experiment is live on comet.ml https://www.comet.ml/...\n",
-    "
\n", - "\n", - "If you click the URL, then a new page will open up. But, even better, you can execute the following line to see the experiment in the current notebook:" - ] - }, - { - "cell_type": "code", - "execution_count": 11, - "metadata": {}, - "outputs": [ - { - "data": { - "text/html": [ - "\n", - " \n", - " " - ], - "text/plain": [ - "" - ] - }, - "metadata": {}, - "output_type": "display_data" - } - ], - "source": [ - "experiment.display()" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "By the way, the line `experiment.display()` works when you are at the console too. It will open up a window in your browser.\n", - "\n", - "Now, we are ready for training!" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## 5. Training\n", - "\n", - "In fastai, we can train differently depending on if we are running CPU or a GPU. To test, we can use the `data.device.type` property. This will create a fastai `Learner`:" - ] - }, - { - "cell_type": "code", - "execution_count": 12, - "metadata": {}, - "outputs": [], - "source": [ - "if data.device.type == 'cpu':\n", - " learn = fastai.basic_train.Learner(data, model, metrics=fastai.metrics.accuracy)\n", - "else: # GPU:\n", - " learn = fastai.basic_train.Learner(data, model, metrics=fastai.metrics.accuracy).to_fp16()" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Now we are ready to train the model. To tell Comet about the details, we put the call to `fit` or `fit_one_cylce` inside an indented block under `experiment.train()`:" - ] - }, - { - "cell_type": "code", - "execution_count": 13, - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Total time: 00:10\n", - "epoch train_loss valid_loss accuracy\n", - "1 2.331475 2.306675 0.000000 (00:05)\n", - "2 1.933559 2.272467 0.000000 (00:05)\n", - "\n" - ] - } - ], - "source": [ - "with experiment.train():\n", - " learn.fit_one_cycle(2, 3e-3, wd=0.4, div_factor=10, pct_start=0.5)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## 6. Logging" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "In fastai, Comet will automatically log:\n", - "\n", - "* the model description\n", - "* the training loss\n", - "* the training validation\n", - "* the source code\n", - "\n", - "To log other items manually, you can use any of the following:\n", - "\n", - "* `experiment.log_html(HTML_STRING)`\n", - "* `experiment.html_log_url(URL_STRING)`\n", - "* `experiment.image(FILENAME)`\n", - "* `experiment.log_dataset_hash(DATASET)`\n", - "* `experiment.log_other(KEY, VALUE)`\n", - "* `experiment.log_metric(NAME, VALUE)`\n", - "* `experiment.log_parameter(PARAMETER, VALUE)`\n", - "* `experiment.log_figure(NAME, FIGURE)`\n", - "\n", - "For complete details, please see: \n", - "\n", - "https://www.comet.ml/docs/python-sdk/Experiment/#experiment" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## 7. Finish" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Finall, we are ready to tell Comet that our experiment is complete. You don't need to do this is a script that ends. But in Jupyter, we need to indicate that the experiment is finished. 
We do that with the `experiment.end()` method:" - ] - }, - { - "cell_type": "code", - "execution_count": 14, - "metadata": {}, - "outputs": [ - { - "name": "stderr", - "output_type": "stream", - "text": [ - "COMET INFO: Uploading stats to Comet before program termination (may take several seconds)\n", - "COMET INFO: Experiment is live on comet.ml https://www.comet.ml/cometpublic/comet-notebooks/d21f94a1c71841d2961da1e6ddb5ab20\n", - "\n" - ] - } - ], - "source": [ - "experiment.end()" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "That's it! If you have any questions, please visit us on https://cometml.slack.com" - ] - } - ], - "metadata": { - "kernelspec": { - "display_name": "Python 3", - "language": "python", - "name": "python3" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.6.6" - } - }, - "nbformat": 4, - "nbformat_minor": 2 -}