Skip to content

Commit

Permalink
Add demo colab
Browse files Browse the repository at this point in the history
  • Loading branch information
jinjingforever authored May 9, 2024
1 parent 9beac91 commit 8078362
Showing 1 changed file with 187 additions and 0 deletions.
187 changes: 187 additions & 0 deletions example_colabs/model_explorer_demo.ipynb
Original file line number Diff line number Diff line change
@@ -0,0 +1,187 @@
{
"cells": [
{
"cell_type": "markdown",
"metadata": {
"id": "2MDUUcFa7O2u"
},
"source": [
"[![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/google-ai-edge/model-explorer/blob/main/example_colabs/model_explorer_demo.ipynb) \n",
"\n",
"# Install Model Explorer and dependencies"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"collapsed": true,
"id": "me12eMftdu4H"
},
"outputs": [],
"source": [
"# Install tflite & model-explorer dependencies.\n",
"# Use %pip (not !pip) so packages are installed into this kernel's environment.\n",
"%pip install tflite\n",
"%pip install --no-deps ai-edge-model-explorer model-explorer-adapter\n",
"\n",
"# Install kagglehub to load a model.\n",
"%pip install kagglehub --no-deps"
]
},
{
"cell_type": "markdown",
"metadata": {
"id": "Im5BQ2Qz7na_"
},
"source": [
"# Download MobileNet v3 from Kaggle"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"id": "xaSxh0rQf4Fj"
},
"outputs": [],
"source": [
"# TODO: Workaround until Eric's fixes\n",
"import kagglehub\n",
"\n",
"# Fetch the latest published version of the model from Kaggle.\n",
"# TODO: Update this to use mobilebert or selected model.\n",
"model_dir = kagglehub.model_download(\"google/mobilenet-v3/tfLite/large-075-224-classification\")\n",
"\n",
"# Path to the .tflite file; downstream cells read `model_path`.\n",
"model_path = f\"{model_dir}/1.tflite\""
]
},
{
"cell_type": "markdown",
"metadata": {
"id": "mWm4BE_I70hi"
},
"source": [
"# Run the model with random test data"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"id": "6cuAg62OvfTK"
},
"outputs": [],
"source": [
"import numpy as np\n",
"import tensorflow as tf\n",
"\n",
"# Load the TFLite model and allocate its tensors.\n",
"interpreter = tf.lite.Interpreter(model_path=model_path)\n",
"interpreter.allocate_tensors()\n",
"\n",
"# Query the model's input and output tensor metadata.\n",
"input_details = interpreter.get_input_details()\n",
"output_details = interpreter.get_output_details()\n",
"\n",
"# Fill every input tensor with random values of the expected shape and dtype.\n",
"for detail in input_details:\n",
"  random_input = np.array(np.random.random_sample(detail['shape']), dtype=detail['dtype'])\n",
"  interpreter.set_tensor(detail['index'], random_input)\n",
"\n",
"# Run inference on the random inputs.\n",
"interpreter.invoke()\n",
"\n",
"# `get_tensor()` returns a copy of the tensor data.\n",
"# Use `tensor()` in order to get a pointer to the tensor.\n",
"for detail in output_details:\n",
"  print(f\"Output for {detail['name']}\")\n",
"  print(interpreter.get_tensor(detail['index']))"
]
},
{
"cell_type": "markdown",
"metadata": {
"id": "HhwvzBUIO9SZ"
},
"source": [
"# Download the TF benchmark utility and run it"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"collapsed": true,
"id": "2FLsz9OsC6w_"
},
"outputs": [],
"source": [
"# Export the model path as an env var so the shell commands below can use $MODEL_PATH.\n",
"%env MODEL_PATH=$model_path\n",
"\n",
"# Work inside a scratch directory.\n",
"!mkdir -p /tmp/data\n",
"%cd /tmp/data\n",
"\n",
"# Download the tflite model benchmark binary.\n",
"# (-nc: no-clobber, skips the download if the file already exists.)\n",
"!wget -nc https://storage.googleapis.com/tensorflow-nightly-public/prod/tensorflow/release/lite/tools/nightly/latest/linux_x86-64_benchmark_model\n",
"!chmod +x /tmp/data/linux_x86-64_benchmark_model\n",
"\n",
"# Run the benchmark locally only using CPU kernels.\n",
"# TODO: Update this once the feature is live.\n",
"# !./linux_x86-64_benchmark_model --graph=$MODEL_PATH --use_xnnpack=false --num_threads=4\n",
"\n",
"# # Run the benchmark locally with XNNPACK kernels enabled.\n",
"# # TODO: Update this once the feature is live.\n",
"# !./linux_x86-64_benchmark_model --graph=$MODEL_PATH --use_xnnpack=true --num_threads=4\n",
"\n",
"# TODO: Remove the below once the feature is live.\n",
"# Pre-computed per-op profiling results used in place of a live benchmark run.\n",
"!wget -nc https://storage.googleapis.com/tfweb/model-explorer-demo/mv3-cpu-op-profile.json\n",
"!wget -nc https://storage.googleapis.com/tfweb/model-explorer-demo/mv3-xnnpack-op-profile.json\n",
"# Python variables consumed by the visualization cell below.\n",
"CPU_PROFILING_JSON_PATH=\"/tmp/data/mv3-cpu-op-profile.json\"\n",
"XNNPACK_PROFILING_JSON_PATH=\"/tmp/data/mv3-xnnpack-op-profile.json\""
]
},
{
"cell_type": "markdown",
"metadata": {
"id": "oCgFNFP4WAzE"
},
"source": [
"# Visualize the model with the per op latency overlaid"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"id": "6SVyHrthM_hz"
},
"outputs": [],
"source": [
"import model_explorer\n",
"\n",
"# Build a config that pairs the model with both per-op latency overlays.\n",
"config = model_explorer.config()\n",
"config.add_model_from_path(model_path)\n",
"config.add_node_data_from_path(CPU_PROFILING_JSON_PATH)\n",
"config.add_node_data_from_path(XNNPACK_PROFILING_JSON_PATH)\n",
"\n",
"# Launch the Model Explorer UI with the overlays applied.\n",
"model_explorer.visualize_from_config(config)"
]
}
],
"metadata": {
"colab": {
"provenance": []
},
"kernelspec": {
"display_name": "Python 3",
"name": "python3"
},
"language_info": {
"name": "python"
}
},
"nbformat": 4,
"nbformat_minor": 0
}

0 comments on commit 8078362

Please sign in to comment.