From e7943427f48b04bb2f48de73fc2471e0a46bcfba Mon Sep 17 00:00:00 2001
From: Michael Sharp <51342856+michaelgsharp@users.noreply.github.com>
Date: Wed, 9 Oct 2024 11:07:20 -0600
Subject: [PATCH] Load onnx model from Stream working (#7254)
---
.../OnnxCatalog.cs | 222 ++++++++++++++++++
.../OnnxTransform.cs | 122 +++++++++-
src/Microsoft.ML.OnnxTransformer/OnnxUtils.cs | 31 ++-
.../OnnxTransformTests.cs | 53 +++++
4 files changed, 421 insertions(+), 7 deletions(-)
diff --git a/src/Microsoft.ML.OnnxTransformer/OnnxCatalog.cs b/src/Microsoft.ML.OnnxTransformer/OnnxCatalog.cs
index 00497e1a04..210e13e849 100644
--- a/src/Microsoft.ML.OnnxTransformer/OnnxCatalog.cs
+++ b/src/Microsoft.ML.OnnxTransformer/OnnxCatalog.cs
@@ -4,6 +4,7 @@
using System;
using System.Collections.Generic;
+using System.IO;
using Microsoft.ML.Data;
using Microsoft.ML.Transforms;
using Microsoft.ML.Transforms.Onnx;
@@ -40,6 +41,34 @@ public static OnnxScoringEstimator ApplyOnnxModel(this TransformsCatalog catalog
return new OnnxScoringEstimator(env, modelFile, gpuDeviceIdToUse, fallbackToCpuToUse);
}
+ ///
+ /// Create a , which applies a pre-trained Onnx model to the input column.
+ /// Input/output columns are determined based on the input/output columns of the provided ONNX model.
+ /// Please refer to to learn more about the necessary dependencies,
+ /// and how to run it on a GPU.
+ ///
+ ///
+ /// The name/type of input columns must exactly match name/type of the ONNX model inputs.
+ /// The name/type of the produced output columns will match name/type of the ONNX model outputs.
+ /// If the gpuDeviceId value is the value will be used if it is not .
+ ///
+ /// The transform's catalog.
+ /// The containing the model bytes.
+ /// Optional GPU device ID to run execution on, to run on CPU.
+ /// If GPU error, raise exception or fallback to CPU.
+ ///
+ ///
+ ///
+ ///
+ ///
+ public static OnnxScoringEstimator ApplyOnnxModel(this TransformsCatalog catalog, Stream modelBytes, int? gpuDeviceId = null, bool fallbackToCpu = false)
+ {
+ var (env, gpuDeviceIdToUse, fallbackToCpuToUse) = GetGpuDeviceId(catalog, gpuDeviceId, fallbackToCpu);
+ return new OnnxScoringEstimator(env, modelBytes, gpuDeviceIdToUse, fallbackToCpuToUse);
+ }
+
///
/// Create a , which applies a pre-trained Onnx model to the input column.
/// Input/output columns are determined based on the input/output columns of the provided ONNX model.
@@ -76,6 +105,42 @@ public static OnnxScoringEstimator ApplyOnnxModel(this TransformsCatalog catalog
return new OnnxScoringEstimator(env, modelFile, gpuDeviceIdToUse, fallbackToCpuToUse, shapeDictionary: shapeDictionary);
}
+ ///
+ /// Create a , which applies a pre-trained Onnx model to the input column.
+ /// Input/output columns are determined based on the input/output columns of the provided ONNX model.
+ /// Please refer to to learn more about the necessary dependencies,
+ /// and how to run it on a GPU.
+ ///
+ ///
+ /// The name/type of input columns must exactly match name/type of the ONNX model inputs.
+ /// The name/type of the produced output columns will match name/type of the ONNX model outputs.
+ /// If the gpuDeviceId value is the value will be used if it is not .
+ ///
+ /// The transform's catalog.
+ /// The containing the model bytes.
+ /// ONNX shapes to be used over those loaded from .
+ /// For keys use names as stated in the ONNX model, e.g. "input". Stating the shapes with this parameter
+ /// is particularly useful for working with variable dimension inputs and outputs.
+ ///
+ /// Optional GPU device ID to run execution on, to run on CPU.
+ /// If GPU error, raise exception or fallback to CPU.
+ ///
+ ///
+ ///
+ ///
+ ///
+ public static OnnxScoringEstimator ApplyOnnxModel(this TransformsCatalog catalog,
+ Stream modelBytes,
+ IDictionary shapeDictionary,
+ int? gpuDeviceId = null,
+ bool fallbackToCpu = false)
+ {
+ var (env, gpuDeviceIdToUse, fallbackToCpuToUse) = GetGpuDeviceId(catalog, gpuDeviceId, fallbackToCpu);
+ return new OnnxScoringEstimator(env, modelBytes, gpuDeviceIdToUse, fallbackToCpuToUse, shapeDictionary: shapeDictionary);
+ }
+
///
/// Create a , which applies a pre-trained Onnx model to the column.
/// Please refer to to learn more about the necessary dependencies,
@@ -108,6 +173,38 @@ public static OnnxScoringEstimator ApplyOnnxModel(this TransformsCatalog catalog
return new OnnxScoringEstimator(env, new[] { outputColumnName }, new[] { inputColumnName }, modelFile, gpuDeviceIdToUse, fallbackToCpuToUse);
}
+ ///
+ /// Create a , which applies a pre-trained Onnx model to the column.
+ /// Please refer to to learn more about the necessary dependencies,
+ /// and how to run it on a GPU.
+ ///
+ /// The transform's catalog.
+ /// The output column resulting from the transformation.
+ /// The input column.
+ /// The containing the model bytes.
+ /// Optional GPU device ID to run execution on, to run on CPU.
+ /// If GPU error, raise exception or fallback to CPU.
+ ///
+ /// If the gpuDeviceId value is the value will be used if it is not .
+ ///
+ ///
+ ///
+ ///
+ ///
+ ///
+ public static OnnxScoringEstimator ApplyOnnxModel(this TransformsCatalog catalog,
+ string outputColumnName,
+ string inputColumnName,
+ Stream modelBytes,
+ int? gpuDeviceId = null,
+ bool fallbackToCpu = false)
+ {
+ var (env, gpuDeviceIdToUse, fallbackToCpuToUse) = GetGpuDeviceId(catalog, gpuDeviceId, fallbackToCpu);
+ return new OnnxScoringEstimator(env, new[] { outputColumnName }, new[] { inputColumnName }, modelBytes, gpuDeviceIdToUse, fallbackToCpuToUse);
+ }
+
///
/// Create a using the specified .
/// Please refer to to learn more about the necessary dependencies,
@@ -163,6 +260,44 @@ public static OnnxScoringEstimator ApplyOnnxModel(this TransformsCatalog catalog
modelFile, gpuDeviceIdToUse, fallbackToCpuToUse, shapeDictionary: shapeDictionary);
}
+ ///
+ /// Create a , which applies a pre-trained Onnx model to the column.
+ /// Please refer to to learn more about the necessary dependencies,
+ /// and how to run it on a GPU.
+ ///
+ /// The transform's catalog.
+ /// The output column resulting from the transformation.
+ /// The input column.
+ /// The containing the model bytes.
+ /// ONNX shapes to be used over those loaded from .
+ /// For keys use names as stated in the ONNX model, e.g. "input". Stating the shapes with this parameter
+ /// is particularly useful for working with variable dimension inputs and outputs.
+ ///
+ /// Optional GPU device ID to run execution on, to run on CPU.
+ /// If GPU error, raise exception or fallback to CPU.
+ ///
+ /// If the gpuDeviceId value is the value will be used if it is not .
+ ///
+ ///
+ ///
+ ///
+ ///
+ ///
+ public static OnnxScoringEstimator ApplyOnnxModel(this TransformsCatalog catalog,
+ string outputColumnName,
+ string inputColumnName,
+ Stream modelBytes,
+ IDictionary shapeDictionary,
+ int? gpuDeviceId = null,
+ bool fallbackToCpu = false)
+ {
+ var (env, gpuDeviceIdToUse, fallbackToCpuToUse) = GetGpuDeviceId(catalog, gpuDeviceId, fallbackToCpu);
+ return new OnnxScoringEstimator(env, new[] { outputColumnName }, new[] { inputColumnName },
+ modelBytes, gpuDeviceIdToUse, fallbackToCpuToUse, shapeDictionary: shapeDictionary);
+ }
+
///
/// Create a , which applies a pre-trained Onnx model to the columns.
/// Please refer to to learn more about the necessary dependencies,
@@ -188,6 +323,31 @@ public static OnnxScoringEstimator ApplyOnnxModel(this TransformsCatalog catalog
return new OnnxScoringEstimator(env, outputColumnNames, inputColumnNames, modelFile, gpuDeviceIdToUse, fallbackToCpuToUse);
}
+ ///
+ /// Create a , which applies a pre-trained Onnx model to the columns.
+ /// Please refer to to learn more about the necessary dependencies,
+ /// and how to run it on a GPU.
+ ///
+ /// The transform's catalog.
+ /// The output columns resulting from the transformation.
+ /// The input columns.
+ /// The containing the model bytes.
+ /// Optional GPU device ID to run execution on, to run on CPU.
+ /// If GPU error, raise exception or fallback to CPU.
+ ///
+ /// If the gpuDeviceId value is the value will be used if it is not .
+ ///
+ public static OnnxScoringEstimator ApplyOnnxModel(this TransformsCatalog catalog,
+ string[] outputColumnNames,
+ string[] inputColumnNames,
+ Stream modelBytes,
+ int? gpuDeviceId = null,
+ bool fallbackToCpu = false)
+ {
+ var (env, gpuDeviceIdToUse, fallbackToCpuToUse) = GetGpuDeviceId(catalog, gpuDeviceId, fallbackToCpu);
+ return new OnnxScoringEstimator(env, outputColumnNames, inputColumnNames, modelBytes, gpuDeviceIdToUse, fallbackToCpuToUse);
+ }
+
///
/// Create a , which applies a pre-trained Onnx model to the columns.
/// Please refer to to learn more about the necessary dependencies,
@@ -218,6 +378,36 @@ public static OnnxScoringEstimator ApplyOnnxModel(this TransformsCatalog catalog
return new OnnxScoringEstimator(env, outputColumnNames, inputColumnNames, modelFile, gpuDeviceIdToUse, fallbackToCpuToUse, shapeDictionary: shapeDictionary);
}
+ ///
+ /// Create a , which applies a pre-trained Onnx model to the columns.
+ /// Please refer to to learn more about the necessary dependencies,
+ /// and how to run it on a GPU.
+ ///
+ /// The transform's catalog.
+ /// The output columns resulting from the transformation.
+ /// The input columns.
+ /// The containing the model bytes.
+ /// ONNX shapes to be used over those loaded from .
+ /// For keys use names as stated in the ONNX model, e.g. "input". Stating the shapes with this parameter
+ /// is particularly useful for working with variable dimension inputs and outputs.
+ ///
+ /// Optional GPU device ID to run execution on, to run on CPU.
+ /// If GPU error, raise exception or fallback to CPU.
+ ///
+ /// If the gpuDeviceId value is the value will be used if it is not .
+ ///
+ public static OnnxScoringEstimator ApplyOnnxModel(this TransformsCatalog catalog,
+ string[] outputColumnNames,
+ string[] inputColumnNames,
+ Stream modelBytes,
+ IDictionary shapeDictionary,
+ int? gpuDeviceId = null,
+ bool fallbackToCpu = false)
+ {
+ var (env, gpuDeviceIdToUse, fallbackToCpuToUse) = GetGpuDeviceId(catalog, gpuDeviceId, fallbackToCpu);
+ return new OnnxScoringEstimator(env, outputColumnNames, inputColumnNames, modelBytes, gpuDeviceIdToUse, fallbackToCpuToUse, shapeDictionary: shapeDictionary);
+ }
+
///
/// Create a , which applies a pre-trained Onnx model to the columns.
/// Please refer to to learn more about the necessary dependencies,
@@ -250,6 +440,38 @@ public static OnnxScoringEstimator ApplyOnnxModel(this TransformsCatalog catalog
return new OnnxScoringEstimator(env, outputColumnNames, inputColumnNames, modelFile, gpuDeviceIdToUse, fallbackToCpuToUse, shapeDictionary: shapeDictionary, recursionLimit);
}
+ ///
+ /// Create a , which applies a pre-trained Onnx model to the columns.
+ /// Please refer to to learn more about the necessary dependencies,
+ /// and how to run it on a GPU.
+ ///
+ /// The transform's catalog.
+ /// The output columns resulting from the transformation.
+ /// The input columns.
+ /// The containing the model bytes.
+ /// ONNX shapes to be used over those loaded from .
+ /// For keys use names as stated in the ONNX model, e.g. "input". Stating the shapes with this parameter
+ /// is particularly useful for working with variable dimension inputs and outputs.
+ ///
+ /// Optional GPU device ID to run execution on, to run on CPU.
+ /// If GPU error, raise exception or fallback to CPU.
+ /// Optional, specifies the Protobuf CodedInputStream recursion limit. Default value is 100.
+ ///
+ /// If the gpuDeviceId value is the value will be used if it is not .
+ ///
+ public static OnnxScoringEstimator ApplyOnnxModel(this TransformsCatalog catalog,
+ string[] outputColumnNames,
+ string[] inputColumnNames,
+ Stream modelBytes,
+ IDictionary shapeDictionary,
+ int? gpuDeviceId = null,
+ bool fallbackToCpu = false,
+ int recursionLimit = 100)
+ {
+ var (env, gpuDeviceIdToUse, fallbackToCpuToUse) = GetGpuDeviceId(catalog, gpuDeviceId, fallbackToCpu);
+ return new OnnxScoringEstimator(env, outputColumnNames, inputColumnNames, modelBytes, gpuDeviceIdToUse, fallbackToCpuToUse, shapeDictionary: shapeDictionary, recursionLimit);
+ }
+
///
/// Create , which applies one of the pre-trained DNN models in
/// to featurize an image.
diff --git a/src/Microsoft.ML.OnnxTransformer/OnnxTransform.cs b/src/Microsoft.ML.OnnxTransformer/OnnxTransform.cs
index 99d431f882..c4413dd50e 100644
--- a/src/Microsoft.ML.OnnxTransformer/OnnxTransform.cs
+++ b/src/Microsoft.ML.OnnxTransformer/OnnxTransform.cs
@@ -96,6 +96,9 @@ internal sealed class Options : TransformInputBase
[Argument(ArgumentType.AtMostOnce, HelpText = "Controls the number of threads to use to run the model.", SortOrder = 8)]
public int? IntraOpNumThreads = null;
+
+ // No Argument attribute because this option cannot be used from the command line
+ public Stream ModelBytes = null;
}
///
@@ -253,12 +256,22 @@ private OnnxTransformer(IHostEnvironment env, Options options, byte[] modelBytes
{
if (modelBytes == null)
{
- // Entering this region means that the model file is passed in by the user.
- Host.CheckNonWhiteSpace(options.ModelFile, nameof(options.ModelFile));
- Host.CheckIO(File.Exists(options.ModelFile), "Model file {0} does not exists.", options.ModelFile);
- // Because we cannot delete the user file, ownModelFile should be false.
- Model = new OnnxModel(options.ModelFile, options.GpuDeviceId, options.FallbackToCpu, ownModelFile: false, shapeDictionary: shapeDictionary, options.RecursionLimit,
- options.InterOpNumThreads, options.IntraOpNumThreads);
+ if (options.ModelBytes == null)
+ {
+ // Entering this region means that the model file is passed in by the user.
+ Host.CheckNonWhiteSpace(options.ModelFile, nameof(options.ModelFile));
+ Host.CheckIO(File.Exists(options.ModelFile), "Model file {0} does not exists.", options.ModelFile);
+ // Because we cannot delete the user file, ownModelFile should be false.
+ Model = new OnnxModel(options.ModelFile, options.GpuDeviceId, options.FallbackToCpu, ownModelFile: false, shapeDictionary: shapeDictionary, options.RecursionLimit,
+ options.InterOpNumThreads, options.IntraOpNumThreads);
+ }
+ else
+ {
+ // Entering this region means that the model bytes are passed in by the user.
+ Host.CheckValue(options.ModelBytes, nameof(options.ModelBytes));
+
+ Model = OnnxModel.CreateFromStream(options.ModelBytes, env, options.GpuDeviceId, options.FallbackToCpu, shapeDictionary: shapeDictionary, options.RecursionLimit);
+ }
}
else
{
@@ -311,6 +324,32 @@ internal OnnxTransformer(IHostEnvironment env, string modelFile, int? gpuDeviceI
{
}
+ ///
+ /// Transform for scoring ONNX models. Input data column names/types must exactly match
+ /// all model input names. All possible output columns are generated, with names/types
+ /// specified by the model.
+ ///
+ /// The environment to use.
+ /// Model file path.
+ /// Optional GPU device ID to run execution on. Null for CPU.
+ /// If GPU error, raise exception or fallback to CPU.
+ ///
+ /// Optional, specifies the Protobuf CodedInputStream recursion limit. Default value is 100.
+ internal OnnxTransformer(IHostEnvironment env, Stream modelBytes, int? gpuDeviceId = null,
+ bool fallbackToCpu = false, IDictionary shapeDictionary = null, int recursionLimit = 100)
+ : this(env, new Options()
+ {
+ ModelBytes = modelBytes,
+ InputColumns = new string[] { },
+ OutputColumns = new string[] { },
+ GpuDeviceId = gpuDeviceId,
+ FallbackToCpu = fallbackToCpu,
+ CustomShapeInfos = shapeDictionary?.Select(pair => new CustomShapeInfo(pair.Key, pair.Value)).ToArray(),
+ RecursionLimit = recursionLimit
+ })
+ {
+ }
+
///
/// Transform for scoring ONNX models. Input data column names/types must exactly match
/// all model input names. Only the output columns specified will be generated.
@@ -343,6 +382,38 @@ internal OnnxTransformer(IHostEnvironment env, string[] outputColumnNames, strin
{
}
+ ///
+ /// Transform for scoring ONNX models. Input data column names/types must exactly match
+ /// all model input names. Only the output columns specified will be generated.
+ ///
+ /// The environment to use.
+ /// The output columns to generate. Names must match model specifications. Data types are inferred from model.
+ /// The name of the input data columns. Must match model's input names.
+ /// Model as bytes.
+ /// Optional GPU device ID to run execution on. Null for CPU.
+ /// If GPU error, raise exception or fallback to CPU.
+ ///
+ /// Optional, specifies the Protobuf CodedInputStream recursion limit. Default value is 100.
+ /// Controls the number of threads used to parallelize the execution of the graph (across nodes).
+ /// Controls the number of threads to use to run the model.
+ internal OnnxTransformer(IHostEnvironment env, string[] outputColumnNames, string[] inputColumnNames, Stream modelBytes, int? gpuDeviceId = null, bool fallbackToCpu = false,
+ IDictionary shapeDictionary = null, int recursionLimit = 100,
+ int? interOpNumThreads = null, int? intraOpNumThreads = null)
+ : this(env, new Options()
+ {
+ ModelBytes = modelBytes,
+ InputColumns = inputColumnNames,
+ OutputColumns = outputColumnNames,
+ GpuDeviceId = gpuDeviceId,
+ FallbackToCpu = fallbackToCpu,
+ CustomShapeInfos = shapeDictionary?.Select(pair => new CustomShapeInfo(pair.Key, pair.Value)).ToArray(),
+ RecursionLimit = recursionLimit,
+ InterOpNumThreads = interOpNumThreads,
+ IntraOpNumThreads = intraOpNumThreads
+ })
+ {
+ }
+
private protected override void SaveModel(ModelSaveContext ctx)
{
Host.AssertValue(ctx);
@@ -906,6 +977,24 @@ internal OnnxScoringEstimator(IHostEnvironment env, string modelFile, int? gpuDe
{
}
+ ///
+ /// Transform for scoring ONNX models. Input data column names/types must exactly match
+ /// all model input names. All possible output columns are generated, with names/types
+ /// specified by model.
+ ///
+ /// The environment to use.
+ /// Model as bytes.
+ /// Optional GPU device ID to run execution on. Null for CPU.
+ /// If GPU error, raise exception or fallback to CPU.
+ ///
+ /// Optional, specifies the Protobuf CodedInputStream recursion limit. Default value is 100.
+ [BestFriend]
+ internal OnnxScoringEstimator(IHostEnvironment env, Stream modelBytes, int? gpuDeviceId = null, bool fallbackToCpu = false,
+ IDictionary shapeDictionary = null, int recursionLimit = 100)
+ : this(env, new OnnxTransformer(env, new string[] { }, new string[] { }, modelBytes, gpuDeviceId, fallbackToCpu, shapeDictionary, recursionLimit))
+ {
+ }
+
///
/// Transform for scoring ONNX models. Input data column names/types must exactly match
/// all model input names. Only the output columns specified will be generated.
@@ -932,6 +1021,27 @@ internal OnnxScoringEstimator(IHostEnvironment env, OnnxTransformer transformer)
{
}
+ ///
+ /// Transform for scoring ONNX models. Input data column names/types must exactly match
+ /// all model input names. Only the output columns specified will be generated.
+ ///
+ /// The environment to use.
+ /// The output columns to generate. Names must match model specifications. Data types are inferred from model.
+ /// The name of the input data columns. Must match model's input names.
+ /// Model bytes in memory.
+ /// Optional GPU device ID to run execution on. Null for CPU.
+ /// If GPU error, raise exception or fallback to CPU.
+ ///
+ /// Optional, specifies the Protobuf CodedInputStream recursion limit. Default value is 100.
+ /// Controls the number of threads used to parallelize the execution of the graph (across nodes).
+ /// Controls the number of threads to use to run the model.
+ internal OnnxScoringEstimator(IHostEnvironment env, string[] outputColumnNames, string[] inputColumnNames, Stream modelBytes,
+ int? gpuDeviceId = null, bool fallbackToCpu = false, IDictionary shapeDictionary = null, int recursionLimit = 100,
+ int? interOpNumThreads = null, int? intraOpNumThreads = null)
+ : this(env, new OnnxTransformer(env, outputColumnNames, inputColumnNames, modelBytes, gpuDeviceId, fallbackToCpu, shapeDictionary, recursionLimit, interOpNumThreads, intraOpNumThreads))
+ {
+ }
+
///
/// Returns the of the schema which will be produced by the transformer.
/// Used for schema propagation and verification in a pipeline.
diff --git a/src/Microsoft.ML.OnnxTransformer/OnnxUtils.cs b/src/Microsoft.ML.OnnxTransformer/OnnxUtils.cs
index 68d9290676..9dbd92c3c1 100644
--- a/src/Microsoft.ML.OnnxTransformer/OnnxUtils.cs
+++ b/src/Microsoft.ML.OnnxTransformer/OnnxUtils.cs
@@ -353,7 +353,7 @@ public static OnnxModel CreateFromBytes(byte[] modelBytes, IHostEnvironment env)
///
/// Create an OnnxModel from a byte[]. Set execution to GPU if required.
- /// Usually, a ONNX model is consumed by as a file.
+ /// Usually, an ONNX model is consumed by as a file.
/// With and
/// ,
/// it's possible to use in-memory model (type: byte[]) to create .
@@ -376,6 +376,35 @@ public static OnnxModel CreateFromBytes(byte[] modelBytes, IHostEnvironment env,
ownModelFile: true, shapeDictionary: shapeDictionary, recursionLimit);
}
+ ///
+ /// Create an OnnxModel from a Stream of bytes. Set execution to GPU if required.
+ /// Usually, an ONNX model is consumed by as a file.
+ /// With and
+ /// ,
+ /// it's possible to use an in-memory model (type: Stream) to create .
+ ///
+ /// Stream containing the serialized model bytes.
+ /// IHostEnvironment
+ /// GPU device ID to execute on. Null for CPU.
+ /// If true, resumes CPU execution quietly upon GPU error.
+ /// User-provided shapes. If the key "myTensorName" is associated
+ /// with the value [1, 3, 5], the shape of "myTensorName" will be set to [1, 3, 5].
+ /// The shape loaded from would be overwritten.
+ /// Optional, specifies the Protobuf CodedInputStream recursion limit. Default value is 100.
+ /// An
+ public static OnnxModel CreateFromStream(Stream modelBytes, IHostEnvironment env, int? gpuDeviceId = null, bool fallbackToCpu = false,
+ IDictionary shapeDictionary = null, int recursionLimit = 100)
+ {
+ var tempModelFile = Path.Combine(((IHostEnvironmentInternal)env).TempFilePath, Path.GetRandomFileName());
+ using (var fileStream = File.Create(tempModelFile))
+ {
+ modelBytes.Seek(0, SeekOrigin.Begin);
+ modelBytes.CopyTo(fileStream);
+ }
+ return new OnnxModel(tempModelFile, gpuDeviceId, fallbackToCpu,
+ ownModelFile: false, shapeDictionary: shapeDictionary, recursionLimit);
+ }
+
///
/// Uses an open session to score a list of NamedOnnxValues.
///
diff --git a/test/Microsoft.ML.OnnxTransformerTest/OnnxTransformTests.cs b/test/Microsoft.ML.OnnxTransformerTest/OnnxTransformTests.cs
index 8d6646fe39..8bae1e9c26 100644
--- a/test/Microsoft.ML.OnnxTransformerTest/OnnxTransformTests.cs
+++ b/test/Microsoft.ML.OnnxTransformerTest/OnnxTransformTests.cs
@@ -232,6 +232,59 @@ public void TestOldSavingAndLoading(int? gpuDeviceId, bool fallbackToCpu)
}
}
+ [OnnxFact]
+ public void OnnxStreamWorkout()
+ {
+ var modelFile = Path.Combine(Directory.GetCurrentDirectory(), "squeezenet", "00000001", "model.onnx");
+
+ using FileStream fileStream = File.OpenRead(modelFile);
+
+ var env = new MLContext(1);
+ var imageHeight = 224;
+ var imageWidth = 224;
+ var dataFile = GetDataPath("images/images.tsv");
+ var imageFolder = Path.GetDirectoryName(dataFile);
+
+ var data = ML.Data.LoadFromTextFile(dataFile, new[] {
+ new TextLoader.Column("imagePath", DataKind.String, 0),
+ new TextLoader.Column("name", DataKind.String, 1)
+ });
+ // Note that CamelCase column names are there to match the TF graph node names.
+ var pipe = ML.Transforms.LoadImages("data_0", imageFolder, "imagePath")
+ .Append(ML.Transforms.ResizeImages("data_0", imageHeight, imageWidth))
+ .Append(ML.Transforms.ExtractPixels("data_0", interleavePixelColors: true))
+ .Append(ML.Transforms.ApplyOnnxModel("softmaxout_1", "data_0", fileStream, gpuDeviceId: _gpuDeviceId, fallbackToCpu: _fallbackToCpu));
+
+ TestEstimatorCore(pipe, data);
+
+ var model = pipe.Fit(data);
+ var result = model.Transform(data);
+
+ // save and reload the model
+ var tempPath = Path.GetTempFileName();
+ ML.Model.Save(model, data.Schema, tempPath);
+ var loadedModel = ML.Model.Load(tempPath, out DataViewSchema modelSchema);
+ (loadedModel as IDisposable)?.Dispose();
+
+ var softmaxOutCol = result.Schema["softmaxout_1"];
+
+ using (var cursor = result.GetRowCursor(softmaxOutCol))
+ {
+ var buffer = default(VBuffer);
+ var getter = cursor.GetGetter>(softmaxOutCol);
+ var numRows = 0;
+ while (cursor.MoveNext())
+ {
+ getter(ref buffer);
+ Assert.Equal(1000, buffer.Length);
+ numRows += 1;
+ }
+ Assert.Equal(4, numRows);
+ }
+ (model as IDisposable)?.Dispose();
+ File.Delete(tempPath);
+ }
+
[OnnxFact]
public void OnnxWorkout()
{