Commit e09e156

NonStatic2014 committed Mar 7, 2019
2 parents d09ac52 + d40a9f8
Showing 29 changed files with 824 additions and 403 deletions.
2 changes: 1 addition & 1 deletion cmake/onnxruntime_python.cmake
@@ -86,7 +86,7 @@ elseif (APPLE)
BUILD_WITH_INSTALL_RPATH TRUE
INSTALL_RPATH_USE_LINK_PATH FALSE)
else()
-target_link_libraries(onnxruntime_pybind11_state ${onnxruntime_pybind11_state_libs} ${PYTHON_LIBRARY} ${ONNXRUNTIME_SO_LINK_FLAG} debug ${onnxruntime_EXTERNAL_LIBRARIES_DEBUG} optimized ${onnxruntime_EXTERNAL_LIBRARIES})
+target_link_libraries(onnxruntime_pybind11_state PRIVATE ${onnxruntime_pybind11_state_libs} ${PYTHON_LIBRARY} ${ONNXRUNTIME_SO_LINK_FLAG} debug ${onnxruntime_EXTERNAL_LIBRARIES_DEBUG} optimized ${onnxruntime_EXTERNAL_LIBRARIES})
set_target_properties(onnxruntime_pybind11_state PROPERTIES LINK_FLAGS "-Xlinker -rpath=\$ORIGIN")
endif()

@@ -0,0 +1,148 @@
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
//

#include <assert.h>
#include <math.h>  // fabsf, used in the output check below
#include <onnxruntime_c_api.h>
#include <stdlib.h>
#include <stdio.h>
#include <vector>

//*****************************************************************************
// helper function to check for status
#define CHECK_STATUS(expr)                                 \
  {                                                        \
    OrtStatus* onnx_status = (expr);                       \
    if (onnx_status != NULL) {                             \
      const char* msg = OrtGetErrorMessage(onnx_status);   \
      fprintf(stderr, "%s\n", msg);                        \
      OrtReleaseStatus(onnx_status);                       \
      exit(1);                                             \
    }                                                      \
  }

int main(int argc, char* argv[]) {
  //*************************************************************************
  // initialize environment...one environment per process
  // environment maintains thread pools and other state info
  OrtEnv* env;
  CHECK_STATUS(OrtCreateEnv(ORT_LOGGING_LEVEL_WARNING, "test", &env));

  // initialize session options if needed
  OrtSessionOptions* session_option = OrtCreateSessionOptions();
  OrtSetSessionThreadPoolSize(session_option, 1);

  //*************************************************************************
  // create session and load model into memory
  // using squeezenet version 1.3
  // URL = https://github.com/onnx/models/tree/master/squeezenet
  OrtSession* session;
  const wchar_t* model_path = L"squeezenet.onnx";
  CHECK_STATUS(OrtCreateSession(env, model_path, session_option, &session));

  //*************************************************************************
  // print model input layer (node names, types, shape etc.)
  size_t num_input_nodes;
  OrtStatus* status;
  OrtAllocator* allocator;
  OrtCreateDefaultAllocator(&allocator);

  // print number of model input nodes
  status = OrtSessionGetInputCount(session, &num_input_nodes);
  std::vector<const char*> input_node_names(num_input_nodes);
  std::vector<int64_t> input_node_dims;  // simplify... this model has only 1 input node {1, 3, 224, 224}.
                                         // Otherwise need vector<vector<>>

  printf("Number of inputs = %zu\n", num_input_nodes);

  // iterate over all input nodes
  for (int i = 0; i < (int)num_input_nodes; i++) {
    // print input node names
    char* input_name;
    status = OrtSessionGetInputName(session, i, allocator, &input_name);
    printf("Input %d : name=%s\n", i, input_name);
    input_node_names[i] = input_name;

    // print input node types
    OrtTypeInfo* typeinfo;
    status = OrtSessionGetInputTypeInfo(session, i, &typeinfo);
    const OrtTensorTypeAndShapeInfo* tensor_info = OrtCastTypeInfoToTensorInfo(typeinfo);
    ONNXTensorElementDataType type = OrtGetTensorElementType(tensor_info);
    printf("Input %d : type=%d\n", i, type);

    // print input shapes/dims
    size_t num_dims = OrtGetNumOfDimensions(tensor_info);
    printf("Input %d : num_dims=%zu\n", i, num_dims);
    input_node_dims.resize(num_dims);
    OrtGetDimensions(tensor_info, input_node_dims.data(), num_dims);
    for (int j = 0; j < (int)num_dims; j++)
      printf("Input %d : dim %d=%lld\n", i, j, (long long)input_node_dims[j]);

    OrtReleaseTypeInfo(typeinfo);
  }
  OrtReleaseAllocator(allocator);

  // Results should be...
  // Number of inputs = 1
  // Input 0 : name = data_0
  // Input 0 : type = 1
  // Input 0 : num_dims = 4
  // Input 0 : dim 0 = 1
  // Input 0 : dim 1 = 3
  // Input 0 : dim 2 = 224
  // Input 0 : dim 3 = 224

  //*************************************************************************
  // Similar operations to get output node information.
  // Use OrtSessionGetOutputCount(), OrtSessionGetOutputName()
  // and OrtSessionGetOutputTypeInfo() as shown above.
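  // (Illustrative sketch, not part of the original sample: the output-node
  // queries mirror the input-node queries above; their signatures are assumed
  // to match the input counterparts exactly.)
  //   size_t num_output_nodes;
  //   CHECK_STATUS(OrtSessionGetOutputCount(session, &num_output_nodes));
  //   char* output_name;
  //   CHECK_STATUS(OrtSessionGetOutputName(session, 0, allocator, &output_name));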

  //*************************************************************************
  // Score the model using sample data, and inspect values

  size_t input_tensor_size = 224 * 224 * 3;  // simplify ... using known dim values to calculate size
                                             // use OrtGetTensorShapeElementCount() to get official size!

  std::vector<float> input_tensor_values(input_tensor_size);
  std::vector<const char*> output_node_names = {"softmaxout_1"};

  // initialize input data with values in [0.0, 1.0]
  for (unsigned int i = 0; i < input_tensor_size; i++)
    input_tensor_values[i] = (float)i / (input_tensor_size + 1);

  // create input tensor object from data values
  OrtAllocatorInfo* allocator_info;
  CHECK_STATUS(OrtCreateCpuAllocatorInfo(OrtArenaAllocator, OrtMemTypeDefault, &allocator_info));
  OrtValue* input_tensor = NULL;
  CHECK_STATUS(OrtCreateTensorWithDataAsOrtValue(allocator_info, input_tensor_values.data(), input_tensor_size * sizeof(float), input_node_dims.data(), 4, ONNX_TENSOR_ELEMENT_DATA_TYPE_FLOAT, &input_tensor));
  assert(OrtIsTensor(input_tensor));
  OrtReleaseAllocatorInfo(allocator_info);

  // score model & input tensor, get back output tensor
  OrtValue* output_tensor = NULL;
  CHECK_STATUS(OrtRun(session, NULL, input_node_names.data(), (const OrtValue* const*)&input_tensor, 1, output_node_names.data(), 1, &output_tensor));
  assert(OrtIsTensor(output_tensor));

  // Get pointer to output tensor float values
  float* floatarr;
  OrtGetTensorMutableData(output_tensor, (void**)&floatarr);
  assert(fabsf(floatarr[0] - 0.000045f) < 1e-6);  // fabsf, not abs: abs would truncate the float difference to 0

  // print scores for first 5 classes
  for (int i = 0; i < 5; i++)
    printf("Score for class [%d] = %f\n", i, floatarr[i]);

  // Results should be as below...
  // Score for class[0] = 0.000045
  // Score for class[1] = 0.003846
  // Score for class[2] = 0.000125
  // Score for class[3] = 0.001180
  // Score for class[4] = 0.001317

  OrtReleaseValue(output_tensor);
  OrtReleaseValue(input_tensor);
  OrtReleaseSession(session);
  OrtReleaseEnv(env);
  printf("Done!\n");
  return 0;
}
@@ -0,0 +1,111 @@
<?xml version="1.0" encoding="utf-8"?>
<Project DefaultTargets="Build" ToolsVersion="15.0" xmlns="http://schemas.microsoft.com/developer/msbuild/2003">

<PropertyGroup>
<OnnxRuntimeCsharpRoot>$(MSBuildThisFileDirectory)..\..</OnnxRuntimeCsharpRoot>
</PropertyGroup>
<Import Project="..\packages\Microsoft.ML.OnnxRuntime.$(CurrentOnnxRuntimeVersion)\build\native\Microsoft.ML.OnnxRuntime.props" Condition="Exists('..\packages\Microsoft.ML.OnnxRuntime.$(CurrentOnnxRuntimeVersion)\build\native\Microsoft.ML.OnnxRuntime.props')" />

<ItemGroup Label="ProjectConfigurations">
<ProjectConfiguration Include="Debug|x64">
<Configuration>Debug</Configuration>
<Platform>x64</Platform>
</ProjectConfiguration>
<ProjectConfiguration Include="Release|x64">
<Configuration>Release</Configuration>
<Platform>x64</Platform>
</ProjectConfiguration>
</ItemGroup>
<PropertyGroup Label="Globals">
<VCProjectVersion>15.0</VCProjectVersion>
<ProjectGuid>{B8CA7F10-0171-4EA5-8662-5A9942DDF415}</ProjectGuid>
<Keyword>Win32Proj</Keyword>
<RootNamespace>MicrosoftMLOnnxRuntimeEndToEndTestsRunCapi</RootNamespace>
<WindowsTargetPlatformVersion>10.0.17763.0</WindowsTargetPlatformVersion>
</PropertyGroup>
<Import Project="$(VCTargetsPath)\Microsoft.Cpp.Default.props" />
<PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug|x64'" Label="Configuration">
<ConfigurationType>Application</ConfigurationType>
<UseDebugLibraries>true</UseDebugLibraries>
<PlatformToolset>v141</PlatformToolset>
<CharacterSet>Unicode</CharacterSet>
</PropertyGroup>
<PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release|x64'" Label="Configuration">
<ConfigurationType>Application</ConfigurationType>
<UseDebugLibraries>false</UseDebugLibraries>
<PlatformToolset>v141</PlatformToolset>
<WholeProgramOptimization>true</WholeProgramOptimization>
<CharacterSet>Unicode</CharacterSet>
</PropertyGroup>
<Import Project="$(VCTargetsPath)\Microsoft.Cpp.props" />
<ImportGroup Label="ExtensionSettings">
</ImportGroup>
<ImportGroup Label="Shared">
</ImportGroup>
<ImportGroup Label="PropertySheets" Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">
<Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />
</ImportGroup>
<ImportGroup Label="PropertySheets" Condition="'$(Configuration)|$(Platform)'=='Release|x64'">
<Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />
</ImportGroup>
<PropertyGroup Label="UserMacros" />
<PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">
<LinkIncremental>true</LinkIncremental>
</PropertyGroup>
<PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release|x64'">
<LinkIncremental>false</LinkIncremental>
</PropertyGroup>
<ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">
<ClCompile>
<PrecompiledHeader>NotUsing</PrecompiledHeader>
<WarningLevel>Level3</WarningLevel>
<Optimization>Disabled</Optimization>
<SDLCheck>true</SDLCheck>
<PreprocessorDefinitions>_DEBUG;_CONSOLE;%(PreprocessorDefinitions)</PreprocessorDefinitions>
<ConformanceMode>true</ConformanceMode>
</ClCompile>
<Link>
<SubSystem>Console</SubSystem>
<GenerateDebugInformation>true</GenerateDebugInformation>
</Link>
</ItemDefinitionGroup>
<ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Release|x64'">
<ClCompile>
<PrecompiledHeader>NotUsing</PrecompiledHeader>
<WarningLevel>Level3</WarningLevel>
<Optimization>MaxSpeed</Optimization>
<FunctionLevelLinking>true</FunctionLevelLinking>
<IntrinsicFunctions>true</IntrinsicFunctions>
<SDLCheck>true</SDLCheck>
<PreprocessorDefinitions>NDEBUG;_CONSOLE;%(PreprocessorDefinitions)</PreprocessorDefinitions>
<ConformanceMode>true</ConformanceMode>
</ClCompile>
<Link>
<SubSystem>Console</SubSystem>
<EnableCOMDATFolding>true</EnableCOMDATFolding>
<OptimizeReferences>true</OptimizeReferences>
<GenerateDebugInformation>true</GenerateDebugInformation>
</Link>
</ItemDefinitionGroup>
<ItemGroup>
<ClCompile Include="C_Api_Sample.cpp" />
</ItemGroup>
<ItemGroup>
<None Include="$(OnnxRuntimeCSharpRoot)\testdata\*">
<CopyToOutputDirectory>Always</CopyToOutputDirectory>
<Visible>false</Visible>
</None>
<None Include="packages.config" />
</ItemGroup>
<Import Project="$(VCTargetsPath)\Microsoft.Cpp.targets" />
<ImportGroup Label="ExtensionTargets">
<Import Project="..\packages\Microsoft.ML.OnnxRuntime.$(CurrentOnnxRuntimeVersion)\build\native\Microsoft.ML.OnnxRuntime.targets" Condition="Exists('..\packages\Microsoft.ML.OnnxRuntime.$(CurrentOnnxRuntimeVersion)\build\native\Microsoft.ML.OnnxRuntime.targets')" />
</ImportGroup>
<Target Name="EnsureNuGetPackageBuildImports" BeforeTargets="PrepareForBuild">
<PropertyGroup>
<ErrorText>This project references NuGet package(s) that are missing on this computer. Use NuGet Package Restore to download them. For more information, see http://go.microsoft.com/fwlink/?LinkID=322105. The missing file is {0}.</ErrorText>
</PropertyGroup>
<Error Condition="!Exists('..\packages\Microsoft.ML.OnnxRuntime.$(CurrentOnnxRuntimeVersion)\build\native\Microsoft.ML.OnnxRuntime.props')" Text="$([System.String]::Format('$(ErrorText)', '..\packages\Microsoft.ML.OnnxRuntime.$(CurrentOnnxRuntimeVersion)\build\native\Microsoft.ML.OnnxRuntime.props'))" />
<Error Condition="!Exists('..\packages\Microsoft.ML.OnnxRuntime.$(CurrentOnnxRuntimeVersion)\build\native\Microsoft.ML.OnnxRuntime.targets')" Text="$([System.String]::Format('$(ErrorText)', '..\packages\Microsoft.ML.OnnxRuntime.$(CurrentOnnxRuntimeVersion)\build\native\Microsoft.ML.OnnxRuntime.targets'))" />
</Target>
</Project>
@@ -36,15 +36,15 @@ for /f "delims=" %%i in ('type "%templateFile%" ^& break ^> "packages.config" ')
echo on

REM Restore NuGet Packages
-nuget restore -PackagesDirectory ..\packages -Source %LocalNuGetRepo% Microsoft.ML.OnnxRuntime.EndToEndTests.Capi.vcxproj
+nuget restore -PackagesDirectory ..\packages -Source %LocalNuGetRepo% Microsoft.ML.OnnxRuntime.EndToEndTests.RunCapi.vcxproj
if NOT %ERRORLEVEL% EQU 0 (
echo "Error:Nuget restore failed"
popd
EXIT /B 1
)

REM Build Native project
-msbuild Microsoft.ML.OnnxRuntime.EndToEndTests.Capi.vcxproj
+msbuild Microsoft.ML.OnnxRuntime.EndToEndTests.RunCapi.vcxproj
if NOT %ERRORLEVEL% EQU 0 (
echo "Error:MSBuild failed to compile project"
popd
@@ -54,7 +54,8 @@ if NOT %ERRORLEVEL% EQU 0 (

REM Run Unit Tests
pushd x64\Debug
-vstest.console.exe /platform:x64 Microsoft.ML.OnnxRuntime.EndToEndTests.Capi.dll
+REM vstest.console.exe /platform:x64 Microsoft.ML.OnnxRuntime.EndToEndTests.Capi.dll
+.\Microsoft.ML.OnnxRuntime.EndToEndTests.RunCapi.exe
if NOT %ERRORLEVEL% EQU 0 (
echo "Unit test failure: %ERRORLEVEL%"
popd
16 changes: 6 additions & 10 deletions docs/AddingCustomOp.md
@@ -2,16 +2,12 @@ Adding a new op
===============

## A new op can be written and registered with ONNXRuntime in the following 3 ways
-### 1. Using a dynamic shared library
-* First write the implementation of the op and schema (if required) and assemble them in a shared library.
-See [this](../onnxruntime/test/custom_op_shared_lib) for an example. Currently
-this is supported for Linux only.
-
-Example of creating a shared lib using g++ on Linux:
-```g++ -std=c++14 -shared test_custom_op.cc -o test_custom_op.so -fPIC -I. -Iinclude/onnxruntime -L. -lonnxruntime -DONNX_ML -DONNX_NAMESPACE=onnx```
-
-* Register the shared lib with ONNXRuntime.
-See [this](../onnxruntime/test/shared_lib/test_inference.cc) for an example.
+### 1. Using the experimental custom op API in the C API (onnxruntime_c_api.h)
+Note: These APIs are experimental and will change in the next release. They're released now for feedback and experimentation.
+* Create an OrtCustomOpDomain with the domain name used by the custom ops
+* Create an OrtCustomOp structure for each op and add them to the OrtCustomOpDomain with OrtCustomOpDomain_Add
+* Call OrtAddCustomOpDomain to add the custom domain of ops to the session options
+See [this](../onnxruntime/test/custom_op_shared_lib/test_custom_op.cc) for an example.
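
A minimal sketch of how these three steps fit together (error handling elided; `OrtCreateCustomOpDomain` and the `my_custom_op` definition are assumptions based on the step names above — onnxruntime_c_api.h is the authoritative reference):

```c
#include <onnxruntime_c_api.h>

extern OrtCustomOp my_custom_op;  // hypothetical OrtCustomOp filled in with your kernel callbacks

void RegisterMyOps(OrtSessionOptions* session_options) {
  // 1. Create a domain whose name matches the domain of the custom nodes in the model.
  OrtCustomOpDomain* domain = NULL;
  OrtCreateCustomOpDomain("my.domain", &domain);  // assumed creation API; each call returns OrtStatus*
  // 2. Add each OrtCustomOp to the domain.
  OrtCustomOpDomain_Add(domain, &my_custom_op);
  // 3. Attach the domain to the session options before creating the session.
  OrtAddCustomOpDomain(session_options, domain);
}
```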

### 2. Using RegisterCustomRegistry API
* Implement your kernel and schema (if required) using the OpKernel and OpSchema APIs (headers are in the include folder).
6 changes: 6 additions & 0 deletions docs/CSharp_API.md
@@ -4,6 +4,12 @@ The ONNX runtime provides a C# .Net binding for running inference on ONNX models
## NuGet Package
The Microsoft.ML.OnnxRuntime NuGet package includes the precompiled binaries for ONNX Runtime, with libraries for Windows and Linux platforms with X64 CPUs. The APIs conform to .Net Standard 1.1.

+## Sample Code
+
+The unit tests contain several examples of loading models, inspecting input/output node shapes and types, as well as constructing tensors for scoring.
+
+* [../csharp/test/Microsoft.ML.OnnxRuntime.Tests/InferenceTest.cs#L54](../csharp/test/Microsoft.ML.OnnxRuntime.Tests/InferenceTest.cs#L54)
+
## Getting Started
Here is a simple tutorial for getting started with running inference on an existing ONNX model with given input data. The model is typically trained using any of the well-known training frameworks and exported into the ONNX format. To start scoring using the model, open a session using the `InferenceSession` class, passing in the file path to the model as a parameter.
