diff --git a/CODEOWNERS b/CODEOWNERS
index d09058ed94f7f..e1f79a504ad8b 100644
--- a/CODEOWNERS
+++ b/CODEOWNERS
@@ -1,6 +1,5 @@
# Mobile
-/onnxruntime/test/testdata/kernel_def_hashes/ @microsoft/onnxruntime-mobile
-/onnxruntime/core/framework/kernel_def_hash_helpers.* @microsoft/onnxruntime-mobile
+/onnxruntime/core/flatbuffers/schema/ort.fbs @microsoft/onnxruntime-mobile
# Contrib Ops
/onnxruntime/core/graph/contrib_ops/nhwc_schema_defs.cc @microsoft/onnxruntime-mlas
diff --git a/docs/ORT_Format_Update_in_1.13.md b/docs/ORT_Format_Update_in_1.13.md
new file mode 100644
index 0000000000000..fa67da927a1ef
--- /dev/null
+++ b/docs/ORT_Format_Update_in_1.13.md
@@ -0,0 +1,12 @@
+# ORT Format Update in 1.13
+
+In ONNX Runtime 1.13, there was a breaking change to the
+[ORT format](https://onnxruntime.ai/docs/reference/ort-format-models.html) in order to enable additional execution
+providers with statically registered kernels in a minimal build.
+More details can be found [here](../onnxruntime/core/flatbuffers/schema/README.md#version-5).
+
+Unfortunately, this means that any older models (prior to ORT format version 5) will no longer work with ONNX Runtime
+1.13 or later and must be re-converted.
+Please refer to the instructions
+[here](https://onnxruntime.ai/docs/reference/ort-format-models.html#convert-onnx-models-to-ort-format)
+on how to convert an ONNX model to ORT format.
diff --git a/docs/OperatorKernels.md b/docs/OperatorKernels.md
index 680979ca8a2cf..dc8f312190163 100644
--- a/docs/OperatorKernels.md
+++ b/docs/OperatorKernels.md
@@ -418,7 +418,7 @@ Do not modify directly.*
|Inverse|*in* X:**T**
*out* Y:**T**|1+|**T** = tensor(double), tensor(float), tensor(float16)|
|MatMulInteger16|*in* A:**T1**
*in* B:**T2**
*out* Y:**T3**|1+|**T1** = tensor(int16)
**T2** = tensor(int16)
**T3** = tensor(int32)|
|MatMulIntegerToFloat|*in* A:**T1**
*in* B:**T2**
*in* a_scale:**T3**
*in* b_scale:**T3**
*in* a_zero_point:**T1**
*in* b_zero_point:**T2**
*in* bias:**T3**
*out* Y:**T3**|1+|**T1** = tensor(int8), tensor(uint8)
**T2** = tensor(int8), tensor(uint8)
**T3** = tensor(float)|
-|MaxpoolWithMask|*in* X:**T**
*in* M:**tensor(int32)**
*out* Y:**T**|1+|**X** = tensor(float)|
+|MaxpoolWithMask|*in* X:**T**
*in* M:**tensor(int32)**
*out* Y:**T**|1+|**T** = tensor(float)|
|MurmurHash3|*in* X:**T1**
*out* Y:**T2**|1+|**T1** = tensor(double), tensor(float), tensor(int32), tensor(int64), tensor(string), tensor(uint32), tensor(uint64)
**T2** = tensor(int32), tensor(uint32)|
|NGramRepeatBlock|*in* input_ids:**Tid**
*in* scores:**T**
*out* scores_out:**T**|1+|**T** = tensor(float)
**Tid** = tensor(int64)|
|NhwcMaxPool|*in* x:**T**
*out* y:**T**|1+|**T** = tensor(int8), tensor(uint8)|
diff --git a/include/onnxruntime/core/common/common.h b/include/onnxruntime/core/common/common.h
index 501634bf32509..d411ed2451cf3 100644
--- a/include/onnxruntime/core/common/common.h
+++ b/include/onnxruntime/core/common/common.h
@@ -17,9 +17,10 @@
#pragma once
-#include
-#include
#include
+#include
+#include
+#include
#include
#include
#include
@@ -28,8 +29,8 @@
#include
#include
#include
+#include
#include
-#include
#include "core/common/code_location.h"
#include "core/common/exceptions.h"
@@ -279,9 +280,10 @@ constexpr size_t kMaxStrLen = 2048;
// Returns whether `key` is in `container`.
// Like C++20's map/set contains() member function.
template typename AssociativeContainer>
-inline bool Contains(const AssociativeContainer& container, const Key& key) {
- return container.find(key) != container.end();
+ template typename AssociativeContainer,
+ typename LookupKey>
+inline bool Contains(const AssociativeContainer& container, LookupKey&& key) {
+ return container.find(std::forward(key)) != container.end();
}
} // namespace onnxruntime
diff --git a/include/onnxruntime/core/common/hash_combine.h b/include/onnxruntime/core/common/hash_combine.h
new file mode 100644
index 0000000000000..5662a329ea77f
--- /dev/null
+++ b/include/onnxruntime/core/common/hash_combine.h
@@ -0,0 +1,21 @@
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License.
+
+#pragma once
+
+namespace onnxruntime {
+
+// Combine hash value `seed` with hash value `h`, updating `seed` in place.
+// TODO(edgchen1) find a better implementation? e.g., see a more recent version of boost::hash_combine()
+inline void HashCombineWithHashValue(size_t h, size_t& seed) {
+ seed ^= h + 0x9e3779b9 + (seed << 6) + (seed >> 2);
+}
+
+// Combine hash value `seed` with the hash value of `value`, updating `seed` in place.
+// The hash value computation is specified by the `Hash` template parameter.
+template >
+inline void HashCombine(const T& value, size_t& seed) {
+ HashCombineWithHashValue(Hash{}(value), seed);
+}
+
+} // namespace onnxruntime
diff --git a/include/onnxruntime/core/common/parse_string.h b/include/onnxruntime/core/common/parse_string.h
index edb34724f1929..941e3f3377ecc 100644
--- a/include/onnxruntime/core/common/parse_string.h
+++ b/include/onnxruntime/core/common/parse_string.h
@@ -5,6 +5,7 @@
#include
#include
+#include
#include
#include "core/common/common.h"
@@ -15,7 +16,7 @@ namespace onnxruntime {
* Tries to parse a value from an entire string.
*/
template
-bool TryParseStringWithClassicLocale(const std::string& str, T& value) {
+bool TryParseStringWithClassicLocale(std::string_view str, T& value) {
if constexpr (std::is_integral::value && std::is_unsigned::value) {
// if T is unsigned integral type, reject negative values which will wrap
if (!str.empty() && str[0] == '-') {
@@ -28,7 +29,7 @@ bool TryParseStringWithClassicLocale(const std::string& str, T& value) {
return false;
}
- std::istringstream is{str};
+ std::istringstream is{std::string{str}};
is.imbue(std::locale::classic());
T parsed_value{};
@@ -43,12 +44,12 @@ bool TryParseStringWithClassicLocale(const std::string& str, T& value) {
return true;
}
-inline bool TryParseStringWithClassicLocale(const std::string& str, std::string& value) {
+inline bool TryParseStringWithClassicLocale(std::string_view str, std::string& value) {
value = str;
return true;
}
-inline bool TryParseStringWithClassicLocale(const std::string& str, bool& value) {
+inline bool TryParseStringWithClassicLocale(std::string_view str, bool& value) {
if (str == "0" || str == "False" || str == "false") {
value = false;
return true;
@@ -66,7 +67,7 @@ inline bool TryParseStringWithClassicLocale(const std::string& str, bool& value)
* Parses a value from an entire string.
*/
template
-Status ParseStringWithClassicLocale(const std::string& s, T& value) {
+Status ParseStringWithClassicLocale(std::string_view s, T& value) {
ORT_RETURN_IF_NOT(TryParseStringWithClassicLocale(s, value), "Failed to parse value: \"", value, "\"");
return Status::OK();
}
@@ -75,7 +76,7 @@ Status ParseStringWithClassicLocale(const std::string& s, T& value) {
* Parses a value from an entire string.
*/
template
-T ParseStringWithClassicLocale(const std::string& s) {
+T ParseStringWithClassicLocale(std::string_view s) {
T value{};
ORT_THROW_IF_ERROR(ParseStringWithClassicLocale(s, value));
return value;
diff --git a/include/onnxruntime/core/framework/data_types.h b/include/onnxruntime/core/framework/data_types.h
index 80086a31379d5..f4ca87eb9a5ef 100644
--- a/include/onnxruntime/core/framework/data_types.h
+++ b/include/onnxruntime/core/framework/data_types.h
@@ -458,6 +458,7 @@ class TensorType : public TensorTypeBase {
#if defined(DISABLE_OPTIONAL_TYPE)
+// TODO is this still needed after removing kernel def hashes?
/// Common base-class for all disabled types. We need DataTypeImpl::ToString to work in a minimal build
/// with disabled types to keep the ORT format model kernel hashes stable.
class DisabledTypeBase : public DataTypeImpl {
diff --git a/include/onnxruntime/core/framework/execution_provider.h b/include/onnxruntime/core/framework/execution_provider.h
index 6d3fa92f9f7d1..57e9d5e564206 100644
--- a/include/onnxruntime/core/framework/execution_provider.h
+++ b/include/onnxruntime/core/framework/execution_provider.h
@@ -15,11 +15,10 @@
namespace onnxruntime {
class GraphViewer;
-class Node;
struct ComputeCapability;
class KernelRegistry;
-class KernelRegistryManager;
-
+struct KernelCreateInfo;
+class Node;
} // namespace onnxruntime
#else
#include
@@ -89,6 +88,19 @@ class IExecutionProvider {
return nullptr;
}
+ /**
+ * Interface for performing kernel lookup within kernel registries.
+ * Abstracts away lower-level details about kernel registries and kernel matching.
+ */
+ class IKernelLookup {
+ public:
+ /**
+ * Given `node`, try to find a matching kernel for this EP.
+ * The return value is non-null if and only if a matching kernel was found.
+ */
+ virtual const KernelCreateInfo* LookUpKernel(const Node& node) const = 0;
+ };
+
/**
Get execution provider's capability for the specified .
Return a bunch of IndexedSubGraphs <*this> execution provider can run if
@@ -96,22 +108,24 @@ class IExecutionProvider {
contains more than one node. The node indexes contained in sub-graphs may
have overlap, and it's ONNXRuntime's responsibility to do the partition
and decide whether a node will be assigned to <*this> execution provider.
+ For kernels registered in a kernel registry, `kernel_lookup` must be used
+ to find a matching kernel for this EP.
*/
virtual std::vector>
GetCapability(const onnxruntime::GraphViewer& graph_viewer,
- const std::vector& kernel_registries) const;
+ const IKernelLookup& kernel_lookup) const;
/**
Get kernel registry per execution provider type.
The KernelRegistry share pointer returned is shared across sessions.
- NOTE: this is a tricky but final solution to achieve following goals,
+ NOTE: this approach was taken to achieve the following goals,
1. The execution provider type based kernel registry should be shared
across sessions.
Only one copy of this kind of kernel registry exists in ONNXRuntime
with multiple sessions/models.
2. Adding an execution provider into ONNXRuntime does not need to touch ONNXRuntime
- frameowrk/session code.
+ framework/session code.
3. onnxruntime (framework/session) does not depend on any specific
execution provider lib.
*/
diff --git a/include/onnxruntime/core/framework/kernel_def_builder.h b/include/onnxruntime/core/framework/kernel_def_builder.h
index 8fc45889c8ad3..8b6a2571a5587 100644
--- a/include/onnxruntime/core/framework/kernel_def_builder.h
+++ b/include/onnxruntime/core/framework/kernel_def_builder.h
@@ -53,13 +53,15 @@ class KernelDef {
return provider_type_;
}
+ // TODO(edgchen1) do we need both TypeConstraints() and EnabledTypeConstraints()?
+
// type constraints with types supported by default
- const std::map>& TypeConstraints() const {
+ const std::unordered_map>& TypeConstraints() const {
return default_type_constraints_;
}
// type constraints with types supported in this build
- const std::map>& EnabledTypeConstraints() const {
+ const std::unordered_map>& EnabledTypeConstraints() const {
return enabled_type_constraints_;
}
@@ -108,19 +110,9 @@ class KernelDef {
bool IsConflict(const KernelDef& other) const;
- HashValue GetHash() const noexcept {
- // if we need to support different hash versions we can update CalculateHash to take a version number
- // and calculate any non-default versions dynamically. we only use this during kernel lookup so
- // it's not performance critical
- return hash_;
- }
-
private:
friend class KernelDefBuilder;
- // called once by KernelDefBuilder::Build
- void CalculateHash();
-
// The operator name supported by <*this> kernel..
std::string op_name_;
@@ -139,18 +131,11 @@ class KernelDef {
std::string provider_type_;
// The data types that are supported by default for inputs/outputs.
- // Key is input/output name defined in op schema, Value are supported types.
- // note: std::map as we need the order to be deterministic for the hash
- // Note: default_type_constraints_ are used to calculate the kernel hash so that the hash is
- // stable across builds with and without kernel type reduction enabled.
- std::map> default_type_constraints_;
+ // Key is input/output/type constraint name defined in op schema, Value are supported types.
+ std::unordered_map> default_type_constraints_;
// the type constraints that are supported in this build (enabled) for the kernel
- std::map> enabled_type_constraints_;
-
- // optional alternate type constraints to use to calculate the hash instead of default_type_constraints_
- // note: this provides a way to update the default type constraints while preserving the hash value
- optional>> hash_type_constraints_;
+ std::unordered_map> enabled_type_constraints_;
// An element means that output j reuses the memory of input i.
std::vector> inplace_map_;
@@ -186,9 +171,6 @@ class KernelDef {
OrtMemType default_inputs_mem_type_{OrtMemTypeDefault};
// Default memory type for all outputs
OrtMemType default_outputs_mem_type_{OrtMemTypeDefault};
-
- // hash of kernel definition for lookup in minimal build
- HashValue hash_ = 0;
};
class KernelDefBuilder {
@@ -259,17 +241,6 @@ class KernelDefBuilder {
KernelDefBuilder& TypeConstraint(const std::string& arg_name, MLDataType default_type);
KernelDefBuilder& TypeConstraint(const char* arg_name, MLDataType default_type);
- /**
- Specify the original set of types that this kernel supports by default to use when computing the kernel def hash.
- The set of types supported by default may change over time, but the hash should stay the same.
- */
- KernelDefBuilder& FixedTypeConstraintForHash(
- const std::string& arg_name,
- const std::vector& default_types_for_hash);
- KernelDefBuilder& FixedTypeConstraintForHash(
- const char* arg_name,
- const std::vector& default_types_for_hash);
-
/**
Inplace mapping from inputs to outputs allowed.
It means that uplayer runtime could do memory in-place optimization
@@ -392,7 +363,6 @@ class KernelDefBuilder {
Return the kernel definition, passing ownership of the KernelDef to the caller
*/
std::unique_ptr Build() {
- kernel_def_->CalculateHash();
return std::move(kernel_def_);
}
diff --git a/include/onnxruntime/core/framework/kernel_registry.h b/include/onnxruntime/core/framework/kernel_registry.h
index 224ce00bc7028..68b610ac4b278 100644
--- a/include/onnxruntime/core/framework/kernel_registry.h
+++ b/include/onnxruntime/core/framework/kernel_registry.h
@@ -3,6 +3,8 @@
#pragma once
+#include
+
#include "core/framework/op_kernel.h"
namespace onnxruntime {
@@ -10,9 +12,10 @@ namespace onnxruntime {
using KernelCreateMap = std::multimap;
using KernelDefHashes = std::vector>;
+class IKernelTypeStrResolver;
+
/**
* Each provider has a KernelRegistry. Often, the KernelRegistry only belongs to that specific provider.
- *
*/
class KernelRegistry {
public:
@@ -23,37 +26,28 @@ class KernelRegistry {
Status Register(KernelCreateInfo&& create_info);
-#if !defined(ORT_MINIMAL_BUILD)
- static bool HasImplementationOf(const KernelRegistry& r, const Node& node,
- ProviderType exec_provider) {
- const KernelCreateInfo* info;
- Status st = r.TryFindKernel(node, exec_provider, &info);
- return st.IsOK();
- }
-
- // factory functions should always return a unique_ptr for maximum flexibility
- // for its clients unless the factory is managing the lifecycle of the pointer
- // itself.
- // TODO(Task:132) Make usage of unique_ptr/shared_ptr as out param consistent
- Status TryCreateKernel(const Node& node, const IExecutionProvider& execution_provider,
- const std::unordered_map& constant_initialized_tensors,
- const OrtValueNameIdxMap& mlvalue_name_idx_map, FuncManager& funcs_mgr,
- const DataTransferManager& data_transfer_mgr,
- std::unique_ptr& op_kernel) const;
+ // TODO(edgchen1) for TryFindKernel(), consider using `out` != nullptr as indicator of whether kernel was found and
+ // Status as an indication of failure
// Check if an execution provider can create kernel for a node and return the kernel if so
Status TryFindKernel(const Node& node, ProviderType exec_provider,
+ const IKernelTypeStrResolver& kernel_type_str_resolver,
const KernelCreateInfo** out) const;
+ static bool HasImplementationOf(const KernelRegistry& r, const Node& node,
+ ProviderType exec_provider,
+ const IKernelTypeStrResolver& kernel_type_str_resolver) {
+ const KernelCreateInfo* info;
+ Status st = r.TryFindKernel(node, exec_provider, kernel_type_str_resolver, &info);
+ return st.IsOK();
+ }
+
+#if !defined(ORT_MINIMAL_BUILD)
// Find KernelCreateInfo in instant mode
Status TryFindKernel(const std::string& op_name, const std::string& domain, const int& version,
const std::unordered_map& type_constraints,
ProviderType exec_provider, const KernelCreateInfo** out) const;
-
-#endif
-
- // Try to find the kernel given a kernel def hash.
- bool TryFindKernelByHash(HashValue kernel_def_hash, const KernelCreateInfo** out) const;
+#endif // !defined(ORT_MINIMAL_BUILD)
bool IsEmpty() const { return kernel_creator_fn_map_.empty(); }
@@ -64,11 +58,7 @@ class KernelRegistry {
}
#endif
- // Get sorted kernel def key and hash pairs.
- KernelDefHashes ExportKernelDefHashes() const;
-
private:
-#if !defined(ORT_MINIMAL_BUILD)
// Check whether the types of inputs/outputs of the given node match the extra
// type-constraints of the given kernel. This serves two purposes: first, to
// select the right kernel implementation based on the types of the arguments
@@ -79,16 +69,12 @@ class KernelRegistry {
//
// Note that this is not intended for type-checking the node against the ONNX
// type specification of the corresponding op, which is done before this check.
- //
- // if this function is called before graph partition, then node.provider is not set.
- // In this case, kernel_def.provider must equal to exec_provider
- // otherwise, kernel_def.provider must equal to node.provider. exec_provider is ignored.
static bool VerifyKernelDef(const Node& node,
const KernelDef& kernel_def,
+ const IKernelTypeStrResolver& kernel_type_str_resolver,
std::string& error_str);
-#endif
- static std::string GetMapKey(const std::string& op_name, const std::string& domain, const std::string& provider) {
+ static std::string GetMapKey(std::string_view op_name, std::string_view domain, std::string_view provider) {
std::string key(op_name);
// use the kOnnxDomainAlias of 'ai.onnx' instead of kOnnxDomain's empty string
key.append(1, ' ').append(domain.empty() ? kOnnxDomainAlias : domain).append(1, ' ').append(provider);
@@ -101,8 +87,5 @@ class KernelRegistry {
// Kernel create function map from op name to kernel creation info.
// key is opname+domain_name+provider_name
KernelCreateMap kernel_creator_fn_map_;
-
- // map from kernel def hash to entry in kernel_creator_fn_map_
- std::unordered_map kernel_def_hash_lookup_;
};
} // namespace onnxruntime
diff --git a/include/onnxruntime/core/framework/ortmemoryinfo.h b/include/onnxruntime/core/framework/ortmemoryinfo.h
index 32b7aab7c4118..6dd259ddc15e4 100644
--- a/include/onnxruntime/core/framework/ortmemoryinfo.h
+++ b/include/onnxruntime/core/framework/ortmemoryinfo.h
@@ -3,6 +3,10 @@
#pragma once
+#include
+
+#include "core/common/hash_combine.h"
+
struct OrtMemoryInfo {
OrtMemoryInfo() = default; // to allow default construction of Tensor
@@ -38,17 +42,13 @@ struct OrtMemoryInfo {
return strcmp(name, other.name) < 0;
}
- static void HashCombine(size_t h, size_t& seed) {
- seed ^= h + 0x9e3779b9 + (seed << 6) + (seed >> 2);
- }
-
// This is to make OrtMemoryInfo a valid key in hash tables
// we ignore device id
size_t Hash() const {
auto h = std::hash()(alloc_type);
- HashCombine(std::hash()(mem_type), h);
- HashCombine(std::hash()(id), h);
- HashCombine(std::hash()(name), h);
+ onnxruntime::HashCombine(mem_type, h);
+ onnxruntime::HashCombine(id, h);
+ onnxruntime::HashCombine(name, h);
return h;
}
diff --git a/include/onnxruntime/core/graph/graph.h b/include/onnxruntime/core/graph/graph.h
index 8e930ce5a8146..7dda32de41b94 100644
--- a/include/onnxruntime/core/graph/graph.h
+++ b/include/onnxruntime/core/graph/graph.h
@@ -1333,20 +1333,6 @@ class Graph {
RuntimeOptimizationRecordContainer& MutableRuntimeOptimizations() {
return runtime_optimizations_;
}
-
- // Stores information collected during the replay of loaded runtime optimizations
- struct RuntimeOptimizationReplayContext {
- std::unordered_map produced_node_index_to_kernel_def_hash{};
- size_t num_replayed_optimizations{};
- };
-
- const RuntimeOptimizationReplayContext& RuntimeOptimizationReplayCtx() const {
- return runtime_optimization_replay_context_;
- }
-
- RuntimeOptimizationReplayContext& MutableRuntimeOptimizationReplayCtx() {
- return runtime_optimization_replay_context_;
- }
#endif // !defined(ORT_MINIMAL_BUILD) || defined(ORT_EXTENDED_MINIMAL_BUILD)
// This friendship relationship should only be used to call Graph::Graph and
@@ -1588,8 +1574,6 @@ class Graph {
// Note: runtime_optimizations_ == *runtime_optimizations_ptr_ and must be initialized
std::unique_ptr runtime_optimizations_ptr_;
RuntimeOptimizationRecordContainer& runtime_optimizations_;
-
- RuntimeOptimizationReplayContext runtime_optimization_replay_context_;
#endif // !defined(ORT_MINIMAL_BUILD) || defined(ORT_EXTENDED_MINIMAL_BUILD)
#if !defined(ORT_MINIMAL_BUILD)
diff --git a/java/src/test/android/README.md b/java/src/test/android/README.md
index c5658be38660e..874ba7ba729fd 100644
--- a/java/src/test/android/README.md
+++ b/java/src/test/android/README.md
@@ -1,17 +1,18 @@
# Android Test Application for ORT-Mobile
-This directory contains a simple android application for testing [ONNX Runtime AAR package](https://www.onnxruntime.ai/docs/how-to/build.html#build-android-archive-aar).
+This directory contains a simple android application for testing [ONNX Runtime AAR package](https://onnxruntime.ai/docs/build/android.html#build-android-archive-aar).
## Background
-For general usage and build purpose of ORT-Mobile Android, please see the [documentation](https://www.onnxruntime.ai/docs/how-to/build.html#android) here.
+For general usage and build instructions for ORT-Mobile Android, please see the [documentation](https://onnxruntime.ai/docs/tutorials/mobile/).
### Test Android Application Overview
This android application is mainly aimed for testing:
- Model used: A simple [sigmoid ONNX model](https://github.com/onnx/onnx/blob/f9b0cc99344869c246b8f4011b8586a39841284c/onnx/backend/test/data/node/test_sigmoid/model.onnx) (converted to ORT format under `app\src\androidTest\assets` folder).
- - Here's a [documentation](https://github.com/microsoft/onnxruntime/blob/main/docs/ONNX_Runtime_for_Mobile_Platforms.md#1-create-ort-format-model-and-configuration-file-with-required-operators) about how you can convert an ONNX model into ORT format.
+ - Here's [documentation](https://onnxruntime.ai/docs/reference/ort-format-models.html#convert-onnx-models-to-ort-format) about how you can convert an ONNX model into ORT format.
+ - Run `python -m onnxruntime.tools.convert_onnx_models_to_ort --optimization_style=Fixed /path/to/model.onnx` and rename the resulting .ort file accordingly.
- Main test file: An android instrumentation test under `app\src\androidtest\java\ai.onnxruntime.example.javavalidator\SimpleTest.kt`
- The main dependency of this application is `onnxruntime` aar package under `app\libs`.
- The MainActivity of this application is set to be empty.
@@ -23,7 +24,7 @@ This android application is mainly aimed for testing:
### Building
-Use the android's [build instructions](https://www.onnxruntime.ai/docs/how-to/build.html#android-build-instructions) with `--build_java` and `--android_run_emulator` option.
+Use the android's [build instructions](https://onnxruntime.ai/docs/build/android.html) with `--build_java` and `--android_run_emulator` option.
Please note that you may need to set the `--android_abi=x86_64` (the default option is `arm64-v8a`). This is because android instrumentation test is run on an android emulator which requires an abi of `x86_64`.
diff --git a/java/src/test/android/app/src/androidTest/assets/sigmoid.ort b/java/src/test/android/app/src/androidTest/assets/sigmoid.ort
index 6336fed141a5e..70d7659bbee25 100644
Binary files a/java/src/test/android/app/src/androidTest/assets/sigmoid.ort and b/java/src/test/android/app/src/androidTest/assets/sigmoid.ort differ
diff --git a/js/README.md b/js/README.md
index d5b55af6e1452..9f1a6150a70ae 100644
--- a/js/README.md
+++ b/js/README.md
@@ -408,7 +408,7 @@ By default, ONNX Runtime React Native leverages ONNX Runtime Mobile package with
yarn bootstrap
```
- When testing with a custom built ONNX Runtime Android package, copy `/aar_out/MinSizeRel/com/microsoft/onnxruntime/onnxruntime-mobile//onnxruntime-mobile-.aar` into `/js/react_native/e2e/node_modules/onnxruntime-react-native/android/libs` directory. Using a custom built ONNX Runtime iOS package, copy `onnxruntime-mobile-c.zip` into `/js/react_native/local_pods` directory if it's not already done.
+ When testing with a custom built ONNX Runtime Android package, copy `/aar_out/MinSizeRel/com/microsoft/onnxruntime/onnxruntime-mobile//onnxruntime-mobile-.aar` into `/js/react_native/e2e/android/app/libs` directory. Using a custom built ONNX Runtime iOS package, copy `onnxruntime-mobile-c.zip` into `/js/react_native/local_pods` directory if it's not already done.
From `/js/react_native/e2e/android`, run e2e Android tests as follows,
diff --git a/js/node/test/e2e/simple-e2e-tests.ts b/js/node/test/e2e/simple-e2e-tests.ts
index dbbcdfcf8df09..70ac6ca1e0f94 100644
--- a/js/node/test/e2e/simple-e2e-tests.ts
+++ b/js/node/test/e2e/simple-e2e-tests.ts
@@ -11,73 +11,73 @@ import {assertDataEqual, TEST_DATA_ROOT} from '../test-utils';
const MODEL_TEST_TYPES_CASES:
Array<{model: string; type: Tensor.Type; input0: Tensor.DataType; expectedOutput0: Tensor.DataType}> = [
{
- model: path.join(TEST_DATA_ROOT, 'test_types_BOOL.pb'),
+ model: path.join(TEST_DATA_ROOT, 'test_types_bool.onnx'),
type: 'bool',
input0: Uint8Array.from([1, 0, 0, 1, 0]),
expectedOutput0: Uint8Array.from([1, 0, 0, 1, 0])
},
{
- model: path.join(TEST_DATA_ROOT, 'test_types_DOUBLE.pb'),
+ model: path.join(TEST_DATA_ROOT, 'test_types_double.onnx'),
type: 'float64',
input0: Float64Array.from([1.0, 2.0, 3.0, 4.0, 5.0]),
expectedOutput0: Float64Array.from([1.0, 2.0, 3.0, 4.0, 5.0])
},
{
- model: path.join(TEST_DATA_ROOT, 'test_types_FLOAT.pb'),
+ model: path.join(TEST_DATA_ROOT, 'test_types_float.onnx'),
type: 'float32',
input0: Float32Array.from([1.0, 2.0, 3.0, 4.0, 5.0]),
expectedOutput0: Float32Array.from([1.0, 2.0, 3.0, 4.0, 5.0])
},
{
- model: path.join(TEST_DATA_ROOT, 'test_types_INT8.pb'),
+ model: path.join(TEST_DATA_ROOT, 'test_types_int8.onnx'),
type: 'int8',
input0: Int8Array.from([1, -2, 3, 4, -5]),
expectedOutput0: Int8Array.from([1, -2, 3, 4, -5])
},
{
- model: path.join(TEST_DATA_ROOT, 'test_types_INT16.pb'),
+ model: path.join(TEST_DATA_ROOT, 'test_types_int16.onnx'),
type: 'int16',
input0: Int16Array.from([1, -2, 3, 4, -5]),
expectedOutput0: Int16Array.from([1, -2, 3, 4, -5])
},
{
- model: path.join(TEST_DATA_ROOT, 'test_types_INT32.pb'),
+ model: path.join(TEST_DATA_ROOT, 'test_types_int32.onnx'),
type: 'int32',
input0: Int32Array.from([1, -2, 3, 4, -5]),
expectedOutput0: Int32Array.from([1, -2, 3, 4, -5])
},
{
- model: path.join(TEST_DATA_ROOT, 'test_types_INT64.pb'),
+ model: path.join(TEST_DATA_ROOT, 'test_types_int64.onnx'),
type: 'int64',
input0: BigInt64Array.from([BigInt(1), BigInt(-2), BigInt(3), BigInt(4), BigInt(-5)]),
expectedOutput0: BigInt64Array.from([BigInt(1), BigInt(-2), BigInt(3), BigInt(4), BigInt(-5)])
},
{
- model: path.join(TEST_DATA_ROOT, 'test_types_STRING.pb'),
+ model: path.join(TEST_DATA_ROOT, 'test_types_string.onnx'),
type: 'string',
input0: ['a', 'b', 'c', 'd', 'e'],
expectedOutput0: ['a', 'b', 'c', 'd', 'e']
},
{
- model: path.join(TEST_DATA_ROOT, 'test_types_UINT8.pb'),
+ model: path.join(TEST_DATA_ROOT, 'test_types_uint8.onnx'),
type: 'uint8',
input0: Uint8Array.from([1, 2, 3, 4, 5]),
expectedOutput0: Uint8Array.from([1, 2, 3, 4, 5])
},
{
- model: path.join(TEST_DATA_ROOT, 'test_types_UINT16.pb'),
+ model: path.join(TEST_DATA_ROOT, 'test_types_uint16.onnx'),
type: 'uint16',
input0: Uint16Array.from([1, 2, 3, 4, 5]),
expectedOutput0: Uint16Array.from([1, 2, 3, 4, 5])
},
{
- model: path.join(TEST_DATA_ROOT, 'test_types_UINT32.pb'),
+ model: path.join(TEST_DATA_ROOT, 'test_types_uint32.onnx'),
type: 'uint32',
input0: Uint32Array.from([1, 2, 3, 4, 5]),
expectedOutput0: Uint32Array.from([1, 2, 3, 4, 5])
},
{
- model: path.join(TEST_DATA_ROOT, 'test_types_UINT64.pb'),
+ model: path.join(TEST_DATA_ROOT, 'test_types_uint64.onnx'),
type: 'uint64',
input0: BigUint64Array.from([BigInt(1), BigInt(2), BigInt(3), BigInt(4), BigInt(5)]),
expectedOutput0: BigUint64Array.from([BigInt(1), BigInt(2), BigInt(3), BigInt(4), BigInt(5)])
diff --git a/js/node/test/test-utils.ts b/js/node/test/test-utils.ts
index d2da45566d698..ffb19c7bb2173 100644
--- a/js/node/test/test-utils.ts
+++ b/js/node/test/test-utils.ts
@@ -82,7 +82,7 @@ export function warmup(): void {
this.timeout(0);
// we have test cases to verify correctness in other place, so do no check here.
try {
- const session = await InferenceSession.create(path.join(TEST_DATA_ROOT, 'test_types_INT32.pb'));
+ const session = await InferenceSession.create(path.join(TEST_DATA_ROOT, 'test_types_int32.onnx'));
await session.run({input: new Tensor(new Float32Array(5), [1, 5])}, {output: null}, {});
} catch (e) {
}
diff --git a/js/node/test/testdata/test_types_BOOL.pb b/js/node/test/testdata/test_types_BOOL.pb
deleted file mode 100644
index 2c58b06d0aa6d..0000000000000
Binary files a/js/node/test/testdata/test_types_BOOL.pb and /dev/null differ
diff --git a/js/node/test/testdata/test_types_DOUBLE.pb b/js/node/test/testdata/test_types_DOUBLE.pb
deleted file mode 100644
index 65ebf0f848a35..0000000000000
Binary files a/js/node/test/testdata/test_types_DOUBLE.pb and /dev/null differ
diff --git a/js/node/test/testdata/test_types_FLOAT.pb b/js/node/test/testdata/test_types_FLOAT.pb
deleted file mode 100644
index b4ad9807834ed..0000000000000
Binary files a/js/node/test/testdata/test_types_FLOAT.pb and /dev/null differ
diff --git a/js/node/test/testdata/test_types_FLOAT16.pb b/js/node/test/testdata/test_types_FLOAT16.pb
deleted file mode 100644
index f671bb7ce3253..0000000000000
Binary files a/js/node/test/testdata/test_types_FLOAT16.pb and /dev/null differ
diff --git a/js/node/test/testdata/test_types_INT16.pb b/js/node/test/testdata/test_types_INT16.pb
deleted file mode 100644
index f297ec9c54940..0000000000000
Binary files a/js/node/test/testdata/test_types_INT16.pb and /dev/null differ
diff --git a/js/node/test/testdata/test_types_INT32.pb b/js/node/test/testdata/test_types_INT32.pb
deleted file mode 100644
index 73bb539cf44c6..0000000000000
Binary files a/js/node/test/testdata/test_types_INT32.pb and /dev/null differ
diff --git a/js/node/test/testdata/test_types_INT64.pb b/js/node/test/testdata/test_types_INT64.pb
deleted file mode 100644
index ccf8df0033278..0000000000000
Binary files a/js/node/test/testdata/test_types_INT64.pb and /dev/null differ
diff --git a/js/node/test/testdata/test_types_INT8.pb b/js/node/test/testdata/test_types_INT8.pb
deleted file mode 100644
index 72698a779578d..0000000000000
Binary files a/js/node/test/testdata/test_types_INT8.pb and /dev/null differ
diff --git a/js/node/test/testdata/test_types_STRING.pb b/js/node/test/testdata/test_types_STRING.pb
deleted file mode 100644
index 7c8b3e7e2eb82..0000000000000
Binary files a/js/node/test/testdata/test_types_STRING.pb and /dev/null differ
diff --git a/js/node/test/testdata/test_types_UINT16.pb b/js/node/test/testdata/test_types_UINT16.pb
deleted file mode 100644
index 0a9c6fe3770ce..0000000000000
Binary files a/js/node/test/testdata/test_types_UINT16.pb and /dev/null differ
diff --git a/js/node/test/testdata/test_types_UINT32.pb b/js/node/test/testdata/test_types_UINT32.pb
deleted file mode 100644
index 90efef3e7f171..0000000000000
Binary files a/js/node/test/testdata/test_types_UINT32.pb and /dev/null differ
diff --git a/js/node/test/testdata/test_types_UINT64.pb b/js/node/test/testdata/test_types_UINT64.pb
deleted file mode 100644
index 53214a1a2e0e6..0000000000000
Binary files a/js/node/test/testdata/test_types_UINT64.pb and /dev/null differ
diff --git a/js/node/test/testdata/test_types_UINT8.pb b/js/node/test/testdata/test_types_UINT8.pb
deleted file mode 100644
index 8b6a9c42197ef..0000000000000
Binary files a/js/node/test/testdata/test_types_UINT8.pb and /dev/null differ
diff --git a/js/node/test/testdata/test_types_bool.onnx b/js/node/test/testdata/test_types_bool.onnx
new file mode 100644
index 0000000000000..dc6753a4a0c72
Binary files /dev/null and b/js/node/test/testdata/test_types_bool.onnx differ
diff --git a/js/node/test/testdata/test_types_double.onnx b/js/node/test/testdata/test_types_double.onnx
new file mode 100644
index 0000000000000..c99dd3facf0fb
Binary files /dev/null and b/js/node/test/testdata/test_types_double.onnx differ
diff --git a/js/node/test/testdata/test_types_float.onnx b/js/node/test/testdata/test_types_float.onnx
new file mode 100644
index 0000000000000..91bdef98910ec
Binary files /dev/null and b/js/node/test/testdata/test_types_float.onnx differ
diff --git a/js/node/test/testdata/test_types_float16.onnx b/js/node/test/testdata/test_types_float16.onnx
new file mode 100644
index 0000000000000..b7dd3dd0c97fd
Binary files /dev/null and b/js/node/test/testdata/test_types_float16.onnx differ
diff --git a/js/node/test/testdata/test_types_int16.onnx b/js/node/test/testdata/test_types_int16.onnx
new file mode 100644
index 0000000000000..df14aef71b97f
Binary files /dev/null and b/js/node/test/testdata/test_types_int16.onnx differ
diff --git a/js/node/test/testdata/test_types_int32.onnx b/js/node/test/testdata/test_types_int32.onnx
new file mode 100644
index 0000000000000..3b0d8c3d677c8
Binary files /dev/null and b/js/node/test/testdata/test_types_int32.onnx differ
diff --git a/js/node/test/testdata/test_types_int64.onnx b/js/node/test/testdata/test_types_int64.onnx
new file mode 100644
index 0000000000000..5d35b7d74c63e
Binary files /dev/null and b/js/node/test/testdata/test_types_int64.onnx differ
diff --git a/js/node/test/testdata/test_types_int8.onnx b/js/node/test/testdata/test_types_int8.onnx
new file mode 100644
index 0000000000000..8a557e44d5272
Binary files /dev/null and b/js/node/test/testdata/test_types_int8.onnx differ
diff --git a/js/node/test/testdata/test_types_string.onnx b/js/node/test/testdata/test_types_string.onnx
new file mode 100644
index 0000000000000..8adebf144b89e
Binary files /dev/null and b/js/node/test/testdata/test_types_string.onnx differ
diff --git a/js/node/test/testdata/test_types_uint16.onnx b/js/node/test/testdata/test_types_uint16.onnx
new file mode 100644
index 0000000000000..aadac3651a656
Binary files /dev/null and b/js/node/test/testdata/test_types_uint16.onnx differ
diff --git a/js/node/test/testdata/test_types_uint32.onnx b/js/node/test/testdata/test_types_uint32.onnx
new file mode 100644
index 0000000000000..c3ad4da3e03e4
Binary files /dev/null and b/js/node/test/testdata/test_types_uint32.onnx differ
diff --git a/js/node/test/testdata/test_types_uint64.onnx b/js/node/test/testdata/test_types_uint64.onnx
new file mode 100644
index 0000000000000..af7b6378bca3a
Binary files /dev/null and b/js/node/test/testdata/test_types_uint64.onnx differ
diff --git a/js/node/test/testdata/test_types_uint8.onnx b/js/node/test/testdata/test_types_uint8.onnx
new file mode 100644
index 0000000000000..c57f3c8a61366
Binary files /dev/null and b/js/node/test/testdata/test_types_uint8.onnx differ
diff --git a/js/node/test/unittests/lib/inference-session.ts b/js/node/test/unittests/lib/inference-session.ts
index ffb5e0a48a3cb..d8d961cc94398 100644
--- a/js/node/test/unittests/lib/inference-session.ts
+++ b/js/node/test/unittests/lib/inference-session.ts
@@ -186,7 +186,7 @@ describe('UnitTests - InferenceSession.run()', () => {
});
describe('UnitTests - InferenceSession.SessionOptions', () => {
- const modelPath = path.join(__dirname, '../../testdata/test_types_FLOAT.pb');
+ const modelPath = path.join(__dirname, '../../testdata/test_types_float.onnx');
const createAny: any = InferenceSession.create;
it('BAD CALL - type mismatch', async () => {
@@ -323,7 +323,7 @@ describe('UnitTests - InferenceSession.RunOptions', () => {
const expectedOutput0 = new Tensor('float32', [1, 2, 3, 4, 5], [1, 5]);
before(async () => {
- const modelPath = path.join(__dirname, '../../testdata/test_types_FLOAT.pb');
+ const modelPath = path.join(__dirname, '../../testdata/test_types_float.onnx');
session = await InferenceSession.create(modelPath);
sessionAny = session;
});
diff --git a/js/react_native/android/src/androidTest/java/ai/onnxruntime/reactnative/TensorHelperTest.java b/js/react_native/android/src/androidTest/java/ai/onnxruntime/reactnative/TensorHelperTest.java
index 19c27441a3624..f508eccae4468 100644
--- a/js/react_native/android/src/androidTest/java/ai/onnxruntime/reactnative/TensorHelperTest.java
+++ b/js/react_native/android/src/androidTest/java/ai/onnxruntime/reactnative/TensorHelperTest.java
@@ -34,6 +34,7 @@
import java.util.Map;
import org.junit.Assert;
import org.junit.Before;
+import org.junit.Ignore;
import org.junit.Test;
import org.mockito.MockitoSession;
@@ -206,6 +207,7 @@ public void createInputTensor_double() throws Exception {
}
@Test
+ @Ignore("data type for Slice is not supported in mobile package")
public void createOutputTensor_bool() throws Exception {
MockitoSession mockSession = mockitoSession().mockStatic(Arguments.class).startMocking();
try {
@@ -246,6 +248,7 @@ public void createOutputTensor_bool() throws Exception {
}
@Test
+ @Ignore("data type for Slice is not supported in mobile package")
public void createOutputTensor_double() throws Exception {
MockitoSession mockSession = mockitoSession().mockStatic(Arguments.class).startMocking();
try {
diff --git a/js/react_native/android/src/androidTest/res/raw/test_types_bool.ort b/js/react_native/android/src/androidTest/res/raw/test_types_bool.ort
index 9267bee5abf76..e83b233e28255 100644
Binary files a/js/react_native/android/src/androidTest/res/raw/test_types_bool.ort and b/js/react_native/android/src/androidTest/res/raw/test_types_bool.ort differ
diff --git a/js/react_native/android/src/androidTest/res/raw/test_types_double.ort b/js/react_native/android/src/androidTest/res/raw/test_types_double.ort
index 9030eca3e4ca1..94f20e0f421f4 100644
Binary files a/js/react_native/android/src/androidTest/res/raw/test_types_double.ort and b/js/react_native/android/src/androidTest/res/raw/test_types_double.ort differ
diff --git a/js/react_native/android/src/androidTest/res/raw/test_types_float.ort b/js/react_native/android/src/androidTest/res/raw/test_types_float.ort
index 01f489572ba4a..e5c40742843d5 100644
Binary files a/js/react_native/android/src/androidTest/res/raw/test_types_float.ort and b/js/react_native/android/src/androidTest/res/raw/test_types_float.ort differ
diff --git a/js/react_native/android/src/androidTest/res/raw/test_types_int32.ort b/js/react_native/android/src/androidTest/res/raw/test_types_int32.ort
index 8aecad72cfcf7..6135c9a4aca7c 100644
Binary files a/js/react_native/android/src/androidTest/res/raw/test_types_int32.ort and b/js/react_native/android/src/androidTest/res/raw/test_types_int32.ort differ
diff --git a/js/react_native/android/src/androidTest/res/raw/test_types_int64.ort b/js/react_native/android/src/androidTest/res/raw/test_types_int64.ort
index b84dc2944bae5..a9892d9ec598d 100644
Binary files a/js/react_native/android/src/androidTest/res/raw/test_types_int64.ort and b/js/react_native/android/src/androidTest/res/raw/test_types_int64.ort differ
diff --git a/js/react_native/android/src/androidTest/res/raw/test_types_int8.ort b/js/react_native/android/src/androidTest/res/raw/test_types_int8.ort
index d762f2c5eb22d..f1bf199e488e1 100644
Binary files a/js/react_native/android/src/androidTest/res/raw/test_types_int8.ort and b/js/react_native/android/src/androidTest/res/raw/test_types_int8.ort differ
diff --git a/js/react_native/android/src/androidTest/res/raw/test_types_uint8.ort b/js/react_native/android/src/androidTest/res/raw/test_types_uint8.ort
index bf2d9ac7f8362..9f5310803323a 100644
Binary files a/js/react_native/android/src/androidTest/res/raw/test_types_uint8.ort and b/js/react_native/android/src/androidTest/res/raw/test_types_uint8.ort differ
diff --git a/js/react_native/e2e/android/app/build.gradle b/js/react_native/e2e/android/app/build.gradle
index 3683aa70da648..ea16f2c87f11c 100644
--- a/js/react_native/e2e/android/app/build.gradle
+++ b/js/react_native/e2e/android/app/build.gradle
@@ -177,6 +177,12 @@ android {
}
}
+repositories {
+ flatDir {
+ dir 'libs'
+ }
+}
+
dependencies {
implementation fileTree(dir: "libs", include: ["*.jar"])
//noinspection GradleDynamicVersion
@@ -207,6 +213,8 @@ dependencies {
androidTestImplementation 'androidx.test:rules:1.4.0'
implementation project(':onnxruntime-react-native')
+ // specify ORT dependency here so it can be found in libs flatDir repository
+ implementation "com.microsoft.onnxruntime:onnxruntime-mobile:latest.integration@aar"
}
// Run this once to be able to run the application with BUCK
diff --git a/js/react_native/e2e/android/app/src/main/assets/mnist.ort b/js/react_native/e2e/android/app/src/main/assets/mnist.ort
index 58dd9e664ac53..a82758ba12de1 100644
Binary files a/js/react_native/e2e/android/app/src/main/assets/mnist.ort and b/js/react_native/e2e/android/app/src/main/assets/mnist.ort differ
diff --git a/js/react_native/e2e/android/app/src/main/java/com/example/reactnativeonnxruntimemodule/MNISTDataHandler.java b/js/react_native/e2e/android/app/src/main/java/com/example/reactnativeonnxruntimemodule/MNISTDataHandler.java
index 8c9d71b76b34c..a458901b5314c 100644
--- a/js/react_native/e2e/android/app/src/main/java/com/example/reactnativeonnxruntimemodule/MNISTDataHandler.java
+++ b/js/react_native/e2e/android/app/src/main/java/com/example/reactnativeonnxruntimemodule/MNISTDataHandler.java
@@ -143,6 +143,7 @@ private WritableMap preprocess(String uri) throws Exception {
WritableArray dims = Arguments.createArray();
dims.pushInt(batchSize);
+ dims.pushInt(1);
dims.pushInt(imageHeight);
dims.pushInt(imageWidth);
inputTensorMap.putArray("dims", dims);
@@ -155,7 +156,7 @@ private WritableMap preprocess(String uri) throws Exception {
String data = Base64.encodeToString(imageByteBuffer.array(), Base64.DEFAULT);
inputTensorMap.putString("data", data);
- inputDataMap.putMap("flatten_2_input", inputTensorMap);
+ inputDataMap.putMap("Input3", inputTensorMap);
return inputDataMap;
}
@@ -164,7 +165,7 @@ private WritableMap preprocess(String uri) throws Exception {
private WritableMap postprocess(ReadableMap result) throws Exception {
String detectionResult = "";
- ReadableMap outputTensor = result.getMap("Identity");
+ ReadableMap outputTensor = result.getMap("Plus214_Output_0");
String outputData = outputTensor.getString("data");
FloatBuffer buffer =
diff --git a/js/react_native/e2e/ios/MNISTDataHandler.mm b/js/react_native/e2e/ios/MNISTDataHandler.mm
index d639ec930daa4..b935a91b63503 100644
--- a/js/react_native/e2e/ios/MNISTDataHandler.mm
+++ b/js/react_native/e2e/ios/MNISTDataHandler.mm
@@ -117,7 +117,9 @@ - (NSDictionary *)preprocess:(NSString *)uri {
// dims
NSArray *dims = @[
- [NSNumber numberWithInt:1], [NSNumber numberWithInt:static_cast<int>(height)],
+ [NSNumber numberWithInt:1],
+ [NSNumber numberWithInt:1],
+ [NSNumber numberWithInt:static_cast<int>(height)],
[NSNumber numberWithInt:static_cast<int>(width)]
];
inputTensorMap[@"dims"] = dims;
@@ -129,7 +131,7 @@ - (NSDictionary *)preprocess:(NSString *)uri {
NSString *data = [byteBufferRef base64EncodedStringWithOptions:0];
inputTensorMap[@"data"] = data;
- inputDataMap[@"flatten_2_input"] = inputTensorMap;
+ inputDataMap[@"Input3"] = inputTensorMap;
return inputDataMap;
}
@@ -137,7 +139,7 @@ - (NSDictionary *)preprocess:(NSString *)uri {
- (NSDictionary *)postprocess:(NSDictionary *)result {
NSMutableString *detectionResult = [NSMutableString string];
- NSDictionary *outputTensor = [result objectForKey:@"Identity"];
+ NSDictionary *outputTensor = [result objectForKey:@"Plus214_Output_0"];
NSString *data = [outputTensor objectForKey:@"data"];
NSData *buffer = [[NSData alloc] initWithBase64EncodedString:data options:0];
diff --git a/onnxruntime/test/testdata/mnist.level1_opt.onnx b/js/react_native/e2e/src/mnist.onnx
similarity index 92%
rename from onnxruntime/test/testdata/mnist.level1_opt.onnx
rename to js/react_native/e2e/src/mnist.onnx
index 70fd5b6f31a82..30761fc7b7be5 100644
Binary files a/onnxruntime/test/testdata/mnist.level1_opt.onnx and b/js/react_native/e2e/src/mnist.onnx differ
diff --git a/js/react_native/e2e/src/mnist.ort b/js/react_native/e2e/src/mnist.ort
index 58dd9e664ac53..e342b0202497f 100644
Binary files a/js/react_native/e2e/src/mnist.ort and b/js/react_native/e2e/src/mnist.ort differ
diff --git a/js/react_native/e2e/src/mnist.readme.md b/js/react_native/e2e/src/mnist.readme.md
new file mode 100644
index 0000000000000..6ba5712dd96e7
--- /dev/null
+++ b/js/react_native/e2e/src/mnist.readme.md
@@ -0,0 +1,14 @@
+`js/react_native/e2e/src/mnist.onnx` is `onnxruntime/test/testdata/mnist.onnx` updated to opset 15.
+
+```bash
+cd <repo root>/js
+python -m onnxruntime.tools.update_onnx_opset --opset 15 ../onnxruntime/test/testdata/mnist.onnx ./react_native/e2e/src/mnist.onnx
+```
+
+`js/react_native/e2e/src/mnist.ort` and `js/react_native/e2e/android/app/src/main/assets/mnist.ort` are converted from `js/react_native/e2e/src/mnist.onnx`.
+
+```bash
+cd <repo root>/js
+python -m onnxruntime.tools.convert_onnx_models_to_ort --optimization_style=Fixed --output_dir ./react_native/e2e/android/app/src/main/assets ./react_native/e2e/src/mnist.onnx
+python -m onnxruntime.tools.convert_onnx_models_to_ort --optimization_style=Fixed --output_dir ./react_native/e2e/src ./react_native/e2e/src/mnist.onnx
+```
diff --git a/js/react_native/ios/OnnxruntimeModuleTest/Resources/test_types_bool.ort b/js/react_native/ios/OnnxruntimeModuleTest/Resources/test_types_bool.ort
index 9267bee5abf76..e83b233e28255 100644
Binary files a/js/react_native/ios/OnnxruntimeModuleTest/Resources/test_types_bool.ort and b/js/react_native/ios/OnnxruntimeModuleTest/Resources/test_types_bool.ort differ
diff --git a/js/react_native/ios/OnnxruntimeModuleTest/Resources/test_types_double.ort b/js/react_native/ios/OnnxruntimeModuleTest/Resources/test_types_double.ort
index 9030eca3e4ca1..94f20e0f421f4 100644
Binary files a/js/react_native/ios/OnnxruntimeModuleTest/Resources/test_types_double.ort and b/js/react_native/ios/OnnxruntimeModuleTest/Resources/test_types_double.ort differ
diff --git a/js/react_native/ios/OnnxruntimeModuleTest/Resources/test_types_float.ort b/js/react_native/ios/OnnxruntimeModuleTest/Resources/test_types_float.ort
index 01f489572ba4a..e5c40742843d5 100644
Binary files a/js/react_native/ios/OnnxruntimeModuleTest/Resources/test_types_float.ort and b/js/react_native/ios/OnnxruntimeModuleTest/Resources/test_types_float.ort differ
diff --git a/js/react_native/ios/OnnxruntimeModuleTest/Resources/test_types_int32.ort b/js/react_native/ios/OnnxruntimeModuleTest/Resources/test_types_int32.ort
index 8aecad72cfcf7..6135c9a4aca7c 100644
Binary files a/js/react_native/ios/OnnxruntimeModuleTest/Resources/test_types_int32.ort and b/js/react_native/ios/OnnxruntimeModuleTest/Resources/test_types_int32.ort differ
diff --git a/js/react_native/ios/OnnxruntimeModuleTest/Resources/test_types_int64.ort b/js/react_native/ios/OnnxruntimeModuleTest/Resources/test_types_int64.ort
index b84dc2944bae5..a9892d9ec598d 100644
Binary files a/js/react_native/ios/OnnxruntimeModuleTest/Resources/test_types_int64.ort and b/js/react_native/ios/OnnxruntimeModuleTest/Resources/test_types_int64.ort differ
diff --git a/js/react_native/ios/OnnxruntimeModuleTest/Resources/test_types_int8.ort b/js/react_native/ios/OnnxruntimeModuleTest/Resources/test_types_int8.ort
index d762f2c5eb22d..f1bf199e488e1 100644
Binary files a/js/react_native/ios/OnnxruntimeModuleTest/Resources/test_types_int8.ort and b/js/react_native/ios/OnnxruntimeModuleTest/Resources/test_types_int8.ort differ
diff --git a/js/react_native/ios/OnnxruntimeModuleTest/TensorHelperTest.mm b/js/react_native/ios/OnnxruntimeModuleTest/TensorHelperTest.mm
index ad7606c7f8118..10922f9ef3ffc 100644
--- a/js/react_native/ios/OnnxruntimeModuleTest/TensorHelperTest.mm
+++ b/js/react_native/ios/OnnxruntimeModuleTest/TensorHelperTest.mm
@@ -213,6 +213,7 @@ - (void)testCreateOutputTensorFloat {
}
- (void)testCreateOutputTensorDouble {
+ XCTSkip(@"data type for Slice is not supported in mobile package");
std::array<double_t, 5> outValues{std::numeric_limits<double_t>::min(), 1.0f, 2.0f, 3.0f,
std::numeric_limits<double_t>::max()};
std::function<NSNumber*(double_t)> convert = [](double_t value) { return [NSNumber numberWithDouble:value]; };
@@ -220,6 +221,7 @@ - (void)testCreateOutputTensorDouble {
}
- (void)testCreateOutputTensorBool {
+ XCTSkip(@"data type for Slice is not supported in mobile package");
std::array<bool, 5> outValues{false, true, true, false, true};
std::function<NSNumber*(bool)> convert = [](bool value) { return [NSNumber numberWithBool:value]; };
testCreateOutputTensorT(outValues, convert, JsTensorTypeBool, @"test_types_bool", @"ort");
diff --git a/js/react_native/test_types_models.readme.md b/js/react_native/test_types_models.readme.md
new file mode 100644
index 0000000000000..fb247fa6fe026
--- /dev/null
+++ b/js/react_native/test_types_models.readme.md
@@ -0,0 +1,15 @@
+`js/react_native/android/src/androidTest/res/raw/test_types_*.ort` and
+`js/react_native/ios/OnnxruntimeModuleTest/Resources/test_types_*.ort` ORT format models are converted from
+`js/node/test/testdata/test_types_*.onnx` ONNX models.
+
+For example, to generate `js/react_native/android/src/androidTest/res/raw/test_types_*.ort`, from the `js` directory,
+run:
+
+```bash
+python -m onnxruntime.tools.convert_onnx_models_to_ort \
+ --optimization_style Fixed \
+ --output_dir ./react_native/android/src/androidTest/res/raw \
+ ./node/test/testdata
+```
+
+Some additional files will be generated. They can be removed.
diff --git a/objectivec/test/testdata/single_add.basic.ort b/objectivec/test/testdata/single_add.basic.ort
index d85f2d4e6c73d..f622784b35366 100644
Binary files a/objectivec/test/testdata/single_add.basic.ort and b/objectivec/test/testdata/single_add.basic.ort differ
diff --git a/onnxruntime/contrib_ops/cpu/maxpool_with_mask.cc b/onnxruntime/contrib_ops/cpu/maxpool_with_mask.cc
index 0dee2079d4328..e0c420d951d84 100644
--- a/onnxruntime/contrib_ops/cpu/maxpool_with_mask.cc
+++ b/onnxruntime/contrib_ops/cpu/maxpool_with_mask.cc
@@ -11,7 +11,7 @@ ONNX_CPU_OPERATOR_TYPED_MS_KERNEL(
1,
float,
KernelDefBuilder()
- .TypeConstraint("X", DataTypeImpl::GetTensorType<float>()),
+ .TypeConstraint("T", DataTypeImpl::GetTensorType<float>()),
MaxpoolWithMask);
} // namespace contrib
diff --git a/onnxruntime/core/common/string_utils.h b/onnxruntime/core/common/string_utils.h
index 33d76e71c24a5..6e0eb460d2a63 100644
--- a/onnxruntime/core/common/string_utils.h
+++ b/onnxruntime/core/common/string_utils.h
@@ -7,6 +7,7 @@
#include
#include "core/common/common.h"
+#include "core/common/inlined_containers.h"
namespace onnxruntime {
namespace utils {
@@ -18,10 +19,10 @@ namespace utils {
* @param keep_empty Whether to keep empty substrings.
* @return The split substrings.
*/
-inline std::vector<std::string_view> SplitString(std::string_view string_to_split, std::string_view delimiter,
- bool keep_empty = false) {
+inline InlinedVector<std::string_view> SplitString(std::string_view string_to_split, std::string_view delimiter,
+ bool keep_empty = false) {
ORT_ENFORCE(!delimiter.empty(), "delimiter must not be empty");
- std::vector<std::string_view> result{};
+ InlinedVector<std::string_view> result{};
std::string_view::size_type segment_begin_pos = 0;
while (segment_begin_pos != std::string_view::npos) {
const std::string_view::size_type segment_end_pos = string_to_split.find(delimiter, segment_begin_pos);
diff --git a/onnxruntime/core/flatbuffers/flatbuffers_utils.cc b/onnxruntime/core/flatbuffers/flatbuffers_utils.cc
index 2d926daf3285a..505b79548a1fa 100644
--- a/onnxruntime/core/flatbuffers/flatbuffers_utils.cc
+++ b/onnxruntime/core/flatbuffers/flatbuffers_utils.cc
@@ -1,13 +1,14 @@
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
-#include "flatbuffers_utils.h"
-#include "schema/ort.fbs.h"
+#include "core/flatbuffers/flatbuffers_utils.h"
+
+#include "gsl/gsl"
#include "core/common/common.h"
+#include "core/flatbuffers/schema/ort.fbs.h"
#include "core/graph/constants.h"
#include "core/graph/onnx_protobuf.h"
-#include "gsl/gsl"
using namespace ONNX_NAMESPACE;
using namespace ::onnxruntime::common;
diff --git a/onnxruntime/core/flatbuffers/flatbuffers_utils.h b/onnxruntime/core/flatbuffers/flatbuffers_utils.h
index 570cec740413c..a04bb60453035 100644
--- a/onnxruntime/core/flatbuffers/flatbuffers_utils.h
+++ b/onnxruntime/core/flatbuffers/flatbuffers_utils.h
@@ -5,6 +5,7 @@
#include
+#include "core/common/common.h"
#include "core/common/path_string.h"
#include "core/common/status.h"
@@ -32,11 +33,12 @@ struct ValueInfo;
namespace utils {
+constexpr auto kInvalidOrtFormatModelMessage = "Invalid ORT format model.";
+
// Will only create string in flatbuffers when has_string is true
flatbuffers::Offset<flatbuffers::String> SaveStringToOrtFormat(flatbuffers::FlatBufferBuilder& builder,
bool has_string, const std::string& src);
-// TODO, add ORT_MUST_USE_RESULT when it is moved to a different header
onnxruntime::common::Status SaveValueInfoOrtFormat(
flatbuffers::FlatBufferBuilder& builder, const ONNX_NAMESPACE::ValueInfoProto& value_info_proto,
flatbuffers::Offset<fbs::ValueInfo>& fbs_value_info);
@@ -67,3 +69,7 @@ bool IsOrtFormatModelBytes(const void* bytes, int num_bytes);
} // namespace utils
} // namespace fbs
} // namespace onnxruntime
+
+#define ORT_FORMAT_RETURN_IF_NULL(expr, expr_description) \
+ ORT_RETURN_IF((expr) == nullptr, (expr_description), " is null. ", \
+ onnxruntime::fbs::utils::kInvalidOrtFormatModelMessage)
diff --git a/onnxruntime/core/flatbuffers/ort_flatbuffers_py/fbs/ArgType.py b/onnxruntime/core/flatbuffers/ort_flatbuffers_py/fbs/ArgType.py
new file mode 100644
index 0000000000000..a0328a9f469e7
--- /dev/null
+++ b/onnxruntime/core/flatbuffers/ort_flatbuffers_py/fbs/ArgType.py
@@ -0,0 +1,8 @@
+# automatically generated by the FlatBuffers compiler, do not modify
+
+# namespace: fbs
+
+class ArgType(object):
+ INPUT = 0
+ OUTPUT = 1
+
diff --git a/onnxruntime/core/flatbuffers/ort_flatbuffers_py/fbs/ArgTypeAndIndex.py b/onnxruntime/core/flatbuffers/ort_flatbuffers_py/fbs/ArgTypeAndIndex.py
new file mode 100644
index 0000000000000..32aaa298dd99a
--- /dev/null
+++ b/onnxruntime/core/flatbuffers/ort_flatbuffers_py/fbs/ArgTypeAndIndex.py
@@ -0,0 +1,44 @@
+# automatically generated by the FlatBuffers compiler, do not modify
+
+# namespace: fbs
+
+import flatbuffers
+from flatbuffers.compat import import_numpy
+np = import_numpy()
+
+class ArgTypeAndIndex(object):
+ __slots__ = ['_tab']
+
+ @classmethod
+ def GetRootAsArgTypeAndIndex(cls, buf, offset):
+ n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
+ x = ArgTypeAndIndex()
+ x.Init(buf, n + offset)
+ return x
+
+ @classmethod
+ def ArgTypeAndIndexBufferHasIdentifier(cls, buf, offset, size_prefixed=False):
+ return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x4F\x52\x54\x4D", size_prefixed=size_prefixed)
+
+ # ArgTypeAndIndex
+ def Init(self, buf, pos):
+ self._tab = flatbuffers.table.Table(buf, pos)
+
+ # ArgTypeAndIndex
+ def ArgType(self):
+ o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
+ if o != 0:
+ return self._tab.Get(flatbuffers.number_types.Int8Flags, o + self._tab.Pos)
+ return 0
+
+ # ArgTypeAndIndex
+ def Index(self):
+ o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6))
+ if o != 0:
+ return self._tab.Get(flatbuffers.number_types.Uint32Flags, o + self._tab.Pos)
+ return 0
+
+def ArgTypeAndIndexStart(builder): builder.StartObject(2)
+def ArgTypeAndIndexAddArgType(builder, argType): builder.PrependInt8Slot(0, argType, 0)
+def ArgTypeAndIndexAddIndex(builder, index): builder.PrependUint32Slot(1, index, 0)
+def ArgTypeAndIndexEnd(builder): return builder.EndObject()
diff --git a/onnxruntime/core/flatbuffers/ort_flatbuffers_py/fbs/KernelCreateInfos.py b/onnxruntime/core/flatbuffers/ort_flatbuffers_py/fbs/DeprecatedKernelCreateInfos.py
similarity index 63%
rename from onnxruntime/core/flatbuffers/ort_flatbuffers_py/fbs/KernelCreateInfos.py
rename to onnxruntime/core/flatbuffers/ort_flatbuffers_py/fbs/DeprecatedKernelCreateInfos.py
index 355d462c50797..9f93bffa499d0 100644
--- a/onnxruntime/core/flatbuffers/ort_flatbuffers_py/fbs/KernelCreateInfos.py
+++ b/onnxruntime/core/flatbuffers/ort_flatbuffers_py/fbs/DeprecatedKernelCreateInfos.py
@@ -6,25 +6,26 @@
from flatbuffers.compat import import_numpy
np = import_numpy()
-class KernelCreateInfos(object):
+# deprecated: no longer using kernel def hashes
+class DeprecatedKernelCreateInfos(object):
__slots__ = ['_tab']
@classmethod
- def GetRootAsKernelCreateInfos(cls, buf, offset):
+ def GetRootAsDeprecatedKernelCreateInfos(cls, buf, offset):
n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
- x = KernelCreateInfos()
+ x = DeprecatedKernelCreateInfos()
x.Init(buf, n + offset)
return x
@classmethod
- def KernelCreateInfosBufferHasIdentifier(cls, buf, offset, size_prefixed=False):
+ def DeprecatedKernelCreateInfosBufferHasIdentifier(cls, buf, offset, size_prefixed=False):
return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x4F\x52\x54\x4D", size_prefixed=size_prefixed)
- # KernelCreateInfos
+ # DeprecatedKernelCreateInfos
def Init(self, buf, pos):
self._tab = flatbuffers.table.Table(buf, pos)
- # KernelCreateInfos
+ # DeprecatedKernelCreateInfos
def NodeIndices(self, j):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
if o != 0:
@@ -32,26 +33,26 @@ def NodeIndices(self, j):
return self._tab.Get(flatbuffers.number_types.Uint32Flags, a + flatbuffers.number_types.UOffsetTFlags.py_type(j * 4))
return 0
- # KernelCreateInfos
+ # DeprecatedKernelCreateInfos
def NodeIndicesAsNumpy(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
if o != 0:
return self._tab.GetVectorAsNumpy(flatbuffers.number_types.Uint32Flags, o)
return 0
- # KernelCreateInfos
+ # DeprecatedKernelCreateInfos
def NodeIndicesLength(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
if o != 0:
return self._tab.VectorLen(o)
return 0
- # KernelCreateInfos
+ # DeprecatedKernelCreateInfos
def NodeIndicesIsNone(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
return o == 0
- # KernelCreateInfos
+ # DeprecatedKernelCreateInfos
def KernelDefHashes(self, j):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6))
if o != 0:
@@ -59,28 +60,28 @@ def KernelDefHashes(self, j):
return self._tab.Get(flatbuffers.number_types.Uint64Flags, a + flatbuffers.number_types.UOffsetTFlags.py_type(j * 8))
return 0
- # KernelCreateInfos
+ # DeprecatedKernelCreateInfos
def KernelDefHashesAsNumpy(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6))
if o != 0:
return self._tab.GetVectorAsNumpy(flatbuffers.number_types.Uint64Flags, o)
return 0
- # KernelCreateInfos
+ # DeprecatedKernelCreateInfos
def KernelDefHashesLength(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6))
if o != 0:
return self._tab.VectorLen(o)
return 0
- # KernelCreateInfos
+ # DeprecatedKernelCreateInfos
def KernelDefHashesIsNone(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6))
return o == 0
-def KernelCreateInfosStart(builder): builder.StartObject(2)
-def KernelCreateInfosAddNodeIndices(builder, nodeIndices): builder.PrependUOffsetTRelativeSlot(0, flatbuffers.number_types.UOffsetTFlags.py_type(nodeIndices), 0)
-def KernelCreateInfosStartNodeIndicesVector(builder, numElems): return builder.StartVector(4, numElems, 4)
-def KernelCreateInfosAddKernelDefHashes(builder, kernelDefHashes): builder.PrependUOffsetTRelativeSlot(1, flatbuffers.number_types.UOffsetTFlags.py_type(kernelDefHashes), 0)
-def KernelCreateInfosStartKernelDefHashesVector(builder, numElems): return builder.StartVector(8, numElems, 8)
-def KernelCreateInfosEnd(builder): return builder.EndObject()
+def DeprecatedKernelCreateInfosStart(builder): builder.StartObject(2)
+def DeprecatedKernelCreateInfosAddNodeIndices(builder, nodeIndices): builder.PrependUOffsetTRelativeSlot(0, flatbuffers.number_types.UOffsetTFlags.py_type(nodeIndices), 0)
+def DeprecatedKernelCreateInfosStartNodeIndicesVector(builder, numElems): return builder.StartVector(4, numElems, 4)
+def DeprecatedKernelCreateInfosAddKernelDefHashes(builder, kernelDefHashes): builder.PrependUOffsetTRelativeSlot(1, flatbuffers.number_types.UOffsetTFlags.py_type(kernelDefHashes), 0)
+def DeprecatedKernelCreateInfosStartKernelDefHashesVector(builder, numElems): return builder.StartVector(8, numElems, 8)
+def DeprecatedKernelCreateInfosEnd(builder): return builder.EndObject()
diff --git a/onnxruntime/core/flatbuffers/ort_flatbuffers_py/fbs/NodeIndexAndKernelDefHash.py b/onnxruntime/core/flatbuffers/ort_flatbuffers_py/fbs/DeprecatedNodeIndexAndKernelDefHash.py
similarity index 54%
rename from onnxruntime/core/flatbuffers/ort_flatbuffers_py/fbs/NodeIndexAndKernelDefHash.py
rename to onnxruntime/core/flatbuffers/ort_flatbuffers_py/fbs/DeprecatedNodeIndexAndKernelDefHash.py
index 35c7301f7d2a5..7137233a9e726 100644
--- a/onnxruntime/core/flatbuffers/ort_flatbuffers_py/fbs/NodeIndexAndKernelDefHash.py
+++ b/onnxruntime/core/flatbuffers/ort_flatbuffers_py/fbs/DeprecatedNodeIndexAndKernelDefHash.py
@@ -6,39 +6,40 @@
from flatbuffers.compat import import_numpy
np = import_numpy()
-class NodeIndexAndKernelDefHash(object):
+# deprecated: no longer using kernel def hashes
+class DeprecatedNodeIndexAndKernelDefHash(object):
__slots__ = ['_tab']
@classmethod
- def GetRootAsNodeIndexAndKernelDefHash(cls, buf, offset):
+ def GetRootAsDeprecatedNodeIndexAndKernelDefHash(cls, buf, offset):
n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
- x = NodeIndexAndKernelDefHash()
+ x = DeprecatedNodeIndexAndKernelDefHash()
x.Init(buf, n + offset)
return x
@classmethod
- def NodeIndexAndKernelDefHashBufferHasIdentifier(cls, buf, offset, size_prefixed=False):
+ def DeprecatedNodeIndexAndKernelDefHashBufferHasIdentifier(cls, buf, offset, size_prefixed=False):
return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x4F\x52\x54\x4D", size_prefixed=size_prefixed)
- # NodeIndexAndKernelDefHash
+ # DeprecatedNodeIndexAndKernelDefHash
def Init(self, buf, pos):
self._tab = flatbuffers.table.Table(buf, pos)
- # NodeIndexAndKernelDefHash
+ # DeprecatedNodeIndexAndKernelDefHash
def NodeIndex(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
if o != 0:
return self._tab.Get(flatbuffers.number_types.Uint32Flags, o + self._tab.Pos)
return 0
- # NodeIndexAndKernelDefHash
+ # DeprecatedNodeIndexAndKernelDefHash
def KernelDefHash(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6))
if o != 0:
return self._tab.Get(flatbuffers.number_types.Uint64Flags, o + self._tab.Pos)
return 0
-def NodeIndexAndKernelDefHashStart(builder): builder.StartObject(2)
-def NodeIndexAndKernelDefHashAddNodeIndex(builder, nodeIndex): builder.PrependUint32Slot(0, nodeIndex, 0)
-def NodeIndexAndKernelDefHashAddKernelDefHash(builder, kernelDefHash): builder.PrependUint64Slot(1, kernelDefHash, 0)
-def NodeIndexAndKernelDefHashEnd(builder): return builder.EndObject()
+def DeprecatedNodeIndexAndKernelDefHashStart(builder): builder.StartObject(2)
+def DeprecatedNodeIndexAndKernelDefHashAddNodeIndex(builder, nodeIndex): builder.PrependUint32Slot(0, nodeIndex, 0)
+def DeprecatedNodeIndexAndKernelDefHashAddKernelDefHash(builder, kernelDefHash): builder.PrependUint64Slot(1, kernelDefHash, 0)
+def DeprecatedNodeIndexAndKernelDefHashEnd(builder): return builder.EndObject()
diff --git a/onnxruntime/core/flatbuffers/ort_flatbuffers_py/fbs/SessionState.py b/onnxruntime/core/flatbuffers/ort_flatbuffers_py/fbs/DeprecatedSessionState.py
similarity index 53%
rename from onnxruntime/core/flatbuffers/ort_flatbuffers_py/fbs/SessionState.py
rename to onnxruntime/core/flatbuffers/ort_flatbuffers_py/fbs/DeprecatedSessionState.py
index 274a20d7d16bd..fbf21a38c2f5d 100644
--- a/onnxruntime/core/flatbuffers/ort_flatbuffers_py/fbs/SessionState.py
+++ b/onnxruntime/core/flatbuffers/ort_flatbuffers_py/fbs/DeprecatedSessionState.py
@@ -6,62 +6,63 @@
from flatbuffers.compat import import_numpy
np = import_numpy()
-class SessionState(object):
+# deprecated: no longer using kernel def hashes
+class DeprecatedSessionState(object):
__slots__ = ['_tab']
@classmethod
- def GetRootAsSessionState(cls, buf, offset):
+ def GetRootAsDeprecatedSessionState(cls, buf, offset):
n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
- x = SessionState()
+ x = DeprecatedSessionState()
x.Init(buf, n + offset)
return x
@classmethod
- def SessionStateBufferHasIdentifier(cls, buf, offset, size_prefixed=False):
+ def DeprecatedSessionStateBufferHasIdentifier(cls, buf, offset, size_prefixed=False):
return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x4F\x52\x54\x4D", size_prefixed=size_prefixed)
- # SessionState
+ # DeprecatedSessionState
def Init(self, buf, pos):
self._tab = flatbuffers.table.Table(buf, pos)
- # SessionState
+ # DeprecatedSessionState
def Kernels(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
if o != 0:
x = self._tab.Indirect(o + self._tab.Pos)
- from ort_flatbuffers_py.fbs.KernelCreateInfos import KernelCreateInfos
- obj = KernelCreateInfos()
+ from ort_flatbuffers_py.fbs.DeprecatedKernelCreateInfos import DeprecatedKernelCreateInfos
+ obj = DeprecatedKernelCreateInfos()
obj.Init(self._tab.Bytes, x)
return obj
return None
- # SessionState
+ # DeprecatedSessionState
def SubGraphSessionStates(self, j):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6))
if o != 0:
x = self._tab.Vector(o)
x += flatbuffers.number_types.UOffsetTFlags.py_type(j) * 4
x = self._tab.Indirect(x)
- from ort_flatbuffers_py.fbs.SubGraphSessionState import SubGraphSessionState
- obj = SubGraphSessionState()
+ from ort_flatbuffers_py.fbs.DeprecatedSubGraphSessionState import DeprecatedSubGraphSessionState
+ obj = DeprecatedSubGraphSessionState()
obj.Init(self._tab.Bytes, x)
return obj
return None
- # SessionState
+ # DeprecatedSessionState
def SubGraphSessionStatesLength(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6))
if o != 0:
return self._tab.VectorLen(o)
return 0
- # SessionState
+ # DeprecatedSessionState
def SubGraphSessionStatesIsNone(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6))
return o == 0
-def SessionStateStart(builder): builder.StartObject(2)
-def SessionStateAddKernels(builder, kernels): builder.PrependUOffsetTRelativeSlot(0, flatbuffers.number_types.UOffsetTFlags.py_type(kernels), 0)
-def SessionStateAddSubGraphSessionStates(builder, subGraphSessionStates): builder.PrependUOffsetTRelativeSlot(1, flatbuffers.number_types.UOffsetTFlags.py_type(subGraphSessionStates), 0)
-def SessionStateStartSubGraphSessionStatesVector(builder, numElems): return builder.StartVector(4, numElems, 4)
-def SessionStateEnd(builder): return builder.EndObject()
+def DeprecatedSessionStateStart(builder): builder.StartObject(2)
+def DeprecatedSessionStateAddKernels(builder, kernels): builder.PrependUOffsetTRelativeSlot(0, flatbuffers.number_types.UOffsetTFlags.py_type(kernels), 0)
+def DeprecatedSessionStateAddSubGraphSessionStates(builder, subGraphSessionStates): builder.PrependUOffsetTRelativeSlot(1, flatbuffers.number_types.UOffsetTFlags.py_type(subGraphSessionStates), 0)
+def DeprecatedSessionStateStartSubGraphSessionStatesVector(builder, numElems): return builder.StartVector(4, numElems, 4)
+def DeprecatedSessionStateEnd(builder): return builder.EndObject()
diff --git a/onnxruntime/core/flatbuffers/ort_flatbuffers_py/fbs/DeprecatedSubGraphSessionState.py b/onnxruntime/core/flatbuffers/ort_flatbuffers_py/fbs/DeprecatedSubGraphSessionState.py
new file mode 100644
index 0000000000000..52b450408632c
--- /dev/null
+++ b/onnxruntime/core/flatbuffers/ort_flatbuffers_py/fbs/DeprecatedSubGraphSessionState.py
@@ -0,0 +1,49 @@
+# automatically generated by the FlatBuffers compiler, do not modify
+
+# namespace: fbs
+
+import flatbuffers
+from flatbuffers.compat import import_numpy
+np = import_numpy()
+
+# deprecated: no longer using kernel def hashes
+class DeprecatedSubGraphSessionState(object):
+ __slots__ = ['_tab']
+
+ @classmethod
+ def GetRootAsDeprecatedSubGraphSessionState(cls, buf, offset):
+ n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
+ x = DeprecatedSubGraphSessionState()
+ x.Init(buf, n + offset)
+ return x
+
+ @classmethod
+ def DeprecatedSubGraphSessionStateBufferHasIdentifier(cls, buf, offset, size_prefixed=False):
+ return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x4F\x52\x54\x4D", size_prefixed=size_prefixed)
+
+ # DeprecatedSubGraphSessionState
+ def Init(self, buf, pos):
+ self._tab = flatbuffers.table.Table(buf, pos)
+
+ # DeprecatedSubGraphSessionState
+ def GraphId(self):
+ o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
+ if o != 0:
+ return self._tab.String(o + self._tab.Pos)
+ return None
+
+ # DeprecatedSubGraphSessionState
+ def SessionState(self):
+ o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6))
+ if o != 0:
+ x = self._tab.Indirect(o + self._tab.Pos)
+ from ort_flatbuffers_py.fbs.DeprecatedSessionState import DeprecatedSessionState
+ obj = DeprecatedSessionState()
+ obj.Init(self._tab.Bytes, x)
+ return obj
+ return None
+
+def DeprecatedSubGraphSessionStateStart(builder): builder.StartObject(2)
+def DeprecatedSubGraphSessionStateAddGraphId(builder, graphId): builder.PrependUOffsetTRelativeSlot(0, flatbuffers.number_types.UOffsetTFlags.py_type(graphId), 0)
+def DeprecatedSubGraphSessionStateAddSessionState(builder, sessionState): builder.PrependUOffsetTRelativeSlot(1, flatbuffers.number_types.UOffsetTFlags.py_type(sessionState), 0)
+def DeprecatedSubGraphSessionStateEnd(builder): return builder.EndObject()
diff --git a/onnxruntime/core/flatbuffers/ort_flatbuffers_py/fbs/InferenceSession.py b/onnxruntime/core/flatbuffers/ort_flatbuffers_py/fbs/InferenceSession.py
index d9b7f0d3ec0da..d5a67bf8b8c61 100644
--- a/onnxruntime/core/flatbuffers/ort_flatbuffers_py/fbs/InferenceSession.py
+++ b/onnxruntime/core/flatbuffers/ort_flatbuffers_py/fbs/InferenceSession.py
@@ -43,18 +43,18 @@ def Model(self):
return None
# InferenceSession
- def SessionState(self):
- o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(8))
+ def KernelTypeStrResolver(self):
+ o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(10))
if o != 0:
x = self._tab.Indirect(o + self._tab.Pos)
- from ort_flatbuffers_py.fbs.SessionState import SessionState
- obj = SessionState()
+ from ort_flatbuffers_py.fbs.KernelTypeStrResolver import KernelTypeStrResolver
+ obj = KernelTypeStrResolver()
obj.Init(self._tab.Bytes, x)
return obj
return None
-def InferenceSessionStart(builder): builder.StartObject(3)
+def InferenceSessionStart(builder): builder.StartObject(4)
def InferenceSessionAddOrtVersion(builder, ortVersion): builder.PrependUOffsetTRelativeSlot(0, flatbuffers.number_types.UOffsetTFlags.py_type(ortVersion), 0)
def InferenceSessionAddModel(builder, model): builder.PrependUOffsetTRelativeSlot(1, flatbuffers.number_types.UOffsetTFlags.py_type(model), 0)
-def InferenceSessionAddSessionState(builder, sessionState): builder.PrependUOffsetTRelativeSlot(2, flatbuffers.number_types.UOffsetTFlags.py_type(sessionState), 0)
+def InferenceSessionAddKernelTypeStrResolver(builder, kernelTypeStrResolver): builder.PrependUOffsetTRelativeSlot(3, flatbuffers.number_types.UOffsetTFlags.py_type(kernelTypeStrResolver), 0)
def InferenceSessionEnd(builder): return builder.EndObject()
diff --git a/onnxruntime/core/flatbuffers/ort_flatbuffers_py/fbs/KernelTypeStrArgsEntry.py b/onnxruntime/core/flatbuffers/ort_flatbuffers_py/fbs/KernelTypeStrArgsEntry.py
new file mode 100644
index 0000000000000..94f37b38481fd
--- /dev/null
+++ b/onnxruntime/core/flatbuffers/ort_flatbuffers_py/fbs/KernelTypeStrArgsEntry.py
@@ -0,0 +1,63 @@
+# automatically generated by the FlatBuffers compiler, do not modify
+
+# namespace: fbs
+
+import flatbuffers
+from flatbuffers.compat import import_numpy
+np = import_numpy()
+
+class KernelTypeStrArgsEntry(object):
+ __slots__ = ['_tab']
+
+ @classmethod
+ def GetRootAsKernelTypeStrArgsEntry(cls, buf, offset):
+ n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
+ x = KernelTypeStrArgsEntry()
+ x.Init(buf, n + offset)
+ return x
+
+ @classmethod
+ def KernelTypeStrArgsEntryBufferHasIdentifier(cls, buf, offset, size_prefixed=False):
+ return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x4F\x52\x54\x4D", size_prefixed=size_prefixed)
+
+ # KernelTypeStrArgsEntry
+ def Init(self, buf, pos):
+ self._tab = flatbuffers.table.Table(buf, pos)
+
+ # KernelTypeStrArgsEntry
+ def KernelTypeStr(self):
+ o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
+ if o != 0:
+ return self._tab.String(o + self._tab.Pos)
+ return None
+
+ # KernelTypeStrArgsEntry
+ def Args(self, j):
+ o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6))
+ if o != 0:
+ x = self._tab.Vector(o)
+ x += flatbuffers.number_types.UOffsetTFlags.py_type(j) * 4
+ x = self._tab.Indirect(x)
+ from ort_flatbuffers_py.fbs.ArgTypeAndIndex import ArgTypeAndIndex
+ obj = ArgTypeAndIndex()
+ obj.Init(self._tab.Bytes, x)
+ return obj
+ return None
+
+ # KernelTypeStrArgsEntry
+ def ArgsLength(self):
+ o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6))
+ if o != 0:
+ return self._tab.VectorLen(o)
+ return 0
+
+ # KernelTypeStrArgsEntry
+ def ArgsIsNone(self):
+ o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6))
+ return o == 0
+
+def KernelTypeStrArgsEntryStart(builder): builder.StartObject(2)
+def KernelTypeStrArgsEntryAddKernelTypeStr(builder, kernelTypeStr): builder.PrependUOffsetTRelativeSlot(0, flatbuffers.number_types.UOffsetTFlags.py_type(kernelTypeStr), 0)
+def KernelTypeStrArgsEntryAddArgs(builder, args): builder.PrependUOffsetTRelativeSlot(1, flatbuffers.number_types.UOffsetTFlags.py_type(args), 0)
+def KernelTypeStrArgsEntryStartArgsVector(builder, numElems): return builder.StartVector(4, numElems, 4)
+def KernelTypeStrArgsEntryEnd(builder): return builder.EndObject()
diff --git a/onnxruntime/core/flatbuffers/ort_flatbuffers_py/fbs/KernelTypeStrResolver.py b/onnxruntime/core/flatbuffers/ort_flatbuffers_py/fbs/KernelTypeStrResolver.py
new file mode 100644
index 0000000000000..ef2cd95df91f7
--- /dev/null
+++ b/onnxruntime/core/flatbuffers/ort_flatbuffers_py/fbs/KernelTypeStrResolver.py
@@ -0,0 +1,55 @@
+# automatically generated by the FlatBuffers compiler, do not modify
+
+# namespace: fbs
+
+import flatbuffers
+from flatbuffers.compat import import_numpy
+np = import_numpy()
+
+class KernelTypeStrResolver(object):
+ __slots__ = ['_tab']
+
+ @classmethod
+ def GetRootAsKernelTypeStrResolver(cls, buf, offset):
+ n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
+ x = KernelTypeStrResolver()
+ x.Init(buf, n + offset)
+ return x
+
+ @classmethod
+ def KernelTypeStrResolverBufferHasIdentifier(cls, buf, offset, size_prefixed=False):
+ return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x4F\x52\x54\x4D", size_prefixed=size_prefixed)
+
+ # KernelTypeStrResolver
+ def Init(self, buf, pos):
+ self._tab = flatbuffers.table.Table(buf, pos)
+
+ # KernelTypeStrResolver
+ def OpKernelTypeStrArgs(self, j):
+ o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
+ if o != 0:
+ x = self._tab.Vector(o)
+ x += flatbuffers.number_types.UOffsetTFlags.py_type(j) * 4
+ x = self._tab.Indirect(x)
+ from ort_flatbuffers_py.fbs.OpIdKernelTypeStrArgsEntry import OpIdKernelTypeStrArgsEntry
+ obj = OpIdKernelTypeStrArgsEntry()
+ obj.Init(self._tab.Bytes, x)
+ return obj
+ return None
+
+ # KernelTypeStrResolver
+ def OpKernelTypeStrArgsLength(self):
+ o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
+ if o != 0:
+ return self._tab.VectorLen(o)
+ return 0
+
+ # KernelTypeStrResolver
+ def OpKernelTypeStrArgsIsNone(self):
+ o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
+ return o == 0
+
+def KernelTypeStrResolverStart(builder): builder.StartObject(1)
+def KernelTypeStrResolverAddOpKernelTypeStrArgs(builder, opKernelTypeStrArgs): builder.PrependUOffsetTRelativeSlot(0, flatbuffers.number_types.UOffsetTFlags.py_type(opKernelTypeStrArgs), 0)
+def KernelTypeStrResolverStartOpKernelTypeStrArgsVector(builder, numElems): return builder.StartVector(4, numElems, 4)
+def KernelTypeStrResolverEnd(builder): return builder.EndObject()
diff --git a/onnxruntime/core/flatbuffers/ort_flatbuffers_py/fbs/OpIdKernelTypeStrArgsEntry.py b/onnxruntime/core/flatbuffers/ort_flatbuffers_py/fbs/OpIdKernelTypeStrArgsEntry.py
new file mode 100644
index 0000000000000..97eea172b786b
--- /dev/null
+++ b/onnxruntime/core/flatbuffers/ort_flatbuffers_py/fbs/OpIdKernelTypeStrArgsEntry.py
@@ -0,0 +1,63 @@
+# automatically generated by the FlatBuffers compiler, do not modify
+
+# namespace: fbs
+
+import flatbuffers
+from flatbuffers.compat import import_numpy
+np = import_numpy()
+
+class OpIdKernelTypeStrArgsEntry(object):
+ __slots__ = ['_tab']
+
+ @classmethod
+ def GetRootAsOpIdKernelTypeStrArgsEntry(cls, buf, offset):
+ n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
+ x = OpIdKernelTypeStrArgsEntry()
+ x.Init(buf, n + offset)
+ return x
+
+ @classmethod
+ def OpIdKernelTypeStrArgsEntryBufferHasIdentifier(cls, buf, offset, size_prefixed=False):
+ return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x4F\x52\x54\x4D", size_prefixed=size_prefixed)
+
+ # OpIdKernelTypeStrArgsEntry
+ def Init(self, buf, pos):
+ self._tab = flatbuffers.table.Table(buf, pos)
+
+ # OpIdKernelTypeStrArgsEntry
+ def OpId(self):
+ o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
+ if o != 0:
+ return self._tab.String(o + self._tab.Pos)
+ return None
+
+ # OpIdKernelTypeStrArgsEntry
+ def KernelTypeStrArgs(self, j):
+ o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6))
+ if o != 0:
+ x = self._tab.Vector(o)
+ x += flatbuffers.number_types.UOffsetTFlags.py_type(j) * 4
+ x = self._tab.Indirect(x)
+ from ort_flatbuffers_py.fbs.KernelTypeStrArgsEntry import KernelTypeStrArgsEntry
+ obj = KernelTypeStrArgsEntry()
+ obj.Init(self._tab.Bytes, x)
+ return obj
+ return None
+
+ # OpIdKernelTypeStrArgsEntry
+ def KernelTypeStrArgsLength(self):
+ o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6))
+ if o != 0:
+ return self._tab.VectorLen(o)
+ return 0
+
+ # OpIdKernelTypeStrArgsEntry
+ def KernelTypeStrArgsIsNone(self):
+ o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6))
+ return o == 0
+
+def OpIdKernelTypeStrArgsEntryStart(builder): builder.StartObject(2)
+def OpIdKernelTypeStrArgsEntryAddOpId(builder, opId): builder.PrependUOffsetTRelativeSlot(0, flatbuffers.number_types.UOffsetTFlags.py_type(opId), 0)
+def OpIdKernelTypeStrArgsEntryAddKernelTypeStrArgs(builder, kernelTypeStrArgs): builder.PrependUOffsetTRelativeSlot(1, flatbuffers.number_types.UOffsetTFlags.py_type(kernelTypeStrArgs), 0)
+def OpIdKernelTypeStrArgsEntryStartKernelTypeStrArgsVector(builder, numElems): return builder.StartVector(4, numElems, 4)
+def OpIdKernelTypeStrArgsEntryEnd(builder): return builder.EndObject()
diff --git a/onnxruntime/core/flatbuffers/ort_flatbuffers_py/fbs/RuntimeOptimizationRecord.py b/onnxruntime/core/flatbuffers/ort_flatbuffers_py/fbs/RuntimeOptimizationRecord.py
index 488572506975b..7880cc565f69d 100644
--- a/onnxruntime/core/flatbuffers/ort_flatbuffers_py/fbs/RuntimeOptimizationRecord.py
+++ b/onnxruntime/core/flatbuffers/ort_flatbuffers_py/fbs/RuntimeOptimizationRecord.py
@@ -45,33 +45,28 @@ def NodesToOptimizeIndices(self):
return None
# RuntimeOptimizationRecord
- def ProducedNodes(self, j):
- o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(8))
+ def ProducedOpIds(self, j):
+ o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(10))
if o != 0:
- x = self._tab.Vector(o)
- x += flatbuffers.number_types.UOffsetTFlags.py_type(j) * 4
- x = self._tab.Indirect(x)
- from ort_flatbuffers_py.fbs.NodeIndexAndKernelDefHash import NodeIndexAndKernelDefHash
- obj = NodeIndexAndKernelDefHash()
- obj.Init(self._tab.Bytes, x)
- return obj
- return None
+ a = self._tab.Vector(o)
+ return self._tab.String(a + flatbuffers.number_types.UOffsetTFlags.py_type(j * 4))
+ return ""
# RuntimeOptimizationRecord
- def ProducedNodesLength(self):
- o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(8))
+ def ProducedOpIdsLength(self):
+ o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(10))
if o != 0:
return self._tab.VectorLen(o)
return 0
# RuntimeOptimizationRecord
- def ProducedNodesIsNone(self):
- o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(8))
+ def ProducedOpIdsIsNone(self):
+ o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(10))
return o == 0
-def RuntimeOptimizationRecordStart(builder): builder.StartObject(3)
+def RuntimeOptimizationRecordStart(builder): builder.StartObject(4)
def RuntimeOptimizationRecordAddActionId(builder, actionId): builder.PrependUOffsetTRelativeSlot(0, flatbuffers.number_types.UOffsetTFlags.py_type(actionId), 0)
def RuntimeOptimizationRecordAddNodesToOptimizeIndices(builder, nodesToOptimizeIndices): builder.PrependUOffsetTRelativeSlot(1, flatbuffers.number_types.UOffsetTFlags.py_type(nodesToOptimizeIndices), 0)
-def RuntimeOptimizationRecordAddProducedNodes(builder, producedNodes): builder.PrependUOffsetTRelativeSlot(2, flatbuffers.number_types.UOffsetTFlags.py_type(producedNodes), 0)
-def RuntimeOptimizationRecordStartProducedNodesVector(builder, numElems): return builder.StartVector(4, numElems, 4)
+def RuntimeOptimizationRecordAddProducedOpIds(builder, producedOpIds): builder.PrependUOffsetTRelativeSlot(3, flatbuffers.number_types.UOffsetTFlags.py_type(producedOpIds), 0)
+def RuntimeOptimizationRecordStartProducedOpIdsVector(builder, numElems): return builder.StartVector(4, numElems, 4)
def RuntimeOptimizationRecordEnd(builder): return builder.EndObject()
diff --git a/onnxruntime/core/flatbuffers/ort_flatbuffers_py/fbs/SubGraphSessionState.py b/onnxruntime/core/flatbuffers/ort_flatbuffers_py/fbs/SubGraphSessionState.py
deleted file mode 100644
index dcbabc619d866..0000000000000
--- a/onnxruntime/core/flatbuffers/ort_flatbuffers_py/fbs/SubGraphSessionState.py
+++ /dev/null
@@ -1,48 +0,0 @@
-# automatically generated by the FlatBuffers compiler, do not modify
-
-# namespace: fbs
-
-import flatbuffers
-from flatbuffers.compat import import_numpy
-np = import_numpy()
-
-class SubGraphSessionState(object):
- __slots__ = ['_tab']
-
- @classmethod
- def GetRootAsSubGraphSessionState(cls, buf, offset):
- n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
- x = SubGraphSessionState()
- x.Init(buf, n + offset)
- return x
-
- @classmethod
- def SubGraphSessionStateBufferHasIdentifier(cls, buf, offset, size_prefixed=False):
- return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x4F\x52\x54\x4D", size_prefixed=size_prefixed)
-
- # SubGraphSessionState
- def Init(self, buf, pos):
- self._tab = flatbuffers.table.Table(buf, pos)
-
- # SubGraphSessionState
- def GraphId(self):
- o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
- if o != 0:
- return self._tab.String(o + self._tab.Pos)
- return None
-
- # SubGraphSessionState
- def SessionState(self):
- o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6))
- if o != 0:
- x = self._tab.Indirect(o + self._tab.Pos)
- from ort_flatbuffers_py.fbs.SessionState import SessionState
- obj = SessionState()
- obj.Init(self._tab.Bytes, x)
- return obj
- return None
-
-def SubGraphSessionStateStart(builder): builder.StartObject(2)
-def SubGraphSessionStateAddGraphId(builder, graphId): builder.PrependUOffsetTRelativeSlot(0, flatbuffers.number_types.UOffsetTFlags.py_type(graphId), 0)
-def SubGraphSessionStateAddSessionState(builder, sessionState): builder.PrependUOffsetTRelativeSlot(1, flatbuffers.number_types.UOffsetTFlags.py_type(sessionState), 0)
-def SubGraphSessionStateEnd(builder): return builder.EndObject()
diff --git a/onnxruntime/core/flatbuffers/ort_format_version.h b/onnxruntime/core/flatbuffers/ort_format_version.h
index e48cb6ebb82b2..c6c0ad7c20fa3 100644
--- a/onnxruntime/core/flatbuffers/ort_format_version.h
+++ b/onnxruntime/core/flatbuffers/ort_format_version.h
@@ -19,7 +19,8 @@ namespace onnxruntime {
// Version 2 - add serialization/deserialization of sparse_initializer
// Version 3 - add `graph_doc_string` to Model
// Version 4 - update kernel def hashing to not depend on ordering of type constraint types (NOT BACKWARDS COMPATIBLE)
-constexpr const char* kOrtModelVersion = "4";
+// Version 5 - deprecate kernel def hashes and add KernelTypeStrResolver info to replace them (NOT BACKWARDS COMPATIBLE)
+constexpr const char* kOrtModelVersion = "5";
// Check if the given ort model version is supported in this build
inline bool IsOrtModelVersionSupported(std::string_view ort_model_version) {
diff --git a/onnxruntime/core/flatbuffers/schema/README.md b/onnxruntime/core/flatbuffers/schema/README.md
index c24c1a37e972a..4c15f526551e7 100644
--- a/onnxruntime/core/flatbuffers/schema/README.md
+++ b/onnxruntime/core/flatbuffers/schema/README.md
@@ -1,11 +1,15 @@
# ORT File Format
-This directory contains [the ORT file format schema](ort.fbs) and [the generated C++ header file](ort.fbs.h) for the ORT file format.
+This directory contains [the ORT file format schema](ort.fbs) and [the generated C++ header file](ort.fbs.h) for the
+ORT file format.
-[The ORT file format schema](ort.fbs) uses the [FlatBuffers](https://github.com/google/flatbuffers) serialization library.
+[The ORT file format schema](ort.fbs) uses the [FlatBuffers](https://github.com/google/flatbuffers) serialization
+library.
-Please do not directly modify [the generated C++ header file](ort.fbs.h) or [the generated Python binding files](../ort_flatbuffers_py).
+Please do not directly modify [the generated C++ header file](ort.fbs.h) or [the generated Python binding
+files](../ort_flatbuffers_py).
-The flatbuffers compiler (flatc) is built as part of an ONNX Runtime build. It is located in the external/flatbuffers subdirectory of the build output directory.
+The flatbuffers compiler (flatc) is built as part of an ONNX Runtime build. It is located in the external/flatbuffers
+subdirectory of the build output directory.
e.g.
- Windows Debug build
@@ -13,7 +17,8 @@ e.g.
- Linux Debug build
- /build/Linux/external/flatbuffers/Debug/flatc
-It is possible to use another flatc as well, e.g., from a separate installation. Note that ONNX Runtime uses FlatBuffers 1.12.
+It is possible to use another flatc as well, e.g., from a separate installation. Note that ONNX Runtime uses
+FlatBuffers 1.12.
To update the ORT file format schema and generated files:
1. Modify [the ORT file format schema](ort.fbs).
@@ -24,16 +29,31 @@ To update the ORT file format schema and generated files:
```
# ORT FB format version history
-In [ort_format_version.h](../ort_format_version.h), see `IsOrtModelVersionSupported()` for version array and `kOrtModelVersion` for currently supported version.
+In [ort_format_version.h](../ort_format_version.h), see `IsOrtModelVersionSupported()` for the supported versions and
+`kOrtModelVersion` for the current version.
-## Version 1. History begins
-Initial support for FlatBuffers that includes Model support. Graph support including Attributes, Tensors, Tensor Sequences, Maps and Sequences. Constant initializers are also supported. Constant nodes are converted to constant initializers in the ORT format.
+## Version 1
+History begins.
-## Version 2.
-Support for sparse initializers. Sparse intializers are stored within ORT FlatBuffers format, which includes sparse initializers converted from a Constant node attribute.
+Initial support for FlatBuffers that includes Model support. Graph support including Attributes, Tensors, Tensor
+Sequences, Maps and Sequences. Constant initializers are also supported. Constant nodes are converted to constant
+initializers in the ORT format.
-## Version 3.
+## Version 2
+Support for sparse initializers. Sparse initializers are stored within ORT FlatBuffers format, which includes sparse
+initializers converted from a Constant node attribute.
+
+## Version 3
Support for storing `graph_doc_string` field in Model (ORT FlatBuffers format).
-## Version 4.
+## Version 4
Update kernel def hashing to not depend on ordering of type constraint types (NOT BACKWARDS COMPATIBLE).
+
+## Version 5
+Deprecate kernel def hashes and add KernelTypeStrResolver info to replace them (NOT BACKWARDS COMPATIBLE).
+The change to the ORT format itself is not backwards compatibility-breaking, but ORT does not provide backwards
+compatibility for processing older models with missing KernelTypeStrResolver info.
+
+The motivation for this update is to support additional execution providers with statically registered kernels.
+The original approach of using kernel def hashes is less extensible, as it requires the execution provider that
+provides the hashes to be enabled at model conversion time.
diff --git a/onnxruntime/core/flatbuffers/schema/compile_schema.py b/onnxruntime/core/flatbuffers/schema/compile_schema.py
index 9f4372a5f32ad..55d332682b937 100644
--- a/onnxruntime/core/flatbuffers/schema/compile_schema.py
+++ b/onnxruntime/core/flatbuffers/schema/compile_schema.py
@@ -4,10 +4,10 @@
import argparse
import pathlib
+import shutil
import subprocess
import tempfile
-
SCRIPT_DIR = pathlib.Path(__file__).parent.resolve()
@@ -22,16 +22,16 @@ def update_namespace(schema_path: pathlib.Path, updated_schema_path: pathlib.Pat
output.write(line.replace('onnxruntime.fbs', 'ort_flatbuffers_py.fbs'))
-def generate_python(flatc: pathlib.Path, schema_path: pathlib.Path):
+def generate_python(flatc: pathlib.Path, schema_path: pathlib.Path, output_dir: pathlib.Path):
# run flatc to generate Python code
cmd = [str(flatc), '--python', str(schema_path)]
- subprocess.run(cmd, check=True, cwd=SCRIPT_DIR.parent)
+ subprocess.run(cmd, check=True, cwd=output_dir)
-def create_init_py():
+def create_init_py(output_dir: pathlib.Path):
# create an __init__.py that imports all the py files so we can just 'import ort_flatbuffers_py.fbs'
# in a script that wants to process an ORT format model
- init_py_path = SCRIPT_DIR.parent / 'ort_flatbuffers_py/fbs/__init__.py'
+ init_py_path = output_dir / 'ort_flatbuffers_py/fbs/__init__.py'
with open(init_py_path, 'w') as init_py:
init_py.write('''from os.path import dirname, basename, isfile, join, splitext
import glob
@@ -69,10 +69,20 @@ def main():
if 'python' in languages:
with tempfile.TemporaryDirectory() as temp_dir_name:
- updated_schema_path = pathlib.Path(temp_dir_name, 'ort.py.fbs').resolve()
+ temp_dir = pathlib.Path(temp_dir_name).resolve()
+ updated_schema_path = temp_dir / 'ort.py.fbs'
update_namespace(schema_path, updated_schema_path)
- generate_python(flatc, updated_schema_path)
- create_init_py()
+
+ output_dir = temp_dir / 'out'
+ output_dir.mkdir()
+ generate_python(flatc, updated_schema_path, output_dir)
+ create_init_py(output_dir)
+
+ # replace generated files in repo
+ target_dir = SCRIPT_DIR.parent / 'ort_flatbuffers_py'
+ if target_dir.is_dir():
+ shutil.rmtree(target_dir)
+ shutil.move(str(output_dir / 'ort_flatbuffers_py'), str(target_dir))
if 'cpp' in languages:
generate_cpp(flatc, schema_path)
diff --git a/onnxruntime/core/flatbuffers/schema/ort.fbs b/onnxruntime/core/flatbuffers/schema/ort.fbs
index ec3b09c3a8f94..62f4362938513 100644
--- a/onnxruntime/core/flatbuffers/schema/ort.fbs
+++ b/onnxruntime/core/flatbuffers/schema/ort.fbs
@@ -22,7 +22,7 @@ enum AttributeType : int32 {
// Shape
table Shape {
-dim:[Dimension];
+ dim:[Dimension];
}
table Dimension {
@@ -63,17 +63,17 @@ enum TensorDataType : int32 {
BFLOAT16 = 16,
}
-table TensorTypeAndShape{
+table TensorTypeAndShape {
elem_type:TensorDataType;
shape:Shape;
}
-table MapType{
+table MapType {
key_type:TensorDataType;
value_type:onnxruntime.fbs.TypeInfo;
}
-table SequenceType{
+table SequenceType {
elem_type:onnxruntime.fbs.TypeInfo;
}
@@ -151,7 +151,7 @@ table Tensor {
raw_data:[uint8];
- // string_data is least used, leave it at the end
+ // string_data is least used
string_data:[string];
}
@@ -161,7 +161,7 @@ table SparseTensor {
dims:[int64];
}
-table Attribute{
+table Attribute {
name:string;
doc_string:string;
@@ -184,7 +184,7 @@ table Attribute{
/// nodes to consider for a runtime optimization
/// see corresponding type in onnxruntime/core/graph/runtime_optimization_record.h
-table NodesToOptimizeIndices{
+table NodesToOptimizeIndices {
node_indices:[uint32];
num_inputs:uint32;
num_outputs:uint32;
@@ -194,30 +194,32 @@ table NodesToOptimizeIndices{
num_variadic_outputs:uint32;
}
-table NodeIndexAndKernelDefHash{
+/// deprecated: no longer using kernel def hashes
+table DeprecatedNodeIndexAndKernelDefHash {
node_index:uint32;
kernel_def_hash:uint64;
}
/// a single runtime optimization
/// see corresponding type in onnxruntime/core/graph/runtime_optimization_record.h
-table RuntimeOptimizationRecord{
+table RuntimeOptimizationRecord {
action_id:string;
nodes_to_optimize_indices:NodesToOptimizeIndices;
- produced_nodes:[NodeIndexAndKernelDefHash];
+ produced_nodes:[DeprecatedNodeIndexAndKernelDefHash] (deprecated);
+ produced_op_ids:[string];
}
-table RuntimeOptimizationRecordContainerEntry{
+table RuntimeOptimizationRecordContainerEntry {
optimizer_name:string (key);
runtime_optimization_records:[RuntimeOptimizationRecord];
}
-table RuntimeOptimizations{
+table RuntimeOptimizations {
/// mapping from optimizer name to [RuntimeOptimizationRecord]
records:[RuntimeOptimizationRecordContainerEntry];
}
-table Graph{
+table Graph {
initializers:[Tensor];
node_args:[ValueInfo];
@@ -253,21 +255,49 @@ table Model {
metadata_props:[StringStringEntry];
}
-table KernelCreateInfos {
+/// deprecated: no longer using kernel def hashes
+table DeprecatedKernelCreateInfos {
node_indices:[uint32];
kernel_def_hashes:[uint64];
}
-table SubGraphSessionState {
- // graph_id can be used to binary search SubGraphSessionState in SessionState.sub_graph_session_states
+/// deprecated: no longer using kernel def hashes
+table DeprecatedSubGraphSessionState {
+ // graph_id can be used to binary search DeprecatedSubGraphSessionState in
+ // DeprecatedSessionState.sub_graph_session_states
graph_id:string (key);
- session_state:SessionState;
+ session_state:DeprecatedSessionState;
}
-table SessionState {
- kernels:KernelCreateInfos;
- sub_graph_session_states:[SubGraphSessionState];
+/// deprecated: no longer using kernel def hashes
+table DeprecatedSessionState {
+ kernels:DeprecatedKernelCreateInfos;
+ sub_graph_session_states:[DeprecatedSubGraphSessionState];
+}
+
+enum ArgType : int8 {
+ INPUT = 0,
+ OUTPUT = 1,
+}
+
+table ArgTypeAndIndex {
+ arg_type:ArgType;
+ index:uint32;
+}
+
+table KernelTypeStrArgsEntry {
+ kernel_type_str:string (key);
+ args:[ArgTypeAndIndex];
+}
+
+table OpIdKernelTypeStrArgsEntry {
+ op_id:string (key);
+ kernel_type_str_args:[KernelTypeStrArgsEntry];
+}
+
+table KernelTypeStrResolver {
+ op_kernel_type_str_args:[OpIdKernelTypeStrArgsEntry];
}
table InferenceSession {
@@ -277,7 +307,9 @@ table InferenceSession {
ort_version:string;
model:Model;
- session_state:SessionState;
+ session_state:DeprecatedSessionState (deprecated);
+
+ kernel_type_str_resolver:KernelTypeStrResolver;
}
root_type InferenceSession;
diff --git a/onnxruntime/core/flatbuffers/schema/ort.fbs.h b/onnxruntime/core/flatbuffers/schema/ort.fbs.h
index 1ee7675bf1cf5..827970c70e4c2 100644
--- a/onnxruntime/core/flatbuffers/schema/ort.fbs.h
+++ b/onnxruntime/core/flatbuffers/schema/ort.fbs.h
@@ -56,8 +56,8 @@ struct AttributeBuilder;
struct NodesToOptimizeIndices;
struct NodesToOptimizeIndicesBuilder;
-struct NodeIndexAndKernelDefHash;
-struct NodeIndexAndKernelDefHashBuilder;
+struct DeprecatedNodeIndexAndKernelDefHash;
+struct DeprecatedNodeIndexAndKernelDefHashBuilder;
struct RuntimeOptimizationRecord;
struct RuntimeOptimizationRecordBuilder;
@@ -77,14 +77,26 @@ struct StringStringEntryBuilder;
struct Model;
struct ModelBuilder;
-struct KernelCreateInfos;
-struct KernelCreateInfosBuilder;
+struct DeprecatedKernelCreateInfos;
+struct DeprecatedKernelCreateInfosBuilder;
-struct SubGraphSessionState;
-struct SubGraphSessionStateBuilder;
+struct DeprecatedSubGraphSessionState;
+struct DeprecatedSubGraphSessionStateBuilder;
-struct SessionState;
-struct SessionStateBuilder;
+struct DeprecatedSessionState;
+struct DeprecatedSessionStateBuilder;
+
+struct ArgTypeAndIndex;
+struct ArgTypeAndIndexBuilder;
+
+struct KernelTypeStrArgsEntry;
+struct KernelTypeStrArgsEntryBuilder;
+
+struct OpIdKernelTypeStrArgsEntry;
+struct OpIdKernelTypeStrArgsEntryBuilder;
+
+struct KernelTypeStrResolver;
+struct KernelTypeStrResolverBuilder;
struct InferenceSession;
struct InferenceSessionBuilder;
@@ -345,6 +357,36 @@ template<> struct TypeInfoValueTraits {
bool VerifyTypeInfoValue(flatbuffers::Verifier &verifier, const void *obj, TypeInfoValue type);
bool VerifyTypeInfoValueVector(flatbuffers::Verifier &verifier, const flatbuffers::Vector> *values, const flatbuffers::Vector *types);
+enum class ArgType : int8_t {
+ INPUT = 0,
+ OUTPUT = 1,
+ MIN = INPUT,
+ MAX = OUTPUT
+};
+
+inline const ArgType (&EnumValuesArgType())[2] {
+ static const ArgType values[] = {
+ ArgType::INPUT,
+ ArgType::OUTPUT
+ };
+ return values;
+}
+
+inline const char * const *EnumNamesArgType() {
+ static const char * const names[3] = {
+ "INPUT",
+ "OUTPUT",
+ nullptr
+ };
+ return names;
+}
+
+inline const char *EnumNameArgType(ArgType e) {
+ if (flatbuffers::IsOutRange(e, ArgType::INPUT, ArgType::OUTPUT)) return "";
+ const size_t index = static_cast(e);
+ return EnumNamesArgType()[index];
+}
+
FLATBUFFERS_MANUALLY_ALIGNED_STRUCT(4) EdgeEnd FLATBUFFERS_FINAL_CLASS {
private:
uint32_t node_index_;
@@ -1793,8 +1835,9 @@ inline flatbuffers::Offset CreateNodesToOptimizeIndicesD
num_variadic_outputs);
}
-struct NodeIndexAndKernelDefHash FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table {
- typedef NodeIndexAndKernelDefHashBuilder Builder;
+/// deprecated: no longer using kernel def hashes
+struct DeprecatedNodeIndexAndKernelDefHash FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table {
+ typedef DeprecatedNodeIndexAndKernelDefHashBuilder Builder;
enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE {
VT_NODE_INDEX = 4,
VT_KERNEL_DEF_HASH = 6
@@ -1813,33 +1856,33 @@ struct NodeIndexAndKernelDefHash FLATBUFFERS_FINAL_CLASS : private flatbuffers::
}
};
-struct NodeIndexAndKernelDefHashBuilder {
- typedef NodeIndexAndKernelDefHash Table;
+struct DeprecatedNodeIndexAndKernelDefHashBuilder {
+ typedef DeprecatedNodeIndexAndKernelDefHash Table;
flatbuffers::FlatBufferBuilder &fbb_;
flatbuffers::uoffset_t start_;
void add_node_index(uint32_t node_index) {
- fbb_.AddElement(NodeIndexAndKernelDefHash::VT_NODE_INDEX, node_index, 0);
+ fbb_.AddElement(DeprecatedNodeIndexAndKernelDefHash::VT_NODE_INDEX, node_index, 0);
}
void add_kernel_def_hash(uint64_t kernel_def_hash) {
- fbb_.AddElement(NodeIndexAndKernelDefHash::VT_KERNEL_DEF_HASH, kernel_def_hash, 0);
+ fbb_.AddElement(DeprecatedNodeIndexAndKernelDefHash::VT_KERNEL_DEF_HASH, kernel_def_hash, 0);
}
- explicit NodeIndexAndKernelDefHashBuilder(flatbuffers::FlatBufferBuilder &_fbb)
+ explicit DeprecatedNodeIndexAndKernelDefHashBuilder(flatbuffers::FlatBufferBuilder &_fbb)
: fbb_(_fbb) {
start_ = fbb_.StartTable();
}
- NodeIndexAndKernelDefHashBuilder &operator=(const NodeIndexAndKernelDefHashBuilder &);
- flatbuffers::Offset Finish() {
+ DeprecatedNodeIndexAndKernelDefHashBuilder &operator=(const DeprecatedNodeIndexAndKernelDefHashBuilder &);
+ flatbuffers::Offset Finish() {
const auto end = fbb_.EndTable(start_);
- auto o = flatbuffers::Offset(end);
+ auto o = flatbuffers::Offset(end);
return o;
}
};
-inline flatbuffers::Offset CreateNodeIndexAndKernelDefHash(
+inline flatbuffers::Offset CreateDeprecatedNodeIndexAndKernelDefHash(
flatbuffers::FlatBufferBuilder &_fbb,
uint32_t node_index = 0,
uint64_t kernel_def_hash = 0) {
- NodeIndexAndKernelDefHashBuilder builder_(_fbb);
+ DeprecatedNodeIndexAndKernelDefHashBuilder builder_(_fbb);
builder_.add_kernel_def_hash(kernel_def_hash);
builder_.add_node_index(node_index);
return builder_.Finish();
@@ -1852,7 +1895,7 @@ struct RuntimeOptimizationRecord FLATBUFFERS_FINAL_CLASS : private flatbuffers::
enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE {
VT_ACTION_ID = 4,
VT_NODES_TO_OPTIMIZE_INDICES = 6,
- VT_PRODUCED_NODES = 8
+ VT_PRODUCED_OP_IDS = 10
};
const flatbuffers::String *action_id() const {
return GetPointer(VT_ACTION_ID);
@@ -1860,8 +1903,8 @@ struct RuntimeOptimizationRecord FLATBUFFERS_FINAL_CLASS : private flatbuffers::
const onnxruntime::fbs::NodesToOptimizeIndices *nodes_to_optimize_indices() const {
return GetPointer(VT_NODES_TO_OPTIMIZE_INDICES);
}
- const flatbuffers::Vector> *produced_nodes() const {
- return GetPointer> *>(VT_PRODUCED_NODES);
+ const flatbuffers::Vector> *produced_op_ids() const {
+ return GetPointer> *>(VT_PRODUCED_OP_IDS);
}
bool Verify(flatbuffers::Verifier &verifier) const {
return VerifyTableStart(verifier) &&
@@ -1869,9 +1912,9 @@ struct RuntimeOptimizationRecord FLATBUFFERS_FINAL_CLASS : private flatbuffers::
verifier.VerifyString(action_id()) &&
VerifyOffset(verifier, VT_NODES_TO_OPTIMIZE_INDICES) &&
verifier.VerifyTable(nodes_to_optimize_indices()) &&
- VerifyOffset(verifier, VT_PRODUCED_NODES) &&
- verifier.VerifyVector(produced_nodes()) &&
- verifier.VerifyVectorOfTables(produced_nodes()) &&
+ VerifyOffset(verifier, VT_PRODUCED_OP_IDS) &&
+ verifier.VerifyVector(produced_op_ids()) &&
+ verifier.VerifyVectorOfStrings(produced_op_ids()) &&
verifier.EndTable();
}
};
@@ -1886,8 +1929,8 @@ struct RuntimeOptimizationRecordBuilder {
void add_nodes_to_optimize_indices(flatbuffers::Offset nodes_to_optimize_indices) {
fbb_.AddOffset(RuntimeOptimizationRecord::VT_NODES_TO_OPTIMIZE_INDICES, nodes_to_optimize_indices);
}
- void add_produced_nodes(flatbuffers::Offset>> produced_nodes) {
- fbb_.AddOffset(RuntimeOptimizationRecord::VT_PRODUCED_NODES, produced_nodes);
+ void add_produced_op_ids(flatbuffers::Offset>> produced_op_ids) {
+ fbb_.AddOffset(RuntimeOptimizationRecord::VT_PRODUCED_OP_IDS, produced_op_ids);
}
explicit RuntimeOptimizationRecordBuilder(flatbuffers::FlatBufferBuilder &_fbb)
: fbb_(_fbb) {
@@ -1905,9 +1948,9 @@ inline flatbuffers::Offset CreateRuntimeOptimizationR
flatbuffers::FlatBufferBuilder &_fbb,
flatbuffers::Offset action_id = 0,
flatbuffers::Offset nodes_to_optimize_indices = 0,
- flatbuffers::Offset>> produced_nodes = 0) {
+ flatbuffers::Offset>> produced_op_ids = 0) {
RuntimeOptimizationRecordBuilder builder_(_fbb);
- builder_.add_produced_nodes(produced_nodes);
+ builder_.add_produced_op_ids(produced_op_ids);
builder_.add_nodes_to_optimize_indices(nodes_to_optimize_indices);
builder_.add_action_id(action_id);
return builder_.Finish();
@@ -1917,14 +1960,14 @@ inline flatbuffers::Offset CreateRuntimeOptimizationR
flatbuffers::FlatBufferBuilder &_fbb,
const char *action_id = nullptr,
flatbuffers::Offset nodes_to_optimize_indices = 0,
- const std::vector> *produced_nodes = nullptr) {
+ const std::vector> *produced_op_ids = nullptr) {
auto action_id__ = action_id ? _fbb.CreateString(action_id) : 0;
- auto produced_nodes__ = produced_nodes ? _fbb.CreateVector>(*produced_nodes) : 0;
+ auto produced_op_ids__ = produced_op_ids ? _fbb.CreateVector>(*produced_op_ids) : 0;
return onnxruntime::fbs::CreateRuntimeOptimizationRecord(
_fbb,
action_id__,
nodes_to_optimize_indices,
- produced_nodes__);
+ produced_op_ids__);
}
struct RuntimeOptimizationRecordContainerEntry FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table {
@@ -2464,8 +2507,9 @@ inline flatbuffers::Offset CreateModelDirect(
metadata_props__);
}
-struct KernelCreateInfos FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table {
- typedef KernelCreateInfosBuilder Builder;
+/// deprecated: no longer using kernel def hashes
+struct DeprecatedKernelCreateInfos FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table {
+ typedef DeprecatedKernelCreateInfosBuilder Builder;
enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE {
VT_NODE_INDICES = 4,
VT_KERNEL_DEF_HASHES = 6
@@ -2486,52 +2530,53 @@ struct KernelCreateInfos FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table {
}
};
-struct KernelCreateInfosBuilder {
- typedef KernelCreateInfos Table;
+struct DeprecatedKernelCreateInfosBuilder {
+ typedef DeprecatedKernelCreateInfos Table;
flatbuffers::FlatBufferBuilder &fbb_;
flatbuffers::uoffset_t start_;
void add_node_indices(flatbuffers::Offset> node_indices) {
- fbb_.AddOffset(KernelCreateInfos::VT_NODE_INDICES, node_indices);
+ fbb_.AddOffset(DeprecatedKernelCreateInfos::VT_NODE_INDICES, node_indices);
}
void add_kernel_def_hashes(flatbuffers::Offset> kernel_def_hashes) {
- fbb_.AddOffset(KernelCreateInfos::VT_KERNEL_DEF_HASHES, kernel_def_hashes);
+ fbb_.AddOffset(DeprecatedKernelCreateInfos::VT_KERNEL_DEF_HASHES, kernel_def_hashes);
}
- explicit KernelCreateInfosBuilder(flatbuffers::FlatBufferBuilder &_fbb)
+ explicit DeprecatedKernelCreateInfosBuilder(flatbuffers::FlatBufferBuilder &_fbb)
: fbb_(_fbb) {
start_ = fbb_.StartTable();
}
- KernelCreateInfosBuilder &operator=(const KernelCreateInfosBuilder &);
- flatbuffers::Offset Finish() {
+ DeprecatedKernelCreateInfosBuilder &operator=(const DeprecatedKernelCreateInfosBuilder &);
+ flatbuffers::Offset Finish() {
const auto end = fbb_.EndTable(start_);
- auto o = flatbuffers::Offset(end);
+ auto o = flatbuffers::Offset(end);
return o;
}
};
-inline flatbuffers::Offset CreateKernelCreateInfos(
+inline flatbuffers::Offset CreateDeprecatedKernelCreateInfos(
flatbuffers::FlatBufferBuilder &_fbb,
flatbuffers::Offset> node_indices = 0,
flatbuffers::Offset> kernel_def_hashes = 0) {
- KernelCreateInfosBuilder builder_(_fbb);
+ DeprecatedKernelCreateInfosBuilder builder_(_fbb);
builder_.add_kernel_def_hashes(kernel_def_hashes);
builder_.add_node_indices(node_indices);
return builder_.Finish();
}
-inline flatbuffers::Offset CreateKernelCreateInfosDirect(
+inline flatbuffers::Offset CreateDeprecatedKernelCreateInfosDirect(
flatbuffers::FlatBufferBuilder &_fbb,
const std::vector *node_indices = nullptr,
const std::vector *kernel_def_hashes = nullptr) {
auto node_indices__ = node_indices ? _fbb.CreateVector(*node_indices) : 0;
auto kernel_def_hashes__ = kernel_def_hashes ? _fbb.CreateVector(*kernel_def_hashes) : 0;
- return onnxruntime::fbs::CreateKernelCreateInfos(
+ return onnxruntime::fbs::CreateDeprecatedKernelCreateInfos(
_fbb,
node_indices__,
kernel_def_hashes__);
}
-struct SubGraphSessionState FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table {
- typedef SubGraphSessionStateBuilder Builder;
+/// deprecated: no longer using kernel def hashes
+struct DeprecatedSubGraphSessionState FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table {
+ typedef DeprecatedSubGraphSessionStateBuilder Builder;
enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE {
VT_GRAPH_ID = 4,
VT_SESSION_STATE = 6
@@ -2539,14 +2584,14 @@ struct SubGraphSessionState FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table
const flatbuffers::String *graph_id() const {
return GetPointer(VT_GRAPH_ID);
}
- bool KeyCompareLessThan(const SubGraphSessionState *o) const {
+ bool KeyCompareLessThan(const DeprecatedSubGraphSessionState *o) const {
return *graph_id() < *o->graph_id();
}
int KeyCompareWithValue(const char *val) const {
return strcmp(graph_id()->c_str(), val);
}
- const onnxruntime::fbs::SessionState *session_state() const {
- return GetPointer(VT_SESSION_STATE);
+ const onnxruntime::fbs::DeprecatedSessionState *session_state() const {
+ return GetPointer(VT_SESSION_STATE);
}
bool Verify(flatbuffers::Verifier &verifier) const {
return VerifyTableStart(verifier) &&
@@ -2558,61 +2603,62 @@ struct SubGraphSessionState FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table
}
};
-struct SubGraphSessionStateBuilder {
- typedef SubGraphSessionState Table;
+struct DeprecatedSubGraphSessionStateBuilder {
+ typedef DeprecatedSubGraphSessionState Table;
flatbuffers::FlatBufferBuilder &fbb_;
flatbuffers::uoffset_t start_;
void add_graph_id(flatbuffers::Offset graph_id) {
- fbb_.AddOffset(SubGraphSessionState::VT_GRAPH_ID, graph_id);
+ fbb_.AddOffset(DeprecatedSubGraphSessionState::VT_GRAPH_ID, graph_id);
}
- void add_session_state(flatbuffers::Offset session_state) {
- fbb_.AddOffset(SubGraphSessionState::VT_SESSION_STATE, session_state);
+ void add_session_state(flatbuffers::Offset session_state) {
+ fbb_.AddOffset(DeprecatedSubGraphSessionState::VT_SESSION_STATE, session_state);
}
- explicit SubGraphSessionStateBuilder(flatbuffers::FlatBufferBuilder &_fbb)
+ explicit DeprecatedSubGraphSessionStateBuilder(flatbuffers::FlatBufferBuilder &_fbb)
: fbb_(_fbb) {
start_ = fbb_.StartTable();
}
- SubGraphSessionStateBuilder &operator=(const SubGraphSessionStateBuilder &);
- flatbuffers::Offset Finish() {
+ DeprecatedSubGraphSessionStateBuilder &operator=(const DeprecatedSubGraphSessionStateBuilder &);
+ flatbuffers::Offset Finish() {
const auto end = fbb_.EndTable(start_);
- auto o = flatbuffers::Offset(end);
- fbb_.Required(o, SubGraphSessionState::VT_GRAPH_ID);
+ auto o = flatbuffers::Offset(end);
+ fbb_.Required(o, DeprecatedSubGraphSessionState::VT_GRAPH_ID);
return o;
}
};
-inline flatbuffers::Offset CreateSubGraphSessionState(
+inline flatbuffers::Offset CreateDeprecatedSubGraphSessionState(
flatbuffers::FlatBufferBuilder &_fbb,
flatbuffers::Offset graph_id = 0,
- flatbuffers::Offset session_state = 0) {
- SubGraphSessionStateBuilder builder_(_fbb);
+ flatbuffers::Offset session_state = 0) {
+ DeprecatedSubGraphSessionStateBuilder builder_(_fbb);
builder_.add_session_state(session_state);
builder_.add_graph_id(graph_id);
return builder_.Finish();
}
-inline flatbuffers::Offset CreateSubGraphSessionStateDirect(
+inline flatbuffers::Offset CreateDeprecatedSubGraphSessionStateDirect(
flatbuffers::FlatBufferBuilder &_fbb,
const char *graph_id = nullptr,
- flatbuffers::Offset session_state = 0) {
+ flatbuffers::Offset session_state = 0) {
auto graph_id__ = graph_id ? _fbb.CreateString(graph_id) : 0;
- return onnxruntime::fbs::CreateSubGraphSessionState(
+ return onnxruntime::fbs::CreateDeprecatedSubGraphSessionState(
_fbb,
graph_id__,
session_state);
}
-struct SessionState FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table {
- typedef SessionStateBuilder Builder;
+/// deprecated: no longer using kernel def hashes
+struct DeprecatedSessionState FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table {
+ typedef DeprecatedSessionStateBuilder Builder;
enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE {
VT_KERNELS = 4,
VT_SUB_GRAPH_SESSION_STATES = 6
};
- const onnxruntime::fbs::KernelCreateInfos *kernels() const {
- return GetPointer(VT_KERNELS);
+ const onnxruntime::fbs::DeprecatedKernelCreateInfos *kernels() const {
+ return GetPointer(VT_KERNELS);
}
- const flatbuffers::Vector> *sub_graph_session_states() const {
- return GetPointer> *>(VT_SUB_GRAPH_SESSION_STATES);
+ const flatbuffers::Vector> *sub_graph_session_states() const {
+ return GetPointer> *>(VT_SUB_GRAPH_SESSION_STATES);
}
bool Verify(flatbuffers::Verifier &verifier) const {
return VerifyTableStart(verifier) &&
@@ -2625,55 +2671,308 @@ struct SessionState FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table {
}
};
-struct SessionStateBuilder {
- typedef SessionState Table;
+struct DeprecatedSessionStateBuilder {
+ typedef DeprecatedSessionState Table;
flatbuffers::FlatBufferBuilder &fbb_;
flatbuffers::uoffset_t start_;
- void add_kernels(flatbuffers::Offset kernels) {
- fbb_.AddOffset(SessionState::VT_KERNELS, kernels);
+ void add_kernels(flatbuffers::Offset kernels) {
+ fbb_.AddOffset(DeprecatedSessionState::VT_KERNELS, kernels);
}
- void add_sub_graph_session_states(flatbuffers::Offset>> sub_graph_session_states) {
- fbb_.AddOffset(SessionState::VT_SUB_GRAPH_SESSION_STATES, sub_graph_session_states);
+ void add_sub_graph_session_states(flatbuffers::Offset>> sub_graph_session_states) {
+ fbb_.AddOffset(DeprecatedSessionState::VT_SUB_GRAPH_SESSION_STATES, sub_graph_session_states);
}
- explicit SessionStateBuilder(flatbuffers::FlatBufferBuilder &_fbb)
+ explicit DeprecatedSessionStateBuilder(flatbuffers::FlatBufferBuilder &_fbb)
: fbb_(_fbb) {
start_ = fbb_.StartTable();
}
- SessionStateBuilder &operator=(const SessionStateBuilder &);
- flatbuffers::Offset Finish() {
+ DeprecatedSessionStateBuilder &operator=(const DeprecatedSessionStateBuilder &);
+ flatbuffers::Offset Finish() {
const auto end = fbb_.EndTable(start_);
- auto o = flatbuffers::Offset(end);
+ auto o = flatbuffers::Offset(end);
return o;
}
};
-inline flatbuffers::Offset CreateSessionState(
+inline flatbuffers::Offset CreateDeprecatedSessionState(
flatbuffers::FlatBufferBuilder &_fbb,
- flatbuffers::Offset kernels = 0,
- flatbuffers::Offset>> sub_graph_session_states = 0) {
- SessionStateBuilder builder_(_fbb);
+ flatbuffers::Offset kernels = 0,
+ flatbuffers::Offset>> sub_graph_session_states = 0) {
+ DeprecatedSessionStateBuilder builder_(_fbb);
builder_.add_sub_graph_session_states(sub_graph_session_states);
builder_.add_kernels(kernels);
return builder_.Finish();
}
-inline flatbuffers::Offset CreateSessionStateDirect(
+inline flatbuffers::Offset CreateDeprecatedSessionStateDirect(
flatbuffers::FlatBufferBuilder &_fbb,
- flatbuffers::Offset kernels = 0,
- std::vector> *sub_graph_session_states = nullptr) {
- auto sub_graph_session_states__ = sub_graph_session_states ? _fbb.CreateVectorOfSortedTables(sub_graph_session_states) : 0;
- return onnxruntime::fbs::CreateSessionState(
+ flatbuffers::Offset kernels = 0,
+ std::vector> *sub_graph_session_states = nullptr) {
+ auto sub_graph_session_states__ = sub_graph_session_states ? _fbb.CreateVectorOfSortedTables(sub_graph_session_states) : 0;
+ return onnxruntime::fbs::CreateDeprecatedSessionState(
_fbb,
kernels,
sub_graph_session_states__);
}
+struct ArgTypeAndIndex FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table {
+ typedef ArgTypeAndIndexBuilder Builder;
+ enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE {
+ VT_ARG_TYPE = 4,
+ VT_INDEX = 6
+ };
+ onnxruntime::fbs::ArgType arg_type() const {
+ return static_cast(GetField(VT_ARG_TYPE, 0));
+ }
+ uint32_t index() const {
+ return GetField(VT_INDEX, 0);
+ }
+ bool Verify(flatbuffers::Verifier &verifier) const {
+ return VerifyTableStart(verifier) &&
+ VerifyField(verifier, VT_ARG_TYPE) &&
+ VerifyField(verifier, VT_INDEX) &&
+ verifier.EndTable();
+ }
+};
+
+struct ArgTypeAndIndexBuilder {
+ typedef ArgTypeAndIndex Table;
+ flatbuffers::FlatBufferBuilder &fbb_;
+ flatbuffers::uoffset_t start_;
+ void add_arg_type(onnxruntime::fbs::ArgType arg_type) {
+ fbb_.AddElement(ArgTypeAndIndex::VT_ARG_TYPE, static_cast(arg_type), 0);
+ }
+ void add_index(uint32_t index) {
+ fbb_.AddElement(ArgTypeAndIndex::VT_INDEX, index, 0);
+ }
+ explicit ArgTypeAndIndexBuilder(flatbuffers::FlatBufferBuilder &_fbb)
+ : fbb_(_fbb) {
+ start_ = fbb_.StartTable();
+ }
+ ArgTypeAndIndexBuilder &operator=(const ArgTypeAndIndexBuilder &);
+ flatbuffers::Offset Finish() {
+ const auto end = fbb_.EndTable(start_);
+ auto o = flatbuffers::Offset(end);
+ return o;
+ }
+};
+
+inline flatbuffers::Offset CreateArgTypeAndIndex(
+ flatbuffers::FlatBufferBuilder &_fbb,
+ onnxruntime::fbs::ArgType arg_type = onnxruntime::fbs::ArgType::INPUT,
+ uint32_t index = 0) {
+ ArgTypeAndIndexBuilder builder_(_fbb);
+ builder_.add_index(index);
+ builder_.add_arg_type(arg_type);
+ return builder_.Finish();
+}
+
+struct KernelTypeStrArgsEntry FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table {
+ typedef KernelTypeStrArgsEntryBuilder Builder;
+ enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE {
+ VT_KERNEL_TYPE_STR = 4,
+ VT_ARGS = 6
+ };
+ const flatbuffers::String *kernel_type_str() const {
+ return GetPointer(VT_KERNEL_TYPE_STR);
+ }
+ bool KeyCompareLessThan(const KernelTypeStrArgsEntry *o) const {
+ return *kernel_type_str() < *o->kernel_type_str();
+ }
+ int KeyCompareWithValue(const char *val) const {
+ return strcmp(kernel_type_str()->c_str(), val);
+ }
+ const flatbuffers::Vector> *args() const {
+ return GetPointer> *>(VT_ARGS);
+ }
+ bool Verify(flatbuffers::Verifier &verifier) const {
+ return VerifyTableStart(verifier) &&
+ VerifyOffsetRequired(verifier, VT_KERNEL_TYPE_STR) &&
+ verifier.VerifyString(kernel_type_str()) &&
+ VerifyOffset(verifier, VT_ARGS) &&
+ verifier.VerifyVector(args()) &&
+ verifier.VerifyVectorOfTables(args()) &&
+ verifier.EndTable();
+ }
+};
+
+struct KernelTypeStrArgsEntryBuilder {
+ typedef KernelTypeStrArgsEntry Table;
+ flatbuffers::FlatBufferBuilder &fbb_;
+ flatbuffers::uoffset_t start_;
+ void add_kernel_type_str(flatbuffers::Offset kernel_type_str) {
+ fbb_.AddOffset(KernelTypeStrArgsEntry::VT_KERNEL_TYPE_STR, kernel_type_str);
+ }
+ void add_args(flatbuffers::Offset>> args) {
+ fbb_.AddOffset(KernelTypeStrArgsEntry::VT_ARGS, args);
+ }
+ explicit KernelTypeStrArgsEntryBuilder(flatbuffers::FlatBufferBuilder &_fbb)
+ : fbb_(_fbb) {
+ start_ = fbb_.StartTable();
+ }
+ KernelTypeStrArgsEntryBuilder &operator=(const KernelTypeStrArgsEntryBuilder &);
+ flatbuffers::Offset Finish() {
+ const auto end = fbb_.EndTable(start_);
+ auto o = flatbuffers::Offset(end);
+ fbb_.Required(o, KernelTypeStrArgsEntry::VT_KERNEL_TYPE_STR);
+ return o;
+ }
+};
+
+inline flatbuffers::Offset CreateKernelTypeStrArgsEntry(
+ flatbuffers::FlatBufferBuilder &_fbb,
+ flatbuffers::Offset kernel_type_str = 0,
+ flatbuffers::Offset>> args = 0) {
+ KernelTypeStrArgsEntryBuilder builder_(_fbb);
+ builder_.add_args(args);
+ builder_.add_kernel_type_str(kernel_type_str);
+ return builder_.Finish();
+}
+
+inline flatbuffers::Offset CreateKernelTypeStrArgsEntryDirect(
+ flatbuffers::FlatBufferBuilder &_fbb,
+ const char *kernel_type_str = nullptr,
+ const std::vector> *args = nullptr) {
+ auto kernel_type_str__ = kernel_type_str ? _fbb.CreateString(kernel_type_str) : 0;
+ auto args__ = args ? _fbb.CreateVector>(*args) : 0;
+ return onnxruntime::fbs::CreateKernelTypeStrArgsEntry(
+ _fbb,
+ kernel_type_str__,
+ args__);
+}
+
+struct OpIdKernelTypeStrArgsEntry FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table {
+ typedef OpIdKernelTypeStrArgsEntryBuilder Builder;
+ enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE {
+ VT_OP_ID = 4,
+ VT_KERNEL_TYPE_STR_ARGS = 6
+ };
+ const flatbuffers::String *op_id() const {
+ return GetPointer(VT_OP_ID);
+ }
+ bool KeyCompareLessThan(const OpIdKernelTypeStrArgsEntry *o) const {
+ return *op_id() < *o->op_id();
+ }
+ int KeyCompareWithValue(const char *val) const {
+ return strcmp(op_id()->c_str(), val);
+ }
+ const flatbuffers::Vector> *kernel_type_str_args() const {
+ return GetPointer> *>(VT_KERNEL_TYPE_STR_ARGS);
+ }
+ bool Verify(flatbuffers::Verifier &verifier) const {
+ return VerifyTableStart(verifier) &&
+ VerifyOffsetRequired(verifier, VT_OP_ID) &&
+ verifier.VerifyString(op_id()) &&
+ VerifyOffset(verifier, VT_KERNEL_TYPE_STR_ARGS) &&
+ verifier.VerifyVector(kernel_type_str_args()) &&
+ verifier.VerifyVectorOfTables(kernel_type_str_args()) &&
+ verifier.EndTable();
+ }
+};
+
+struct OpIdKernelTypeStrArgsEntryBuilder {
+ typedef OpIdKernelTypeStrArgsEntry Table;
+ flatbuffers::FlatBufferBuilder &fbb_;
+ flatbuffers::uoffset_t start_;
+ void add_op_id(flatbuffers::Offset op_id) {
+ fbb_.AddOffset(OpIdKernelTypeStrArgsEntry::VT_OP_ID, op_id);
+ }
+ void add_kernel_type_str_args(flatbuffers::Offset>> kernel_type_str_args) {
+ fbb_.AddOffset(OpIdKernelTypeStrArgsEntry::VT_KERNEL_TYPE_STR_ARGS, kernel_type_str_args);
+ }
+ explicit OpIdKernelTypeStrArgsEntryBuilder(flatbuffers::FlatBufferBuilder &_fbb)
+ : fbb_(_fbb) {
+ start_ = fbb_.StartTable();
+ }
+ OpIdKernelTypeStrArgsEntryBuilder &operator=(const OpIdKernelTypeStrArgsEntryBuilder &);
+ flatbuffers::Offset Finish() {
+ const auto end = fbb_.EndTable(start_);
+ auto o = flatbuffers::Offset(end);
+ fbb_.Required(o, OpIdKernelTypeStrArgsEntry::VT_OP_ID);
+ return o;
+ }
+};
+
+inline flatbuffers::Offset CreateOpIdKernelTypeStrArgsEntry(
+ flatbuffers::FlatBufferBuilder &_fbb,
+ flatbuffers::Offset op_id = 0,
+ flatbuffers::Offset>> kernel_type_str_args = 0) {
+ OpIdKernelTypeStrArgsEntryBuilder builder_(_fbb);
+ builder_.add_kernel_type_str_args(kernel_type_str_args);
+ builder_.add_op_id(op_id);
+ return builder_.Finish();
+}
+
+inline flatbuffers::Offset CreateOpIdKernelTypeStrArgsEntryDirect(
+ flatbuffers::FlatBufferBuilder &_fbb,
+ const char *op_id = nullptr,
+ std::vector> *kernel_type_str_args = nullptr) {
+ auto op_id__ = op_id ? _fbb.CreateString(op_id) : 0;
+ auto kernel_type_str_args__ = kernel_type_str_args ? _fbb.CreateVectorOfSortedTables(kernel_type_str_args) : 0;
+ return onnxruntime::fbs::CreateOpIdKernelTypeStrArgsEntry(
+ _fbb,
+ op_id__,
+ kernel_type_str_args__);
+}
+
+struct KernelTypeStrResolver FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table {
+ typedef KernelTypeStrResolverBuilder Builder;
+ enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE {
+ VT_OP_KERNEL_TYPE_STR_ARGS = 4
+ };
+ const flatbuffers::Vector> *op_kernel_type_str_args() const {
+ return GetPointer> *>(VT_OP_KERNEL_TYPE_STR_ARGS);
+ }
+ bool Verify(flatbuffers::Verifier &verifier) const {
+ return VerifyTableStart(verifier) &&
+ VerifyOffset(verifier, VT_OP_KERNEL_TYPE_STR_ARGS) &&
+ verifier.VerifyVector(op_kernel_type_str_args()) &&
+ verifier.VerifyVectorOfTables(op_kernel_type_str_args()) &&
+ verifier.EndTable();
+ }
+};
+
+struct KernelTypeStrResolverBuilder {
+ typedef KernelTypeStrResolver Table;
+ flatbuffers::FlatBufferBuilder &fbb_;
+ flatbuffers::uoffset_t start_;
+ void add_op_kernel_type_str_args(flatbuffers::Offset>> op_kernel_type_str_args) {
+ fbb_.AddOffset(KernelTypeStrResolver::VT_OP_KERNEL_TYPE_STR_ARGS, op_kernel_type_str_args);
+ }
+ explicit KernelTypeStrResolverBuilder(flatbuffers::FlatBufferBuilder &_fbb)
+ : fbb_(_fbb) {
+ start_ = fbb_.StartTable();
+ }
+ KernelTypeStrResolverBuilder &operator=(const KernelTypeStrResolverBuilder &);
+ flatbuffers::Offset Finish() {
+ const auto end = fbb_.EndTable(start_);
+ auto o = flatbuffers::Offset(end);
+ return o;
+ }
+};
+
+inline flatbuffers::Offset CreateKernelTypeStrResolver(
+ flatbuffers::FlatBufferBuilder &_fbb,
+ flatbuffers::Offset>> op_kernel_type_str_args = 0) {
+ KernelTypeStrResolverBuilder builder_(_fbb);
+ builder_.add_op_kernel_type_str_args(op_kernel_type_str_args);
+ return builder_.Finish();
+}
+
+inline flatbuffers::Offset CreateKernelTypeStrResolverDirect(
+ flatbuffers::FlatBufferBuilder &_fbb,
+ std::vector> *op_kernel_type_str_args = nullptr) {
+ auto op_kernel_type_str_args__ = op_kernel_type_str_args ? _fbb.CreateVectorOfSortedTables(op_kernel_type_str_args) : 0;
+ return onnxruntime::fbs::CreateKernelTypeStrResolver(
+ _fbb,
+ op_kernel_type_str_args__);
+}
+
struct InferenceSession FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table {
typedef InferenceSessionBuilder Builder;
enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE {
VT_ORT_VERSION = 4,
VT_MODEL = 6,
- VT_SESSION_STATE = 8
+ VT_KERNEL_TYPE_STR_RESOLVER = 10
};
const flatbuffers::String *ort_version() const {
return GetPointer(VT_ORT_VERSION);
@@ -2681,8 +2980,8 @@ struct InferenceSession FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table {
const onnxruntime::fbs::Model *model() const {
return GetPointer(VT_MODEL);
}
- const onnxruntime::fbs::SessionState *session_state() const {
- return GetPointer(VT_SESSION_STATE);
+ const onnxruntime::fbs::KernelTypeStrResolver *kernel_type_str_resolver() const {
+ return GetPointer(VT_KERNEL_TYPE_STR_RESOLVER);
}
bool Verify(flatbuffers::Verifier &verifier) const {
return VerifyTableStart(verifier) &&
@@ -2690,8 +2989,8 @@ struct InferenceSession FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table {
verifier.VerifyString(ort_version()) &&
VerifyOffset(verifier, VT_MODEL) &&
verifier.VerifyTable(model()) &&
- VerifyOffset(verifier, VT_SESSION_STATE) &&
- verifier.VerifyTable(session_state()) &&
+ VerifyOffset(verifier, VT_KERNEL_TYPE_STR_RESOLVER) &&
+ verifier.VerifyTable(kernel_type_str_resolver()) &&
verifier.EndTable();
}
};
@@ -2706,8 +3005,8 @@ struct InferenceSessionBuilder {
void add_model(flatbuffers::Offset model) {
fbb_.AddOffset(InferenceSession::VT_MODEL, model);
}
- void add_session_state(flatbuffers::Offset session_state) {
- fbb_.AddOffset(InferenceSession::VT_SESSION_STATE, session_state);
+ void add_kernel_type_str_resolver(flatbuffers::Offset kernel_type_str_resolver) {
+ fbb_.AddOffset(InferenceSession::VT_KERNEL_TYPE_STR_RESOLVER, kernel_type_str_resolver);
}
explicit InferenceSessionBuilder(flatbuffers::FlatBufferBuilder &_fbb)
: fbb_(_fbb) {
@@ -2725,9 +3024,9 @@ inline flatbuffers::Offset CreateInferenceSession(
flatbuffers::FlatBufferBuilder &_fbb,
flatbuffers::Offset ort_version = 0,
flatbuffers::Offset model = 0,
- flatbuffers::Offset session_state = 0) {
+ flatbuffers::Offset kernel_type_str_resolver = 0) {
InferenceSessionBuilder builder_(_fbb);
- builder_.add_session_state(session_state);
+ builder_.add_kernel_type_str_resolver(kernel_type_str_resolver);
builder_.add_model(model);
builder_.add_ort_version(ort_version);
return builder_.Finish();
@@ -2737,13 +3036,13 @@ inline flatbuffers::Offset