diff --git a/contrib/components/openvino/model_convert/README.md b/contrib/components/openvino/model_convert/README.md
index 27ad32254b4..5979b1186e1 100644
--- a/contrib/components/openvino/model_convert/README.md
+++ b/contrib/components/openvino/model_convert/README.md
@@ -236,17 +236,22 @@ Kaldi-specific parameters:
The output folder specifies where the generated model files in IR format (with .bin and .xml
extensions) should be uploaded.
-The component also saved the generated model files in location: `/tmp/model.bin` and `/tmp/model.xml`
-so this path could be used in the argo pipeline for storing the workflow artifacts.
+The component also creates three files containing the paths to the generated model:
+- `/tmp/output_path.txt` - GCS path to the folder containing the generated model files
+- `/tmp/bin_path.txt` - GCS path to the model weights file (.bin)
+- `/tmp/xml_path.txt` - GCS path to the model graph file (.xml)
+These paths can be passed as parameters to other jobs in ML pipelines.
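+
+For reference, here is a minimal sketch (not part of the component) of how these files could be consumed
+in a Kubeflow Pipelines step; the image name and GCS paths below are illustrative placeholders:
+
+```python
+import kfp.dsl as dsl
+
+# Hypothetical conversion step: file_outputs maps the component's /tmp/*.txt
+# files to named pipeline outputs that downstream ops can reference.
+mo = dsl.ContainerOp(
+    name='mo',
+    image='<model-convert-image>',
+    command=['convert_model.py'],
+    arguments=[
+        '--input_path', 'gs://<bucket>/model/saved_model.pb',
+        '--output_path', 'gs://<bucket>/model/IR',
+        '--mo_options', '--saved_model_dir .'],
+    file_outputs={
+        'output': '/tmp/output_path.txt',
+        'bin_path': '/tmp/bin_path.txt',
+        'xml_path': '/tmp/xml_path.txt'})
+```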
## Examples
-Input path - gs://tensorflow_model_path/resnet/1/saved_model.pb
-
-MO options - --saved_model_dir .
-
+Input path - gs://tensorflow_model_path/resnet/1/saved_model.pb
+MO options - --saved_model_dir .
Output path - gs://tensorflow_model_path/resnet/1
+
+Input path - gs://tensorflow_model_path/resnet/1
+MO options - --saved_model_dir 1
+Output path - gs://tensorflow_model_path/resnet/dldt/1
+
## Building docker image
@@ -260,7 +265,7 @@ This component requires GCP authentication token in json format generated for th
which has access to the GCS location. In the example below, the key is stored in key.json in the current path.
```bash
-COMMAND="python3 ../convert_model.py --mo_options \"--saved_model_dir .\" --input_path gs://tensorflow_model_path/resnet/1/saved_model.pb --output_path gs://tensorflow_model_path/resnet/1"
+COMMAND="convert_model.py --mo_options \"--saved_model_dir .\" --input_path gs://tensorflow_model_path/resnet/1/saved_model.pb --output_path gs://tensorflow_model_path/resnet/1"
docker run --rm -it -v $(pwd)/key.json:/etc/credentials/gcp-key.json \
-e GOOGLE_APPLICATION_CREDENTIALS=/etc/credentials/gcp-key.json <image name> $COMMAND
diff --git a/contrib/components/openvino/model_convert/containers/Dockerfile b/contrib/components/openvino/model_convert/containers/Dockerfile
index db8f0f913e8..96ab7d01064 100644
--- a/contrib/components/openvino/model_convert/containers/Dockerfile
+++ b/contrib/components/openvino/model_convert/containers/Dockerfile
@@ -1,10 +1,21 @@
FROM ubuntu:16.04
-RUN apt-get update && apt-get install -y curl ca-certificates python3-pip python-dev libgfortran3 unzip vim
+RUN apt-get update && apt-get install -y --no-install-recommends \
+ curl ca-certificates \
+ python3-pip \
+ python-dev \
+ gcc \
+ python-setuptools \
+ python3-setuptools \
+ libgfortran3 \
+ unzip \
+ vim && \
+ apt-get clean
RUN curl -L -o 2018_R3.tar.gz https://github.com/opencv/dldt/archive/2018_R3.tar.gz && \
tar -zxf 2018_R3.tar.gz && \
rm 2018_R3.tar.gz && \
rm -Rf dldt-2018_R3/inference-engine
WORKDIR dldt-2018_R3/model-optimizer
+RUN pip3 install --upgrade pip setuptools
RUN pip3 install -r requirements.txt
RUN curl -L -o google-cloud-sdk.zip https://dl.google.com/dl/cloudsdk/release/google-cloud-sdk.zip && \
unzip -qq google-cloud-sdk.zip -d tools && \
@@ -15,8 +26,11 @@ RUN curl -L -o google-cloud-sdk.zip https://dl.google.com/dl/cloudsdk/release/go
tools/google-cloud-sdk/bin/gcloud -q components update \
gcloud core gsutil && \
tools/google-cloud-sdk/bin/gcloud config set component_manager/disable_update_check true && \
- touch tools/google-cloud-sdk/lib/third_party/google.py
+ touch tools/google-cloud-sdk/lib/third_party/google.py && \
+ pip install -U crcmod
+ENV PATH ${PATH}:/dldt-2018_R3/model-optimizer:/dldt-2018_R3/model-optimizer/tools/google-cloud-sdk/bin
COPY convert_model.py .
+RUN chmod 755 *.py
WORKDIR input
diff --git a/contrib/components/openvino/model_convert/containers/convert_model.py b/contrib/components/openvino/model_convert/containers/convert_model.py
index 36b6ba3f61c..74a28c11531 100644
--- a/contrib/components/openvino/model_convert/containers/convert_model.py
+++ b/contrib/components/openvino/model_convert/containers/convert_model.py
@@ -1,7 +1,9 @@
+#!/usr/bin/python3
+
import argparse
import subprocess
import re
-from shutil import copyfile
+import os
def is_insecure_path(path):
@@ -33,7 +35,6 @@ def main():
'--output_path', type=str, help='GCS path of output folder')
args = parser.parse_args()
- bin_path = "../tools/google-cloud-sdk/bin/"
# Validate parameters
if is_insecure_path(args.input_path):
@@ -48,24 +49,28 @@ def main():
print("Invalid model optimizer options")
exit(1)
- # Initialize gsutil creds
- command = bin_path + "gcloud auth activate-service-account " \
+ # Initialize gsutil creds if needed
+ if "GOOGLE_APPLICATION_CREDENTIALS" in os.environ:
+ command = "gcloud auth activate-service-account " \
"--key-file=${GOOGLE_APPLICATION_CREDENTIALS}"
- print("auth command", command)
- return_code = subprocess.call(command, shell=True)
- print("return code", return_code)
+ print("auth command", command)
+ return_code = subprocess.call(command, shell=True)
+ print("return code", return_code)
- # Downloading input model
- command = bin_path + "gsutil cp -r " + args.input_path + " ."
+ # Downloading input model or GCS folder with a model to current folder
+ command = "gsutil cp -r " + args.input_path + " ."
print("gsutil download command", command)
return_code = subprocess.call(command, shell=True)
print("return code", return_code)
+ if return_code:
+ exit(1)
# Executing model optimization
- command = "python3 ../mo.py " + args.mo_options
- print("mo command", command)
+ command = "mo.py " + args.mo_options
+ print("Starting model optimization:", command)
output = subprocess.run(command, shell=True, stdout=subprocess.PIPE,
universal_newlines=True)
+ print("Model optimization output",output.stdout)
XML = ""
BIN = ""
for line in output.stdout.splitlines():
@@ -77,19 +82,25 @@ def main():
print("Error, model optimization failed")
exit(1)
- # copy generated model file to use them as workflow artifacts
- copyfile(BIN, "/tmp/model.bin")
- copyfile(XML, "/tmp/model.xml")
-
- command = bin_path + "gsutil cp " + XML + " " + args.output_path
+ command = "gsutil cp " + XML + " " + os.path.join(args.output_path, os.path.split(XML)[1])
print("gsutil upload command", command)
return_code = subprocess.call(command, shell=True)
print("return code", return_code)
- command = bin_path + "gsutil cp " + BIN + " " + args.output_path
+ command = "gsutil cp " + BIN + " " + os.path.join(args.output_path, os.path.split(BIN)[1])
print("gsutil upload command", command)
return_code = subprocess.call(command, shell=True)
print("return code", return_code)
+ if return_code:
+ exit(1)
+
+ with open('/tmp/output_path.txt', 'w') as f:
+ f.write(args.output_path)
+ with open('/tmp/bin_path.txt', 'w') as f:
+ f.write(os.path.join(args.output_path, os.path.split(BIN)[1]))
+ with open('/tmp/xml_path.txt', 'w') as f:
+ f.write(os.path.join(args.output_path, os.path.split(XML)[1]))
+ print("Model successfully generated and uploaded to ", args.output_path)
if __name__ == "__main__":
main()
diff --git a/contrib/components/openvino/tf-slim/README.md b/contrib/components/openvino/tf-slim/README.md
new file mode 100644
index 00000000000..411c1521744
--- /dev/null
+++ b/contrib/components/openvino/tf-slim/README.md
@@ -0,0 +1,79 @@
+# Slim models generator
+
+This component automates the export of [slim models](https://github.com/tensorflow/models/blob/master/research/slim).
+It can create a graph from the slim models zoo, load the variables from a pre-trained checkpoint, and export the model
+in the form of a TensorFlow `frozen graph` and `saved model`.
+
+The results of the component can be saved to a local path or to GCS cloud storage. They can be used by other ML pipeline
+components like the OpenVINO model optimizer, OpenVINO predict, or OpenVINO Model Server.
+
+## Building
+
+```bash
+docker build --build-arg http_proxy=$http_proxy --build-arg https_proxy=$https_proxy .
+```
+
+
+## Using the component
+
+```bash
+python slim_model.py --help
+usage: slim_model.py [-h] [--model_name MODEL_NAME] [--export_dir EXPORT_DIR]
+                     [--saved_model_dir SAVED_MODEL_DIR]
+                     [--batch_size BATCH_SIZE]
+                     [--checkpoint_url CHECKPOINT_URL]
+                     [--num_classes NUM_CLASSES]
+
+Slim model generator
+
+optional arguments:
+ -h, --help show this help message and exit
+  --model_name MODEL_NAME
+                        model name from the slim models zoo
+  --export_dir EXPORT_DIR
+                        GCS or local path to save graph files
+  --saved_model_dir SAVED_MODEL_DIR
+                        GCS or local path to save the generated model
+ --batch_size BATCH_SIZE
+ batch size to be used in the exported model
+ --checkpoint_url CHECKPOINT_URL
+ URL to the pretrained compressed checkpoint
+ --num_classes NUM_CLASSES
+ number of model classes
+```
+
+*model_name* can be any model defined in the slim repository. The naming convention needs to match the key names from
+[nets_factory.py](https://github.com/tensorflow/models/blob/master/research/slim/nets/nets_factory.py#L39).
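+
+A quick way to list the valid key names, assuming the slim repository is on `PYTHONPATH`
+(as it is inside the component image):
+
+```python
+from nets import nets_factory
+
+# networks_map maps model_name keys (e.g. 'resnet_v1_50') to model builder functions.
+print(sorted(nets_factory.networks_map.keys()))
+```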
+
+*export_dir* can be a local path in the container or a GCS path where the generated files are stored:
+- model graph file in pb format
+- frozen graph including weights from the provided checkpoint
+- event file which can be imported in tensorboard
+- saved model, which will be stored in a subfolder called `1`
+
+*batch_size* represents the batch size used in the exported models. It can be a natural number for a fixed batch size,
+or `-1` for a dynamic batch size.
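+
+Internally, the batch size becomes the first dimension of the model input placeholder, and `-1` is mapped to `None`
+(dynamic shape). A simplified sketch of what the component does (the image size 224 is just an example; the real
+value comes from the slim model definition):
+
+```python
+import tensorflow as tf
+
+batch_size = None  # dynamic batch size; use a positive int for a fixed one
+placeholder = tf.placeholder(name='input', dtype=tf.float32,
+                             shape=[batch_size, 224, 224, 3])
+```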
+
+*checkpoint_url* is the URL to a [pre-trained checkpoint](https://github.com/tensorflow/models/tree/master/research/slim#pre-trained-models).
+It must match the model specified in the *model_name* parameter.
+
+*num_classes* is the model-specific number of classes in the outputs. For slim models it should be a value
+of `1000` or `1001`, and it must match the number of classes used by the requested model.
+
+
+## Examples
+
+```
+python slim_model.py --model_name mobilenet_v1_050 --export_dir /tmp/mobilenet \
+--batch_size 1 --num_classes=1001 \
+--checkpoint_url http://download.tensorflow.org/models/mobilenet_v1_2018_02_22/mobilenet_v1_0.5_160.tgz
+
+python slim_model.py --model_name resnet_v1_50 --export_dir gs:///resnet \
+--batch_size -1 --num_classes=1000 \
+--checkpoint_url http://download.tensorflow.org/models/resnet_v1_50_2016_08_28.tar.gz
+
+python slim_model.py --model_name inception_v4 --export_dir gs:///inception \
+--batch_size -1 --num_classes=1001 \
+--checkpoint_url http://download.tensorflow.org/models/inception_v4_2016_09_09.tar.gz
+
+python slim_model.py --model_name vgg_19 --export_dir /tmp/vgg \
+--batch_size 1 --num_classes=1000 \
+--checkpoint_url http://download.tensorflow.org/models/vgg_19_2016_08_28.tar.gz
+```
+
diff --git a/contrib/components/openvino/tf-slim/containers/Dockerfile b/contrib/components/openvino/tf-slim/containers/Dockerfile
new file mode 100644
index 00000000000..8fd56fcb49e
--- /dev/null
+++ b/contrib/components/openvino/tf-slim/containers/Dockerfile
@@ -0,0 +1,74 @@
+FROM intelpython/intelpython3_core as BUILD
+
+RUN apt-get update && apt-get install -y --no-install-recommends \
+ openjdk-8-jdk \
+ openjdk-8-jre-headless \
+ build-essential \
+ curl \
+ git \
+ libcurl3-dev \
+ libfreetype6-dev \
+ libhdf5-serial-dev \
+ libpng-dev \
+ libzmq3-dev \
+ pkg-config \
+ rsync \
+ software-properties-common \
+ unzip \
+ zip \
+ zlib1g-dev && \
+ apt-get clean
+
+RUN git clone --depth 1 https://github.com/tensorflow/tensorflow
+
+
+RUN conda create --name myenv -y
+ENV PATH /opt/conda/envs/myenv/bin:$PATH
+
+# Set up Bazel.
+
+
+# Running bazel inside a `docker build` command causes trouble, cf:
+# https://github.com/bazelbuild/bazel/issues/134
+# The easiest solution is to set up a bazelrc file forcing --batch.
+RUN echo "startup --batch" >>/etc/bazel.bazelrc
+# Similarly, we need to workaround sandboxing issues:
+# https://github.com/bazelbuild/bazel/issues/418
+RUN echo "build --spawn_strategy=standalone --genrule_strategy=standalone" \
+ >>/etc/bazel.bazelrc
+# Install the most recent bazel release.
+ENV BAZEL_VERSION 0.15.0
+WORKDIR /
+RUN mkdir /bazel && \
+ cd /bazel && \
+ curl -H "User-Agent: Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/57.0.2987.133 Safari/537.36" -fSsL -O https://github.com/bazelbuild/bazel/releases/download/$BAZEL_VERSION/bazel-$BAZEL_VERSION-installer-linux-x86_64.sh && \
+ curl -H "User-Agent: Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/57.0.2987.133 Safari/537.36" -fSsL -o /bazel/LICENSE.txt https://raw.githubusercontent.com/bazelbuild/bazel/master/LICENSE && \
+ chmod +x bazel-*.sh && \
+ ./bazel-$BAZEL_VERSION-installer-linux-x86_64.sh && \
+ cd / && \
+ rm -f /bazel/bazel-$BAZEL_VERSION-installer-linux-x86_64.sh
+
+RUN cd tensorflow && bazel build tensorflow/tools/graph_transforms:summarize_graph
+
+FROM intelpython/intelpython3_core as PROD
+RUN apt-get update && apt-get install -y --no-install-recommends \
+ git && \
+ apt-get clean
+
+WORKDIR /slim
+
+RUN git clone --depth 1 https://github.com/tensorflow/models && rm -Rf models/.git && \
+ git clone --depth 1 https://github.com/tensorflow/tensorflow && rm -Rf tensorflow/.git
+
+RUN conda create --name myenv -y
+ENV PATH /opt/conda/envs/myenv/bin:$PATH
+
+RUN pip install --no-cache-dir tensorflow validators google-cloud-storage
+ENV PYTHONPATH=models/research/slim:tensorflow/python/tools
+
+COPY --from=BUILD /tensorflow/bazel-bin/tensorflow/tools/graph_transforms/summarize_graph summarize_graph
+COPY --from=BUILD /root/.cache/bazel/_bazel_root/*/execroot/org_tensorflow/bazel-out/k8-opt/bin/_solib_k8/_U_S_Stensorflow_Stools_Sgraph_Utransforms_Csummarize_Ugraph___Utensorflow/libtensorflow_framework.so libtensorflow_framework.so
+COPY slim_model.py .
+
+
+
diff --git a/contrib/components/openvino/tf-slim/containers/slim_model.py b/contrib/components/openvino/tf-slim/containers/slim_model.py
new file mode 100644
index 00000000000..2942d409e7a
--- /dev/null
+++ b/contrib/components/openvino/tf-slim/containers/slim_model.py
@@ -0,0 +1,206 @@
+import tensorflow as tf
+from tensorflow.python.saved_model import signature_constants
+from tensorflow.python.saved_model import tag_constants
+from nets import nets_factory
+from tensorflow.python.platform import gfile
+import argparse
+import validators
+import os
+import requests
+import tarfile
+from subprocess import Popen, PIPE
+import shutil
+import glob
+import re
+from tensorflow.python.tools.freeze_graph import freeze_graph
+from tensorflow.python.tools.saved_model_cli import _show_all
+from urllib.parse import urlparse
+from shutil import copyfile
+from google.cloud import storage
+
+
+def upload_to_gcs(src, dst):
+ parsed_path = urlparse(dst)
+ bucket_name = parsed_path.netloc
+ file_path = parsed_path.path[1:]
+ gs_client = storage.Client()
+ bucket = gs_client.get_bucket(bucket_name)
+ blob = bucket.blob(file_path)
+ blob.upload_from_filename(src)
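+# Example usage (hypothetical paths):
+#   upload_to_gcs("/tmp/slim_tmp/model_graph.pb", "gs://my-bucket/models/model_graph.pb")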
+
+
+def main():
+ parser = argparse.ArgumentParser(
+ description='Slim model generator')
+    parser.add_argument('--model_name', type=str,
+                        help='model name from the slim models zoo')
+ parser.add_argument('--export_dir', type=str, default="/tmp/export_dir",
+ help='GCS or local path to save graph files')
+    parser.add_argument('--saved_model_dir', type=str,
+                        default="/tmp/saved_model",
+                        help='GCS or local path to save the generated model')
+ parser.add_argument('--batch_size', type=str, default=1,
+ help='batch size to be used in the exported model')
+ parser.add_argument('--checkpoint_url', type=str,
+ help='URL to the pretrained compressed checkpoint')
+ parser.add_argument('--num_classes', type=int, default=1000,
+ help='number of model classes')
+ args = parser.parse_args()
+
+ MODEL = args.model_name
+ URL = args.checkpoint_url
+ if not validators.url(args.checkpoint_url):
+ print('use a valid URL parameter')
+ exit(1)
+ TMP_DIR = "/tmp/slim_tmp"
+ NUM_CLASSES = args.num_classes
+ BATCH_SIZE = args.batch_size
+ MODEL_FILE_NAME = URL.rsplit('/', 1)[-1]
+ EXPORT_DIR = args.export_dir
+ SAVED_MODEL_DIR = args.saved_model_dir
+
+ tmp_graph_file = os.path.join(TMP_DIR, MODEL + '_graph.pb')
+ export_graph_file = os.path.join(EXPORT_DIR, MODEL + '_graph.pb')
+ frozen_file = os.path.join(EXPORT_DIR, 'frozen_graph_' + MODEL + '.pb')
+
+ if not os.path.exists(TMP_DIR):
+ os.makedirs(TMP_DIR)
+
+ if not os.path.exists(TMP_DIR + '/' + MODEL_FILE_NAME):
+ print("Downloading and decompressing the model checkpoint...")
+ response = requests.get(URL, stream=True)
+ with open(os.path.join(TMP_DIR, MODEL_FILE_NAME), 'wb') as output:
+ output.write(response.content)
+ tar = tarfile.open(os.path.join(TMP_DIR, MODEL_FILE_NAME))
+ tar.extractall(path=TMP_DIR)
+ tar.close()
+ print("Model checkpoint downloaded and decompressed to:", TMP_DIR)
+ else:
+ print("Reusing existing model file ",
+ os.path.join(TMP_DIR, MODEL_FILE_NAME))
+
+ checkpoint = glob.glob(TMP_DIR + '/*.ckpt*')
+ print("checkpoint", checkpoint)
+ if len(checkpoint) > 0:
+ m = re.match(r"([\S]*.ckpt)", checkpoint[-1])
+ print("checkpoint match", m)
+ checkpoint = m[0]
+ print(checkpoint)
+ else:
+ print("checkpoint file not detected in " + URL)
+ exit(1)
+
+ print("Saving graph def file")
+ with tf.Graph().as_default() as graph:
+
+ network_fn = nets_factory.get_network_fn(MODEL,
+ num_classes=NUM_CLASSES,
+ is_training=False)
+ image_size = network_fn.default_image_size
+ if BATCH_SIZE == "None" or BATCH_SIZE == "-1":
+ batchsize = None
+ else:
+ batchsize = BATCH_SIZE
+ placeholder = tf.placeholder(name='input', dtype=tf.float32,
+ shape=[batchsize, image_size,
+ image_size, 3])
+ network_fn(placeholder)
+ graph_def = graph.as_graph_def()
+
+ with gfile.GFile(tmp_graph_file, 'wb') as f:
+ f.write(graph_def.SerializeToString())
+ if urlparse(EXPORT_DIR).scheme == 'gs':
+ upload_to_gcs(tmp_graph_file, export_graph_file)
+ elif urlparse(EXPORT_DIR).scheme == '':
+ if not os.path.exists(EXPORT_DIR):
+ os.makedirs(EXPORT_DIR)
+ copyfile(tmp_graph_file, export_graph_file)
+    else:
+        print("Invalid format of model export path")
+        exit(1)
+ print("Graph file saved to ",
+ os.path.join(EXPORT_DIR, MODEL + '_graph.pb'))
+
+ print("Analysing graph")
+ p = Popen("./summarize_graph --in_graph=" + tmp_graph_file +
+ " --print_structure=false", shell=True, stdout=PIPE, stderr=PIPE)
+ summary, err = p.communicate()
+ inputs = []
+ outputs = []
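+    # summarize_graph prints lines such as:
+    #   Found 1 possible inputs: (name=input, type=float(1), shape=[1,224,224,3])
+    # The tensor names are recovered from the "name=...," fields below.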
+ for line in summary.split(b'\n'):
+ line_str = line.decode()
+ if re.match(r"Found [\d]* possible inputs", line_str) is not None:
+ print("in", line)
+ m = re.findall(r'name=[\S]*,', line.decode())
+ for match in m:
+ print("match", match)
+ input = match[5:-1]
+ inputs.append(input)
+ print("inputs", inputs)
+
+ if re.match(r"Found [\d]* possible outputs", line_str) is not None:
+ print("out", line)
+ m = re.findall(r'name=[\S]*,', line_str)
+ for match in m:
+ print("match", match)
+ output = match[5:-1]
+ outputs.append(output)
+ print("outputs", outputs)
+
+ output_node_names = ",".join(outputs)
+ print("Creating freezed graph based on pretrained checkpoint")
+ freeze_graph(input_graph=tmp_graph_file,
+ input_checkpoint=checkpoint,
+ input_binary=True,
+ clear_devices=True,
+ input_saver='',
+ output_node_names=output_node_names,
+ restore_op_name="save/restore_all",
+ filename_tensor_name="save/Const:0",
+ output_graph=frozen_file,
+ initializer_nodes="")
+ if urlparse(SAVED_MODEL_DIR).scheme == '' and \
+ os.path.exists(SAVED_MODEL_DIR):
+ shutil.rmtree(SAVED_MODEL_DIR)
+
+ builder = tf.saved_model.builder.SavedModelBuilder(SAVED_MODEL_DIR)
+
+ with tf.gfile.GFile(frozen_file, "rb") as f:
+ graph_def = tf.GraphDef()
+ graph_def.ParseFromString(f.read())
+
+ sigs = {}
+
+ with tf.Session(graph=tf.Graph()) as sess:
+ tf.import_graph_def(graph_def, name="")
+ g = tf.get_default_graph()
+ inp_dic = {}
+ for inp in inputs:
+ inp_t = g.get_tensor_by_name(inp+":0")
+ inp_dic[inp] = inp_t
+ out_dic = {}
+ for out in outputs:
+ out_t = g.get_tensor_by_name(out+":0")
+ out_dic[out] = out_t
+
+ sigs[signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY] = \
+ tf.saved_model.signature_def_utils.predict_signature_def(
+ inp_dic, out_dic)
+
+ builder.add_meta_graph_and_variables(sess, [tag_constants.SERVING],
+ signature_def_map=sigs)
+ print("Exporting saved model to:", SAVED_MODEL_DIR + ' ...')
+ builder.save()
+
+ print("Saved model exported to:", SAVED_MODEL_DIR)
+ _show_all(SAVED_MODEL_DIR)
+ pb_visual_writer = tf.summary.FileWriter(EXPORT_DIR)
+ pb_visual_writer.add_graph(sess.graph)
+ print("Visualize the model by running: "
+ "tensorboard --logdir={}".format(SAVED_MODEL_DIR))
+ with open('/tmp/saved_model_dir.txt', 'w') as f:
+ f.write(SAVED_MODEL_DIR)
+ with open('/tmp/export_dir.txt', 'w') as f:
+ f.write(EXPORT_DIR)
+
+
+if __name__ == "__main__":
+ main()
diff --git a/contrib/samples/openvino/model_optimizer/convert_model_pipeline.py b/contrib/samples/openvino/model_optimizer/convert_model_pipeline.py
index 85fcbfa6ac7..e0f6d101b31 100644
--- a/contrib/samples/openvino/model_optimizer/convert_model_pipeline.py
+++ b/contrib/samples/openvino/model_optimizer/convert_model_pipeline.py
@@ -15,12 +15,12 @@ def download_optimize_and_upload(
dsl.ContainerOp(
name='mo',
image='',
- command=['python3', '../convert_model.py'],
+ command=['convert_model.py'],
arguments=[
'--input_path', input_path,
'--output_path', output_path,
'--mo_options', mo_options],
- file_outputs={})
+ file_outputs={'output': '/tmp/output_path.txt'})
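+    # /tmp/output_path.txt holds the GCS folder with the generated IR files;
+    # exposing it as a file output lets downstream pipeline steps consume the
+    # path (via the op's .output attribute when the op is bound to a variable).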
if __name__ == '__main__':
diff --git a/contrib/samples/openvino/tf-slim/README.md b/contrib/samples/openvino/tf-slim/README.md
new file mode 100644
index 00000000000..7a427dfe943
--- /dev/null
+++ b/contrib/samples/openvino/tf-slim/README.md
@@ -0,0 +1,23 @@
+# Model optimization for TensorFlow slim models
+
+
+This pipeline chains the generation of TensorFlow slim models with OpenVINO model optimization.
+
+
+## Examples of the parameters
+
+model_name - resnet_v1_50
+checkpoint_url - http://download.tensorflow.org/models/resnet_v1_50_2016_08_28.tar.gz
+batch_size - 8
+num_classes - 1000
+saved_model_dir - gs:///resnet/1
+export_dir - /tmp/export
+
+
+model_name - inception_v4
+checkpoint_url - http://download.tensorflow.org/models/inception_v4_2016_09_09.tar.gz
+batch_size - -1
+num_classes - 1001
+saved_model_dir - gs:///inception/1
+export_dir - gs:///inception_files/1
+
diff --git a/contrib/samples/openvino/tf-slim/tf-slim.py b/contrib/samples/openvino/tf-slim/tf-slim.py
new file mode 100644
index 00000000000..e1089ff3175
--- /dev/null
+++ b/contrib/samples/openvino/tf-slim/tf-slim.py
@@ -0,0 +1,43 @@
+import kfp.dsl as dsl
+
+
+@dsl.pipeline(
+    name='TF-Slim optimization pipeline',
+ description='Generate slim models and optimize them with OpenVINO'
+)
+def tf_slim_optimize(
+ model_name: dsl.PipelineParam,
+ num_classes: dsl.PipelineParam,
+ checkpoint_url: dsl.PipelineParam,
+ batch_size: dsl.PipelineParam,
+ export_dir: dsl.PipelineParam,
+ saved_model_dir: dsl.PipelineParam,
+ mo_options: dsl.PipelineParam):
+
+ slim = dsl.ContainerOp(
+ name='tf-slim',
+ image='',
+ command=['python', 'slim_model.py'],
+ arguments=[
+ '--model_name', model_name,
+ '--batch_size', batch_size,
+ '--checkpoint_url', checkpoint_url,
+ '--num_classes', num_classes,
+ '--saved_model_dir', saved_model_dir,
+ '--export_dir', export_dir],
+ file_outputs={'saved-model-dir': '/tmp/saved_model_dir.txt'})
+
+ dsl.ContainerOp(
+        name='mo',
+ image='',
+ command=['convert_model.py'],
+ arguments=[
+ '--input_path', '%s/saved_model.pb' % slim.output,
+ '--mo_options', mo_options,
+ '--output_path', slim.output],
+        file_outputs={'output': '/tmp/output_path.txt'})
+
+
+if __name__ == '__main__':
+ import kfp.compiler as compiler
+ compiler.Compiler().compile(tf_slim_optimize, __file__ + '.tar.gz')