diff --git a/samples/README.md b/samples/README.md
index e4d453d9003..1558a1469a4 100644
--- a/samples/README.md
+++ b/samples/README.md
@@ -75,11 +75,11 @@ For better readability and integrations with the sample test infrastructure, sam
* The sample file should be either `*.py` or `*.ipynb`, and its file name is consistent with its directory name.
* For `*.py` sample, it's recommended to have a main invoking `kfp.compiler.Compiler().compile()` to compile the
pipeline function into pipeline yaml spec.
-* For `*.ipynb` sample, parameters (e.g., `experiment_name` and `project_name`)
+* For `*.ipynb` sample, parameters (e.g., `project_name`)
should be defined in a dedicated cell and tagged as parameter.
(If the author would like the sample test infra to run it by setting the `run_pipeline` flag to True in
-the associated `config.yaml` file, the sample test infra will expect a parameter `experiment_name`
-to inject so that it can run in the sample test experiment.)
+the associated `config.yaml` file, the sample test infra will expect the sample to use the
+`kfp.Client().create_run_from_pipeline_func` method for starting the run so that the sample test can watch the run.)
Detailed guideline is
[here](https://github.com/nteract/papermill). Also, all the environment setup and
preparation should be within the notebook, such as by `!pip install packages`
diff --git a/samples/contrib/arena-samples/standalonejob/standalone_pipeline.ipynb b/samples/contrib/arena-samples/standalonejob/standalone_pipeline.ipynb
index 1db4df1270b..353001f2ecd 100644
--- a/samples/contrib/arena-samples/standalonejob/standalone_pipeline.ipynb
+++ b/samples/contrib/arena-samples/standalonejob/standalone_pipeline.ipynb
@@ -48,7 +48,6 @@
"\n",
"**Please fill in the below environment variables with you own settings.**\n",
"\n",
- "- **EXPERIMENT_NAME**: A unique experiment name that will be created for this notebook demo.\n",
"- **KFP_PACKAGE**: The latest release of kubeflow pipeline platform library.\n",
"- **KUBEFLOW_PIPELINE_LINK**: The link to access the KubeFlow pipeline API.\n",
"- **MOUNT**: The mount configuration to map data above into the training job. The format is 'data:/directory'\n",
@@ -61,8 +60,6 @@
"metadata": {},
"outputs": [],
"source": [
- "EXPERIMENT_NAME = 'myjob'\n",
- "RUN_ID=\"run\"\n",
"KFP_SERVICE=\"ml-pipeline.kubeflow.svc.cluster.local:8888\"\n",
"KFP_PACKAGE = 'http://kubeflow.oss-cn-beijing.aliyuncs.com/kfp/0.1.14/kfp.tar.gz'\n",
"KFP_ARENA_PACKAGE = 'http://kubeflow.oss-cn-beijing.aliyuncs.com/kfp-arena/kfp-arena-0.3.tar.gz'\n",
diff --git a/samples/contrib/ibm-samples/ffdl-seldon/ffdl_pipeline.ipynb b/samples/contrib/ibm-samples/ffdl-seldon/ffdl_pipeline.ipynb
index 3dcf85e772f..44f41334dd3 100644
--- a/samples/contrib/ibm-samples/ffdl-seldon/ffdl_pipeline.ipynb
+++ b/samples/contrib/ibm-samples/ffdl-seldon/ffdl_pipeline.ipynb
@@ -90,10 +90,7 @@
"# KUBEFLOW_PIPELINE_LINK = ''\n",
"# client = kfp.Client(KUBEFLOW_PIPELINE_LINK)\n",
"\n",
- "client = kfp.Client()\n",
- "\n",
- "\n",
- "EXPERIMENT_NAME = 'FfDL-Seldon Experiments'"
+ "client = kfp.Client()\n"
]
},
{
@@ -179,7 +176,7 @@
" 'model-class-file': 'gender_classification.py'}\n",
"\n",
"\n",
- "run = client.create_run_from_pipeline_func(ffdlPipeline, arguments=parameters, experiment_name=EXPERIMENT_NAME).run_info\n",
+ "run = client.create_run_from_pipeline_func(ffdlPipeline, arguments=parameters).run_info\n",
"\n",
"import IPython\n",
    "html = ('<a href=%s/#/runs/details/%s target=_blank>Run link here</a>'\n",
diff --git a/samples/contrib/image-captioning-gcp/Image Captioning TF 2.0.ipynb b/samples/contrib/image-captioning-gcp/Image Captioning TF 2.0.ipynb
index 0d72d06b1bd..5a8a27259a5 100644
--- a/samples/contrib/image-captioning-gcp/Image Captioning TF 2.0.ipynb
+++ b/samples/contrib/image-captioning-gcp/Image Captioning TF 2.0.ipynb
@@ -135,7 +135,6 @@
"outputs": [],
"source": [
"# Kubeflow project settings\n",
- "EXPERIMENT_NAME = 'Image Captioning'\n",
"PROJECT_NAME = '[YOUR-PROJECT-NAME]' \n",
"PIPELINE_STORAGE_PATH = GCS_BUCKET + '/ms-coco/components' # path to save pipeline component images\n",
"BASE_IMAGE = 'gcr.io/%s/img-cap:latest' % PROJECT_NAME # using image created in README instructions\n",
@@ -913,7 +912,7 @@
" 'training_batch_size': 16, # has to be smaller since only training on 80/100 examples \n",
"}\n",
"\n",
- "kfp.Client().create_run_from_pipeline_func(pipeline, arguments=arguments, experiment_name=EXPERIMENT_NAME)"
+ "kfp.Client().create_run_from_pipeline_func(pipeline, arguments=arguments)"
]
},
{
diff --git a/samples/core/ai_platform/ai_platform.ipynb b/samples/core/ai_platform/ai_platform.ipynb
index 4bcf6924fe7..709a78cd6c6 100644
--- a/samples/core/ai_platform/ai_platform.ipynb
+++ b/samples/core/ai_platform/ai_platform.ipynb
@@ -31,7 +31,7 @@
"%%capture\n",
"\n",
"# Install the SDK (Uncomment the code if the SDK is not installed before)\n",
- "!python3 -m pip install kfp --upgrade -q\n",
+ "!python3 -m pip install 'kfp>=0.1.31' --quiet\n",
"!python3 -m pip install pandas --upgrade -q"
]
},
@@ -79,8 +79,7 @@
"source": [
"# Required Parameters\n",
"project_id = ''\n",
- "output = 'gs://' # No ending slash\n",
- "experiment_name = 'Chicago Crime Prediction'"
+ "output = 'gs://' # No ending slash\n"
]
},
{
@@ -280,7 +279,7 @@
"metadata": {},
"outputs": [],
"source": [
- "pipeline = kfp.Client().create_run_from_pipeline_func(pipeline, arguments={}, experiment_name=experiment_name)"
+ "pipeline = kfp.Client().create_run_from_pipeline_func(pipeline, arguments={})"
]
},
{
diff --git a/samples/core/component_build/component_build.ipynb b/samples/core/component_build/component_build.ipynb
index 297945fce6d..221d5659b11 100644
--- a/samples/core/component_build/component_build.ipynb
+++ b/samples/core/component_build/component_build.ipynb
@@ -30,7 +30,7 @@
"outputs": [],
"source": [
"# Install Pipeline SDK - This only needs to be ran once in the enviroment. \n",
- "!pip3 install kfp --upgrade --quiet"
+ "!python3 -m pip install 'kfp>=0.1.31' --quiet\n"
]
},
{
@@ -65,7 +65,6 @@
},
"outputs": [],
"source": [
- "experiment_name = 'container_building'"
]
},
{
@@ -202,7 +201,7 @@
"outputs": [],
"source": [
"arguments = {'a': '7', 'b': '8'}\n",
- "kfp.Client().create_run_from_pipeline_func(pipeline_func=calc_pipeline, arguments=arguments, experiment_name=experiment_name)\n",
+ "kfp.Client().create_run_from_pipeline_func(pipeline_func=calc_pipeline, arguments=arguments)\n",
"\n",
"# This should output link that leads to the run information page. \n",
"# Note: There is a bug in JupyterLab that modifies the URL and makes the link stop working"
diff --git a/samples/core/dataflow/dataflow.ipynb b/samples/core/dataflow/dataflow.ipynb
index 241ccbf0dff..b17787bedbf 100644
--- a/samples/core/dataflow/dataflow.ipynb
+++ b/samples/core/dataflow/dataflow.ipynb
@@ -74,8 +74,7 @@
"outputs": [],
"source": [
"project = 'Input your PROJECT ID'\n",
- "output = 'Input your GCS bucket name' # No ending slash\n",
- "experiment_name = 'Dataflow - Launch Python'"
+ "output = 'Input your GCS bucket name' # No ending slash\n"
]
},
{
@@ -95,8 +94,7 @@
"metadata": {},
"outputs": [],
"source": [
- "%%capture --no-stderr\n",
- "!pip3 install kfp --upgrade"
+ "!python3 -m pip install 'kfp>=0.1.31' --quiet\n"
]
},
{
@@ -368,7 +366,7 @@
}
],
"source": [
- "kfp.Client().create_run_from_pipeline_func(pipeline, arguments={}, experiment_name=experiment_name)"
+ "kfp.Client().create_run_from_pipeline_func(pipeline, arguments={})"
]
},
{
diff --git a/samples/core/dsl_static_type_checking/dsl_static_type_checking.ipynb b/samples/core/dsl_static_type_checking/dsl_static_type_checking.ipynb
index 64ef3b6bb9c..32cc93a4bd0 100644
--- a/samples/core/dsl_static_type_checking/dsl_static_type_checking.ipynb
+++ b/samples/core/dsl_static_type_checking/dsl_static_type_checking.ipynb
@@ -162,7 +162,7 @@
}
],
"source": [
- "!pip3 install kfp --upgrade"
+ "!python3 -m pip install 'kfp>=0.1.31' --quiet\n"
]
},
{
diff --git a/samples/core/kubeflow_tf_serving/kubeflow_tf_serving.ipynb b/samples/core/kubeflow_tf_serving/kubeflow_tf_serving.ipynb
index d830a6507b8..96c86047ca5 100644
--- a/samples/core/kubeflow_tf_serving/kubeflow_tf_serving.ipynb
+++ b/samples/core/kubeflow_tf_serving/kubeflow_tf_serving.ipynb
@@ -132,7 +132,7 @@
],
"source": [
"# Install Pipeline SDK - This only needs to be ran once in the enviroment. \n",
- "!pip3 install kfp --upgrade\n",
+ "!python3 -m pip install 'kfp>=0.1.31' --quiet\n",
"!pip3 install tensorflow==1.14 --upgrade"
]
},
@@ -172,7 +172,6 @@
"# Set your output and project. !!!Must Do before you can proceed!!!\n",
"project = 'Your-Gcp-Project-ID' #'Your-GCP-Project-ID'\n",
"model_name = 'model-name' # Model name matching TF_serve naming requirements \n",
- "experiment_name = 'serving_component'\n",
"import time\n",
"ts = int(time.time())\n",
"model_version = str(ts) # Here we use timestamp as version to avoid conflict \n",
@@ -323,7 +322,7 @@
}
],
"source": [
- "kfp.Client().create_run_from_pipeline_func(model_server, arguments={}, experiment_name=experiment_name)\n",
+ "kfp.Client().create_run_from_pipeline_func(model_server, arguments={})\n",
"\n",
"#vvvvvvvvv This link leads to the run information page. (Note: There is a bug in JupyterLab that modifies the URL and makes the link stop working)"
]
diff --git a/samples/core/lightweight_component/lightweight_component.ipynb b/samples/core/lightweight_component/lightweight_component.ipynb
index b0465f152f9..8292839bec5 100644
--- a/samples/core/lightweight_component/lightweight_component.ipynb
+++ b/samples/core/lightweight_component/lightweight_component.ipynb
@@ -29,7 +29,6 @@
},
"outputs": [],
"source": [
- "experiment_name = 'lightweight python components'"
]
},
{
@@ -39,7 +38,7 @@
"outputs": [],
"source": [
"# Install the SDK\n",
- "!pip3 install kfp --upgrade"
+ "#!pip3 install 'kfp>=0.1.31.2' --quiet"
]
},
{
@@ -243,7 +242,7 @@
"arguments = {'a': '7', 'b': '8'}\n",
"\n",
"#Submit a pipeline run\n",
- "kfp.Client().create_run_from_pipeline_func(calc_pipeline, arguments=arguments, experiment_name=experiment_name)\n",
+ "kfp.Client().create_run_from_pipeline_func(calc_pipeline, arguments=arguments)\n",
"\n",
"#vvvvvvvvv This link leads to the run information page. (Note: There is a bug in JupyterLab that modifies the URL and makes the link stop working)"
]
diff --git a/samples/core/multiple_outputs/multiple_outputs.ipynb b/samples/core/multiple_outputs/multiple_outputs.ipynb
index 61a7dd0b522..db5ee0cc1f8 100644
--- a/samples/core/multiple_outputs/multiple_outputs.ipynb
+++ b/samples/core/multiple_outputs/multiple_outputs.ipynb
@@ -30,7 +30,7 @@
},
"outputs": [],
"source": [
- "!pip install kfp --upgrade"
+ "!python3 -m pip install 'kfp>=0.1.31' --quiet\n"
]
},
{
@@ -51,8 +51,7 @@
"outputs": [],
"source": [
"output = 'gs://[BUCKET-NAME]' # GCS bucket name\n",
- "project_id = '[PROJECT-NAME]' # GCP project name\n",
- "experiment_name = 'Multiple Outputs Sample'"
+ "project_id = '[PROJECT-NAME]' # GCP project name\n"
]
},
{
@@ -161,7 +160,7 @@
" 'b': 2.5,\n",
" 'c': 3.0,\n",
"}\n",
- "run_result = kfp.Client().create_run_from_pipeline_func(pipeline, arguments=arguments, experiment_name=experiment_name)"
+ "run_result = kfp.Client().create_run_from_pipeline_func(pipeline, arguments=arguments)"
]
}
],
diff --git a/samples/core/tfx-oss/TFX Example.ipynb b/samples/core/tfx-oss/TFX Example.ipynb
index de50af35cad..770d35f56f0 100644
--- a/samples/core/tfx-oss/TFX Example.ipynb
+++ b/samples/core/tfx-oss/TFX Example.ipynb
@@ -18,7 +18,7 @@
"outputs": [],
"source": [
"!pip3 install tfx==0.13.0 --upgrade\n",
- "!pip3 install kfp --upgrade"
+ "!python3 -m pip install 'kfp>=0.1.31' --quiet\n"
]
},
{
diff --git a/sdk/python/kfp/_client.py b/sdk/python/kfp/_client.py
index 2bf095d2217..e1eda00e1dd 100644
--- a/sdk/python/kfp/_client.py
+++ b/sdk/python/kfp/_client.py
@@ -64,6 +64,8 @@ def camel_case_to_snake_case(name):
KF_PIPELINES_ENDPOINT_ENV = 'KF_PIPELINES_ENDPOINT'
KF_PIPELINES_UI_ENDPOINT_ENV = 'KF_PIPELINES_UI_ENDPOINT'
+KF_PIPELINES_DEFAULT_EXPERIMENT_NAME = 'KF_PIPELINES_DEFAULT_EXPERIMENT_NAME'
+KF_PIPELINES_OVERRIDE_EXPERIMENT_NAME = 'KF_PIPELINES_OVERRIDE_EXPERIMENT_NAME'
class Client(object):
""" API Client for KubeFlow Pipeline.
@@ -365,7 +367,12 @@ def __str__(self):
#TODO: Check arguments against the pipeline function
pipeline_name = os.path.basename(pipeline_file)
- experiment_name = experiment_name or 'Default'
+ experiment_name = experiment_name or os.environ.get(KF_PIPELINES_DEFAULT_EXPERIMENT_NAME, None)
+ overridden_experiment_name = os.environ.get(KF_PIPELINES_OVERRIDE_EXPERIMENT_NAME, experiment_name)
+ if overridden_experiment_name != experiment_name:
+ import warnings
+ warnings.warn('Changing experiment name from "{}" to "{}".'.format(experiment_name, overridden_experiment_name))
+ experiment_name = overridden_experiment_name or 'Default'
run_name = run_name or pipeline_name + ' ' + datetime.now().strftime('%Y-%m-%d %H-%M-%S')
experiment = self.create_experiment(name=experiment_name)
run_info = self.run_pipeline(experiment.id, run_name, pipeline_file, arguments)
diff --git a/test/sample-test/check_notebook_results.py b/test/sample-test/check_notebook_results.py
index 90420971050..2d3dc3db3e0 100644
--- a/test/sample-test/check_notebook_results.py
+++ b/test/sample-test/check_notebook_results.py
@@ -21,26 +21,27 @@
class NoteBookChecker(object):
- def __init__(self, testname, result, run_pipeline, namespace='kubeflow'):
+ def __init__(self, testname, result, run_pipeline, experiment_name, namespace='kubeflow'):
""" Util class for checking notebook sample test running results.
:param testname: test name in the json xml.
:param result: name of the file that stores the test result
:param run_pipeline: whether to submit for a pipeline run.
:param namespace: where the pipeline system is deployed.
+        :param experiment_name: Name of the experiment to monitor.
"""
self._testname = testname
self._result = result
self._exit_code = None
self._run_pipeline = run_pipeline
self._namespace = namespace
+ self._experiment_name = experiment_name
def run(self):
""" Run the notebook sample as a python script. """
self._exit_code = str(
subprocess.call(['ipython', '%s.py' % self._testname]))
-
def check(self):
""" Check the pipeline running results of the notebook sample. """
test_cases = []
@@ -63,7 +64,7 @@ def check(self):
test_timeout = raw_args['test_timeout']
if self._run_pipeline:
- experiment = self._testname + '-test'
+ experiment = self._experiment_name
###### Initialization ######
host = 'ml-pipeline.%s.svc.cluster.local:8888' % self._namespace
client = Client(host=host)
diff --git a/test/sample-test/configs/ai_platform.config.yaml b/test/sample-test/configs/ai_platform.config.yaml
index 13be31e190a..6f1ee86b466 100644
--- a/test/sample-test/configs/ai_platform.config.yaml
+++ b/test/sample-test/configs/ai_platform.config.yaml
@@ -16,4 +16,3 @@ test_name: ai_platform
notebook_params:
output:
project_id: ml-pipeline-test
- experiment_name: ai_platform-test
diff --git a/test/sample-test/configs/component_build.config.yaml b/test/sample-test/configs/component_build.config.yaml
index b74b08ac6ab..39f95d6582b 100644
--- a/test/sample-test/configs/component_build.config.yaml
+++ b/test/sample-test/configs/component_build.config.yaml
@@ -14,5 +14,4 @@
test_name: component_build
notebook_params:
- experiment_name: component_build-test
PROJECT_NAME: ml-pipeline-test
diff --git a/test/sample-test/configs/dataflow.config.yaml b/test/sample-test/configs/dataflow.config.yaml
index 28a853bf9fd..ddcca910761 100644
--- a/test/sample-test/configs/dataflow.config.yaml
+++ b/test/sample-test/configs/dataflow.config.yaml
@@ -16,5 +16,4 @@ test_name: dataflow
notebook_params:
output:
project: ml-pipeline-test
- experiment_name: dataflow-test
run_pipeline: False
\ No newline at end of file
diff --git a/test/sample-test/configs/kubeflow_tf_serving.config.yaml b/test/sample-test/configs/kubeflow_tf_serving.config.yaml
index c2fc5d3859e..0692aa9f0a0 100644
--- a/test/sample-test/configs/kubeflow_tf_serving.config.yaml
+++ b/test/sample-test/configs/kubeflow_tf_serving.config.yaml
@@ -16,4 +16,3 @@ test_name: kubeflow_tf_serving
notebook_params:
output:
project: ml-pipeline-test
- experiment_name: kubeflow_tf_serving-test
\ No newline at end of file
diff --git a/test/sample-test/configs/multiple_outputs.config.yaml b/test/sample-test/configs/multiple_outputs.config.yaml
index 00f400f68ec..3b55b6b470a 100644
--- a/test/sample-test/configs/multiple_outputs.config.yaml
+++ b/test/sample-test/configs/multiple_outputs.config.yaml
@@ -16,4 +16,3 @@ test_name: multiple_outputs
notebook_params:
output:
project_id: ml-pipeline-test
- experiment_name: multiple_outputs-test
\ No newline at end of file
diff --git a/test/sample-test/run_sample_test.py b/test/sample-test/run_sample_test.py
index 5eed429add6..28e3f0cb8e6 100644
--- a/test/sample-test/run_sample_test.py
+++ b/test/sample-test/run_sample_test.py
@@ -25,7 +25,7 @@
class PySampleChecker(object):
- def __init__(self, testname, input, output, result, namespace='kubeflow'):
+ def __init__(self, testname, input, output, result, experiment_name, namespace='kubeflow'):
"""Util class for checking python sample test running results.
:param testname: test name.
@@ -33,8 +33,10 @@ def __init__(self, testname, input, output, result, namespace='kubeflow'):
:param output: The path of the test output.
:param result: The path of the test result that will be exported.
:param namespace: namespace of the deployed pipeline system. Default: kubeflow
+    :param experiment_name: Name of the experiment to monitor.
"""
self._testname = testname
+ self._experiment_name = experiment_name
self._input = input
self._output = output
self._result = result
@@ -68,8 +70,7 @@ def run(self):
exit(1)
###### Create Experiment ######
- experiment_name = self._testname + ' sample experiment'
- response = self._client.create_experiment(experiment_name)
+ response = self._client.create_experiment(self._experiment_name)
self._experiment_id = response.id
utils.add_junit_test(self._test_cases, 'create experiment', True)
diff --git a/test/sample-test/sample_test_launcher.py b/test/sample-test/sample_test_launcher.py
index c8b5bb8d0b5..06f20eea340 100644
--- a/test/sample-test/sample_test_launcher.py
+++ b/test/sample-test/sample_test_launcher.py
@@ -128,9 +128,6 @@ def _compile(self):
if 'run_pipeline' in raw_args.keys():
self._run_pipeline = raw_args['run_pipeline']
- if self._run_pipeline:
- nb_params['experiment_name'] = self._test_name + '-test'
-
pm.execute_notebook(
input_path='%s.ipynb' % self._test_name,
output_path='%s.ipynb' % self._test_name,
@@ -158,10 +155,16 @@ def run_test(self):
self._compile()
self._injection()
+ # Overriding the experiment name of pipeline runs
+ experiment_name = self._test_name + '-test'
+ os.environ['KF_PIPELINES_OVERRIDE_EXPERIMENT_NAME'] = experiment_name
+
if self._is_notebook:
nbchecker = NoteBookChecker(testname=self._test_name,
result=self._sample_test_result,
- run_pipeline=self._run_pipeline)
+ run_pipeline=self._run_pipeline,
+ experiment_name=experiment_name,
+ )
nbchecker.run()
os.chdir(TEST_DIR)
nbchecker.check()
@@ -176,7 +179,9 @@ def run_test(self):
input=input_file,
output=self._sample_test_output,
result=self._sample_test_result,
- namespace=self._namespace)
+ namespace=self._namespace,
+ experiment_name=experiment_name,
+ )
pysample_checker.run()
pysample_checker.check()